| author | unknown <joreland@mysql.com> | 2005-04-24 20:37:31 +0200 |
|---|---|---|
| committer | unknown <joreland@mysql.com> | 2005-04-24 20:37:31 +0200 |
| commit | 3087468f9f071cdd36fde55abd923e7b7cfb85e2 (patch) | |
| tree | a0d6d6f47d67c64d53e4ed53d65b256370e92067 /ndb | |
| parent | ad8e2dcf54f0921da68788290e6f56bd1de0106d (diff) | |
| parent | 90df3f7ff7827f8b462a174d16b4dd588ab0eb94 (diff) | |
| download | mariadb-git-3087468f9f071cdd36fde55abd923e7b7cfb85e2.tar.gz | |
Merge joreland@bk-internal.mysql.com:/home/bk/mysql-5.0
into mysql.com:/home/jonas/src/mysql-5.0
ndb/src/ndbapi/ndberror.c:
Auto merged
Diffstat (limited to 'ndb')
| -rw-r--r-- | ndb/src/kernel/blocks/ERROR_codes.txt | 3 |
| -rw-r--r-- | ndb/src/kernel/blocks/dbtup/Dbtup.hpp | 8 |
| -rw-r--r-- | ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp | 80 |
| -rw-r--r-- | ndb/src/kernel/blocks/dbtup/Notes.txt | 25 |
| -rw-r--r-- | ndb/src/kernel/blocks/dbtux/DbtuxNode.cpp | 11 |
| -rw-r--r-- | ndb/src/ndbapi/ndberror.c | 6 |
| -rw-r--r-- | ndb/test/ndbapi/testOIBasic.cpp | 180 |
7 files changed, 215 insertions, 98 deletions
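
The central DBTUP change in the diff below makes multi-index maintenance behave atomically: ordered-index entries are added one index at a time, and if one index fails (for example with a no-memory error), the entries already added to the preceding indexes are removed again before the error is returned. The following is a minimal standalone sketch of that apply-then-undo pattern, using hypothetical types and names rather than the actual NDB kernel interfaces (the real code drives DBTUX via EXECUTE_DIRECT with GSN_TUX_MAINT_REQ):

```cpp
#include <cstddef>
#include <vector>

// Hypothetical stand-ins for ordered-index maintenance hooks.
struct Index {
  bool (*add)(std::size_t tupleId);     // may fail, e.g. out of memory
  void (*remove)(std::size_t tupleId);  // expected to always succeed
};

// Add a tuple to every index; on failure, undo the indexes that already
// succeeded so the caller sees an all-or-nothing result.
bool addToAllIndexes(const std::vector<Index>& indexes, std::size_t tupleId)
{
  for (std::size_t done = 0; done < indexes.size(); ++done) {
    if (!indexes[done].add(tupleId)) {
      for (std::size_t i = 0; i < done; ++i)
        indexes[i].remove(tupleId);     // roll back the partial update
      return false;                     // the operation is aborted later by TC
    }
  }
  return true;
}
```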
diff --git a/ndb/src/kernel/blocks/ERROR_codes.txt b/ndb/src/kernel/blocks/ERROR_codes.txt
index a30021607cc..fedddb58c0d 100644
--- a/ndb/src/kernel/blocks/ERROR_codes.txt
+++ b/ndb/src/kernel/blocks/ERROR_codes.txt
@@ -10,7 +10,7 @@ Next DBTC 8035
 Next CMVMI 9000
 Next BACKUP 10022
 Next DBUTIL 11002
-Next DBTUX 12007
+Next DBTUX 12008
 Next SUMA 13001
 
 TESTING NODE FAILURE, ARBITRATION
@@ -443,6 +443,7 @@ Test routing of signals:
 Ordered index:
 --------------
 
+12007: Make next alloc node fail with no memory error
 
 Dbdict:
 -------
diff --git a/ndb/src/kernel/blocks/dbtup/Dbtup.hpp b/ndb/src/kernel/blocks/dbtup/Dbtup.hpp
index 6d169d20d16..a0103f56add 100644
--- a/ndb/src/kernel/blocks/dbtup/Dbtup.hpp
+++ b/ndb/src/kernel/blocks/dbtup/Dbtup.hpp
@@ -1779,6 +1779,10 @@ private:
                               Operationrec* const regOperPtr,
                               Tablerec* const regTabPtr);
 
+  int addTuxEntries(Signal* signal,
+                    Operationrec* regOperPtr,
+                    Tablerec* regTabPtr);
+
   // these crash the node on error
 
   void executeTuxCommitTriggers(Signal* signal,
@@ -1789,6 +1793,10 @@
                                Operationrec* regOperPtr,
                                Tablerec* const regTabPtr);
 
+  void removeTuxEntries(Signal* signal,
+                        Operationrec* regOperPtr,
+                        Tablerec* regTabPtr);
+
   // *****************************************************************
   // Error Handling routines.
   // *****************************************************************
diff --git a/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp b/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp
index 476a4b5724b..2b65a8402c2 100644
--- a/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp
+++ b/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp
@@ -973,25 +973,7 @@ Dbtup::executeTuxInsertTriggers(Signal* signal,
   req->pageOffset = regOperPtr->pageOffset;
   req->tupVersion = tupVersion;
   req->opInfo = TuxMaintReq::OpAdd;
-  // loop over index list
-  const ArrayList<TupTriggerData>& triggerList = regTabPtr->tuxCustomTriggers;
-  TriggerPtr triggerPtr;
-  triggerList.first(triggerPtr);
-  while (triggerPtr.i != RNIL) {
-    ljam();
-    req->indexId = triggerPtr.p->indexId;
-    req->errorCode = RNIL;
-    EXECUTE_DIRECT(DBTUX, GSN_TUX_MAINT_REQ,
-        signal, TuxMaintReq::SignalLength);
-    ljamEntry();
-    if (req->errorCode != 0) {
-      ljam();
-      terrorCode = req->errorCode;
-      return -1;
-    }
-    triggerList.next(triggerPtr);
-  }
-  return 0;
+  return addTuxEntries(signal, regOperPtr, regTabPtr);
 }
 
 int
@@ -1012,9 +994,18 @@ Dbtup::executeTuxUpdateTriggers(Signal* signal,
   req->pageOffset = regOperPtr->pageOffset;
   req->tupVersion = tupVersion;
   req->opInfo = TuxMaintReq::OpAdd;
-  // loop over index list
+  return addTuxEntries(signal, regOperPtr, regTabPtr);
+}
+
+int
+Dbtup::addTuxEntries(Signal* signal,
+                     Operationrec* regOperPtr,
+                     Tablerec* regTabPtr)
+{
+  TuxMaintReq* const req = (TuxMaintReq*)signal->getDataPtrSend();
   const ArrayList<TupTriggerData>& triggerList = regTabPtr->tuxCustomTriggers;
   TriggerPtr triggerPtr;
+  Uint32 failPtrI;
   triggerList.first(triggerPtr);
   while (triggerPtr.i != RNIL) {
     ljam();
@@ -1026,11 +1017,29 @@
     if (req->errorCode != 0) {
       ljam();
       terrorCode = req->errorCode;
-      return -1;
+      failPtrI = triggerPtr.i;
+      goto fail;
     }
     triggerList.next(triggerPtr);
   }
   return 0;
+fail:
+  req->opInfo = TuxMaintReq::OpRemove;
+  triggerList.first(triggerPtr);
+  while (triggerPtr.i != failPtrI) {
+    ljam();
+    req->indexId = triggerPtr.p->indexId;
+    req->errorCode = RNIL;
+    EXECUTE_DIRECT(DBTUX, GSN_TUX_MAINT_REQ,
+        signal, TuxMaintReq::SignalLength);
+    ljamEntry();
+    ndbrequire(req->errorCode == 0);
+    triggerList.next(triggerPtr);
+  }
+#ifdef VM_TRACE
+  ndbout << "aborted partial tux update: op " << hex << regOperPtr << endl;
+#endif
+  return -1;
 }
 
 int
@@ -1049,7 +1058,6 @@
 {
   TuxMaintReq* const req = (TuxMaintReq*)signal->getDataPtrSend();
   // get version
-  // XXX could add prevTupVersion to Operationrec
   Uint32 tupVersion;
   if (regOperPtr->optype == ZINSERT) {
     if (! regOperPtr->deleteInsertFlag)
@@ -1087,21 +1095,7 @@
   req->pageOffset = regOperPtr->pageOffset;
   req->tupVersion = tupVersion;
   req->opInfo = TuxMaintReq::OpRemove;
-  // loop over index list
-  const ArrayList<TupTriggerData>& triggerList = regTabPtr->tuxCustomTriggers;
-  TriggerPtr triggerPtr;
-  triggerList.first(triggerPtr);
-  while (triggerPtr.i != RNIL) {
-    ljam();
-    req->indexId = triggerPtr.p->indexId;
-    req->errorCode = RNIL;
-    EXECUTE_DIRECT(DBTUX, GSN_TUX_MAINT_REQ,
-        signal, TuxMaintReq::SignalLength);
-    ljamEntry();
-    // commit must succeed
-    ndbrequire(req->errorCode == 0);
-    triggerList.next(triggerPtr);
-  }
+  removeTuxEntries(signal, regOperPtr, regTabPtr);
 }
 
 void
@@ -1132,7 +1126,15 @@
   req->pageOffset = regOperPtr->pageOffset;
   req->tupVersion = tupVersion;
   req->opInfo = TuxMaintReq::OpRemove;
-  // loop over index list
+  removeTuxEntries(signal, regOperPtr, regTabPtr);
+}
+
+void
+Dbtup::removeTuxEntries(Signal* signal,
+                        Operationrec* regOperPtr,
+                        Tablerec* regTabPtr)
+{
+  TuxMaintReq* const req = (TuxMaintReq*)signal->getDataPtrSend();
   const ArrayList<TupTriggerData>& triggerList = regTabPtr->tuxCustomTriggers;
   TriggerPtr triggerPtr;
   triggerList.first(triggerPtr);
@@ -1143,7 +1145,7 @@
     EXECUTE_DIRECT(DBTUX, GSN_TUX_MAINT_REQ,
         signal, TuxMaintReq::SignalLength);
     ljamEntry();
-    // abort must succeed
+    // must succeed
     ndbrequire(req->errorCode == 0);
     triggerList.next(triggerPtr);
   }
diff --git a/ndb/src/kernel/blocks/dbtup/Notes.txt b/ndb/src/kernel/blocks/dbtup/Notes.txt
index 9d47c591fe8..c2973bb0a76 100644
--- a/ndb/src/kernel/blocks/dbtup/Notes.txt
+++ b/ndb/src/kernel/blocks/dbtup/Notes.txt
@@ -135,6 +135,24 @@ abort DELETE none -
 
 1) alternatively, store prevTupVersion in operation record.
 
+Abort from ordered index error
+------------------------------
+
+Obviously, index update failure causes operation failure.
+The operation is then aborted later by TC.
+
+The problem here is with multiple indexes. Some may have been
+updated successfully before the one that failed. Therefore
+the trigger code aborts the successful ones already in
+the prepare phase.
+
+In other words, multiple indexes are treated as one.
+
+Abort from any cause
+--------------------
+
+[ hairy stuff ]
+
 Read attributes, query status
 -----------------------------
@@ -170,14 +188,11 @@ used to decide if the scan can see the tuple.
 This signal may also be called during any phase since
 commit/abort of all operations is not done in one time-slice.
 
-Commit and abort
-----------------
-
-[ hairy stuff ]
-
 Problems
 --------
 
 Current abort code can destroy a tuple version too early. This
 happens in test case "ticuur" (insert-commit-update-update-rollback),
 if abort of first update arrives before abort of second update.
+
+vim: set textwidth=68:
diff --git a/ndb/src/kernel/blocks/dbtux/DbtuxNode.cpp b/ndb/src/kernel/blocks/dbtux/DbtuxNode.cpp
index 855a8ed1c29..68a3e78ce9e 100644
--- a/ndb/src/kernel/blocks/dbtux/DbtuxNode.cpp
+++ b/ndb/src/kernel/blocks/dbtux/DbtuxNode.cpp
@@ -23,6 +23,11 @@
 int
 Dbtux::allocNode(Signal* signal, NodeHandle& node)
 {
+  if (ERROR_INSERTED(12007)) {
+    jam();
+    CLEAR_ERROR_INSERT_VALUE;
+    return TuxMaintReq::NoMemError;
+  }
   Frag& frag = node.m_frag;
   Uint32 pageId = NullTupLoc.getPageId();
   Uint32 pageOffset = NullTupLoc.getPageOffset();
@@ -34,6 +39,12 @@ Dbtux::allocNode(Signal* signal, NodeHandle& node)
     node.m_loc = TupLoc(pageId, pageOffset);
     node.m_node = reinterpret_cast<TreeNode*>(node32);
     ndbrequire(node.m_loc != NullTupLoc && node.m_node != 0);
+  } else {
+    switch (errorCode) {
+    case 827:
+      errorCode = TuxMaintReq::NoMemError;
+      break;
+    }
   }
   return errorCode;
 }
diff --git a/ndb/src/ndbapi/ndberror.c b/ndb/src/ndbapi/ndberror.c
index 3ec268e38c5..6bbd38c9bbb 100644
--- a/ndb/src/ndbapi/ndberror.c
+++ b/ndb/src/ndbapi/ndberror.c
@@ -179,11 +179,11 @@ ErrorBundle ErrorCodes[] = {
    */
   { 623, IS, "623" },
   { 624, IS, "624" },
-  { 625, IS, "Out of memory in Ndb Kernel, index part (increase IndexMemory)" },
+  { 625, IS, "Out of memory in Ndb Kernel, hash index part (increase IndexMemory)" },
   { 640, IS, "Too many hash indexes (should not happen)" },
   { 826, IS, "Too many tables and attributes (increase MaxNoOfAttributes or MaxNoOfTables)" },
-  { 827, IS, "Out of memory in Ndb Kernel, data part (increase DataMemory)" },
-  { 902, IS, "Out of memory in Ndb Kernel, data part (increase DataMemory)" },
+  { 827, IS, "Out of memory in Ndb Kernel, table data (increase DataMemory)" },
+  { 902, IS, "Out of memory in Ndb Kernel, ordered index data (increase DataMemory)" },
   { 903, IS, "Too many ordered indexes (increase MaxNoOfOrderedIndexes)" },
   { 904, IS, "Out of fragment records (increase MaxNoOfOrderedIndexes)" },
   { 905, IS, "Out of attribute records (increase MaxNoOfAttributes)" },
diff --git a/ndb/test/ndbapi/testOIBasic.cpp b/ndb/test/ndbapi/testOIBasic.cpp
index 9df983ebe99..c7c9f417d1a 100644
--- a/ndb/test/ndbapi/testOIBasic.cpp
+++ b/ndb/test/ndbapi/testOIBasic.cpp
@@ -164,6 +164,16 @@ irandom(unsigned n)
   return i;
 }
 
+static bool
+randompct(unsigned pct)
+{
+  if (pct == 0)
+    return false;
+  if (pct >= 100)
+    return true;
+  return urandom(100) < pct;
+}
+
 // log and error macros
 
 static NdbMutex *ndbout_mutex = NULL;
@@ -259,6 +269,8 @@ struct Par : public Opt {
   bool m_verify;
   // deadlock possible
   bool m_deadlock;
+  // abort percentabge
+  unsigned m_abortpct;
   NdbOperation::LockMode m_lockmode;
   // ordered range scan
   bool m_ordered;
@@ -281,6 +293,7 @@
     m_randomkey(false),
     m_verify(false),
     m_deadlock(false),
+    m_abortpct(0),
     m_lockmode(NdbOperation::LM_Read),
     m_ordered(false),
     m_descending(false) {
@@ -1143,7 +1156,7 @@ struct Con {
   NdbScanFilter* m_scanfilter;
   enum ScanMode { ScanNo = 0, Committed, Latest, Exclusive };
   ScanMode m_scanmode;
-  enum ErrType { ErrNone = 0, ErrDeadlock, ErrOther };
+  enum ErrType { ErrNone = 0, ErrDeadlock, ErrNospace, ErrOther };
   ErrType m_errtype;
   Con() :
     m_ndb(0), m_dic(0), m_tx(0), m_op(0), m_indexop(0),
@@ -1172,7 +1185,7 @@
   int endFilter();
   int setFilter(int num, int cond, const void* value, unsigned len);
   int execute(ExecType t);
-  int execute(ExecType t, bool& deadlock);
+  int execute(ExecType t, bool& deadlock, bool& nospace);
   int readTuples(Par par);
   int readIndexTuples(Par par);
   int executeScan();
@@ -1354,17 +1367,21 @@ Con::execute(ExecType t)
 }
 
 int
-Con::execute(ExecType t, bool& deadlock)
+Con::execute(ExecType t, bool& deadlock, bool& nospace)
 {
   int ret = execute(t);
-  if (ret != 0) {
-    if (deadlock && m_errtype == ErrDeadlock) {
-      LL3("caught deadlock");
-      ret = 0;
-    }
+  if (ret != 0 && deadlock && m_errtype == ErrDeadlock) {
+    LL3("caught deadlock");
+    ret = 0;
   } else {
     deadlock = false;
   }
+  if (ret != 0 && nospace && m_errtype == ErrNospace) {
+    LL3("caught nospace");
+    ret = 0;
+  } else {
+    nospace = false;
+  }
   CHK(ret == 0);
   return 0;
 }
@@ -1475,6 +1492,8 @@ Con::printerror(NdbOut& out)
     // 631 is new, occurs only on 4 db nodes, needs to be checked out
     if (code == 266 || code == 274 || code == 296 || code == 297 || code == 499 || code == 631)
       m_errtype = ErrDeadlock;
+    if (code == 826 || code == 827 || code == 902)
+      m_errtype = ErrNospace;
   }
   if (m_op && m_op->getNdbError().code != 0) {
     LL0(++any << " op : error " << m_op->getNdbError());
@@ -2480,8 +2499,8 @@ struct Set {
   void dbsave(unsigned i);
   void calc(Par par, unsigned i, unsigned mask = 0);
   bool pending(unsigned i, unsigned mask) const;
-  void notpending(unsigned i);
-  void notpending(const Lst& lst);
+  void notpending(unsigned i, ExecType et = Commit);
+  void notpending(const Lst& lst, ExecType et = Commit);
   void dbdiscard(unsigned i);
   void dbdiscard(const Lst& lst);
   const Row& dbrow(unsigned i) const;
@@ -2620,26 +2639,30 @@ Set::pending(unsigned i, unsigned mask) const
 }
 
 void
-Set::notpending(unsigned i)
+Set::notpending(unsigned i, ExecType et)
 {
   assert(m_row[i] != 0);
   Row& row = *m_row[i];
-  if (row.m_pending == Row::InsOp) {
-    row.m_exist = true;
-  } else if (row.m_pending == Row::UpdOp) {
-    ;
-  } else if (row.m_pending == Row::DelOp) {
-    row.m_exist = false;
+  if (et == Commit) {
+    if (row.m_pending == Row::InsOp)
+      row.m_exist = true;
+    if (row.m_pending == Row::DelOp)
+      row.m_exist = false;
+  } else {
+    if (row.m_pending == Row::InsOp)
+      row.m_exist = false;
+    if (row.m_pending == Row::DelOp)
+      row.m_exist = true;
   }
   row.m_pending = Row::NoOp;
 }
 
 void
-Set::notpending(const Lst& lst)
+Set::notpending(const Lst& lst, ExecType et)
 {
   for (unsigned j = 0; j < lst.m_cnt; j++) {
     unsigned i = lst.m_arr[j];
-    notpending(i);
+    notpending(i, et);
   }
 }
@@ -2831,8 +2854,6 @@ Set::putval(unsigned i, bool force, unsigned n)
   return 0;
 }
 
-// verify
-
 int
 Set::verify(Par par, const Set& set2) const
 {
@@ -3213,14 +3234,20 @@ pkinsert(Par par)
     lst.push(i);
     if (lst.cnt() == par.m_batch) {
       bool deadlock = par.m_deadlock;
-      CHK(con.execute(Commit, deadlock) == 0);
+      bool nospace = true;
+      ExecType et = randompct(par.m_abortpct) ? Rollback : Commit;
+      CHK(con.execute(et, deadlock, nospace) == 0);
       con.closeTransaction();
       if (deadlock) {
         LL1("pkinsert: stop on deadlock [at 1]");
         return 0;
       }
+      if (nospace) {
+        LL1("pkinsert: cnt=" << j << " stop on nospace");
+        return 0;
+      }
       set.lock();
-      set.notpending(lst);
+      set.notpending(lst, et);
       set.unlock();
       lst.reset();
       CHK(con.startTransaction() == 0);
@@ -3228,14 +3255,20 @@
   }
   if (lst.cnt() != 0) {
     bool deadlock = par.m_deadlock;
-    CHK(con.execute(Commit, deadlock) == 0);
+    bool nospace = true;
+    ExecType et = randompct(par.m_abortpct) ? Rollback : Commit;
+    CHK(con.execute(et, deadlock, nospace) == 0);
     con.closeTransaction();
    if (deadlock) {
       LL1("pkinsert: stop on deadlock [at 2]");
       return 0;
     }
+    if (nospace) {
+      LL1("pkinsert: end: stop on nospace");
+      return 0;
+    }
     set.lock();
-    set.notpending(lst);
+    set.notpending(lst, et);
     set.unlock();
     return 0;
   }
@@ -3253,6 +3286,7 @@ pkupdate(Par par)
   CHK(con.startTransaction() == 0);
   Lst lst;
   bool deadlock = false;
+  bool nospace = false;
   for (unsigned j = 0; j < par.m_rows; j++) {
     unsigned j2 = ! par.m_randomkey ? j : urandom(par.m_rows);
     unsigned i = thrrow(par, j2);
@@ -3269,28 +3303,38 @@
     lst.push(i);
     if (lst.cnt() == par.m_batch) {
       deadlock = par.m_deadlock;
-      CHK(con.execute(Commit, deadlock) == 0);
+      nospace = true;
+      ExecType et = randompct(par.m_abortpct) ? Rollback : Commit;
+      CHK(con.execute(et, deadlock, nospace) == 0);
      if (deadlock) {
        LL1("pkupdate: stop on deadlock [at 1]");
         break;
       }
+      if (nospace) {
+        LL1("pkupdate: cnt=" << j << " stop on nospace [at 1]");
+        break;
+      }
       con.closeTransaction();
       set.lock();
-      set.notpending(lst);
+      set.notpending(lst, et);
       set.dbdiscard(lst);
       set.unlock();
       lst.reset();
       CHK(con.startTransaction() == 0);
     }
   }
-  if (! deadlock && lst.cnt() != 0) {
+  if (! deadlock && ! nospace && lst.cnt() != 0) {
     deadlock = par.m_deadlock;
-    CHK(con.execute(Commit, deadlock) == 0);
+    nospace = true;
+    ExecType et = randompct(par.m_abortpct) ? Rollback : Commit;
+    CHK(con.execute(et, deadlock, nospace) == 0);
     if (deadlock) {
-      LL1("pkupdate: stop on deadlock [at 1]");
+      LL1("pkupdate: stop on deadlock [at 2]");
+    } else if (nospace) {
+      LL1("pkupdate: end: stop on nospace [at 2]");
     } else {
       set.lock();
-      set.notpending(lst);
+      set.notpending(lst, et);
       set.dbdiscard(lst);
       set.unlock();
     }
@@ -3309,6 +3353,7 @@ pkdelete(Par par)
   CHK(con.startTransaction() == 0);
   Lst lst;
   bool deadlock = false;
+  bool nospace = false;
   for (unsigned j = 0; j < par.m_rows; j++) {
     unsigned j2 = ! par.m_randomkey ? j : urandom(par.m_rows);
     unsigned i = thrrow(par, j2);
@@ -3323,27 +3368,31 @@
     lst.push(i);
     if (lst.cnt() == par.m_batch) {
       deadlock = par.m_deadlock;
-      CHK(con.execute(Commit, deadlock) == 0);
+      nospace = true;
+      ExecType et = randompct(par.m_abortpct) ? Rollback : Commit;
+      CHK(con.execute(et, deadlock, nospace) == 0);
       if (deadlock) {
         LL1("pkdelete: stop on deadlock [at 1]");
         break;
       }
       con.closeTransaction();
       set.lock();
-      set.notpending(lst);
+      set.notpending(lst, et);
       set.unlock();
       lst.reset();
       CHK(con.startTransaction() == 0);
     }
   }
-  if (! deadlock && lst.cnt() != 0) {
+  if (! deadlock && ! nospace && lst.cnt() != 0) {
     deadlock = par.m_deadlock;
-    CHK(con.execute(Commit, deadlock) == 0);
+    nospace = true;
+    ExecType et = randompct(par.m_abortpct) ? Rollback : Commit;
+    CHK(con.execute(et, deadlock, nospace) == 0);
     if (deadlock) {
       LL1("pkdelete: stop on deadlock [at 2]");
     } else {
       set.lock();
-      set.notpending(lst);
+      set.notpending(lst, et);
       set.unlock();
     }
   }
@@ -3418,6 +3467,7 @@ hashindexupdate(Par par, const ITab& itab)
   CHK(con.startTransaction() == 0);
   Lst lst;
   bool deadlock = false;
+  bool nospace = false;
   for (unsigned j = 0; j < par.m_rows; j++) {
     unsigned j2 = ! par.m_randomkey ? j : urandom(par.m_rows);
     unsigned i = thrrow(par, j2);
@@ -3435,7 +3485,7 @@
     lst.push(i);
     if (lst.cnt() == par.m_batch) {
       deadlock = par.m_deadlock;
-      CHK(con.execute(Commit, deadlock) == 0);
+      CHK(con.execute(Commit, deadlock, nospace) == 0);
       if (deadlock) {
         LL1("hashindexupdate: stop on deadlock [at 1]");
         break;
@@ -3451,9 +3501,9 @@
   }
   if (! deadlock && lst.cnt() != 0) {
     deadlock = par.m_deadlock;
-    CHK(con.execute(Commit, deadlock) == 0);
+    CHK(con.execute(Commit, deadlock, nospace) == 0);
     if (deadlock) {
-      LL1("hashindexupdate: stop on deadlock [at 1]");
+      LL1("hashindexupdate: stop on deadlock [at 2]");
     } else {
       set.lock();
       set.notpending(lst);
@@ -3474,6 +3524,7 @@ hashindexdelete(Par par, const ITab& itab)
   CHK(con.startTransaction() == 0);
   Lst lst;
   bool deadlock = false;
+  bool nospace = false;
   for (unsigned j = 0; j < par.m_rows; j++) {
     unsigned j2 = ! par.m_randomkey ? j : urandom(par.m_rows);
     unsigned i = thrrow(par, j2);
@@ -3488,7 +3539,7 @@
     lst.push(i);
     if (lst.cnt() == par.m_batch) {
       deadlock = par.m_deadlock;
-      CHK(con.execute(Commit, deadlock) == 0);
+      CHK(con.execute(Commit, deadlock, nospace) == 0);
       if (deadlock) {
         LL1("hashindexdelete: stop on deadlock [at 1]");
         break;
@@ -3503,7 +3554,7 @@
   }
   if (! deadlock && lst.cnt() != 0) {
     deadlock = par.m_deadlock;
-    CHK(con.execute(Commit, deadlock) == 0);
+    CHK(con.execute(Commit, deadlock, nospace) == 0);
     if (deadlock) {
       LL1("hashindexdelete: stop on deadlock [at 2]");
     } else {
@@ -3875,6 +3926,7 @@ scanupdatetable(Par par)
   CHK(con2.startTransaction() == 0);
   Lst lst;
   bool deadlock = false;
+  bool nospace = false;
   while (1) {
     int ret;
     deadlock = par.m_deadlock;
@@ -3910,7 +3962,7 @@
       set.unlock();
       if (lst.cnt() == par.m_batch) {
         deadlock = par.m_deadlock;
-        CHK(con2.execute(Commit, deadlock) == 0);
+        CHK(con2.execute(Commit, deadlock, nospace) == 0);
         if (deadlock) {
           LL1("scanupdatetable: stop on deadlock [at 2]");
           goto out;
@@ -3927,7 +3979,7 @@
     CHK((ret = con.nextScanResult(false)) == 0 || ret == 1 || ret == 2);
     if (ret == 2 && lst.cnt() != 0) {
       deadlock = par.m_deadlock;
-      CHK(con2.execute(Commit, deadlock) == 0);
+      CHK(con2.execute(Commit, deadlock, nospace) == 0);
       if (deadlock) {
         LL1("scanupdatetable: stop on deadlock [at 3]");
         goto out;
@@ -3974,6 +4026,7 @@ scanupdateindex(Par par, const ITab& itab, const BSet& bset)
   CHK(con2.startTransaction() == 0);
   Lst lst;
   bool deadlock = false;
+  bool nospace = false;
   while (1) {
     int ret;
     deadlock = par.m_deadlock;
@@ -4009,7 +4062,7 @@
       set.unlock();
       if (lst.cnt() == par.m_batch) {
         deadlock = par.m_deadlock;
-        CHK(con2.execute(Commit, deadlock) == 0);
+        CHK(con2.execute(Commit, deadlock, nospace) == 0);
         if (deadlock) {
           LL1("scanupdateindex: stop on deadlock [at 2]");
           goto out;
@@ -4026,7 +4079,7 @@
     CHK((ret = con.nextScanResult(false)) == 0 || ret == 1 || ret == 2);
     if (ret == 2 && lst.cnt() != 0) {
       deadlock = par.m_deadlock;
-      CHK(con2.execute(Commit, deadlock) == 0);
+      CHK(con2.execute(Commit, deadlock, nospace) == 0);
       if (deadlock) {
         LL1("scanupdateindex: stop on deadlock [at 3]");
         goto out;
@@ -4094,6 +4147,10 @@ readverify(Par par)
   if (par.m_noverify)
     return 0;
   par.m_verify = true;
+  if (par.m_abortpct != 0) {
+    LL2("skip verify in this version"); // implement in 5.0 version
+    par.m_verify = false;
+  }
   par.m_lockmode = NdbOperation::LM_CommittedRead;
   CHK(pkread(par) == 0);
   CHK(scanreadall(par) == 0);
@@ -4106,6 +4163,10 @@ readverifyfull(Par par)
   if (par.m_noverify)
     return 0;
   par.m_verify = true;
+  if (par.m_abortpct != 0) {
+    LL2("skip verify in this version"); // implement in 5.0 version
+    par.m_verify = false;
+  }
   par.m_lockmode = NdbOperation::LM_CommittedRead;
   const Tab& tab = par.tab();
   if (par.m_no == 0) {
@@ -4457,11 +4518,11 @@ runstep(Par par, const char* fname, TFunc func, unsigned mode)
   for (n = 0; n < threads; n++) {
     LL4("start " << n);
     Thr& thr = *g_thrlist[n];
-    thr.m_par.m_tab = par.m_tab;
-    thr.m_par.m_set = par.m_set;
-    thr.m_par.m_tmr = par.m_tmr;
-    thr.m_par.m_lno = par.m_lno;
-    thr.m_par.m_slno = par.m_slno;
+    Par oldpar = thr.m_par;
+    // update parameters
+    thr.m_par = par;
+    thr.m_par.m_no = oldpar.m_no;
+    thr.m_par.m_con = oldpar.m_con;
     thr.m_func = func;
     thr.start();
   }
@@ -4591,6 +4652,24 @@ tbusybuild(Par par)
 }
 
 static int
+trollback(Par par)
+{
+  par.m_abortpct = 50;
+  RUNSTEP(par, droptable, ST);
+  RUNSTEP(par, createtable, ST);
+  RUNSTEP(par, invalidatetable, MT);
+  RUNSTEP(par, pkinsert, MT);
+  RUNSTEP(par, createindex, ST);
+  RUNSTEP(par, invalidateindex, MT);
+  RUNSTEP(par, readverify, ST);
+  for (par.m_slno = 0; par.m_slno < par.m_subloop; par.m_slno++) {
+    RUNSTEP(par, mixedoperations, MT);
+    RUNSTEP(par, readverify, ST);
+  }
+  return 0;
+}
+
+static int
 ttimebuild(Par par)
 {
   Tmr t1;
@@ -4712,6 +4791,7 @@ tcaselist[] = {
   TCase("d", tpkopsread, "pk operations and scan reads"),
   TCase("e", tmixedops, "pk operations and scan operations"),
   TCase("f", tbusybuild, "pk operations and index build"),
+  TCase("g", trollback, "operations with random rollbacks"),
   TCase("t", ttimebuild, "time index build"),
   TCase("u", ttimemaint, "time index maintenance"),
   TCase("v", ttimescan, "time full scan table vs index on pk"),