Diffstat (limited to 'ndb')
-rw-r--r--ndb/Makefile.am3
-rw-r--r--ndb/docs/Makefile.am36
-rw-r--r--ndb/docs/doxygen/Doxyfile.mgmapi15
-rw-r--r--ndb/docs/doxygen/Doxyfile.ndb26
-rw-r--r--ndb/docs/doxygen/Doxyfile.ndbapi16
-rw-r--r--ndb/docs/doxygen/Doxyfile.odbc14
-rw-r--r--ndb/docs/doxygen/Doxyfile.test14
-rwxr-xr-xndb/docs/doxygen/predoxy.pl2
-rw-r--r--ndb/examples/configurations/demos.tarbin40960 -> 0 bytes
-rw-r--r--ndb/examples/ndbapi_async_example/Makefile34
-rw-r--r--ndb/examples/ndbapi_example1/Makefile33
-rw-r--r--ndb/examples/ndbapi_example1/ndbapi_example1.cpp194
-rw-r--r--ndb/examples/ndbapi_example2/Makefile33
-rw-r--r--ndb/examples/ndbapi_example3/Makefile33
-rw-r--r--ndb/examples/ndbapi_example4/Makefile33
-rw-r--r--ndb/examples/ndbapi_example4/ndbapi_example4.cpp253
-rw-r--r--ndb/examples/ndbapi_example5/Makefile33
-rw-r--r--ndb/examples/ndbapi_scan_example/Makefile35
-rw-r--r--ndb/examples/select_all/Makefile33
-rw-r--r--ndb/examples/select_all/select_all.cpp259
-rw-r--r--ndb/include/Makefile.am11
-rw-r--r--ndb/include/debugger/EventLogger.hpp29
-rw-r--r--ndb/include/debugger/SignalLoggerManager.hpp2
-rw-r--r--ndb/include/kernel/AttributeDescriptor.hpp89
-rw-r--r--ndb/include/kernel/AttributeHeader.hpp10
-rw-r--r--ndb/include/kernel/AttributeList.hpp2
-rw-r--r--ndb/include/kernel/LogLevel.hpp4
-rw-r--r--ndb/include/kernel/NodeInfo.hpp2
-rw-r--r--ndb/include/kernel/ndb_limits.h15
-rw-r--r--ndb/include/kernel/signaldata/AccScan.hpp34
-rw-r--r--ndb/include/kernel/signaldata/CreateEvnt.hpp15
-rw-r--r--ndb/include/kernel/signaldata/CreateIndx.hpp3
-rw-r--r--ndb/include/kernel/signaldata/CreateTable.hpp1
-rw-r--r--ndb/include/kernel/signaldata/DictTabInfo.hpp220
-rw-r--r--ndb/include/kernel/signaldata/EventReport.hpp88
-rw-r--r--ndb/include/kernel/signaldata/GetTabInfo.hpp32
-rw-r--r--ndb/include/kernel/signaldata/NextScan.hpp2
-rw-r--r--ndb/include/kernel/signaldata/ScanFrag.hpp36
-rw-r--r--ndb/include/kernel/signaldata/ScanTab.hpp73
-rw-r--r--ndb/include/kernel/signaldata/SignalData.hpp11
-rw-r--r--ndb/include/kernel/signaldata/SumaImpl.hpp2
-rw-r--r--ndb/include/kernel/signaldata/TcCommit.hpp7
-rw-r--r--ndb/include/kernel/signaldata/TcHbRep.hpp2
-rw-r--r--ndb/include/kernel/signaldata/TcIndx.hpp406
-rw-r--r--ndb/include/kernel/signaldata/TcKeyConf.hpp2
-rw-r--r--ndb/include/kernel/signaldata/TcKeyFailConf.hpp2
-rw-r--r--ndb/include/kernel/signaldata/TcKeyReq.hpp16
-rw-r--r--ndb/include/kernel/signaldata/TcRollbackRep.hpp2
-rwxr-xr-xndb/include/kernel/signaldata/TransIdAI.hpp2
-rw-r--r--ndb/include/kernel/signaldata/TupFrag.hpp15
-rw-r--r--ndb/include/kernel/signaldata/TuxBound.hpp6
-rw-r--r--ndb/include/kernel/trigger_definitions.h1
-rw-r--r--ndb/include/mgmapi/mgmapi.h895
-rw-r--r--ndb/include/mgmapi/mgmapi_config_parameters.h2
-rw-r--r--ndb/include/mgmapi/mgmapi_debug.h1
-rw-r--r--ndb/include/mgmapi/ndb_logevent.h623
-rw-r--r--ndb/include/mgmcommon/ConfigRetriever.hpp3
-rw-r--r--ndb/include/ndb_constants.h72
-rw-r--r--ndb/include/ndb_global.h.in33
-rw-r--r--ndb/include/ndb_types.h.in81
-rw-r--r--ndb/include/ndbapi/Ndb.hpp1550
-rw-r--r--ndb/include/ndbapi/NdbApi.hpp3
-rw-r--r--ndb/include/ndbapi/NdbBlob.hpp47
-rw-r--r--ndb/include/ndbapi/NdbDictionary.hpp917
-rw-r--r--ndb/include/ndbapi/NdbError.hpp11
-rw-r--r--ndb/include/ndbapi/NdbEventOperation.hpp144
-rw-r--r--ndb/include/ndbapi/NdbIndexOperation.hpp53
-rw-r--r--ndb/include/ndbapi/NdbIndexScanOperation.hpp79
-rw-r--r--ndb/include/ndbapi/NdbOperation.hpp218
-rw-r--r--ndb/include/ndbapi/NdbRecAttr.hpp41
-rw-r--r--ndb/include/ndbapi/NdbReceiver.hpp14
-rw-r--r--ndb/include/ndbapi/NdbResultSet.hpp162
-rw-r--r--ndb/include/ndbapi/NdbScanFilter.hpp73
-rw-r--r--ndb/include/ndbapi/NdbScanOperation.hpp204
-rw-r--r--ndb/include/ndbapi/NdbTransaction.hpp (renamed from ndb/include/ndbapi/NdbConnection.hpp)414
-rw-r--r--ndb/include/ndbapi/ndb_cluster_connection.hpp52
-rw-r--r--ndb/include/ndbapi/ndb_opt_defaults.h4
-rw-r--r--ndb/include/ndbapi/ndbapi_limits.h4
-rw-r--r--ndb/include/ndbapi/ndberror.h7
-rw-r--r--ndb/include/portlib/NdbTCP.h2
-rw-r--r--ndb/include/transporter/TransporterCallback.hpp3
-rw-r--r--ndb/include/transporter/TransporterDefinitions.hpp98
-rw-r--r--ndb/include/transporter/TransporterRegistry.hpp46
-rw-r--r--ndb/include/util/Base64.hpp1
-rw-r--r--ndb/include/util/Bitmask.hpp57
-rw-r--r--ndb/include/util/NdbSqlUtil.hpp145
-rw-r--r--ndb/include/util/SimpleProperties.hpp4
-rw-r--r--ndb/include/util/SocketClient.hpp6
-rw-r--r--ndb/include/util/SocketServer.hpp2
-rw-r--r--ndb/include/util/Vector.hpp8
-rw-r--r--ndb/include/util/md5_hash.hpp11
-rw-r--r--ndb/include/util/ndb_opts.h66
-rw-r--r--ndb/ndbapi-examples/Makefile (renamed from ndb/examples/Makefile)18
-rw-r--r--ndb/ndbapi-examples/mgmapi_logevent_example/Makefile23
-rw-r--r--ndb/ndbapi-examples/mgmapi_logevent_example/mgmapi_logevent.cpp140
-rw-r--r--ndb/ndbapi-examples/ndbapi_async_example/Makefile23
-rw-r--r--ndb/ndbapi-examples/ndbapi_async_example/ndbapi_async.cpp (renamed from ndb/examples/ndbapi_async_example/ndbapi_async.cpp)194
-rw-r--r--ndb/ndbapi-examples/ndbapi_async_example/readme.txt (renamed from ndb/examples/ndbapi_async_example/readme.txt)0
-rw-r--r--ndb/ndbapi-examples/ndbapi_async_example1/Makefile21
-rw-r--r--ndb/ndbapi-examples/ndbapi_async_example1/ndbapi_async1.cpp (renamed from ndb/examples/ndbapi_example2/ndbapi_example2.cpp)75
-rw-r--r--ndb/ndbapi-examples/ndbapi_event_example/Makefile23
-rw-r--r--ndb/ndbapi-examples/ndbapi_event_example/ndbapi_event.cpp (renamed from ndb/examples/ndbapi_example5/ndbapi_example5.cpp)210
-rw-r--r--ndb/ndbapi-examples/ndbapi_retries_example/Makefile21
-rw-r--r--ndb/ndbapi-examples/ndbapi_retries_example/ndbapi_retries.cpp (renamed from ndb/examples/ndbapi_example3/ndbapi_example3.cpp)107
-rw-r--r--ndb/ndbapi-examples/ndbapi_scan_example/Makefile23
-rw-r--r--ndb/ndbapi-examples/ndbapi_scan_example/ndbapi_scan.cpp (renamed from ndb/examples/ndbapi_scan_example/ndbapi_scan.cpp)587
-rw-r--r--ndb/ndbapi-examples/ndbapi_scan_example/readme.txt (renamed from ndb/examples/ndbapi_scan_example/readme.txt)0
-rw-r--r--ndb/ndbapi-examples/ndbapi_simple_example/Makefile23
-rw-r--r--ndb/ndbapi-examples/ndbapi_simple_example/ndbapi_simple.cpp278
-rw-r--r--ndb/ndbapi-examples/ndbapi_simple_index_example/Makefile23
-rw-r--r--ndb/ndbapi-examples/ndbapi_simple_index_example/ndbapi_simple_index.cpp254
-rw-r--r--ndb/src/Makefile.am2
-rw-r--r--ndb/src/common/debugger/EventLogger.cpp1542
-rw-r--r--ndb/src/common/debugger/SignalLoggerManager.cpp9
-rw-r--r--ndb/src/common/debugger/signaldata/DictTabInfo.cpp24
-rw-r--r--ndb/src/common/debugger/signaldata/ScanTab.cpp12
-rw-r--r--ndb/src/common/debugger/signaldata/SignalDataPrint.cpp13
-rw-r--r--ndb/src/common/debugger/signaldata/SignalNames.cpp3
-rw-r--r--ndb/src/common/debugger/signaldata/SumaImpl.cpp76
-rw-r--r--ndb/src/common/debugger/signaldata/TcIndx.cpp85
-rw-r--r--ndb/src/common/debugger/signaldata/TcKeyReq.cpp5
-rw-r--r--ndb/src/common/mgmcommon/ConfigRetriever.cpp6
-rw-r--r--ndb/src/common/mgmcommon/IPCConfig.cpp387
-rw-r--r--ndb/src/common/portlib/NdbTCP.cpp47
-rw-r--r--ndb/src/common/portlib/gcc.cpp2
-rw-r--r--ndb/src/common/portlib/win32/NdbTCP.c32
-rw-r--r--ndb/src/common/transporter/Makefile.am2
-rw-r--r--ndb/src/common/transporter/OSE_Transporter.cpp2
-rw-r--r--ndb/src/common/transporter/OSE_Transporter.hpp1
-rw-r--r--ndb/src/common/transporter/Packer.cpp1
-rw-r--r--ndb/src/common/transporter/SCI_Transporter.cpp8
-rw-r--r--ndb/src/common/transporter/SCI_Transporter.hpp4
-rw-r--r--ndb/src/common/transporter/SHM_Transporter.cpp7
-rw-r--r--ndb/src/common/transporter/SHM_Transporter.hpp4
-rw-r--r--ndb/src/common/transporter/TCP_Transporter.cpp5
-rw-r--r--ndb/src/common/transporter/TCP_Transporter.hpp4
-rw-r--r--ndb/src/common/transporter/Transporter.cpp47
-rw-r--r--ndb/src/common/transporter/Transporter.hpp31
-rw-r--r--ndb/src/common/transporter/TransporterRegistry.cpp319
-rw-r--r--ndb/src/common/util/Base64.cpp13
-rw-r--r--ndb/src/common/util/Bitmask.cpp351
-rw-r--r--ndb/src/common/util/Makefile.am19
-rw-r--r--ndb/src/common/util/NdbSqlUtil.cpp1127
-rw-r--r--ndb/src/common/util/SimpleProperties.cpp34
-rw-r--r--ndb/src/common/util/SocketServer.cpp38
-rw-r--r--ndb/src/common/util/md5_hash.cpp20
-rw-r--r--ndb/src/common/util/new.cpp2
-rw-r--r--ndb/src/common/util/version.c4
-rw-r--r--ndb/src/cw/cpcd/APIService.cpp15
-rw-r--r--ndb/src/cw/cpcd/APIService.hpp1
-rw-r--r--ndb/src/cw/cpcd/main.cpp3
-rw-r--r--ndb/src/kernel/blocks/ERROR_codes.txt8
-rw-r--r--ndb/src/kernel/blocks/backup/Backup.cpp28
-rw-r--r--ndb/src/kernel/blocks/backup/Backup.hpp3
-rw-r--r--ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp17
-rw-r--r--ndb/src/kernel/blocks/dbacc/Dbacc.hpp157
-rw-r--r--ndb/src/kernel/blocks/dbacc/DbaccInit.cpp77
-rw-r--r--ndb/src/kernel/blocks/dbacc/DbaccMain.cpp2494
-rw-r--r--ndb/src/kernel/blocks/dbacc/Makefile.am2
-rw-r--r--ndb/src/kernel/blocks/dbdict/Dbdict.cpp665
-rw-r--r--ndb/src/kernel/blocks/dbdict/Dbdict.hpp62
-rw-r--r--ndb/src/kernel/blocks/dbdict/SchemaFile.hpp44
-rw-r--r--ndb/src/kernel/blocks/dbdict/printSchemaFile.cpp286
-rw-r--r--ndb/src/kernel/blocks/dbdih/DbdihMain.cpp255
-rw-r--r--ndb/src/kernel/blocks/dblqh/Dblqh.hpp35
-rw-r--r--ndb/src/kernel/blocks/dblqh/DblqhInit.cpp2
-rw-r--r--ndb/src/kernel/blocks/dblqh/DblqhMain.cpp700
-rw-r--r--ndb/src/kernel/blocks/dbtc/Dbtc.hpp31
-rw-r--r--ndb/src/kernel/blocks/dbtc/DbtcMain.cpp305
-rw-r--r--ndb/src/kernel/blocks/dbtup/Dbtup.hpp85
-rw-r--r--ndb/src/kernel/blocks/dbtup/DbtupDebug.cpp2
-rw-r--r--ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp117
-rw-r--r--ndb/src/kernel/blocks/dbtup/DbtupGen.cpp18
-rw-r--r--ndb/src/kernel/blocks/dbtup/DbtupIndex.cpp26
-rw-r--r--ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp53
-rw-r--r--ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp368
-rw-r--r--ndb/src/kernel/blocks/dbtup/DbtupScan.cpp315
-rw-r--r--ndb/src/kernel/blocks/dbtup/DbtupSystemRestart.cpp2
-rw-r--r--ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp14
-rw-r--r--ndb/src/kernel/blocks/dbtup/Makefile.am1
-rw-r--r--ndb/src/kernel/blocks/dbtux/Dbtux.hpp15
-rw-r--r--ndb/src/kernel/blocks/dbtux/DbtuxCmp.cpp44
-rw-r--r--ndb/src/kernel/blocks/dbtux/DbtuxDebug.cpp6
-rw-r--r--ndb/src/kernel/blocks/dbtux/DbtuxGen.cpp23
-rw-r--r--ndb/src/kernel/blocks/dbtux/DbtuxMaint.cpp5
-rw-r--r--ndb/src/kernel/blocks/dbtux/DbtuxMeta.cpp12
-rw-r--r--ndb/src/kernel/blocks/dbtux/DbtuxNode.cpp2
-rw-r--r--ndb/src/kernel/blocks/dbtux/DbtuxScan.cpp240
-rw-r--r--ndb/src/kernel/blocks/dbtux/DbtuxSearch.cpp124
-rw-r--r--ndb/src/kernel/blocks/dbtux/DbtuxTree.cpp3
-rw-r--r--ndb/src/kernel/blocks/dbtux/Times.txt10
-rw-r--r--ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp22
-rw-r--r--ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp61
-rw-r--r--ndb/src/kernel/blocks/ndbfs/AsyncFile.hpp3
-rw-r--r--ndb/src/kernel/blocks/ndbfs/CircularIndex.hpp4
-rw-r--r--ndb/src/kernel/blocks/ndbfs/MemoryChannel.hpp12
-rw-r--r--ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp25
-rw-r--r--ndb/src/kernel/blocks/qmgr/Qmgr.hpp5
-rw-r--r--ndb/src/kernel/blocks/qmgr/QmgrInit.cpp3
-rw-r--r--ndb/src/kernel/blocks/qmgr/QmgrMain.cpp87
-rw-r--r--ndb/src/kernel/blocks/suma/Suma.cpp142
-rw-r--r--ndb/src/kernel/blocks/suma/Suma.hpp2
-rw-r--r--ndb/src/kernel/main.cpp18
-rw-r--r--ndb/src/kernel/vm/Configuration.cpp19
-rw-r--r--ndb/src/kernel/vm/Configuration.hpp6
-rw-r--r--ndb/src/kernel/vm/FastScheduler.cpp2
-rw-r--r--ndb/src/kernel/vm/KeyDescriptor.hpp (renamed from ndb/include/ndb_types.h)27
-rw-r--r--ndb/src/kernel/vm/MetaData.hpp7
-rw-r--r--ndb/src/kernel/vm/SimulatedBlock.cpp119
-rw-r--r--ndb/src/kernel/vm/SimulatedBlock.hpp20
-rw-r--r--ndb/src/kernel/vm/SuperPool.cpp442
-rw-r--r--ndb/src/kernel/vm/SuperPool.hpp561
-rw-r--r--ndb/src/kernel/vm/TransporterCallback.cpp14
-rw-r--r--ndb/src/kernel/vm/VMSignal.hpp10
-rw-r--r--ndb/src/kernel/vm/testSuperPool.cpp220
-rw-r--r--ndb/src/mgmapi/LocalConfig.cpp57
-rw-r--r--ndb/src/mgmapi/Makefile.am2
-rw-r--r--ndb/src/mgmapi/mgmapi.cpp382
-rw-r--r--ndb/src/mgmapi/mgmapi_configuration.hpp16
-rw-r--r--ndb/src/mgmapi/mgmapi_internal.h77
-rw-r--r--ndb/src/mgmapi/ndb_logevent.cpp503
-rw-r--r--ndb/src/mgmapi/ndb_logevent.hpp (renamed from ndb/include/ndbapi/NdbCursorOperation.hpp)18
-rw-r--r--ndb/src/mgmclient/CommandInterpreter.cpp59
-rw-r--r--ndb/src/mgmclient/main.cpp13
-rw-r--r--ndb/src/mgmsrv/CommandInterpreter.cpp345
-rw-r--r--ndb/src/mgmsrv/CommandInterpreter.hpp92
-rw-r--r--ndb/src/mgmsrv/ConfigInfo.cpp73
-rw-r--r--ndb/src/mgmsrv/Makefile.am13
-rw-r--r--ndb/src/mgmsrv/MgmtSrvr.cpp255
-rw-r--r--ndb/src/mgmsrv/MgmtSrvr.hpp13
-rw-r--r--ndb/src/mgmsrv/Services.cpp140
-rw-r--r--ndb/src/mgmsrv/Services.hpp9
-rw-r--r--ndb/src/mgmsrv/main.cpp131
-rw-r--r--ndb/src/ndbapi/ClusterMgr.cpp39
-rw-r--r--ndb/src/ndbapi/ClusterMgr.hpp10
-rw-r--r--ndb/src/ndbapi/DictCache.cpp108
-rw-r--r--ndb/src/ndbapi/DictCache.hpp2
-rw-r--r--ndb/src/ndbapi/Makefile.am5
-rw-r--r--ndb/src/ndbapi/Ndb.cpp345
-rw-r--r--ndb/src/ndbapi/NdbApiSignal.cpp2
-rw-r--r--ndb/src/ndbapi/NdbApiSignal.hpp2
-rw-r--r--ndb/src/ndbapi/NdbBlob.cpp484
-rw-r--r--ndb/src/ndbapi/NdbCursorOperation.cpp51
-rw-r--r--ndb/src/ndbapi/NdbDictionary.cpp165
-rw-r--r--ndb/src/ndbapi/NdbDictionaryImpl.cpp914
-rw-r--r--ndb/src/ndbapi/NdbDictionaryImpl.hpp143
-rw-r--r--ndb/src/ndbapi/NdbEventOperation.cpp20
-rw-r--r--ndb/src/ndbapi/NdbEventOperationImpl.cpp523
-rw-r--r--ndb/src/ndbapi/NdbEventOperationImpl.hpp31
-rw-r--r--ndb/src/ndbapi/NdbImpl.hpp19
-rw-r--r--ndb/src/ndbapi/NdbIndexOperation.cpp452
-rw-r--r--ndb/src/ndbapi/NdbLinHash.hpp5
-rw-r--r--ndb/src/ndbapi/NdbOperation.cpp47
-rw-r--r--ndb/src/ndbapi/NdbOperationDefine.cpp108
-rw-r--r--ndb/src/ndbapi/NdbOperationExec.cpp27
-rw-r--r--ndb/src/ndbapi/NdbOperationInt.cpp86
-rw-r--r--ndb/src/ndbapi/NdbOperationSearch.cpp363
-rw-r--r--ndb/src/ndbapi/NdbPool.cpp6
-rw-r--r--ndb/src/ndbapi/NdbPoolImpl.cpp13
-rw-r--r--ndb/src/ndbapi/NdbPoolImpl.hpp8
-rw-r--r--ndb/src/ndbapi/NdbRecAttr.cpp78
-rw-r--r--ndb/src/ndbapi/NdbReceiver.cpp29
-rw-r--r--ndb/src/ndbapi/NdbResultSet.cpp103
-rw-r--r--ndb/src/ndbapi/NdbScanFilter.cpp266
-rw-r--r--ndb/src/ndbapi/NdbScanOperation.cpp377
-rw-r--r--ndb/src/ndbapi/NdbTransaction.cpp (renamed from ndb/src/ndbapi/NdbConnection.cpp)439
-rw-r--r--ndb/src/ndbapi/NdbTransactionScan.cpp (renamed from ndb/src/ndbapi/NdbConnectionScan.cpp)18
-rw-r--r--ndb/src/ndbapi/Ndberr.cpp13
-rw-r--r--ndb/src/ndbapi/Ndbif.cpp178
-rw-r--r--ndb/src/ndbapi/Ndbinit.cpp85
-rw-r--r--ndb/src/ndbapi/Ndblist.cpp44
-rw-r--r--ndb/src/ndbapi/TransporterFacade.cpp65
-rw-r--r--ndb/src/ndbapi/TransporterFacade.hpp22
-rw-r--r--ndb/src/ndbapi/ndb_cluster_connection.cpp163
-rw-r--r--ndb/src/ndbapi/ndb_cluster_connection_impl.hpp18
-rw-r--r--ndb/src/ndbapi/ndberror.c55
-rw-r--r--ndb/test/include/HugoCalculator.hpp9
-rw-r--r--ndb/test/include/HugoOperations.hpp29
-rw-r--r--ndb/test/include/HugoTransactions.hpp8
-rw-r--r--ndb/test/include/NDBT_Error.hpp6
-rw-r--r--ndb/test/include/NDBT_ResultRow.hpp7
-rw-r--r--ndb/test/include/NDBT_Tables.hpp6
-rw-r--r--ndb/test/include/NDBT_Test.hpp20
-rw-r--r--ndb/test/include/NdbSchemaOp.hpp51
-rw-r--r--ndb/test/include/UtilTransactions.hpp30
-rw-r--r--ndb/test/ndbapi/Makefile.am12
-rw-r--r--ndb/test/ndbapi/ScanFunctions.hpp23
-rw-r--r--ndb/test/ndbapi/ScanInterpretTest.hpp17
-rw-r--r--ndb/test/ndbapi/bank/Bank.cpp64
-rw-r--r--ndb/test/ndbapi/bank/Bank.hpp2
-rw-r--r--ndb/test/ndbapi/bank/BankLoad.cpp7
-rw-r--r--ndb/test/ndbapi/bank/bankCreator.cpp8
-rw-r--r--ndb/test/ndbapi/bank/bankMakeGL.cpp8
-rw-r--r--ndb/test/ndbapi/bank/bankSumAccounts.cpp8
-rw-r--r--ndb/test/ndbapi/bank/bankTimer.cpp8
-rw-r--r--ndb/test/ndbapi/bank/bankTransactionMaker.cpp8
-rw-r--r--ndb/test/ndbapi/bank/bankValidateAllGLs.cpp8
-rw-r--r--ndb/test/ndbapi/bank/testBank.cpp12
-rw-r--r--ndb/test/ndbapi/bench/userInterface.cpp6
-rw-r--r--ndb/test/ndbapi/bench/userInterface.h2
-rw-r--r--ndb/test/ndbapi/create_all_tabs.cpp7
-rw-r--r--ndb/test/ndbapi/create_tab.cpp8
-rw-r--r--ndb/test/ndbapi/drop_all_tabs.cpp8
-rw-r--r--ndb/test/ndbapi/flexAsynch.cpp16
-rw-r--r--ndb/test/ndbapi/flexBench.cpp15
-rw-r--r--ndb/test/ndbapi/flexHammer.cpp13
-rw-r--r--ndb/test/ndbapi/flexTT.cpp13
-rw-r--r--ndb/test/ndbapi/flex_bench_mysql.cpp2
-rw-r--r--ndb/test/ndbapi/slow_select.cpp28
-rw-r--r--ndb/test/ndbapi/testBackup.cpp16
-rw-r--r--ndb/test/ndbapi/testBitfield.cpp198
-rw-r--r--ndb/test/ndbapi/testBlobs.cpp37
-rw-r--r--ndb/test/ndbapi/testDataBuffers.cpp21
-rw-r--r--ndb/test/ndbapi/testDeadlock.cpp27
-rw-r--r--ndb/test/ndbapi/testDict.cpp171
-rw-r--r--ndb/test/ndbapi/testIndex.cpp19
-rw-r--r--ndb/test/ndbapi/testLcp.cpp12
-rw-r--r--ndb/test/ndbapi/testNdbApi.cpp209
-rw-r--r--ndb/test/ndbapi/testNodeRestart.cpp2
-rw-r--r--ndb/test/ndbapi/testOIBasic.cpp2764
-rw-r--r--ndb/test/ndbapi/testPartitioning.cpp430
-rw-r--r--ndb/test/ndbapi/testReadPerf.cpp32
-rw-r--r--ndb/test/ndbapi/testSRBank.cpp14
-rw-r--r--ndb/test/ndbapi/testScan.cpp77
-rw-r--r--ndb/test/ndbapi/testScanPerf.cpp105
-rw-r--r--ndb/test/ndbapi/testTimeout.cpp62
-rw-r--r--ndb/test/ndbapi/test_event.cpp390
-rw-r--r--ndb/test/ndbapi/test_event_multi_table.cpp487
-rw-r--r--ndb/test/run-test/16node-tests.txt733
-rw-r--r--ndb/test/run-test/Makefile.am2
-rw-r--r--ndb/test/run-test/basic.txt2
-rw-r--r--ndb/test/run-test/daily-basic-tests.txt95
-rw-r--r--ndb/test/run-test/daily-devel-tests.txt31
-rw-r--r--ndb/test/src/HugoAsynchTransactions.cpp11
-rw-r--r--ndb/test/src/HugoCalculator.cpp294
-rw-r--r--ndb/test/src/HugoOperations.cpp360
-rw-r--r--ndb/test/src/HugoTransactions.cpp843
-rw-r--r--ndb/test/src/NDBT_ResultRow.cpp9
-rw-r--r--ndb/test/src/NDBT_Tables.cpp19
-rw-r--r--ndb/test/src/NDBT_Test.cpp48
-rw-r--r--ndb/test/src/NdbSchemaOp.cpp3
-rw-r--r--ndb/test/src/UtilTransactions.cpp755
-rw-r--r--ndb/test/tools/Makefile.am3
-rw-r--r--ndb/test/tools/copy_tab.cpp9
-rw-r--r--ndb/test/tools/create_index.cpp7
-rw-r--r--ndb/test/tools/hugoFill.cpp7
-rw-r--r--ndb/test/tools/hugoLoad.cpp7
-rw-r--r--ndb/test/tools/hugoLockRecords.cpp7
-rw-r--r--ndb/test/tools/hugoPkDelete.cpp7
-rw-r--r--ndb/test/tools/hugoPkRead.cpp7
-rw-r--r--ndb/test/tools/hugoPkReadRecord.cpp7
-rw-r--r--ndb/test/tools/hugoPkUpdate.cpp7
-rw-r--r--ndb/test/tools/hugoScanRead.cpp10
-rw-r--r--ndb/test/tools/hugoScanUpdate.cpp7
-rw-r--r--ndb/test/tools/verify_index.cpp7
-rw-r--r--ndb/tools/Makefile.am4
-rw-r--r--ndb/tools/delete_all.cpp47
-rw-r--r--ndb/tools/desc.cpp42
-rw-r--r--ndb/tools/drop_index.cpp31
-rw-r--r--ndb/tools/drop_tab.cpp31
-rw-r--r--ndb/tools/listTables.cpp24
-rw-r--r--ndb/tools/ndb_test_platform.cpp6
-rw-r--r--ndb/tools/restore/Restore.cpp15
-rw-r--r--ndb/tools/restore/consumer.cpp2
-rw-r--r--ndb/tools/restore/consumer_printer.hpp2
-rw-r--r--ndb/tools/restore/consumer_restore.cpp34
-rw-r--r--ndb/tools/restore/consumer_restore.hpp4
-rw-r--r--ndb/tools/restore/consumer_restorem.cpp17
-rw-r--r--ndb/tools/restore/restore_main.cpp18
-rw-r--r--ndb/tools/select_all.cpp68
-rw-r--r--ndb/tools/select_count.cpp50
-rw-r--r--ndb/tools/waiter.cpp15
372 files changed, 24002 insertions, 16610 deletions
diff --git a/ndb/Makefile.am b/ndb/Makefile.am
index 32c821383e6..3aac54b38ee 100644
--- a/ndb/Makefile.am
+++ b/ndb/Makefile.am
@@ -1,12 +1,13 @@
SUBDIRS = src tools . include @ndb_opt_subdirs@
DIST_SUBDIRS = src tools include test docs
-EXTRA_DIST = config
+EXTRA_DIST = config ndbapi-examples
include $(top_srcdir)/ndb/config/common.mk.am
dist-hook:
-rm -rf `find $(distdir) -type d -name SCCS`
-rm -rf `find $(distdir) -type d -name old_files`
+ -rm -rf `find $(distdir)/ndbapi-examples -name '*.o'`
list='$(SUBDIRS)'; for subdir in $$list; do \
if test "$$subdir" != "." -a "$$subdir" != "include"; then \
files="`find $$subdir -name '*\.h'` `find $$subdir -name '*\.hpp'`"; \
diff --git a/ndb/docs/Makefile.am b/ndb/docs/Makefile.am
index 1399ce3b6a5..afa91857771 100644
--- a/ndb/docs/Makefile.am
+++ b/ndb/docs/Makefile.am
@@ -1,18 +1,19 @@
DOXYDIR = doxygen
noinst_HEADERS = $(DOXYDIR)/predoxy.pl $(DOXYDIR)/postdoxy.pl $(DOXYDIR)/Doxyfile.ndbapi $(DOXYDIR)/Doxyfile.mgmapi $(DOXYDIR)/header.ndbapi.tex $(DOXYDIR)/header.mgmapi.tex
-all: do-check ndbapidoc mgmapidoc
+all: do-check-html ndbapidoc-html mgmapidoc-html
+all-pdf: do-check-pdf ndbapidoc-pdf mgmapidoc-pdf
DOXYTMP = .doxytmp
DOXYOUT = .doxyout
NDB_RELEASE = @NDB_VERSION_MAJOR@.@NDB_VERSION_MINOR@.@NDB_VERSION_BUILD@-@NDB_VERSION_STATUS@
-clean:
+clean-local:
rm -rf ndbapi.pdf ndbapi.html mgmapi.pdf mgmapi.html
rm -rf $(DOXYTMP) $(DOXYOUT)
-do-check:
+do-check-html:
@set -x; \
if test @PERL@ = no ; then \
echo "Perl needed to make docs"; \
@@ -21,7 +22,9 @@ do-check:
if test @DOXYGEN@ = no ; then \
echo "Doxygen needed to make docs"; \
exit 1; \
- fi; \
+ fi;
+
+do-check-pdf: do-check-html
if test @PDFLATEX@ = no ; then \
echo "Pdflatex needed to make docs"; \
exit 1; \
@@ -30,26 +33,30 @@ do-check:
echo "Makeindex needed to make docs"; \
exit 1; \
fi;
+
###
#
# NDB API Programmer's Guide
#
-ndbapidoc: ndbapi.pdf
+ndbapidoc-html: ndbapi.html
+ndbapidoc-pdf: ndbapi.pdf
-ndbapi.pdf: $(noinst_HEADERS)
+ndbapi.html: $(noinst_HEADERS)
@set -x; \
export NDB_RELEASE=$(NDB_RELEASE); \
@RM@ -f ndbapi.pdf ndbapi.html; \
@RM@ -rf $(DOXYTMP) $(DOXYOUT); \
mkdir -p $(DOXYTMP) $(DOXYOUT); \
@CP@ $(top_srcdir)/ndb/include/ndbapi/* $(DOXYTMP); \
- @CP@ $(top_srcdir)/ndb/examples/*/*.[ch]pp $(DOXYTMP); \
+ @CP@ $(top_srcdir)/ndb/ndbapi-examples/*/*.[ch]pp $(DOXYTMP); \
@PERL@ $(DOXYDIR)/predoxy.pl; \
mv footer.html $(DOXYTMP); \
(cd $(DOXYTMP) ; @DOXYGEN@ ../$(DOXYDIR)/Doxyfile.ndbapi); \
- @PERL@ $(DOXYDIR)/postdoxy.pl $(DOXYOUT)/ndbapi.latex "NDB API Programmer Guide"; \
+ @PERL@ $(DOXYDIR)/postdoxy.pl $(DOXYOUT)/ndbapi.latex "MySQL Cluster NDB API Programmer Guide"; \
(cd $(DOXYOUT) && \
- find ndbapi.html -print | cpio -pdm ..); \
+ find ndbapi.html -print | cpio -pdm ..);
+
+ndbapi.pdf: ndbapi.html
(cd $(DOXYOUT)/ndbapi.latex && \
@PDFLATEX@ refman.tex && @MAKEINDEX@ refman && @PDFLATEX@ refman.tex && \
cp -p refman.pdf ../../ndbapi.pdf);
@@ -58,9 +65,10 @@ ndbapi.pdf: $(noinst_HEADERS)
#
# MGM API Guide
#
-mgmapidoc: mgmapi.pdf
+mgmapidoc-html: mgmapi.html
+mgmapidoc-pdf: mgmapi.pdf
-mgmapi.pdf: $(noinst_HEADERS)
+mgmapi.html: $(noinst_HEADERS)
@set -x; \
export NDB_RELEASE=$(NDB_RELEASE); \
@RM@ -f mgmapi.pdf mgmapi.html; \
@@ -70,9 +78,11 @@ mgmapi.pdf: $(noinst_HEADERS)
@PERL@ $(DOXYDIR)/predoxy.pl; \
mv footer.html $(DOXYTMP); \
(cd $(DOXYTMP) ; @DOXYGEN@ ../$(DOXYDIR)/Doxyfile.mgmapi); \
- @PERL@ $(DOXYDIR)/postdoxy.pl $(DOXYOUT)/mgmapi.latex "NDB Cluster MGM API Guide"; \
+ @PERL@ $(DOXYDIR)/postdoxy.pl $(DOXYOUT)/mgmapi.latex "MySQL Cluster MGM API Guide"; \
(cd $(DOXYOUT) && \
- find mgmapi.html -print | cpio -pdm ..); \
+ find mgmapi.html -print | cpio -pdm ..);
+
+mgmapi.pdf: mgmapi.html
(cd $(DOXYOUT)/mgmapi.latex && \
@PDFLATEX@ refman.tex && @MAKEINDEX@ refman && @PDFLATEX@ refman.tex && \
cp -p refman.pdf ../../mgmapi.pdf);
diff --git a/ndb/docs/doxygen/Doxyfile.mgmapi b/ndb/docs/doxygen/Doxyfile.mgmapi
index 4287b37fd97..1e743dcb60e 100644
--- a/ndb/docs/doxygen/Doxyfile.mgmapi
+++ b/ndb/docs/doxygen/Doxyfile.mgmapi
@@ -59,7 +59,7 @@ EXTRACT_PRIVATE = NO
# If the EXTRACT_STATIC tag is set to YES all static members of a file
# will be included in the documentation.
-EXTRACT_STATIC = NO
+EXTRACT_STATIC = YES
# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all
# undocumented members of documented classes, files or namespaces.
@@ -146,7 +146,7 @@ HIDE_SCOPE_NAMES = NO
# will generate a verbatim copy of the header file for each class for
# which an include is specified. Set to NO to disable this.
-VERBATIM_HEADERS = YES
+VERBATIM_HEADERS = NO
# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen
# will put list of the files that are included by a file in the documentation
@@ -190,7 +190,7 @@ DISTRIBUTE_GROUP_DOC = NO
# The TAB_SIZE tag can be used to set the number of spaces in a tab.
# Doxygen uses this value to replace tabs by spaces in code fragments.
-TAB_SIZE = 8
+TAB_SIZE = 2
# The GENERATE_TODOLIST tag can be used to enable (YES) or
# disable (NO) the todo list. This list is created by putting \todo
@@ -245,7 +245,7 @@ OPTIMIZE_OUTPUT_FOR_C = NO
# at the bottom of the documentation of classes and structs. If set to YES the
# list will mention the files that were used to generate the documentation.
-SHOW_USED_FILES = YES
+SHOW_USED_FILES = NO
#---------------------------------------------------------------------------
# configuration options related to warning and progress messages
@@ -447,7 +447,7 @@ HTML_STYLESHEET =
# files or namespaces will be aligned in HTML using tables. If set to
# NO a bullet list will be used.
-HTML_ALIGN_MEMBERS = YES
+HTML_ALIGN_MEMBERS = NO
# If the GENERATE_HTMLHELP tag is set to YES, additional index files
# will be generated that can be used as input for tools like the
@@ -477,7 +477,7 @@ TOC_EXPAND = NO
# top of each HTML page. The value NO (the default) enables the index and
# the value YES disables it.
-DISABLE_INDEX = NO
+DISABLE_INDEX = YES
# This tag can be used to set the number of enum values (range [1..20])
# that doxygen will group on one line in the generated HTML documentation.
@@ -688,7 +688,8 @@ INCLUDE_FILE_PATTERNS =
# or name=definition (no spaces). If the definition and the = are
# omitted =1 is assumed.
-PREDEFINED = DOXYGEN_SHOULD_SKIP_DEPRECATED \
+PREDEFINED = DOXYGEN_FIX \
+ DOXYGEN_SHOULD_SKIP_DEPRECATED \
DOXYGEN_SHOULD_SKIP_INTERNAL \
protected=private
diff --git a/ndb/docs/doxygen/Doxyfile.ndb b/ndb/docs/doxygen/Doxyfile.ndb
index d43a66323f8..3986a7cd17f 100644
--- a/ndb/docs/doxygen/Doxyfile.ndb
+++ b/ndb/docs/doxygen/Doxyfile.ndb
@@ -14,6 +14,8 @@
# General configuration options
#---------------------------------------------------------------------------
+DETAILS_AT_TOP = YES
+
# The PROJECT_NAME tag is a single word (or a sequence of words surrounded
# by quotes) that should identify the project.
@@ -52,7 +54,7 @@ EXTRACT_ALL = YES
# If the EXTRACT_PRIVATE tag is set to YES all private members of a class
# will be included in the documentation.
-EXTRACT_PRIVATE = YES
+EXTRACT_PRIVATE = NO
# If the EXTRACT_STATIC tag is set to YES all static members of a file
# will be included in the documentation.
@@ -105,7 +107,7 @@ ALWAYS_DETAILED_SEC = NO
# ordinary class members. Constructors, destructors and assignment operators of
# the base classes will not be shown.
-INLINE_INHERITED_MEMB = NO
+INLINE_INHERITED_MEMB = YES
# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full
# path before files name in the file list and in the header files. If set
@@ -157,13 +159,13 @@ HIDE_SCOPE_NAMES = NO
# will generate a verbatim copy of the header file for each class for
# which an include is specified. Set to NO to disable this.
-VERBATIM_HEADERS = YES
+VERBATIM_HEADERS = NO
# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen
# will put list of the files that are included by a file in the documentation
# of that file.
-SHOW_INCLUDE_FILES = YES
+SHOW_INCLUDE_FILES = NO
# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen
# will interpret the first line (until the first dot) of a JavaDoc-style
@@ -201,7 +203,7 @@ DISTRIBUTE_GROUP_DOC = NO
# The TAB_SIZE tag can be used to set the number of spaces in a tab.
# Doxygen uses this value to replace tabs by spaces in code fragments.
-TAB_SIZE = 8
+TAB_SIZE = 2
# The GENERATE_TODOLIST tag can be used to enable (YES) or
# disable (NO) the todo list. This list is created by putting \todo
@@ -256,7 +258,7 @@ OPTIMIZE_OUTPUT_FOR_C = NO
# at the bottom of the documentation of classes and structs. If set to YES the
# list will mention the files that were used to generate the documentation.
-SHOW_USED_FILES = YES
+SHOW_USED_FILES = NO
#---------------------------------------------------------------------------
# configuration options related to warning and progress messages
@@ -485,7 +487,7 @@ HTML_STYLESHEET =
# files or namespaces will be aligned in HTML using tables. If set to
# NO a bullet list will be used.
-HTML_ALIGN_MEMBERS = YES
+HTML_ALIGN_MEMBERS = NO
# If the GENERATE_HTMLHELP tag is set to YES, additional index files
# will be generated that can be used as input for tools like the
@@ -515,7 +517,7 @@ TOC_EXPAND = NO
# top of each HTML page. The value NO (the default) enables the index and
# the value YES disables it.
-DISABLE_INDEX = NO
+DISABLE_INDEX = YES
# This tag can be used to set the number of enum values (range [1..20])
# that doxygen will group on one line in the generated HTML documentation.
@@ -794,21 +796,21 @@ PERL_PATH = /usr/bin/perl
# option is superceded by the HAVE_DOT option below. This is only a fallback. It is
# recommended to install and use dot, since it yield more powerful graphs.
-CLASS_DIAGRAMS = YES
+CLASS_DIAGRAMS = NO
# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is
# available from the path. This tool is part of Graphviz, a graph visualization
# toolkit from AT&T and Lucent Bell Labs. The other options in this section
# have no effect if this option is set to NO (the default)
-HAVE_DOT = YES
+HAVE_DOT = NO
# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen
# will generate a graph for each documented class showing the direct and
# indirect inheritance relations. Setting this tag to YES will force the
# the CLASS_DIAGRAMS tag to NO.
-CLASS_GRAPH = YES
+CLASS_GRAPH = NO
# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen
# will generate a graph for each documented class showing the direct and
@@ -820,7 +822,7 @@ COLLABORATION_GRAPH = YES
# If set to YES, the inheritance and collaboration graphs will show the
# relations between templates and their instances.
-TEMPLATE_RELATIONS = YES
+TEMPLATE_RELATIONS = NO
# If set to YES, the inheritance and collaboration graphs will hide
# inheritance and usage relations if the target is undocumented
diff --git a/ndb/docs/doxygen/Doxyfile.ndbapi b/ndb/docs/doxygen/Doxyfile.ndbapi
index 61d58d4fea3..da610148468 100644
--- a/ndb/docs/doxygen/Doxyfile.ndbapi
+++ b/ndb/docs/doxygen/Doxyfile.ndbapi
@@ -13,8 +13,8 @@
#---------------------------------------------------------------------------
# General configuration options
#---------------------------------------------------------------------------
-DETAILS_AT_TOP = yes
-HIDE_FRIEND_COMPOUNDS = yes
+DETAILS_AT_TOP = YES
+HIDE_FRIEND_COMPOUNDS = YES
# The PROJECT_NAME tag is a single word (or a sequence of words surrounded
# by quotes) that should identify the project.
@@ -59,7 +59,7 @@ EXTRACT_PRIVATE = NO
# If the EXTRACT_STATIC tag is set to YES all static members of a file
# will be included in the documentation.
-EXTRACT_STATIC = NO
+EXTRACT_STATIC = YES
# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all
# undocumented members of documented classes, files or namespaces.
@@ -146,7 +146,7 @@ HIDE_SCOPE_NAMES = NO
# will generate a verbatim copy of the header file for each class for
# which an include is specified. Set to NO to disable this.
-VERBATIM_HEADERS = YES
+VERBATIM_HEADERS = NO
# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen
# will put list of the files that are included by a file in the documentation
@@ -190,7 +190,7 @@ DISTRIBUTE_GROUP_DOC = NO
# The TAB_SIZE tag can be used to set the number of spaces in a tab.
# Doxygen uses this value to replace tabs by spaces in code fragments.
-TAB_SIZE = 8
+TAB_SIZE = 2
# The GENERATE_TODOLIST tag can be used to enable (YES) or
# disable (NO) the todo list. This list is created by putting \todo
@@ -245,7 +245,7 @@ OPTIMIZE_OUTPUT_FOR_C = NO
# at the bottom of the documentation of classes and structs. If set to YES the
# list will mention the files that were used to generate the documentation.
-SHOW_USED_FILES = YES
+SHOW_USED_FILES = NO
#---------------------------------------------------------------------------
# configuration options related to warning and progress messages
@@ -447,7 +447,7 @@ HTML_STYLESHEET =
# files or namespaces will be aligned in HTML using tables. If set to
# NO a bullet list will be used.
-HTML_ALIGN_MEMBERS = YES
+HTML_ALIGN_MEMBERS = NO
# If the GENERATE_HTMLHELP tag is set to YES, additional index files
# will be generated that can be used as input for tools like the
@@ -477,7 +477,7 @@ TOC_EXPAND = NO
# top of each HTML page. The value NO (the default) enables the index and
# the value YES disables it.
-DISABLE_INDEX = NO
+DISABLE_INDEX = YES
# This tag can be used to set the number of enum values (range [1..20])
# that doxygen will group on one line in the generated HTML documentation.
diff --git a/ndb/docs/doxygen/Doxyfile.odbc b/ndb/docs/doxygen/Doxyfile.odbc
index 93e052d5b9d..262513852b7 100644
--- a/ndb/docs/doxygen/Doxyfile.odbc
+++ b/ndb/docs/doxygen/Doxyfile.odbc
@@ -14,6 +14,8 @@
# General configuration options
#---------------------------------------------------------------------------
+DETAILS_AT_TOP = YES
+
# The PROJECT_NAME tag is a single word (or a sequence of words surrounded
# by quotes) that should identify the project.
@@ -52,7 +54,7 @@ EXTRACT_ALL = YES
# If the EXTRACT_PRIVATE tag is set to YES all private members of a class
# will be included in the documentation.
-EXTRACT_PRIVATE = YES
+EXTRACT_PRIVATE = NO
# If the EXTRACT_STATIC tag is set to YES all static members of a file
# will be included in the documentation.
@@ -157,7 +159,7 @@ HIDE_SCOPE_NAMES = NO
# will generate a verbatim copy of the header file for each class for
# which an include is specified. Set to NO to disable this.
-VERBATIM_HEADERS = YES
+VERBATIM_HEADERS = NO
# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen
# will put list of the files that are included by a file in the documentation
@@ -201,7 +203,7 @@ DISTRIBUTE_GROUP_DOC = NO
# The TAB_SIZE tag can be used to set the number of spaces in a tab.
# Doxygen uses this value to replace tabs by spaces in code fragments.
-TAB_SIZE = 8
+TAB_SIZE = 2
# The GENERATE_TODOLIST tag can be used to enable (YES) or
# disable (NO) the todo list. This list is created by putting \todo
@@ -256,7 +258,7 @@ OPTIMIZE_OUTPUT_FOR_C = NO
# at the bottom of the documentation of classes and structs. If set to YES the
# list will mention the files that were used to generate the documentation.
-SHOW_USED_FILES = YES
+SHOW_USED_FILES = NO
#---------------------------------------------------------------------------
# configuration options related to warning and progress messages
@@ -469,7 +471,7 @@ HTML_STYLESHEET =
# files or namespaces will be aligned in HTML using tables. If set to
# NO a bullet list will be used.
-HTML_ALIGN_MEMBERS = YES
+HTML_ALIGN_MEMBERS = NO
# If the GENERATE_HTMLHELP tag is set to YES, additional index files
# will be generated that can be used as input for tools like the
@@ -499,7 +501,7 @@ TOC_EXPAND = NO
# top of each HTML page. The value NO (the default) enables the index and
# the value YES disables it.
-DISABLE_INDEX = NO
+DISABLE_INDEX = YES
# This tag can be used to set the number of enum values (range [1..20])
# that doxygen will group on one line in the generated HTML documentation.
diff --git a/ndb/docs/doxygen/Doxyfile.test b/ndb/docs/doxygen/Doxyfile.test
index 34ee21873ff..801c82cf380 100644
--- a/ndb/docs/doxygen/Doxyfile.test
+++ b/ndb/docs/doxygen/Doxyfile.test
@@ -14,6 +14,8 @@
# General configuration options
#---------------------------------------------------------------------------
+DETAILS_AT_TOP = YES
+
# The PROJECT_NAME tag is a single word (or a sequence of words surrounded
# by quotes) that should identify the project.
@@ -52,7 +54,7 @@ EXTRACT_ALL = YES
# If the EXTRACT_PRIVATE tag is set to YES all private members of a class
# will be included in the documentation.
-EXTRACT_PRIVATE = YES
+EXTRACT_PRIVATE = NO
# If the EXTRACT_STATIC tag is set to YES all static members of a file
# will be included in the documentation.
@@ -157,7 +159,7 @@ HIDE_SCOPE_NAMES = NO
# will generate a verbatim copy of the header file for each class for
# which an include is specified. Set to NO to disable this.
-VERBATIM_HEADERS = YES
+VERBATIM_HEADERS = NO
# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen
# will put list of the files that are included by a file in the documentation
@@ -201,7 +203,7 @@ DISTRIBUTE_GROUP_DOC = NO
# The TAB_SIZE tag can be used to set the number of spaces in a tab.
# Doxygen uses this value to replace tabs by spaces in code fragments.
-TAB_SIZE = 8
+TAB_SIZE = 2
# The GENERATE_TODOLIST tag can be used to enable (YES) or
# disable (NO) the todo list. This list is created by putting \todo
@@ -256,7 +258,7 @@ OPTIMIZE_OUTPUT_FOR_C = NO
# at the bottom of the documentation of classes and structs. If set to YES the
# list will mention the files that were used to generate the documentation.
-SHOW_USED_FILES = YES
+SHOW_USED_FILES = NO
#---------------------------------------------------------------------------
# configuration options related to warning and progress messages
@@ -469,7 +471,7 @@ HTML_STYLESHEET =
# files or namespaces will be aligned in HTML using tables. If set to
# NO a bullet list will be used.
-HTML_ALIGN_MEMBERS = YES
+HTML_ALIGN_MEMBERS = NO
# If the GENERATE_HTMLHELP tag is set to YES, additional index files
# will be generated that can be used as input for tools like the
@@ -499,7 +501,7 @@ TOC_EXPAND = NO
# top of each HTML page. The value NO (the default) enables the index and
# the value YES disables it.
-DISABLE_INDEX = NO
+DISABLE_INDEX = YES
# This tag can be used to set the number of enum values (range [1..20])
# that doxygen will group on one line in the generated HTML documentation.
diff --git a/ndb/docs/doxygen/predoxy.pl b/ndb/docs/doxygen/predoxy.pl
index 8dad1d964d0..3994054dcf6 100755
--- a/ndb/docs/doxygen/predoxy.pl
+++ b/ndb/docs/doxygen/predoxy.pl
@@ -18,7 +18,7 @@ print OUTFILE<<EOT;
<center>
EOT
print OUTFILE "Documentation generated " . localtime() .
- " from NDB Cluster source files.";
+ " from mysql source files.";
print OUTFILE<<EOT;
<br>
&copy; 2003-2004
diff --git a/ndb/examples/configurations/demos.tar b/ndb/examples/configurations/demos.tar
deleted file mode 100644
index d8cae90ec5b..00000000000
--- a/ndb/examples/configurations/demos.tar
+++ /dev/null
Binary files differ
diff --git a/ndb/examples/ndbapi_async_example/Makefile b/ndb/examples/ndbapi_async_example/Makefile
deleted file mode 100644
index f30398f9587..00000000000
--- a/ndb/examples/ndbapi_async_example/Makefile
+++ /dev/null
@@ -1,34 +0,0 @@
--include .defs.mk
-#NDB_OS = OS_YOU_ARE_RUNNING_ON
-#NDB_OS = LINUX
-#You need to set the NDB_OS variable here (LINUX, SOLARIS, MACOSX)
-TARGET = ndbapi_async
-SRCS = ndbapi_async.cpp
-OBJS = ndbapi_async.o
-CC = g++
-CFLAGS = -c -Wall -fno-rtti -D$(NDB_OS)
-DEBUG =
-LFLAGS = -Wall
-INCLUDE_DIR = ../../include
-LIB_DIR = ../../lib
-ifeq ($(NDB_OS), SOLARIS)
-# Here is the definition of system libraries necessary for Solaris 7
-SYS_LIB = -lpthread -lsocket -lnsl -lrt
-endif
-ifeq ($(NDB_OS), LINUX)
-# Here is the definition of system libraries necessary for Linux 2.4
-SYS_LIB = -lpthread
-endif
-ifeq ($(NDB_OS), MACOSX)
-# Here is the definition of system libraries necessary for Mac OS X
-SYS_LIB =
-endif
-
-$(TARGET): $(OBJS)
- $(CC) $(LFLAGS) -L$(LIB_DIR) -lNDB_API $(OBJS) $(SYS_LIB) -o $(TARGET)
-
-$(TARGET).o: $(SRCS)
- $(CC) $(CFLAGS) -I$(INCLUDE_DIR) -I$(INCLUDE_DIR)/ndbapi $(SRCS)
-
-clean:
- rm -f *.o $(TARGET)
diff --git a/ndb/examples/ndbapi_example1/Makefile b/ndb/examples/ndbapi_example1/Makefile
deleted file mode 100644
index eb0142ce673..00000000000
--- a/ndb/examples/ndbapi_example1/Makefile
+++ /dev/null
@@ -1,33 +0,0 @@
--include .defs.mk
-#NDB_OS = OS_YOU_ARE_RUNNING_ON
-#You need to set the NDB_OS variable here
-TARGET = ndbapi_example1
-SRCS = ndbapi_example1.cpp
-OBJS = ndbapi_example1.o
-CXX = g++
-CFLAGS = -c -Wall -fno-rtti -fno-exceptions
-DEBUG =
-LFLAGS = -Wall
-INCLUDE_DIR = ../../include
-LIB_DIR = ../../lib
-ifeq ($(NDB_OS), SOLARIS)
-# Here is the definition of system libraries necessary for Solaris 7
-SYS_LIB =
-endif
-ifeq ($(NDB_OS), LINUX)
-# Here is the definition of system libraries necessary for Linux 2.4
-SYS_LIB =
-endif
-ifeq ($(NDB_OS), MACOSX)
-# Here is the definition of system libraries necessary for Mac OS X
-SYS_LIB =
-endif
-
-$(TARGET): $(OBJS)
- $(CXX) $(LFLAGS) -L$(LIB_DIR) $(OBJS) -lNDB_API $(SYS_LIB) -o $(TARGET)
-
-$(TARGET).o: $(SRCS)
- $(CXX) $(CFLAGS) -I$(INCLUDE_DIR) -I$(INCLUDE_DIR)/ndbapi $(SRCS)
-
-clean:
- rm -f *.o $(TARGET)
diff --git a/ndb/examples/ndbapi_example1/ndbapi_example1.cpp b/ndb/examples/ndbapi_example1/ndbapi_example1.cpp
deleted file mode 100644
index 03a84aa249b..00000000000
--- a/ndb/examples/ndbapi_example1/ndbapi_example1.cpp
+++ /dev/null
@@ -1,194 +0,0 @@
-/* Copyright (C) 2003 MySQL AB
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-
-//
-// ndbapi_example1.cpp: Using synchronous transactions in NDB API
-//
-// Correct output from this program is:
-//
-// ATTR1 ATTR2
-// 0 10
-// 1 1
-// 2 12
-// Detected that deleted tuple doesn't exist!
-// 4 14
-// 5 5
-// 6 16
-// 7 7
-// 8 18
-// 9 9
-
-#include <NdbApi.hpp>
-
-// Used for cout
-#include <stdio.h>
-#include <iostream>
-
-#define APIERROR(error) \
- { std::cout << "Error in " << __FILE__ << ", line:" << __LINE__ << ", code:" \
- << error.code << ", msg: " << error.message << "." << std::endl; \
- exit(-1); }
-
-int main()
-{
- ndb_init();
- Ndb* myNdb = new Ndb( "TEST_DB_1" ); // Object representing the database
- NdbDictionary::Table myTable;
- NdbDictionary::Column myColumn;
-
- NdbConnection *myConnection; // For other transactions
- NdbOperation *myOperation; // For other operations
- NdbRecAttr *myRecAttr; // Result of reading attribute value
-
- /********************************************
- * Initialize NDB and wait until it's ready *
- ********************************************/
- if (myNdb->init() == -1) {
- APIERROR(myNdb->getNdbError());
- exit(-1);
- }
-
- if (myNdb->waitUntilReady(30) != 0) {
- std::cout << "NDB was not ready within 30 secs." << std::endl;
- exit(-1);
- }
-
- NdbDictionary::Dictionary* myDict = myNdb->getDictionary();
-
- /*********************************************************
- * Create a table named MYTABLENAME if it does not exist *
- *********************************************************/
- if (myDict->getTable("MYTABLENAME") != NULL) {
- std::cout << "NDB already has example table: MYTABLENAME." << std::endl;
- exit(-1);
- }
-
- myTable.setName("MYTABLENAME");
-
- myColumn.setName("ATTR1");
- myColumn.setType(NdbDictionary::Column::Unsigned);
- myColumn.setLength(1);
- myColumn.setPrimaryKey(true);
- myColumn.setNullable(false);
- myTable.addColumn(myColumn);
-
- myColumn.setName("ATTR2");
- myColumn.setType(NdbDictionary::Column::Unsigned);
- myColumn.setLength(1);
- myColumn.setPrimaryKey(false);
- myColumn.setNullable(false);
- myTable.addColumn(myColumn);
-
- if (myDict->createTable(myTable) == -1)
- APIERROR(myDict->getNdbError());
-
- /**************************************************************************
- * Using 5 transactions, insert 10 tuples in table: (0,0),(1,1),...,(9,9) *
- **************************************************************************/
- for (int i = 0; i < 5; i++) {
- myConnection = myNdb->startTransaction();
- if (myConnection == NULL) APIERROR(myNdb->getNdbError());
-
- myOperation = myConnection->getNdbOperation("MYTABLENAME");
- if (myOperation == NULL) APIERROR(myConnection->getNdbError());
-
- myOperation->insertTuple();
- myOperation->equal("ATTR1", i);
- myOperation->setValue("ATTR2", i);
-
- myOperation = myConnection->getNdbOperation("MYTABLENAME");
- if (myOperation == NULL) APIERROR(myConnection->getNdbError());
-
- myOperation->insertTuple();
- myOperation->equal("ATTR1", i+5);
- myOperation->setValue("ATTR2", i+5);
-
- if (myConnection->execute( Commit ) == -1)
- APIERROR(myConnection->getNdbError());
-
- myNdb->closeTransaction(myConnection);
- }
-
- /*****************************************************************
- * Update the second attribute in half of the tuples (adding 10) *
- *****************************************************************/
- for (int i = 0; i < 10; i+=2) {
- myConnection = myNdb->startTransaction();
- if (myConnection == NULL) APIERROR(myNdb->getNdbError());
-
- myOperation = myConnection->getNdbOperation("MYTABLENAME");
- if (myOperation == NULL) APIERROR(myConnection->getNdbError());
-
- myOperation->updateTuple();
- myOperation->equal( "ATTR1", i );
- myOperation->setValue( "ATTR2", i+10);
-
- if( myConnection->execute( Commit ) == -1 )
- APIERROR(myConnection->getNdbError());
-
- myNdb->closeTransaction(myConnection);
- }
-
- /*************************************************
- * Delete one tuple (the one with primary key 3) *
- *************************************************/
- myConnection = myNdb->startTransaction();
- if (myConnection == NULL) APIERROR(myNdb->getNdbError());
-
- myOperation = myConnection->getNdbOperation("MYTABLENAME");
- if (myOperation == NULL)
- APIERROR(myConnection->getNdbError());
-
- myOperation->deleteTuple();
- myOperation->equal( "ATTR1", 3 );
-
- if (myConnection->execute(Commit) == -1)
- APIERROR(myConnection->getNdbError());
-
- myNdb->closeTransaction(myConnection);
-
- /*****************************
- * Read and print all tuples *
- *****************************/
- std::cout << "ATTR1 ATTR2" << std::endl;
-
- for (int i = 0; i < 10; i++) {
- myConnection = myNdb->startTransaction();
- if (myConnection == NULL) APIERROR(myNdb->getNdbError());
-
- myOperation = myConnection->getNdbOperation("MYTABLENAME");
- if (myOperation == NULL) APIERROR(myConnection->getNdbError());
-
- myOperation->readTuple();
- myOperation->equal("ATTR1", i);
-
- myRecAttr = myOperation->getValue("ATTR2", NULL);
- if (myRecAttr == NULL) APIERROR(myConnection->getNdbError());
-
- if(myConnection->execute( Commit ) == -1)
- if (i == 3) {
- std::cout << "Detected that deleted tuple doesn't exist!" << std::endl;
- } else {
- APIERROR(myConnection->getNdbError());
- }
-
- if (i != 3) {
- printf(" %2d %2d\n", i, myRecAttr->u_32_value());
- }
- myNdb->closeTransaction(myConnection);
- }
- delete myNdb;
-}
diff --git a/ndb/examples/ndbapi_example2/Makefile b/ndb/examples/ndbapi_example2/Makefile
deleted file mode 100644
index 17b2b1528fc..00000000000
--- a/ndb/examples/ndbapi_example2/Makefile
+++ /dev/null
@@ -1,33 +0,0 @@
--include .defs.mk
-#NDB_OS = OS_YOU_ARE_RUNNING_ON
-#You need to set the NDB_OS variable here
-TARGET = ndbapi_example2
-SRCS = ndbapi_example2.cpp
-OBJS = ndbapi_example2.o
-CXX = g++
-CFLAGS = -c -Wall -fno-rtti -fno-exceptions
-DEBUG =
-LFLAGS = -Wall
-INCLUDE_DIR = ../../include
-LIB_DIR = ../../lib
-ifeq ($(NDB_OS), SOLARIS)
-# Here is the definition of system libraries necessary for Solaris 7
-SYS_LIB =
-endif
-ifeq ($(NDB_OS), LINUX)
-# Here is the definition of system libraries necessary for Linux 2.4
-SYS_LIB =
-endif
-ifeq ($(NDB_OS), MACOSX)
-# Here is the definition of system libraries necessary for Mac OS X
-SYS_LIB =
-endif
-
-$(TARGET): $(OBJS)
- $(CXX) $(LFLAGS) -L$(LIB_DIR) $(OBJS) -lNDB_API $(SYS_LIB) -o $(TARGET)
-
-$(TARGET).o: $(SRCS)
- $(CXX) $(CFLAGS) -I$(INCLUDE_DIR) -I$(INCLUDE_DIR)/ndbapi $(SRCS)
-
-clean:
- rm -f *.o $(TARGET)
diff --git a/ndb/examples/ndbapi_example3/Makefile b/ndb/examples/ndbapi_example3/Makefile
deleted file mode 100644
index bd6f0182aa4..00000000000
--- a/ndb/examples/ndbapi_example3/Makefile
+++ /dev/null
@@ -1,33 +0,0 @@
--include .defs.mk
-#NDB_OS = OS_YOU_ARE_RUNNING_ON
-#You need to set the NDB_OS variable here
-TARGET = ndbapi_example3
-SRCS = ndbapi_example3.cpp
-OBJS = ndbapi_example3.o
-CXX = g++
-CFLAGS = -c -Wall -fno-rtti -fno-exceptions
-DEBUG =
-LFLAGS = -Wall
-INCLUDE_DIR = ../../include
-LIB_DIR = ../../lib
-ifeq ($(NDB_OS), SOLARIS)
-# Here is the definition of system libraries necessary for Solaris 7
-SYS_LIB =
-endif
-ifeq ($(NDB_OS), LINUX)
-# Here is the definition of system libraries necessary for Linux 2.4
-SYS_LIB =
-endif
-ifeq ($(NDB_OS), MACOSX)
-# Here is the definition of system libraries necessary for Mac OS X
-SYS_LIB =
-endif
-
-$(TARGET): $(OBJS)
- $(CXX) $(LFLAGS) -L$(LIB_DIR) $(OBJS) -lNDB_API $(SYS_LIB) -o $(TARGET)
-
-$(TARGET).o: $(SRCS)
- $(CXX) $(CFLAGS) -I$(INCLUDE_DIR) -I$(INCLUDE_DIR)/ndbapi $(SRCS)
-
-clean:
- rm -f *.o $(TARGET)
diff --git a/ndb/examples/ndbapi_example4/Makefile b/ndb/examples/ndbapi_example4/Makefile
deleted file mode 100644
index b0ce852d347..00000000000
--- a/ndb/examples/ndbapi_example4/Makefile
+++ /dev/null
@@ -1,33 +0,0 @@
--include .defs.mk
-#NDB_OS = OS_YOU_ARE_RUNNING_ON
-#You need to set the NDB_OS variable here
-TARGET = ndbapi_example4
-SRCS = ndbapi_example4.cpp
-OBJS = ndbapi_example4.o
-CXX = g++
-CFLAGS = -c -Wall -fno-rtti -fno-exceptions
-DEBUG =
-LFLAGS = -Wall
-INCLUDE_DIR = ../../include
-LIB_DIR = ../../lib
-ifeq ($(NDB_OS), SOLARIS)
-# Here is the definition of system libraries necessary for Solaris 7
-SYS_LIB =
-endif
-ifeq ($(NDB_OS), LINUX)
-# Here is the definition of system libraries necessary for Linux 2.4
-SYS_LIB =
-endif
-ifeq ($(NDB_OS), MACOSX)
-# Here is the definition of system libraries necessary for Mac OS X
-SYS_LIB =
-endif
-
-$(TARGET): $(OBJS)
- $(CXX) $(LFLAGS) -L$(LIB_DIR) $(OBJS) -lNDB_API $(SYS_LIB) -o $(TARGET)
-
-$(TARGET).o: $(SRCS)
- $(CXX) $(CFLAGS) -I$(INCLUDE_DIR) -I$(INCLUDE_DIR)/ndbapi $(SRCS)
-
-clean:
- rm -f *.o $(TARGET)
diff --git a/ndb/examples/ndbapi_example4/ndbapi_example4.cpp b/ndb/examples/ndbapi_example4/ndbapi_example4.cpp
deleted file mode 100644
index fcb770d49e9..00000000000
--- a/ndb/examples/ndbapi_example4/ndbapi_example4.cpp
+++ /dev/null
@@ -1,253 +0,0 @@
-/* Copyright (C) 2003 MySQL AB
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-
-//
-// ndbapi_example4.cpp: Using secondary indexes in NDB API
-//
-// Correct output from this program is:
-//
-// ATTR1 ATTR2
-// 0 10
-// 1 1
-// 2 12
-// Detected that deleted tuple doesn't exist!
-// 4 14
-// 5 5
-// 6 16
-// 7 7
-// 8 18
-// 9 9
-
-#include <NdbApi.hpp>
-
-// Used for cout
-#include <stdio.h>
-#include <iostream>
-
-#define APIERROR(error) \
- { std::cout << "Error in " << __FILE__ << ", line:" << __LINE__ << ", code:" \
- << error.code << ", msg: " << error.message << "." << std::endl; \
- exit(-1); }
-
-int main()
-{
- ndb_init();
- Ndb* myNdb = new Ndb( "TEST_DB_1" ); // Object representing the database
- NdbDictionary::Table myTable;
- NdbDictionary::Column myColumn;
- NdbDictionary::Index myIndex;
-
- NdbConnection *myConnection; // For transactions
- NdbOperation *myOperation; // For primary key operations
- NdbIndexOperation *myIndexOperation; // For index operations
- NdbRecAttr *myRecAttr; // Result of reading attribute value
-
- /********************************************
- * Initialize NDB and wait until it's ready *
- ********************************************/
- if (myNdb->init() == -1) {
- APIERROR(myNdb->getNdbError());
- exit(-1);
- }
-
- if (myNdb->waitUntilReady(30) != 0) {
- std::cout << "NDB was not ready within 30 secs." << std::endl;
- exit(-1);
- }
-
- /*********************************************************
- * Create a table named MYTABLENAME if it does not exist *
- *********************************************************/
- NdbDictionary::Dictionary* myDict = myNdb->getDictionary();
- if (myDict->getTable("MYTABLENAME") != NULL) {
- std::cout << "NDB already has example table: MYTABLENAME." << std::endl;
- exit(-1);
- }
-
- myTable.setName("MYTABLENAME");
-
- myColumn.setName("ATTR1");
- myColumn.setType(NdbDictionary::Column::Unsigned);
- myColumn.setLength(1);
- myColumn.setPrimaryKey(true);
- myColumn.setNullable(false);
- myTable.addColumn(myColumn);
-
- myColumn.setName("ATTR2");
- myColumn.setType(NdbDictionary::Column::Unsigned);
- myColumn.setLength(1);
- myColumn.setPrimaryKey(false);
- myColumn.setNullable(false);
- myTable.addColumn(myColumn);
-
- if (myDict->createTable(myTable) == -1)
- APIERROR(myDict->getNdbError());
-
-
- /**********************************************************
- * Create an index named MYINDEXNAME if it does not exist *
- **********************************************************/
- if (myDict->getIndex("MYINDEXNAME", "MYTABLENAME") != NULL) {
- std::cout << "NDB already has example index: MYINDEXNAME." << std::endl;
- exit(-1);
- }
-
- myIndex.setName("MYINDEXNAME");
- myIndex.setTable("MYTABLENAME");
- myIndex.setType(NdbDictionary::Index::UniqueHashIndex);
- const char* attr_arr[] = {"ATTR2"};
- myIndex.addIndexColumns(1, attr_arr);
-
- if (myDict->createIndex(myIndex) == -1)
- APIERROR(myDict->getNdbError());
-
-
- /**************************************************************************
- * Using 5 transactions, insert 10 tuples in table: (0,0),(1,1),...,(9,9) *
- **************************************************************************/
- for (int i = 0; i < 5; i++) {
- myConnection = myNdb->startTransaction();
- if (myConnection == NULL) APIERROR(myNdb->getNdbError());
-
- myOperation = myConnection->getNdbOperation("MYTABLENAME");
- if (myOperation == NULL) APIERROR(myConnection->getNdbError());
-
- myOperation->insertTuple();
- myOperation->equal("ATTR1", i);
- myOperation->setValue("ATTR2", i);
-
- myOperation = myConnection->getNdbOperation("MYTABLENAME");
- if (myOperation == NULL) APIERROR(myConnection->getNdbError());
-
- myOperation->insertTuple();
- myOperation->equal("ATTR1", i+5);
- myOperation->setValue("ATTR2", i+5);
-
- if (myConnection->execute( Commit ) == -1)
- APIERROR(myConnection->getNdbError());
-
- myNdb->closeTransaction(myConnection);
- }
-
- /*****************************************
- * Read and print all tuples using index *
- *****************************************/
- std::cout << "ATTR1 ATTR2" << std::endl;
-
- for (int i = 0; i < 10; i++) {
- myConnection = myNdb->startTransaction();
- if (myConnection == NULL) APIERROR(myNdb->getNdbError());
-
- myIndexOperation = myConnection->getNdbIndexOperation("MYINDEXNAME",
- "MYTABLENAME");
- if (myIndexOperation == NULL) APIERROR(myConnection->getNdbError());
-
- myIndexOperation->readTuple();
- myIndexOperation->equal("ATTR2", i);
-
- myRecAttr = myIndexOperation->getValue("ATTR1", NULL);
- if (myRecAttr == NULL) APIERROR(myConnection->getNdbError());
-
- if(myConnection->execute( Commit ) != -1)
- printf(" %2d %2d\n", myRecAttr->u_32_value(), i);
- }
- myNdb->closeTransaction(myConnection);
-
- /*****************************************************************
- * Update the second attribute in half of the tuples (adding 10) *
- *****************************************************************/
- for (int i = 0; i < 10; i+=2) {
- myConnection = myNdb->startTransaction();
- if (myConnection == NULL) APIERROR(myNdb->getNdbError());
-
- myIndexOperation = myConnection->getNdbIndexOperation("MYINDEXNAME",
- "MYTABLENAME");
- if (myIndexOperation == NULL) APIERROR(myConnection->getNdbError());
-
- myIndexOperation->updateTuple();
- myIndexOperation->equal( "ATTR2", i );
- myIndexOperation->setValue( "ATTR2", i+10);
-
- if( myConnection->execute( Commit ) == -1 )
- APIERROR(myConnection->getNdbError());
-
- myNdb->closeTransaction(myConnection);
- }
-
- /*************************************************
- * Delete one tuple (the one with primary key 3) *
- *************************************************/
- myConnection = myNdb->startTransaction();
- if (myConnection == NULL) APIERROR(myNdb->getNdbError());
-
- myIndexOperation = myConnection->getNdbIndexOperation("MYINDEXNAME",
- "MYTABLENAME");
- if (myIndexOperation == NULL)
- APIERROR(myConnection->getNdbError());
-
- myIndexOperation->deleteTuple();
- myIndexOperation->equal( "ATTR2", 3 );
-
- if (myConnection->execute(Commit) == -1)
- APIERROR(myConnection->getNdbError());
-
- myNdb->closeTransaction(myConnection);
-
- /*****************************
- * Read and print all tuples *
- *****************************/
- std::cout << "ATTR1 ATTR2" << std::endl;
-
- for (int i = 0; i < 10; i++) {
- myConnection = myNdb->startTransaction();
- if (myConnection == NULL) APIERROR(myNdb->getNdbError());
-
- myOperation = myConnection->getNdbOperation("MYTABLENAME");
- if (myOperation == NULL) APIERROR(myConnection->getNdbError());
-
- myOperation->readTuple();
- myOperation->equal("ATTR1", i);
-
- myRecAttr = myOperation->getValue("ATTR2", NULL);
- if (myRecAttr == NULL) APIERROR(myConnection->getNdbError());
-
- if(myConnection->execute( Commit ) == -1)
- if (i == 3) {
- std::cout << "Detected that deleted tuple doesn't exist!" << std::endl;
- } else {
- APIERROR(myConnection->getNdbError());
- }
-
- if (i != 3) {
- printf(" %2d %2d\n", i, myRecAttr->u_32_value());
- }
- myNdb->closeTransaction(myConnection);
- }
-
- /**************
- * Drop index *
- **************/
- if (myDict->dropIndex("MYINDEXNAME", "MYTABLENAME") == -1)
- APIERROR(myDict->getNdbError());
-
- /**************
- * Drop table *
- **************/
- if (myDict->dropTable("MYTABLENAME") == -1)
- APIERROR(myDict->getNdbError());
-
- delete myNdb;
-}
diff --git a/ndb/examples/ndbapi_example5/Makefile b/ndb/examples/ndbapi_example5/Makefile
deleted file mode 100644
index e2e3f06374a..00000000000
--- a/ndb/examples/ndbapi_example5/Makefile
+++ /dev/null
@@ -1,33 +0,0 @@
--include .defs.mk
-#NDB_OS = OS_YOU_ARE_RUNNING_ON
-#You need to set the NDB_OS variable here
-TARGET = ndbapi_example5
-SRCS = ndbapi_example5.cpp
-OBJS = ndbapi_example5.o
-CXX = g++
-CFLAGS = -c -Wall -fno-rtti -fno-exceptions
-DEBUG =
-LFLAGS = -Wall
-INCLUDE_DIR = ../../include
-LIB_DIR = ../../lib
-ifeq ($(NDB_OS), SOLARIS)
-# Here is the definition of system libraries necessary for Solaris 7
-SYS_LIB =
-endif
-ifeq ($(NDB_OS), LINUX)
-# Here is the definition of system libraries necessary for Linux 2.4
-SYS_LIB =
-endif
-ifeq ($(NDB_OS), MACOSX)
-# Here is the definition of system libraries necessary for Mac OS X
-SYS_LIB =
-endif
-
-$(TARGET): $(OBJS)
- $(CXX) $(LFLAGS) -L$(LIB_DIR) $(OBJS) -lNDB_API $(SYS_LIB) -o $(TARGET)
-
-$(TARGET).o: $(SRCS)
- $(CXX) $(CFLAGS) -I$(INCLUDE_DIR) -I$(INCLUDE_DIR)/ndbapi $(SRCS)
-
-clean:
- rm -f *.o $(TARGET)
diff --git a/ndb/examples/ndbapi_scan_example/Makefile b/ndb/examples/ndbapi_scan_example/Makefile
deleted file mode 100644
index d7f08af4647..00000000000
--- a/ndb/examples/ndbapi_scan_example/Makefile
+++ /dev/null
@@ -1,35 +0,0 @@
--include .defs.mk
-#NDB_OS = OS_YOU_ARE_RUNNING_ON
-#You need to set the NDB_OS variable here (LINUX, SOLARIS, MACOSX)
-#NDB_OS = LINUX
-
-TARGET = ndbapi_scan
-SRCS = ndbapi_scan.cpp
-OBJS = ndbapi_scan.o
-CC = g++
-CFLAGS = -c -Wall -fno-rtti
-DEBUG =
-LFLAGS = -Wall
-INCLUDE_DIR = ../../include
-LIB_DIR = ../../lib
-ifeq ($(NDB_OS), SOLARIS)
-# Here is the definition of system libraries necessary for Solaris 7
-SYS_LIB = -lpthread -lsocket -lnsl -lrt
-endif
-ifeq ($(NDB_OS), LINUX)
-# Here is the definition of system libraries necessary for Linux 2.4
-SYS_LIB = -lpthread
-endif
-ifeq ($(NDB_OS), MACOSX)
-# Here is the definition of system libraries necessary for Mac OS X
-SYS_LIB =
-endif
-
-$(TARGET): $(OBJS)
- $(CC) $(LFLAGS) -L$(LIB_DIR) -lNDB_API $(OBJS) $(SYS_LIB) -o $(TARGET)
-
-$(TARGET).o: $(SRCS)
- $(CC) $(CFLAGS) -I$(INCLUDE_DIR) -I$(INCLUDE_DIR)/ndbapi $(SRCS)
-
-clean:
- rm -f *.o $(TARGET)
diff --git a/ndb/examples/select_all/Makefile b/ndb/examples/select_all/Makefile
deleted file mode 100644
index 2bec205fa99..00000000000
--- a/ndb/examples/select_all/Makefile
+++ /dev/null
@@ -1,33 +0,0 @@
--include .defs.mk
-#NDB_OS = OS_YOU_ARE_RUNNING_ON
-#You need to set the NDB_OS variable here
-TARGET = select_all
-SRCS = select_all.cpp
-OBJS = select_all.o
-CXX = g++
-CFLAGS = -c -Wall -fno-rtti -fno-exceptions
-DEBUG =
-LFLAGS = -Wall
-INCLUDE_DIR = ../../include
-LIB_DIR = ../../lib
-ifeq ($(NDB_OS), SOLARIS)
-# Here is the definition of system libraries necessary for Solaris 7
-SYS_LIB =
-endif
-ifeq ($(NDB_OS), LINUX)
-# Here is the definition of system libraries necessary for Linux 2.4
-SYS_LIB =
-endif
-ifeq ($(NDB_OS), MACOSX)
-# Here is the definition of system libraries necessary for Mac OS X
-SYS_LIB =
-endif
-
-$(TARGET): $(OBJS)
- $(CXX) $(LFLAGS) -L$(LIB_DIR) $(OBJS) -lNDB_API $(SYS_LIB) -o $(TARGET)
-
-$(TARGET).o: $(SRCS)
- $(CXX) $(CFLAGS) -I$(INCLUDE_DIR) -I$(INCLUDE_DIR)/ndbapi $(SRCS)
-
-clean:
- rm -f *.o $(TARGET)
diff --git a/ndb/examples/select_all/select_all.cpp b/ndb/examples/select_all/select_all.cpp
deleted file mode 100644
index 24bb1214bd2..00000000000
--- a/ndb/examples/select_all/select_all.cpp
+++ /dev/null
@@ -1,259 +0,0 @@
-/* Copyright (C) 2003 MySQL AB
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-
-//
-// select_all.cpp: Prints all rows of a table
-//
-// Usage: select_all <table_name>+
-
-#include <NdbApi.hpp>
-
-// Used for cout
-#include <iostream>
-using namespace std;
-#include <stdio.h>
-#include <string.h>
-
-#define APIERROR(error) \
- { cout << "Error in " << __FILE__ << ", line:" << __LINE__ << ", code:" \
- << error.code << ", msg: " << error.message << "." << endl; \
- exit(-1); }
-
-void usage(const char* prg) {
- cout << "Usage: " << prg << " <table name>" << endl;
- cout << "Prints all rows of table named <table name>" << endl;
- exit(0);
-}
-
-/*****************************************************************************
- *************************** Result Set Container ****************************
- *****************************************************************************/
-
-/*
- * Container of NdbRecAttr objects.
- * (NdbRecAttr objects are database rows read by a scan operation.)
- */
-class ResultSetContainer {
-public:
- /**
- * Initialize ResultSetContainer object for table named <tableName>
- * - Allocates memory
- * - Fetches attribute names from NDB Cluster
- */
- void init(NdbDictionary::Dictionary* dict, const char* tableName);
-
- /**
- * Get no of attributes for stored NdbRecAttr objects
- */
- int getNoOfAttributes() const;
-
- /**
- * Get NdbRecAttr object no i
- */
- NdbRecAttr* & getAttrStore(int i);
-
- /**
- * Get attribute name of attribute no i
- */
- const char* getAttrName(int i) const;
-
- /**
- * Print header of rows
- */
- void header() const;
-
-private:
- int m_cols; // No of attributes for stored NdbRecAttr objects
- char **m_names; // Names of attributes
- NdbRecAttr **m_data; // The actual stored NdbRecAttr objects
-};
-
-void ResultSetContainer::init(NdbDictionary::Dictionary * dict,
- const char* tableName)
-{
- // Get Table object from NDB (this contains metadata about all tables)
- const NdbDictionary::Table * tab = dict->getTable(tableName);
-
- // Get table id of the table we are interested in
- if (tab == 0) APIERROR(dict->getNdbError()); // E.g. table didn't exist
-
- // Get no of attributes and allocate memory
- m_cols = tab->getNoOfColumns();
- m_names = new char* [m_cols];
- m_data = new NdbRecAttr* [m_cols];
-
- // Store all attribute names for the table
- for (int i = 0; i < m_cols; i++) {
- m_names[i] = new char[255];
- BaseString::snprintf(m_names[i], 255, "%s", tab->getColumn(i)->getName());
- }
-}
-
-int ResultSetContainer::getNoOfAttributes() const {return m_cols;}
-NdbRecAttr*& ResultSetContainer::getAttrStore(int i) {return m_data[i];}
-const char* ResultSetContainer::getAttrName(int i) const {return m_names[i];}
-
-/*****************************************************************************
- ********************************** MAIN ***********************************
- *****************************************************************************/
-
-int main(int argc, const char** argv)
-{
- ndb_init();
- Ndb* myNdb = new Ndb("ndbapi_example4"); // Object representing the database
- NdbConnection* myNdbConnection; // For transactions
- NdbOperation* myNdbOperation; // For operations
- int check;
-
- if (argc != 2) {
- usage(argv[0]);
- exit(0);
- }
- const char* tableName = argv[1];
-
- /*******************************************
- * Initialize NDB and wait until its ready *
- *******************************************/
- if (myNdb->init() == -1) {
- APIERROR(myNdb->getNdbError());
- exit(-1);
- }
-
- if (myNdb->waitUntilReady(30) != 0) {
- cout << "NDB was not ready within 30 secs." << endl;
- exit(-1);
- }
-
- /***************************
- * Define and execute scan *
- ***************************/
- cout << "Select * from " << tableName << endl;
-
- ResultSetContainer * container = new ResultSetContainer;
- container->init(myNdb->getDictionary(), tableName);
-
- myNdbConnection = myNdb->startTransaction();
- if (myNdbConnection == NULL) APIERROR(myNdb->getNdbError());
-
- myNdbOperation = myNdbConnection->getNdbOperation(tableName);
- if (myNdbOperation == NULL) APIERROR(myNdbConnection->getNdbError());
-
- // Define the operation to be an 'openScanRead' operation.
- check = myNdbOperation->openScanRead(1);
- if (check == -1) APIERROR(myNdbConnection->getNdbError());
-
- // Set interpreted program to just be the single instruction
- // 'interpret_exit_ok'. (This approves all rows of the table.)
- if (myNdbOperation->interpret_exit_ok() == -1)
- APIERROR(myNdbConnection->getNdbError());
-
- // Get all attribute values of the row
- for(int i = 0; i < container->getNoOfAttributes(); i++){
- if((container->getAttrStore(i) =
- myNdbOperation->getValue(container->getAttrName(i))) == 0)
- APIERROR(myNdbConnection->getNdbError());
- }
-
- // Execute scan operation
- check = myNdbConnection->executeScan();
- if (check == -1) APIERROR(myNdbConnection->getNdbError());
-
- /****************
- * Print header *
- ****************/
- for (int i = 0; i < container->getNoOfAttributes(); i++)
- cout << container->getAttrName(i) << "\t";
-
- cout << endl;
- for (int i = 0; i < container->getNoOfAttributes(); i++) {
- for (int j = strlen(container->getAttrName(i)); j > 0; j--)
- cout << "-";
- cout << "\t";
- }
- cout << "\n";
-
- /**************
- * Scan table *
- **************/
- int eof;
- int rows = 0;
-
- // Print all rows of table
- while ((eof = myNdbConnection->nextScanResult()) == 0) {
- rows++;
-
- for (int i = 0; i < container->getNoOfAttributes(); i++) {
- if (container->getAttrStore(i)->isNULL()) {
- cout << "NULL";
- } else {
-
-      // Element size of value (No of bytes per element in attribute value)
- const int size = container->getAttrStore(i)->attrSize();
-
- // No of elements in an array attribute (Is 1 if non-array attribute)
- const int aSize = container->getAttrStore(i)->arraySize();
-
- switch(container->getAttrStore(i)->attrType()){
- case UnSigned:
- switch(size) {
- case 8: cout << container->getAttrStore(i)->u_64_value(); break;
- case 4: cout << container->getAttrStore(i)->u_32_value(); break;
- case 2: cout << container->getAttrStore(i)->u_short_value(); break;
- case 1: cout << (unsigned) container->getAttrStore(i)->u_char_value();
- break;
- default: cout << "Unknown size" << endl;
- }
- break;
-
- case Signed:
- switch(size) {
- case 8: cout << container->getAttrStore(i)->int64_value(); break;
- case 4: cout << container->getAttrStore(i)->int32_value(); break;
- case 2: cout << container->getAttrStore(i)->short_value(); break;
- case 1: cout << (int) container->getAttrStore(i)->char_value(); break;
- default: cout << "Unknown size" << endl;
- }
- break;
-
- case String:
- {
- char* buf = new char[aSize+1];
- memcpy(buf, container->getAttrStore(i)->aRef(), aSize);
- buf[aSize] = 0;
- cout << buf;
- delete [] buf;
- }
- break;
-
- case Float:
- cout << container->getAttrStore(i)->float_value();
- break;
-
- default:
- cout << "Unknown";
- break;
- }
- }
- cout << "\t";
- }
- cout << endl;
- }
- if (eof == -1) APIERROR(myNdbConnection->getNdbError());
-
- myNdb->closeTransaction(myNdbConnection);
-
- cout << "Selected " << rows << " rows." << endl;
-}
diff --git a/ndb/include/Makefile.am b/ndb/include/Makefile.am
index ef4e9552566..10f297492e9 100644
--- a/ndb/include/Makefile.am
+++ b/ndb/include/Makefile.am
@@ -2,6 +2,7 @@
include $(top_srcdir)/ndb/config/common.mk.am
ndbinclude_HEADERS = \
+ndb_constants.h \
ndb_init.h \
ndb_types.h \
ndb_version.h
@@ -11,8 +12,7 @@ ndbapi/ndbapi_limits.h \
ndbapi/ndb_opt_defaults.h \
ndbapi/Ndb.hpp \
ndbapi/NdbApi.hpp \
-ndbapi/NdbConnection.hpp \
-ndbapi/NdbCursorOperation.hpp \
+ndbapi/NdbTransaction.hpp \
ndbapi/NdbDictionary.hpp \
ndbapi/NdbError.hpp \
ndbapi/NdbEventOperation.hpp \
@@ -23,7 +23,6 @@ ndbapi/NdbBlob.hpp \
ndbapi/NdbPool.hpp \
ndbapi/NdbRecAttr.hpp \
ndbapi/NdbReceiver.hpp \
-ndbapi/NdbResultSet.hpp \
ndbapi/NdbScanFilter.hpp \
ndbapi/NdbScanOperation.hpp \
ndbapi/NdbIndexScanOperation.hpp \
@@ -33,7 +32,8 @@ mgmapiinclude_HEADERS = \
mgmapi/mgmapi.h \
mgmapi/mgmapi_debug.h \
mgmapi/mgmapi_config_parameters.h \
-mgmapi/mgmapi_config_parameters_debug.h
+mgmapi/mgmapi_config_parameters_debug.h \
+mgmapi/ndb_logevent.h
noinst_HEADERS = \
ndb_global.h \
@@ -46,3 +46,6 @@ dist-hook:
-rm -rf `find $(distdir) -type d -name SCCS`
windoze-dsp:
+
+# Don't update the files from bitkeeper
+%::SCCS/s.%
diff --git a/ndb/include/debugger/EventLogger.hpp b/ndb/include/debugger/EventLogger.hpp
index ddf21b79f5f..6308cf25465 100644
--- a/ndb/include/debugger/EventLogger.hpp
+++ b/ndb/include/debugger/EventLogger.hpp
@@ -17,12 +17,12 @@
#ifndef EVENTLOGGER_H
#define EVENTLOGGER_H
-#include <Logger.hpp>
-#include <FileLogHandler.hpp>
-#include <GrepError.hpp>
-#include <kernel_types.h>
+#include <logger/Logger.hpp>
+#include <logger/FileLogHandler.hpp>
+#include "GrepError.hpp"
+#include <kernel/kernel_types.h>
#include <kernel/LogLevel.hpp>
-#include <signaldata/EventReport.hpp>
+#include <kernel/signaldata/EventReport.hpp>
class EventLoggerBase {
public:
@@ -39,11 +39,14 @@ public:
* threshold - is in range [0-15]
* severity - DEBUG to ALERT (Type of log message)
*/
+ typedef void (* EventTextFunction)(char *,size_t,const Uint32*);
+
struct EventRepLogLevelMatrix {
- EventReport::EventType eventType;
- LogLevel::EventCategory eventCategory;
- Uint32 threshold;
- Logger::LoggerLevel severity;
+ Ndb_logevent_type eventType;
+ LogLevel::EventCategory eventCategory;
+ Uint32 threshold;
+ Logger::LoggerLevel severity;
+ EventTextFunction textF;
};
static const EventRepLogLevelMatrix matrix[];
@@ -51,7 +54,8 @@ public:
static int event_lookup(int eventType,
LogLevel::EventCategory &cat,
Uint32 &threshold,
- Logger::LoggerLevel &severity);
+ Logger::LoggerLevel &severity,
+ EventTextFunction &textF);
};
/**
@@ -130,17 +134,18 @@ public:
* @param nodeId the node id of event origin.
*/
virtual void log(int, const Uint32*, NodeId = 0,const class LogLevel * = 0);
+
/**
* Returns the event text for the specified event report type.
*
- * @param type the event type.
+ * @param textF print function for the event
* @param theData the event data.
* @param nodeId a node id.
* @return the event report text.
*/
static const char* getText(char * dst, size_t dst_len,
- int type,
+ EventTextFunction textF,
const Uint32* theData, NodeId nodeId = 0);
/**
diff --git a/ndb/include/debugger/SignalLoggerManager.hpp b/ndb/include/debugger/SignalLoggerManager.hpp
index 742bf7d294e..d212329bf78 100644
--- a/ndb/include/debugger/SignalLoggerManager.hpp
+++ b/ndb/include/debugger/SignalLoggerManager.hpp
@@ -87,7 +87,7 @@ public:
/**
* Generic messages in the signal log
*/
- void log(BlockNumber bno, const char * msg);
+ void log(BlockNumber bno, const char * msg, ...);
/**
* LogModes
diff --git a/ndb/include/kernel/AttributeDescriptor.hpp b/ndb/include/kernel/AttributeDescriptor.hpp
index 071d45e2607..2fe7c9f0973 100644
--- a/ndb/include/kernel/AttributeDescriptor.hpp
+++ b/ndb/include/kernel/AttributeDescriptor.hpp
@@ -19,54 +19,49 @@
class AttributeDescriptor {
friend class Dbdict;
+ friend class Dbtc;
+ friend class Dbacc;
friend class Dbtup;
friend class Dbtux;
-
+ friend class SimulatedBlock;
+
private:
static void setType(Uint32 &, Uint32 type);
static void setSize(Uint32 &, Uint32 size);
static void setArray(Uint32 &, Uint32 arraySize);
- static void setOriginal(Uint32 &, Uint32 original);
static void setNullable(Uint32 &, Uint32 nullable);
- static void setDGroup(Uint32 &, Uint32 dgroup);
static void setDKey(Uint32 &, Uint32 dkey);
static void setPrimaryKey(Uint32 &, Uint32 dkey);
- static void setStoredInTup(Uint32 &, Uint32 storedInTup);
static void setDynamic(Uint32 &, Uint32 dynamicInd);
static Uint32 getType(const Uint32 &);
static Uint32 getSize(const Uint32 &);
+ static Uint32 getSizeInBytes(const Uint32 &);
static Uint32 getSizeInWords(const Uint32 &);
static Uint32 getArrayType(const Uint32 &);
static Uint32 getArraySize(const Uint32 &);
- static Uint32 getOriginal(const Uint32 &);
static Uint32 getNullable(const Uint32 &);
- static Uint32 getDGroup(const Uint32 &);
static Uint32 getDKey(const Uint32 &);
static Uint32 getPrimaryKey(const Uint32 &);
- static Uint32 getStoredInTup(const Uint32 &);
static Uint32 getDynamic(const Uint32 &);
};
/**
*
* a = Array type - 2 Bits -> Max 3 (Bit 0-1)
- * t = Attribute type - 2 Bits -> Max 3 (Bit 2-3)
- * s = Attribute size - 3 Bits -> Max 7 (Bit 4-6)
- * o = Original attribute - 1 Bit 7
- * n = Nullable - 1 Bit 8
- * ? = Stored in tup - 1 Bit 9
- * d = Disk based - 1 Bit 10
- * g = Distribution Group Ind- 1 Bit 11
- * k = Distribution Key Ind - 1 Bit 12
- * r = Distribution group sz - 1 Bit 13
+ * t = Attribute type - 5 Bits -> Max 31 (Bit 2-6)
+ * s = Attribute size - 3 Bits -> Max 7 (Bit 8-10)
+ * d = Disk based - 1 Bit 11
+ * n = Nullable - 1 Bit 12
+ * k = Distribution Key Ind - 1 Bit 13
* p = Primary key attribute - 1 Bit 14
* y = Dynamic attribute - 1 Bit 15
* z = Array size - 16 Bits -> Max 65535 (Bit 16-31)
*
* 1111111111222222222233
* 01234567890123456789012345678901
- * aattsss n dgkrpyzzzzzzzzzzzzzzzz
+ * aattttt sssdnkpyzzzzzzzzzzzzzzzz
+ * aattsss n d k pyzzzzzzzzzzzzzzzz [ old format ]
*
*/
@@ -74,21 +69,17 @@ private:
#define AD_ARRAY_TYPE_MASK (3)
#define AD_TYPE_SHIFT (2)
-#define AD_TYPE_MASK (3)
+#define AD_TYPE_MASK (31)
-#define AD_SIZE_SHIFT (4)
+#define AD_SIZE_SHIFT (8)
#define AD_SIZE_MASK (7)
+#define AD_SIZE_IN_BYTES_SHIFT (3)
#define AD_SIZE_IN_WORDS_OFFSET (31)
#define AD_SIZE_IN_WORDS_SHIFT (5)
-#define AD_ORIGINAL_SHIFT (8)
-#define AD_NULLABLE_SHIFT (8)
-#define AD_TUP_STORED_SHIFT (9)
-
-#define AD_DISTR_GROUP_SHIFT (11)
-#define AD_DISTR_KEY_SHIFT (12)
-#define AD_DISTR_GROUP_SZ (13)
+#define AD_NULLABLE_SHIFT (12)
+#define AD_DISTR_KEY_SHIFT (13)
#define AD_PRIMARY_KEY (14)
#define AD_DYNAMIC (15)
@@ -130,20 +121,6 @@ AttributeDescriptor::setNullable(Uint32 & desc, Uint32 nullable){
inline
void
-AttributeDescriptor::setOriginal(Uint32 & desc, Uint32 original){
- ASSERT_BOOL(original, "AttributeDescriptor::setOriginal");
- desc |= (original << AD_ORIGINAL_SHIFT);
-}
-
-inline
-void
-AttributeDescriptor::setDGroup(Uint32 & desc, Uint32 dgroup){
- ASSERT_BOOL(dgroup, "AttributeDescriptor::setDGroup");
- desc |= (dgroup << AD_DISTR_GROUP_SHIFT);
-}
-
-inline
-void
AttributeDescriptor::setDKey(Uint32 & desc, Uint32 dkey){
ASSERT_BOOL(dkey, "AttributeDescriptor::setDKey");
desc |= (dkey << AD_DISTR_KEY_SHIFT);
@@ -158,13 +135,6 @@ AttributeDescriptor::setPrimaryKey(Uint32 & desc, Uint32 dkey){
inline
void
-AttributeDescriptor::setStoredInTup(Uint32 & desc, Uint32 storedInTup){
- ASSERT_BOOL(storedInTup, "AttributeDescriptor::setStoredInTup");
- desc |= (storedInTup << AD_TUP_STORED_SHIFT);
-}
-
-inline
-void
AttributeDescriptor::setDynamic(Uint32 & desc, Uint32 dynamic){
ASSERT_BOOL(dynamic, "AttributeDescriptor::setDynamic");
desc |= (dynamic << AD_DYNAMIC);
@@ -187,6 +157,13 @@ AttributeDescriptor::getSize(const Uint32 & desc){
inline
Uint32
+AttributeDescriptor::getSizeInBytes(const Uint32 & desc){
+ return (getArraySize(desc) << getSize(desc))
+ >> AD_SIZE_IN_BYTES_SHIFT;
+}
+
+inline
+Uint32
AttributeDescriptor::getSizeInWords(const Uint32 & desc){
return ((getArraySize(desc) << getSize(desc))
+ AD_SIZE_IN_WORDS_OFFSET)
@@ -213,18 +190,6 @@ AttributeDescriptor::getNullable(const Uint32 & desc){
inline
Uint32
-AttributeDescriptor::getOriginal(const Uint32 & desc){
- return (desc >> AD_ORIGINAL_SHIFT) & 1;
-}
-
-inline
-Uint32
-AttributeDescriptor::getDGroup(const Uint32 & desc){
- return (desc >> AD_DISTR_GROUP_SHIFT) & 1;
-}
-
-inline
-Uint32
AttributeDescriptor::getDKey(const Uint32 & desc){
return (desc >> AD_DISTR_KEY_SHIFT) & 1;
}
@@ -241,10 +206,4 @@ AttributeDescriptor::getDynamic(const Uint32 & desc){
return (desc >> AD_DYNAMIC) & 1;
}
-inline
-Uint32
-AttributeDescriptor::getStoredInTup(const Uint32 & desc){
- return (desc >> AD_TUP_STORED_SHIFT) & 1;
-}
-
#endif
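
To make the reworked descriptor layout above easier to follow, here is a standalone sketch that packs a descriptor word and recomputes the byte and word sizes using the shift and mask values from this hunk (size code in bits 8-10, nullable at bit 12, array size in bits 16-31 per the layout comment). It only mirrors the arithmetic of the private accessors for illustration; it is not part of the header, and the array-size shift of 16 is inferred from the layout comment rather than quoted from the source.

// Standalone illustration of the new AttributeDescriptor word layout.
// Shift/mask values are copied from the hunk above; AD_ARRAY_SIZE_SHIFT = 16
// is inferred from the "z = Array size (Bit 16-31)" layout comment.
#include <cstdio>
#include <cstdint>

int main() {
  const uint32_t AD_SIZE_SHIFT = 8, AD_SIZE_MASK = 7;
  const uint32_t AD_NULLABLE_SHIFT = 12, AD_ARRAY_SIZE_SHIFT = 16;

  uint32_t desc = 0;
  desc |= (5u << AD_SIZE_SHIFT);         // size code 5 = a32Bit (32-bit elements)
  desc |= (1u << AD_ARRAY_SIZE_SHIFT);   // one element
  desc |= (1u << AD_NULLABLE_SHIFT);     // nullable

  uint32_t sizeCode  = (desc >> AD_SIZE_SHIFT) & AD_SIZE_MASK;   // cf. getSize()
  uint32_t arraySize =  desc >> AD_ARRAY_SIZE_SHIFT;             // cf. getArraySize()
  uint32_t bits      = arraySize << sizeCode;                    // total size in bits
  uint32_t bytes     = bits >> 3;                                // cf. getSizeInBytes()
  uint32_t words     = (bits + 31) >> 5;                         // cf. getSizeInWords()
  printf("size code %u, %u element(s): %u bits = %u bytes = %u word(s)\n",
         (unsigned)sizeCode, (unsigned)arraySize,
         (unsigned)bits, (unsigned)bytes, (unsigned)words);
  return 0;
}
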
diff --git a/ndb/include/kernel/AttributeHeader.hpp b/ndb/include/kernel/AttributeHeader.hpp
index ed9085301be..3cb432067eb 100644
--- a/ndb/include/kernel/AttributeHeader.hpp
+++ b/ndb/include/kernel/AttributeHeader.hpp
@@ -33,10 +33,14 @@ public:
   * Pseudo columns
*/
STATIC_CONST( PSUEDO = 0x8000 );
- STATIC_CONST( FRAGMENT = 0xFFFE );
- STATIC_CONST( ROW_COUNT = 0xFFFD );
- STATIC_CONST( COMMIT_COUNT = 0xFFFC );
+ STATIC_CONST( FRAGMENT = 0xFFFE ); // Read fragment no
+ STATIC_CONST( ROW_COUNT = 0xFFFD ); // Read row count (committed)
+ STATIC_CONST( COMMIT_COUNT = 0xFFFC ); // Read commit count
+ STATIC_CONST( RANGE_NO = 0xFFFB ); // Read range no (when batched ranges)
+ STATIC_CONST( ROW_SIZE = 0xFFFA );
+ STATIC_CONST( FRAGMENT_MEMORY= 0xFFF9 );
+
/** Initialize AttributeHeader at location aHeaderPtr */
static AttributeHeader& init(void* aHeaderPtr, Uint32 anAttributeId,
Uint32 aDataSize);
diff --git a/ndb/include/kernel/AttributeList.hpp b/ndb/include/kernel/AttributeList.hpp
index 7c6f71df3d2..70b178c6c79 100644
--- a/ndb/include/kernel/AttributeList.hpp
+++ b/ndb/include/kernel/AttributeList.hpp
@@ -17,6 +17,8 @@
#ifndef ATTRIBUTE_LIST_HPP
#define ATTRIBUTE_LIST_HPP
+#include "ndb_limits.h"
+
/**
* Masks and lists used by index and trigger. Must be plain old Uint32 data.
* XXX depends on other headers XXX move to some common file
diff --git a/ndb/include/kernel/LogLevel.hpp b/ndb/include/kernel/LogLevel.hpp
index 3c2f349e0e1..60dcd36ab56 100644
--- a/ndb/include/kernel/LogLevel.hpp
+++ b/ndb/include/kernel/LogLevel.hpp
@@ -57,7 +57,7 @@ public:
llInfo = CFG_LOGLEVEL_INFO - CFG_MIN_LOGLEVEL,
llWarning = CFG_LOGLEVEL_WARNING - CFG_MIN_LOGLEVEL,
llError = CFG_LOGLEVEL_ERROR - CFG_MIN_LOGLEVEL,
- llGrep = CFG_LOGLEVEL_GREP - CFG_MIN_LOGLEVEL,
+ llCongestion = CFG_LOGLEVEL_CONGESTION - CFG_MIN_LOGLEVEL,
llDebug = CFG_LOGLEVEL_DEBUG - CFG_MIN_LOGLEVEL
,llBackup = CFG_LOGLEVEL_BACKUP - CFG_MIN_LOGLEVEL
};
@@ -147,7 +147,7 @@ LogLevel::set_max(const LogLevel & org){
return * this;
}
-#include <signaldata/EventSubscribeReq.hpp>
+#include "signaldata/EventSubscribeReq.hpp"
inline
LogLevel&
diff --git a/ndb/include/kernel/NodeInfo.hpp b/ndb/include/kernel/NodeInfo.hpp
index 5377f001949..622185323a3 100644
--- a/ndb/include/kernel/NodeInfo.hpp
+++ b/ndb/include/kernel/NodeInfo.hpp
@@ -41,6 +41,7 @@ public:
Uint32 m_type; ///< Node type
Uint32 m_connectCount; ///< No of times connected
bool m_connected; ///< Node is connected
+ Uint32 m_heartbeat_cnt; ///< Missed heartbeats
friend NdbOut & operator<<(NdbOut&, const NodeInfo&);
};
@@ -52,6 +53,7 @@ NodeInfo::NodeInfo(){
m_signalVersion = 0;
m_type = INVALID;
m_connectCount = 0;
+ m_heartbeat_cnt= 0;
}
inline
diff --git a/ndb/include/kernel/ndb_limits.h b/ndb/include/kernel/ndb_limits.h
index 48a56c019bb..e60153e60ec 100644
--- a/ndb/include/kernel/ndb_limits.h
+++ b/ndb/include/kernel/ndb_limits.h
@@ -17,6 +17,8 @@
#ifndef NDB_LIMITS_H
#define NDB_LIMITS_H
+#include <mysql.h>
+
#define RNIL 0xffffff00
/**
@@ -50,17 +52,17 @@
**/
#define MAX_TUPLES_PER_PAGE 8191
#define MAX_TUPLES_BITS 13 /* 13 bits = 8191 tuples per page */
-/*#define MAX_NO_OF_TUPLEKEY 16 Not currently used */
-#define MAX_TABLES 1600
+#define MAX_TABLES 20320 /* SchemaFile.hpp */
#define MAX_TAB_NAME_SIZE 128
-#define MAX_ATTR_NAME_SIZE 32
+#define MAX_ATTR_NAME_SIZE NAME_LEN /* From mysql_com.h */
#define MAX_ATTR_DEFAULT_VALUE_SIZE 128
#define MAX_ATTRIBUTES_IN_TABLE 128
#define MAX_ATTRIBUTES_IN_INDEX 32
#define MAX_TUPLE_SIZE_IN_WORDS 2013
-#define MAX_FIXED_KEY_LENGTH_IN_WORDS 8
#define MAX_KEY_SIZE_IN_WORDS 1023
#define MAX_FRM_DATA_SIZE 6000
+#define MAX_NULL_BITS 4096
+#define MAX_FRAGMENT_DATA_BYTES (4+(2 * 8 * MAX_REPLICAS * MAX_NDB_NODES))
#define MIN_ATTRBUF ((MAX_ATTRIBUTES_IN_TABLE/24) + 1)
/*
@@ -118,6 +120,11 @@
#define NDB_BLOB_HEAD_SIZE 2 /* sizeof(NdbBlob::Head) >> 2 */
/*
+ * Character sets.
+ */
+#define MAX_XFRM_MULTIPLY 8 /* max expansion when normalizing */
+
+/*
* Long signals
*/
#define NDB_SECTION_SEGMENT_SZ 60
diff --git a/ndb/include/kernel/signaldata/AccScan.hpp b/ndb/include/kernel/signaldata/AccScan.hpp
index eab1c3262fc..d94d4da8cca 100644
--- a/ndb/include/kernel/signaldata/AccScan.hpp
+++ b/ndb/include/kernel/signaldata/AccScan.hpp
@@ -34,6 +34,7 @@ class AccScanReq {
*/
friend class Dbacc;
friend class Dbtux;
+ friend class Dbtup;
public:
STATIC_CONST( SignalLength = 8 );
@@ -51,30 +52,29 @@ private:
   * Previously there was also a scan type
*/
static Uint32 getLockMode(const Uint32 & requestInfo);
- static Uint32 getKeyinfoFlag(const Uint32 & requestInfo);
static Uint32 getReadCommittedFlag(const Uint32 & requestInfo);
+ static Uint32 getDescendingFlag(const Uint32 & requestInfo);
static void setLockMode(Uint32 & requestInfo, Uint32 lockMode);
- static void setKeyinfoFlag(Uint32 & requestInfo, Uint32 keyinfo);
static void setReadCommittedFlag(Uint32 & requestInfo, Uint32 readCommitted);
+ static void setDescendingFlag(Uint32 & requestInfo, Uint32 descending);
};
/**
* Request Info
*
* l = Lock Mode - 1 Bit 2
- * k = Keyinfo - 1 Bit 4
* h = Read Committed - 1 Bit 5
+ * z = Descending (TUX) - 1 Bit 6
*
* 1111111111222222222233
* 01234567890123456789012345678901
- * l kh
+ * l hz
*/
#define AS_LOCK_MODE_SHIFT (2)
#define AS_LOCK_MODE_MASK (1)
-
-#define AS_KEYINFO_SHIFT (4)
#define AS_READ_COMMITTED_SHIFT (5)
+#define AS_DESCENDING_SHIFT (6)
inline
Uint32
@@ -84,14 +84,14 @@ AccScanReq::getLockMode(const Uint32 & requestInfo){
inline
Uint32
-AccScanReq::getKeyinfoFlag(const Uint32 & requestInfo){
- return (requestInfo >> AS_KEYINFO_SHIFT) & 1;
+AccScanReq::getReadCommittedFlag(const Uint32 & requestInfo){
+ return (requestInfo >> AS_READ_COMMITTED_SHIFT) & 1;
}
inline
Uint32
-AccScanReq::getReadCommittedFlag(const Uint32 & requestInfo){
- return (requestInfo >> AS_READ_COMMITTED_SHIFT) & 1;
+AccScanReq::getDescendingFlag(const Uint32 & requestInfo){
+ return (requestInfo >> AS_DESCENDING_SHIFT) & 1;
}
inline
@@ -103,16 +103,16 @@ AccScanReq::setLockMode(UintR & requestInfo, UintR val){
inline
void
-AccScanReq::setKeyinfoFlag(UintR & requestInfo, UintR val){
- ASSERT_BOOL(val, "AccScanReq::setKeyinfoFlag");
- requestInfo |= (val << AS_KEYINFO_SHIFT);
+AccScanReq::setReadCommittedFlag(UintR & requestInfo, UintR val){
+ ASSERT_BOOL(val, "AccScanReq::setReadCommittedFlag");
+ requestInfo |= (val << AS_READ_COMMITTED_SHIFT);
}
inline
void
-AccScanReq::setReadCommittedFlag(UintR & requestInfo, UintR val){
- ASSERT_BOOL(val, "AccScanReq::setReadCommittedFlag");
- requestInfo |= (val << AS_READ_COMMITTED_SHIFT);
+AccScanReq::setDescendingFlag(UintR & requestInfo, UintR val){
+ ASSERT_BOOL(val, "AccScanReq::setDescendingFlag");
+ requestInfo |= (val << AS_DESCENDING_SHIFT);
}
class AccScanConf {
@@ -121,6 +121,7 @@ class AccScanConf {
*/
friend class Dbacc;
friend class Dbtux;
+ friend class Dbtup;
/**
* Reciver(s)
@@ -149,6 +150,7 @@ private:
class AccCheckScan {
friend class Dbacc;
friend class Dbtux;
+ friend class Dbtup;
friend class Dblqh;
enum {
ZCHECK_LCP_STOP = 0,
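
For clarity on the AccScanReq change above (the keyinfo bit is gone and a TUX descending bit appears at position 6), the following sketch mirrors the getter and setter shift logic on a plain word. The real accessors are private statics on the class, so this is an illustration only, not code from the header.

// Illustration of the reshaped AccScanReq request-info bits (shift values from this hunk).
#include <cstdio>
#include <cstdint>

int main() {
  const uint32_t AS_LOCK_MODE_SHIFT = 2, AS_READ_COMMITTED_SHIFT = 5, AS_DESCENDING_SHIFT = 6;

  uint32_t requestInfo = 0;
  requestInfo |= (1u << AS_READ_COMMITTED_SHIFT);  // read-committed scan
  requestInfo |= (1u << AS_DESCENDING_SHIFT);      // descending order (TUX ordered index)

  printf("lockMode=%u readCommitted=%u descending=%u\n",
         (unsigned)((requestInfo >> AS_LOCK_MODE_SHIFT) & 1),
         (unsigned)((requestInfo >> AS_READ_COMMITTED_SHIFT) & 1),
         (unsigned)((requestInfo >> AS_DESCENDING_SHIFT) & 1));
  return 0;
}
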
diff --git a/ndb/include/kernel/signaldata/CreateEvnt.hpp b/ndb/include/kernel/signaldata/CreateEvnt.hpp
index e911fa36ce6..8712ce8890c 100644
--- a/ndb/include/kernel/signaldata/CreateEvnt.hpp
+++ b/ndb/include/kernel/signaldata/CreateEvnt.hpp
@@ -17,6 +17,7 @@
#ifndef CREATE_EVNT_HPP
#define CREATE_EVNT_HPP
+#include <ndberror.h>
#include "SignalData.hpp"
#include <NodeBitmask.hpp>
#include <signaldata/DictTabInfo.hpp>
@@ -101,7 +102,7 @@ public:
Busy = 701,
NotMaster = 702,
SeizeError = 703,
- EventNotFound = 4238,
+ EventNotFound = 4710,
EventNameTooLong = 4241,
TooManyEvents = 4242,
BadRequestType = 4247,
@@ -363,12 +364,10 @@ struct CreateEvntRef {
Busy = 701,
NotMaster = 702,
SeizeError = 703,
- EventNotFound = 4238,
- EventExists = 4239,
- EventNameTooLong = 4241,
- TooManyEvents = 4242,
- // EventExists = 4244,
- AttributeNotStored = 4245,
+ TooManyEvents = 4707,
+ EventNameTooLong = 4708,
+ EventNameExists = 746,
+ EventNotFound = 4731,
AttributeNullable = 4246,
BadRequestType = 4247,
InvalidName = 4248,
@@ -376,7 +375,7 @@ struct CreateEvntRef {
InvalidEventType = 4250,
NotUnique = 4251,
AllocationError = 4252,
- CreateEventTableFailed = 4253,
+ CreateEventTableFailed = 4711,
InvalidAttributeOrder = 4255,
Temporary = 0x1 << 16
};
diff --git a/ndb/include/kernel/signaldata/CreateIndx.hpp b/ndb/include/kernel/signaldata/CreateIndx.hpp
index 5563f80a555..a9dc653f349 100644
--- a/ndb/include/kernel/signaldata/CreateIndx.hpp
+++ b/ndb/include/kernel/signaldata/CreateIndx.hpp
@@ -198,7 +198,6 @@ public:
IndexNameTooLong = 4241,
TooManyIndexes = 4242,
IndexExists = 4244,
- AttributeNotStored = 4245,
AttributeNullable = 4246,
BadRequestType = 4247,
InvalidName = 4248,
@@ -207,7 +206,7 @@ public:
NotUnique = 4251,
AllocationError = 4252,
CreateIndexTableFailed = 4253,
- InvalidAttributeOrder = 4255
+ DuplicateAttributes = 4258
};
CreateIndxConf m_conf;
diff --git a/ndb/include/kernel/signaldata/CreateTable.hpp b/ndb/include/kernel/signaldata/CreateTable.hpp
index 67e510d2ed0..481b323fdb0 100644
--- a/ndb/include/kernel/signaldata/CreateTable.hpp
+++ b/ndb/include/kernel/signaldata/CreateTable.hpp
@@ -86,6 +86,7 @@ public:
NoMoreAttributeRecords = 708,
AttributeNameTwice = 720,
TableAlreadyExist = 721,
+ InvalidArraySize = 736,
ArraySizeTooBig = 737,
RecordTooBig = 738,
InvalidPrimaryKeySize = 739,
diff --git a/ndb/include/kernel/signaldata/DictTabInfo.hpp b/ndb/include/kernel/signaldata/DictTabInfo.hpp
index 48c24125ae4..bc4817f0cf3 100644
--- a/ndb/include/kernel/signaldata/DictTabInfo.hpp
+++ b/ndb/include/kernel/signaldata/DictTabInfo.hpp
@@ -24,6 +24,28 @@
#include <trigger_definitions.h>
#include <NdbSqlUtil.hpp>
+#ifndef my_decimal_h
+
+// sql/my_decimal.h requires many more sql/*.h headers that are new to ndb
+// for now, copy the bit we need TODO proper fix
+
+#define DECIMAL_MAX_LENGTH ((8 * 9) - 8)
+
+#ifndef NOT_FIXED_DEC
+#define NOT_FIXED_DEC 31
+#endif
+
+C_MODE_START
+extern int decimal_bin_size(int, int);
+C_MODE_END
+
+inline int my_decimal_get_binary_size(uint precision, uint scale)
+{
+ return decimal_bin_size((int)precision, (int)scale);
+}
+
+#endif
+
#define DTIMAP(x, y, z) \
{ DictTabInfo::y, offsetof(x, z), SimpleProperties::Uint32Value, 0, (~0), 0 }
@@ -85,10 +107,6 @@ public:
MaxLoadFactor = 11, //Default 80
KeyLength = 12, //Default 1 (No of words in primary key)
FragmentTypeVal = 13, //Default AllNodesSmallTable
- TableStorageVal = 14, //Default StorageType::MainMemory
- ScanOptimised = 15, //Default updateOptimised
- FragmentKeyTypeVal = 16, //Default PrimaryKey
- SecondTableId = 17, //Mandatory between DICT's otherwise not allowed
TableTypeVal = 18, //Default TableType::UserTable
PrimaryTable = 19, //Mandatory for index otherwise RNIL
PrimaryTableId = 20, //ditto
@@ -100,21 +118,20 @@ public:
FrmLen = 26,
FrmData = 27,
FragmentCount = 128, // No of fragments in table (!fragment replicas)
+ FragmentDataLen = 129,
+ FragmentData = 130, // CREATE_FRAGMENTATION reply
TableEnd = 999,
AttributeName = 1000, // String, Mandatory
AttributeId = 1001, //Mandatory between DICT's otherwise not allowed
- AttributeType = 1002, //Default UnSignedType
+ AttributeType = 1002, //for osu 4.1->5.0.x
AttributeSize = 1003, //Default DictTabInfo::a32Bit
AttributeArraySize = 1005, //Default 1
AttributeKeyFlag = 1006, //Default noKey
AttributeStorage = 1007, //Default MainMemory
AttributeNullableFlag = 1008, //Default NotNullable
- AttributeDGroup = 1009, //Default NotDGroup
AttributeDKey = 1010, //Default NotDKey
- AttributeStoredInd = 1011, //Default NotStored
- AttributeGroup = 1012, //Default 0
- AttributeExtType = 1013, //Default 0 (undefined)
+ AttributeExtType = 1013, //Default ExtUnsigned
AttributeExtPrecision = 1014, //Default 0
AttributeExtScale = 1015, //Default 0
AttributeExtLength = 1016, //Default 0
@@ -127,12 +144,7 @@ public:
// have a default value. Thus the default values are part of the protocol.
// ----------------------------------------------------------------------
- // FragmentKeyType constants
- enum FragmentKeyType {
- PrimaryKey = 0,
- DistributionKey = 1,
- DistributionGroup = 2
- };
+
// FragmentType constants
enum FragmentType {
@@ -142,12 +154,6 @@ public:
SingleFragment = 3
};
- // TableStorage AND AttributeStorage constants
- enum StorageType {
- MainMemory = 0,
- DiskMemory = 1
- };
-
// TableType constants + objects
enum TableType {
UndefTableType = 0,
@@ -220,40 +226,18 @@ public:
StorePermanent = 2
};
- // ScanOptimised constants
- STATIC_CONST( updateOptimised = 0 );
- STATIC_CONST( scanOptimised = 1 );
-
- // AttributeType constants
- STATIC_CONST( SignedType = 0 );
- STATIC_CONST( UnSignedType = 1 );
- STATIC_CONST( FloatingPointType = 2 );
- STATIC_CONST( StringType = 3 );
-
// AttributeSize constants
+ STATIC_CONST( aBit = 0 );
STATIC_CONST( an8Bit = 3 );
STATIC_CONST( a16Bit = 4 );
STATIC_CONST( a32Bit = 5 );
STATIC_CONST( a64Bit = 6 );
STATIC_CONST( a128Bit = 7 );
-
- // AttributeDGroup constants
- STATIC_CONST( NotDGroup = 0 );
- STATIC_CONST( DGroup = 1 );
-
- // AttributeDKey constants
- STATIC_CONST( NotDKey = 0 );
- STATIC_CONST( DKey = 1 );
-
- // AttributeStoredInd constants
- STATIC_CONST( NotStored = 0 );
- STATIC_CONST( Stored = 1 );
-
+
// Table data interpretation
struct Table {
char TableName[MAX_TAB_NAME_SIZE];
Uint32 TableId;
- Uint32 SecondTableId;
char PrimaryTable[MAX_TAB_NAME_SIZE]; // Only used when "index"
Uint32 PrimaryTableId;
Uint32 TableLoggedFlag;
@@ -267,8 +251,6 @@ public:
Uint32 KeyLength;
Uint32 FragmentType;
Uint32 TableStorage;
- Uint32 ScanOptimised;
- Uint32 FragmentKeyType;
Uint32 TableType;
Uint32 TableVersion;
Uint32 IndexState;
@@ -279,7 +261,9 @@ public:
Uint32 FrmLen;
char FrmData[MAX_FRM_DATA_SIZE];
Uint32 FragmentCount;
-
+ Uint32 FragmentDataLen;
+ Uint16 FragmentData[(MAX_FRAGMENT_DATA_BYTES+1)/2];
+
void init();
};
@@ -305,6 +289,8 @@ public:
ExtDouble = NdbSqlUtil::Type::Double,
ExtOlddecimal = NdbSqlUtil::Type::Olddecimal,
ExtOlddecimalunsigned = NdbSqlUtil::Type::Olddecimalunsigned,
+ ExtDecimal = NdbSqlUtil::Type::Decimal,
+ ExtDecimalunsigned = NdbSqlUtil::Type::Decimalunsigned,
ExtChar = NdbSqlUtil::Type::Char,
ExtVarchar = NdbSqlUtil::Type::Varchar,
ExtBinary = NdbSqlUtil::Type::Binary,
@@ -313,6 +299,9 @@ public:
ExtDate = NdbSqlUtil::Type::Date,
ExtBlob = NdbSqlUtil::Type::Blob,
ExtText = NdbSqlUtil::Type::Text,
+ ExtBit = NdbSqlUtil::Type::Bit,
+ ExtLongvarchar = NdbSqlUtil::Type::Longvarchar,
+ ExtLongvarbinary = NdbSqlUtil::Type::Longvarbinary,
ExtTime = NdbSqlUtil::Type::Time,
ExtYear = NdbSqlUtil::Type::Year,
ExtTimestamp = NdbSqlUtil::Type::Timestamp
@@ -322,16 +311,12 @@ public:
struct Attribute {
char AttributeName[MAX_TAB_NAME_SIZE];
Uint32 AttributeId;
- Uint32 AttributeType;
+ Uint32 AttributeType; // for osu 4.1->5.0.x
Uint32 AttributeSize;
Uint32 AttributeArraySize;
Uint32 AttributeKeyFlag;
- Uint32 AttributeStorage;
Uint32 AttributeNullableFlag;
- Uint32 AttributeDGroup;
Uint32 AttributeDKey;
- Uint32 AttributeStoredInd;
- Uint32 AttributeGroup;
Uint32 AttributeExtType;
Uint32 AttributeExtPrecision;
Uint32 AttributeExtScale;
@@ -347,132 +332,125 @@ public:
return ((1 << AttributeSize) * AttributeArraySize + 31) >> 5;
}
- // translate to old kernel types and sizes
+    // compute old-style attribute size and array size
inline bool
translateExtType() {
switch (AttributeExtType) {
case DictTabInfo::ExtUndefined:
- break;
+ return false;
case DictTabInfo::ExtTinyint:
- AttributeType = DictTabInfo::SignedType;
- AttributeSize = DictTabInfo::an8Bit;
- AttributeArraySize = AttributeExtLength;
- return true;
case DictTabInfo::ExtTinyunsigned:
- AttributeType = DictTabInfo::UnSignedType;
AttributeSize = DictTabInfo::an8Bit;
AttributeArraySize = AttributeExtLength;
- return true;
+ break;
case DictTabInfo::ExtSmallint:
- AttributeType = DictTabInfo::SignedType;
- AttributeSize = DictTabInfo::a16Bit;
- AttributeArraySize = AttributeExtLength;
- return true;
case DictTabInfo::ExtSmallunsigned:
- AttributeType = DictTabInfo::UnSignedType;
AttributeSize = DictTabInfo::a16Bit;
AttributeArraySize = AttributeExtLength;
- return true;
+ break;
case DictTabInfo::ExtMediumint:
- AttributeType = DictTabInfo::SignedType;
- AttributeSize = DictTabInfo::an8Bit;
- AttributeArraySize = 3 * AttributeExtLength;
- return true;
case DictTabInfo::ExtMediumunsigned:
- AttributeType = DictTabInfo::UnSignedType;
AttributeSize = DictTabInfo::an8Bit;
AttributeArraySize = 3 * AttributeExtLength;
- return true;
+ break;
case DictTabInfo::ExtInt:
- AttributeType = DictTabInfo::SignedType;
- AttributeSize = DictTabInfo::a32Bit;
- AttributeArraySize = AttributeExtLength;
- return true;
case DictTabInfo::ExtUnsigned:
- AttributeType = DictTabInfo::UnSignedType;
AttributeSize = DictTabInfo::a32Bit;
AttributeArraySize = AttributeExtLength;
- return true;
+ break;
case DictTabInfo::ExtBigint:
- AttributeType = DictTabInfo::SignedType;
- AttributeSize = DictTabInfo::a64Bit;
- AttributeArraySize = AttributeExtLength;
- return true;
case DictTabInfo::ExtBigunsigned:
- AttributeType = DictTabInfo::UnSignedType;
AttributeSize = DictTabInfo::a64Bit;
AttributeArraySize = AttributeExtLength;
- return true;
+ break;
case DictTabInfo::ExtFloat:
- AttributeType = DictTabInfo::FloatingPointType;
AttributeSize = DictTabInfo::a32Bit;
AttributeArraySize = AttributeExtLength;
- return true;
+ break;
case DictTabInfo::ExtDouble:
- AttributeType = DictTabInfo::FloatingPointType;
AttributeSize = DictTabInfo::a64Bit;
AttributeArraySize = AttributeExtLength;
- return true;
+ break;
case DictTabInfo::ExtOlddecimal:
- AttributeType = DictTabInfo::StringType;
AttributeSize = DictTabInfo::an8Bit;
AttributeArraySize =
(1 + AttributeExtPrecision + (int(AttributeExtScale) > 0)) *
AttributeExtLength;
- return true;
+ break;
case DictTabInfo::ExtOlddecimalunsigned:
- AttributeType = DictTabInfo::StringType;
AttributeSize = DictTabInfo::an8Bit;
AttributeArraySize =
(0 + AttributeExtPrecision + (int(AttributeExtScale) > 0)) *
AttributeExtLength;
- return true;
+ break;
+ case DictTabInfo::ExtDecimal:
+ case DictTabInfo::ExtDecimalunsigned:
+ {
+ // copy from Field_new_decimal ctor
+ uint precision = AttributeExtPrecision;
+ uint scale = AttributeExtScale;
+ if (precision > DECIMAL_MAX_LENGTH || scale >= NOT_FIXED_DEC)
+ precision = DECIMAL_MAX_LENGTH;
+ uint bin_size = my_decimal_get_binary_size(precision, scale);
+ AttributeSize = DictTabInfo::an8Bit;
+ AttributeArraySize = bin_size * AttributeExtLength;
+ }
+ break;
case DictTabInfo::ExtChar:
case DictTabInfo::ExtBinary:
- AttributeType = DictTabInfo::StringType;
AttributeSize = DictTabInfo::an8Bit;
AttributeArraySize = AttributeExtLength;
- return true;
+ break;
case DictTabInfo::ExtVarchar:
case DictTabInfo::ExtVarbinary:
- AttributeType = DictTabInfo::StringType;
+ if (AttributeExtLength > 0xff)
+ return false;
AttributeSize = DictTabInfo::an8Bit;
- AttributeArraySize = AttributeExtLength + 2;
- return true;
+ AttributeArraySize = AttributeExtLength + 1;
+ break;
case DictTabInfo::ExtDatetime:
- AttributeType = DictTabInfo::StringType;
+ // to fix
AttributeSize = DictTabInfo::an8Bit;
AttributeArraySize = 8 * AttributeExtLength;
- return true;
+ break;
case DictTabInfo::ExtDate:
- AttributeType = DictTabInfo::StringType;
+ // to fix
AttributeSize = DictTabInfo::an8Bit;
AttributeArraySize = 3 * AttributeExtLength;
- return true;
+ break;
case DictTabInfo::ExtBlob:
case DictTabInfo::ExtText:
- AttributeType = DictTabInfo::StringType;
AttributeSize = DictTabInfo::an8Bit;
- // head + inline part [ attr precision lower half ]
+ // head + inline part (length in precision lower half)
AttributeArraySize = (NDB_BLOB_HEAD_SIZE << 2) + (AttributeExtPrecision & 0xFFFF);
- return true;
+ break;
+ case DictTabInfo::ExtBit:
+ AttributeSize = DictTabInfo::aBit;
+ AttributeArraySize = AttributeExtLength;
+ break;
+ case DictTabInfo::ExtLongvarchar:
+ case DictTabInfo::ExtLongvarbinary:
+ if (AttributeExtLength > 0xffff)
+ return false;
+ AttributeSize = DictTabInfo::an8Bit;
+ AttributeArraySize = AttributeExtLength + 2;
+ break;
case DictTabInfo::ExtTime:
- AttributeType = DictTabInfo::StringType;
AttributeSize = DictTabInfo::an8Bit;
AttributeArraySize = 3 * AttributeExtLength;
- return true;
+ break;
case DictTabInfo::ExtYear:
- AttributeType = DictTabInfo::StringType;
AttributeSize = DictTabInfo::an8Bit;
AttributeArraySize = 1 * AttributeExtLength;
- return true;
+ break;
case DictTabInfo::ExtTimestamp:
- AttributeType = DictTabInfo::StringType;
AttributeSize = DictTabInfo::an8Bit;
AttributeArraySize = 4 * AttributeExtLength;
- return true;
+ break;
+ default:
+ return false;
};
- return false;
+ return true;
}
inline void print(FILE *out) {
@@ -483,9 +461,7 @@ public:
fprintf(out, "AttributeKeyFlag = %d\n", AttributeKeyFlag);
fprintf(out, "AttributeStorage = %d\n", AttributeStorage);
fprintf(out, "AttributeNullableFlag = %d\n", AttributeNullableFlag);
- fprintf(out, "AttributeDGroup = %d\n", AttributeDGroup);
fprintf(out, "AttributeDKey = %d\n", AttributeDKey);
- fprintf(out, "AttributeStoredInd = %d\n", AttributeStoredInd);
fprintf(out, "AttributeGroup = %d\n", AttributeGroup);
fprintf(out, "AttributeAutoIncrement = %d\n", AttributeAutoIncrement);
fprintf(out, "AttributeExtType = %d\n", AttributeExtType);
@@ -519,6 +495,22 @@ private:
*/
Uint32 tabInfoData[DataLength];
+
+public:
+ enum Depricated
+ {
+ AttributeDGroup = 1009, //Default NotDGroup
+ AttributeStoredInd = 1011, //Default NotStored
+ SecondTableId = 17, //Mandatory between DICT's otherwise not allowed
+ FragmentKeyTypeVal = 16 //Default PrimaryKey
+ };
+
+ enum Unimplemented
+ {
+ TableStorageVal = 14, //Default StorageType::MainMemory
+ ScanOptimised = 15, //Default updateOptimised
+ AttributeGroup = 1012 //Default 0
+ };
};
#endif
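
As a worked example of the size bookkeeping in this struct, the sketch below computes the in-kernel footprint of a hypothetical Varchar column of external length 20 under the new mapping (ExtVarchar keeps 8-bit elements and now uses length + 1 array elements, presumably for the one-byte length prefix), then applies the size-in-words expression quoted above. It is purely illustrative and not part of the header.

// Worked size example for a hypothetical Varchar(20) column under the new mapping.
#include <cstdio>
#include <cstdint>

int main() {
  const uint32_t an8Bit = 3;                    // AttributeSize code for 8-bit elements
  uint32_t AttributeSize      = an8Bit;
  uint32_t AttributeArraySize = 20 + 1;         // ExtVarchar: AttributeExtLength + 1

  // Size-in-words expression from the struct:
  //   ((1 << AttributeSize) * AttributeArraySize + 31) >> 5
  uint32_t bitsPerElement = 1u << AttributeSize;                         // 8 bits
  uint32_t sizeInWords    = (bitsPerElement * AttributeArraySize + 31) >> 5;
  printf("Varchar(20): %u elements x %u bits -> %u words\n",
         (unsigned)AttributeArraySize, (unsigned)bitsPerElement, (unsigned)sizeInWords);
  return 0;
}
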
diff --git a/ndb/include/kernel/signaldata/EventReport.hpp b/ndb/include/kernel/signaldata/EventReport.hpp
index 67595648f34..9822a0539cf 100644
--- a/ndb/include/kernel/signaldata/EventReport.hpp
+++ b/ndb/include/kernel/signaldata/EventReport.hpp
@@ -17,6 +17,7 @@
#ifndef SD_EVENT_REPORT_H
#define SD_EVENT_REPORT_H
+#include <ndb_logevent.h>
#include "SignalData.hpp"
/**
@@ -67,98 +68,21 @@ public:
4) Add SentHeartbeat in EventLogger::getText()
*/
- enum EventType {
- // CONNECTION
- Connected = 0,
- Disconnected = 1,
- CommunicationClosed = 2,
- CommunicationOpened = 3,
- ConnectedApiVersion = 51,
- // CHECKPOINT
- GlobalCheckpointStarted = 4,
- GlobalCheckpointCompleted = 5,
- LocalCheckpointStarted = 6,
- LocalCheckpointCompleted = 7,
- LCPStoppedInCalcKeepGci = 8,
- LCPFragmentCompleted = 9,
- // STARTUP
- NDBStartStarted = 10,
- NDBStartCompleted = 11,
- STTORRYRecieved = 12,
- StartPhaseCompleted = 13,
- CM_REGCONF = 14,
- CM_REGREF = 15,
- FIND_NEIGHBOURS = 16,
- NDBStopStarted = 17,
- NDBStopAborted = 18,
- StartREDOLog = 19,
- StartLog = 20,
- UNDORecordsExecuted = 21,
-
- // NODERESTART
- NR_CopyDict = 22,
- NR_CopyDistr = 23,
- NR_CopyFragsStarted = 24,
- NR_CopyFragDone = 25,
- NR_CopyFragsCompleted = 26,
-
- // NODEFAIL
- NodeFailCompleted = 27,
- NODE_FAILREP = 28,
- ArbitState = 29,
- ArbitResult = 30,
- GCP_TakeoverStarted = 31,
- GCP_TakeoverCompleted = 32,
- LCP_TakeoverStarted = 33,
- LCP_TakeoverCompleted = 34,
-
- // STATISTIC
- TransReportCounters = 35,
- OperationReportCounters = 36,
- TableCreated = 37,
- UndoLogBlocked = 38,
- JobStatistic = 39,
- SendBytesStatistic = 40,
- ReceiveBytesStatistic = 41,
- MemoryUsage = 50,
-
- // ERROR
- TransporterError = 42,
- TransporterWarning = 43,
- MissedHeartbeat = 44,
- DeadDueToHeartbeat = 45,
- WarningEvent = 46,
- // INFO
- SentHeartbeat = 47,
- CreateLogBytes = 48,
- InfoEvent = 49,
-
- // SINGLE USER
- SingleUser = 52,
- /* unused 53 */
-
- //BACKUP
- BackupStarted = 54,
- BackupFailedToStart = 55,
- BackupCompleted = 56,
- BackupAborted = 57
- };
-
- void setEventType(EventType type);
- EventType getEventType() const;
+ void setEventType(Ndb_logevent_type type);
+ Ndb_logevent_type getEventType() const;
UintR eventType; // DATA 0
};
inline
void
-EventReport::setEventType(EventType type){
+EventReport::setEventType(Ndb_logevent_type type){
eventType = (UintR) type;
}
inline
-EventReport::EventType
+Ndb_logevent_type
EventReport::getEventType() const {
- return (EventType)eventType;
+ return (Ndb_logevent_type)eventType;
}
#endif
diff --git a/ndb/include/kernel/signaldata/GetTabInfo.hpp b/ndb/include/kernel/signaldata/GetTabInfo.hpp
index cb6e38872d3..6b223cab119 100644
--- a/ndb/include/kernel/signaldata/GetTabInfo.hpp
+++ b/ndb/include/kernel/signaldata/GetTabInfo.hpp
@@ -39,23 +39,16 @@ class GetTabInfoReq {
friend bool printGET_TABINFO_REQ(FILE *, const Uint32 *, Uint32, Uint16);
public:
STATIC_CONST( SignalLength = 5 );
- // STATIC_CONST( MaxTableNameLengthInWords = 20 );
public:
- Uint32 senderData;
+ Uint32 senderData;
Uint32 senderRef;
-
- /**
- * 0 = request by id, 1 = request by name
- */
- Uint32 requestType;
-
+ Uint32 requestType; // Bitmask of GetTabInfoReq::RequestType
union {
- Uint32 tableId;
+ Uint32 tableId;
Uint32 tableNameLen;
};
Uint32 unused; // This is located here so that Req & Ref have the same format
- // Uint32 tableName[MaxTableNameLengthInWords];
-
+
enum RequestType {
RequestById = 0,
RequestByName = 1,
@@ -79,22 +72,21 @@ class GetTabInfoRef {
friend bool printGET_TABINFO_REF(FILE *, const Uint32 *, Uint32, Uint16);
public:
STATIC_CONST( SignalLength = 5 );
-
public:
- Uint32 senderData;
+ Uint32 senderData;
Uint32 senderRef;
- Uint32 requestType; // 0 = request by id, 1 = request by name
+ Uint32 requestType; // Bitmask of GetTabInfoReq::RequestType
union {
- Uint32 tableId;
+ Uint32 tableId;
Uint32 tableNameLen;
};
- Uint32 errorCode;
+ Uint32 errorCode;
enum ErrorCode {
- InvalidTableId = 709,
+ InvalidTableId = 709,
TableNotDefined = 723,
TableNameTooLong = 702,
- Busy = 701
+ Busy = 701
};
};
@@ -114,10 +106,10 @@ class GetTabInfoConf {
friend bool printGET_TABINFO_CONF(FILE *, const Uint32 *, Uint32, Uint16);
public:
STATIC_CONST( SignalLength = 4 );
-
+
SECTION( DICT_TAB_INFO = 0 );
public:
- Uint32 senderData;
+ Uint32 senderData;
Uint32 tableId;
Uint32 gci; // For table
Uint32 totalLen; // In words
diff --git a/ndb/include/kernel/signaldata/NextScan.hpp b/ndb/include/kernel/signaldata/NextScan.hpp
index 3a1882f94e8..a502a89108c 100644
--- a/ndb/include/kernel/signaldata/NextScan.hpp
+++ b/ndb/include/kernel/signaldata/NextScan.hpp
@@ -23,6 +23,7 @@ class NextScanReq {
friend class Dblqh;
friend class Dbacc;
friend class Dbtux;
+ friend class Dbtup;
public:
// two sets of defs picked from lqh/acc
enum ScanFlag {
@@ -50,6 +51,7 @@ private:
class NextScanConf {
friend class Dbacc;
friend class Dbtux;
+ friend class Dbtup;
friend class Dblqh;
public:
// length is less if no keyinfo or no next result
diff --git a/ndb/include/kernel/signaldata/ScanFrag.hpp b/ndb/include/kernel/signaldata/ScanFrag.hpp
index 41ea569c45d..f21a3eef7ac 100644
--- a/ndb/include/kernel/signaldata/ScanFrag.hpp
+++ b/ndb/include/kernel/signaldata/ScanFrag.hpp
@@ -56,6 +56,8 @@ public:
static Uint32 getKeyinfoFlag(const Uint32 & requestInfo);
static Uint32 getReadCommittedFlag(const Uint32 & requestInfo);
static Uint32 getRangeScanFlag(const Uint32 & requestInfo);
+ static Uint32 getDescendingFlag(const Uint32 & requestInfo);
+ static Uint32 getTupScanFlag(const Uint32 & requestInfo);
static Uint32 getAttrLen(const Uint32 & requestInfo);
static Uint32 getScanPrio(const Uint32 & requestInfo);
@@ -64,6 +66,8 @@ public:
static void setKeyinfoFlag(Uint32 & requestInfo, Uint32 keyinfo);
static void setReadCommittedFlag(Uint32 & requestInfo, Uint32 readCommitted);
static void setRangeScanFlag(Uint32 & requestInfo, Uint32 rangeScan);
+ static void setDescendingFlag(Uint32 & requestInfo, Uint32 descending);
+ static void setTupScanFlag(Uint32 & requestInfo, Uint32 tupScan);
static void setAttrLen(Uint32 & requestInfo, Uint32 attrLen);
static void setScanPrio(Uint32& requestInfo, Uint32 prio);
};
@@ -197,11 +201,13 @@ public:
* k = Keyinfo - 1 Bit 8
* r = read committed - 1 Bit 9
* x = range scan - 1 Bit 6
+ * z = descending - 1 Bit 10
+ * t = tup scan           - 1 Bit 11 (implies x=z=0)
* p = Scan prio - 4 Bits (12-15) -> max 15
*
* 1111111111222222222233
* 01234567890123456789012345678901
- * lxhkr ppppaaaaaaaaaaaaaaaa
+ * lxhkrztppppaaaaaaaaaaaaaaaa
*/
#define SF_LOCK_MODE_SHIFT (5)
#define SF_LOCK_MODE_MASK (1)
@@ -210,6 +216,8 @@ public:
#define SF_KEYINFO_SHIFT (8)
#define SF_READ_COMMITTED_SHIFT (9)
#define SF_RANGE_SCAN_SHIFT (6)
+#define SF_DESCENDING_SHIFT (10)
+#define SF_TUP_SCAN_SHIFT (11)
#define SF_ATTR_LEN_SHIFT (16)
#define SF_ATTR_LEN_MASK (65535)
@@ -243,6 +251,18 @@ ScanFragReq::getRangeScanFlag(const Uint32 & requestInfo){
inline
Uint32
+ScanFragReq::getDescendingFlag(const Uint32 & requestInfo){
+ return (requestInfo >> SF_DESCENDING_SHIFT) & 1;
+}
+
+inline
+Uint32
+ScanFragReq::getTupScanFlag(const Uint32 & requestInfo){
+ return (requestInfo >> SF_TUP_SCAN_SHIFT) & 1;
+}
+
+inline
+Uint32
ScanFragReq::getReadCommittedFlag(const Uint32 & requestInfo){
return (requestInfo >> SF_READ_COMMITTED_SHIFT) & 1;
}
@@ -303,6 +323,20 @@ ScanFragReq::setRangeScanFlag(UintR & requestInfo, UintR val){
inline
void
+ScanFragReq::setDescendingFlag(UintR & requestInfo, UintR val){
+ ASSERT_BOOL(val, "ScanFragReq::setDescendingFlag");
+ requestInfo |= (val << SF_DESCENDING_SHIFT);
+}
+
+inline
+void
+ScanFragReq::setTupScanFlag(UintR & requestInfo, UintR val){
+ ASSERT_BOOL(val, "ScanFragReq::setTupScanFlag");
+ requestInfo |= (val << SF_TUP_SCAN_SHIFT);
+}
+
+inline
+void
ScanFragReq::setAttrLen(UintR & requestInfo, UintR val){
ASSERT_MAX(val, SF_ATTR_LEN_MASK, "ScanFragReq::setAttrLen");
requestInfo |= (val << SF_ATTR_LEN_SHIFT);
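
To show that the new ScanFragReq bits fit alongside the existing fields, the sketch below packs a range scan with the new descending flag (bit 10) together with a 16-bit ATTRINFO length in bits 16-31, using the shift values from this hunk. It mirrors the setter logic for illustration only; the attrLen value of 25 words is an arbitrary example.

// Illustration of ScanFragReq request-info packing (shift values from this hunk).
#include <cstdio>
#include <cstdint>

int main() {
  const uint32_t SF_RANGE_SCAN_SHIFT = 6, SF_DESCENDING_SHIFT = 10,
                 SF_TUP_SCAN_SHIFT = 11, SF_ATTR_LEN_SHIFT = 16, SF_ATTR_LEN_MASK = 65535;

  uint32_t requestInfo = 0;
  requestInfo |= (1u  << SF_RANGE_SCAN_SHIFT);   // ordered-index (range) scan
  requestInfo |= (1u  << SF_DESCENDING_SHIFT);   // descending order
  requestInfo |= (25u << SF_ATTR_LEN_SHIFT);     // 25 words of ATTRINFO

  printf("range=%u descending=%u tupScan=%u attrLen=%u\n",
         (unsigned)((requestInfo >> SF_RANGE_SCAN_SHIFT) & 1),
         (unsigned)((requestInfo >> SF_DESCENDING_SHIFT) & 1),
         (unsigned)((requestInfo >> SF_TUP_SCAN_SHIFT) & 1),
         (unsigned)((requestInfo >> SF_ATTR_LEN_SHIFT) & SF_ATTR_LEN_MASK));
  return 0;
}
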
diff --git a/ndb/include/kernel/signaldata/ScanTab.hpp b/ndb/include/kernel/signaldata/ScanTab.hpp
index 2029b16197e..8cb282270ff 100644
--- a/ndb/include/kernel/signaldata/ScanTab.hpp
+++ b/ndb/include/kernel/signaldata/ScanTab.hpp
@@ -33,8 +33,9 @@ class ScanTabReq {
/**
* Sender(s)
*/
- friend class NdbConnection;
- friend class NdbScanOperation;
+ friend class NdbTransaction;
+ friend class NdbScanOperation;
+ friend class NdbIndexScanOperation;
/**
* For printing
@@ -65,7 +66,12 @@ private:
UintR buddyConPtr; // DATA 8
UintR batch_byte_size; // DATA 9
UintR first_batch_size; // DATA 10
-
+
+ /**
+ * Optional
+ */
+ Uint32 distributionKey;
+
/**
* Get:ers for requestInfo
*/
@@ -74,8 +80,11 @@ private:
static Uint8 getHoldLockFlag(const UintR & requestInfo);
static Uint8 getReadCommittedFlag(const UintR & requestInfo);
static Uint8 getRangeScanFlag(const UintR & requestInfo);
+ static Uint8 getDescendingFlag(const UintR & requestInfo);
+ static Uint8 getTupScanFlag(const UintR & requestInfo);
static Uint8 getKeyinfoFlag(const UintR & requestInfo);
static Uint16 getScanBatch(const UintR & requestInfo);
+ static Uint8 getDistributionKeyFlag(const UintR & requestInfo);
/**
* Set:ers for requestInfo
@@ -86,8 +95,11 @@ private:
static void setHoldLockFlag(UintR & requestInfo, Uint32 flag);
static void setReadCommittedFlag(UintR & requestInfo, Uint32 flag);
static void setRangeScanFlag(UintR & requestInfo, Uint32 flag);
+ static void setDescendingFlag(UintR & requestInfo, Uint32 flag);
+ static void setTupScanFlag(UintR & requestInfo, Uint32 flag);
static void setKeyinfoFlag(UintR & requestInfo, Uint32 flag);
static void setScanBatch(Uint32& requestInfo, Uint32 sz);
+ static void setDistributionKeyFlag(Uint32& requestInfo, Uint32 flag);
};
/**
@@ -98,12 +110,15 @@ private:
h = Hold lock mode - 1 Bit 10
c = Read Committed - 1 Bit 11
k = Keyinfo - 1 Bit 12
+ t = Tup scan - 1 Bit 13
+ z = Descending (TUX) - 1 Bit 14
x = Range Scan (TUX) - 1 Bit 15
b = Scan batch - 10 Bit 16-25 (max 1023)
+ d = Distribution key flag
1111111111222222222233
01234567890123456789012345678901
- ppppppppl hck xbbbbbbbbbb
+ ppppppppl hcktzxbbbbbbbbbb
*/
#define PARALLELL_SHIFT (0)
@@ -124,9 +139,17 @@ private:
#define RANGE_SCAN_SHIFT (15)
#define RANGE_SCAN_MASK (1)
+#define DESCENDING_SHIFT (14)
+#define DESCENDING_MASK (1)
+
+#define TUP_SCAN_SHIFT (13)
+#define TUP_SCAN_MASK (1)
+
#define SCAN_BATCH_SHIFT (16)
#define SCAN_BATCH_MASK (1023)
+#define SCAN_DISTR_KEY_SHIFT (26)
+
inline
Uint8
ScanTabReq::getParallelism(const UintR & requestInfo){
@@ -158,6 +181,18 @@ ScanTabReq::getRangeScanFlag(const UintR & requestInfo){
}
inline
+Uint8
+ScanTabReq::getDescendingFlag(const UintR & requestInfo){
+ return (Uint8)((requestInfo >> DESCENDING_SHIFT) & DESCENDING_MASK);
+}
+
+inline
+Uint8
+ScanTabReq::getTupScanFlag(const UintR & requestInfo){
+ return (Uint8)((requestInfo >> TUP_SCAN_SHIFT) & TUP_SCAN_MASK);
+}
+
+inline
Uint16
ScanTabReq::getScanBatch(const Uint32 & requestInfo){
return (Uint16)((requestInfo >> SCAN_BATCH_SHIFT) & SCAN_BATCH_MASK);
@@ -205,6 +240,20 @@ ScanTabReq::setRangeScanFlag(UintR & requestInfo, Uint32 flag){
}
inline
+void
+ScanTabReq::setDescendingFlag(UintR & requestInfo, Uint32 flag){
+ ASSERT_BOOL(flag, "ScanTabReq::setDescendingFlag");
+ requestInfo |= (flag << DESCENDING_SHIFT);
+}
+
+inline
+void
+ScanTabReq::setTupScanFlag(UintR & requestInfo, Uint32 flag){
+ ASSERT_BOOL(flag, "ScanTabReq::setTupScanFlag");
+ requestInfo |= (flag << TUP_SCAN_SHIFT);
+}
+
+inline
void
ScanTabReq::setScanBatch(Uint32 & requestInfo, Uint32 flag){
ASSERT_MAX(flag, SCAN_BATCH_MASK, "ScanTabReq::setScanBatch");
@@ -225,6 +274,18 @@ ScanTabReq::setKeyinfoFlag(UintR & requestInfo, Uint32 flag){
requestInfo |= (flag << KEYINFO_SHIFT);
}
+inline
+Uint8
+ScanTabReq::getDistributionKeyFlag(const UintR & requestInfo){
+ return (Uint8)((requestInfo >> SCAN_DISTR_KEY_SHIFT) & 1);
+}
+
+inline
+void
+ScanTabReq::setDistributionKeyFlag(UintR & requestInfo, Uint32 flag){
+ ASSERT_BOOL(flag, "ScanTabReq::setKeyinfoFlag");
+ requestInfo |= (flag << SCAN_DISTR_KEY_SHIFT);
+}
/**
*
@@ -235,7 +296,7 @@ class ScanTabConf {
/**
* Reciver(s)
*/
- friend class NdbConnection; // Reciver
+ friend class NdbTransaction; // Reciver
/**
* Sender(s)
@@ -303,7 +364,7 @@ class ScanTabRef {
/**
* Reciver(s)
*/
- friend class NdbConnection; // Reciver
+ friend class NdbTransaction; // Reciver
/**
* Sender(s)
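
ScanTabReq now carries an optional distributionKey word whose presence is announced by the flag at bit 26. The sketch below shows the intended handshake, a sender setting the flag and a receiver checking it before consuming the extra word; it mirrors the shift logic above and is not code from the NDB sources (the key value is arbitrary).

// Illustration of the optional ScanTabReq distribution key (flag bit from this hunk).
#include <cstdio>
#include <cstdint>

int main() {
  const uint32_t SCAN_DISTR_KEY_SHIFT = 26;

  // Sender: announce that the optional distributionKey word follows.
  uint32_t requestInfo = 0;
  requestInfo |= (1u << SCAN_DISTR_KEY_SHIFT);
  uint32_t distributionKey = 0x1234;                 // arbitrary example value

  // Receiver: only read the optional word when the flag is set.
  if ((requestInfo >> SCAN_DISTR_KEY_SHIFT) & 1)
    printf("scan pinned to distribution key 0x%x\n", (unsigned)distributionKey);
  else
    printf("no distribution key supplied\n");
  return 0;
}
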
diff --git a/ndb/include/kernel/signaldata/SignalData.hpp b/ndb/include/kernel/signaldata/SignalData.hpp
index b0cfbc1540c..0591a85d6e6 100644
--- a/ndb/include/kernel/signaldata/SignalData.hpp
+++ b/ndb/include/kernel/signaldata/SignalData.hpp
@@ -18,8 +18,8 @@
#define SIGNAL_DATA_H
#include <ndb_global.h>
-#include <ndb_limits.h>
-#include <kernel_types.h>
+#include <kernel/ndb_limits.h>
+#include <kernel/kernel_types.h>
#include <BaseString.hpp>
#define ASSERT_BOOL(flag, message) assert(flag<=1)
@@ -177,9 +177,16 @@ GSN_PRINT_SIGNATURE(printFAIL_REP);
GSN_PRINT_SIGNATURE(printDISCONNECT_REP);
GSN_PRINT_SIGNATURE(printSUB_CREATE_REQ);
GSN_PRINT_SIGNATURE(printSUB_CREATE_CONF);
+GSN_PRINT_SIGNATURE(printSUB_CREATE_REF);
+GSN_PRINT_SIGNATURE(printSUB_REMOVE_REQ);
+GSN_PRINT_SIGNATURE(printSUB_REMOVE_CONF);
+GSN_PRINT_SIGNATURE(printSUB_REMOVE_REF);
GSN_PRINT_SIGNATURE(printSUB_START_REQ);
GSN_PRINT_SIGNATURE(printSUB_START_REF);
GSN_PRINT_SIGNATURE(printSUB_START_CONF);
+GSN_PRINT_SIGNATURE(printSUB_STOP_REQ);
+GSN_PRINT_SIGNATURE(printSUB_STOP_REF);
+GSN_PRINT_SIGNATURE(printSUB_STOP_CONF);
GSN_PRINT_SIGNATURE(printSUB_SYNC_REQ);
GSN_PRINT_SIGNATURE(printSUB_SYNC_REF);
GSN_PRINT_SIGNATURE(printSUB_SYNC_CONF);
diff --git a/ndb/include/kernel/signaldata/SumaImpl.hpp b/ndb/include/kernel/signaldata/SumaImpl.hpp
index 89ade067dcd..75fb65e1ad2 100644
--- a/ndb/include/kernel/signaldata/SumaImpl.hpp
+++ b/ndb/include/kernel/signaldata/SumaImpl.hpp
@@ -592,11 +592,11 @@ public:
Uint32 subscriptionId;
Uint32 subscriptionKey;
- Uint32 err;
union { // Haven't decide what to call it
Uint32 senderData;
Uint32 subscriberData;
};
+ Uint32 err;
};
class SumaStartMe {
diff --git a/ndb/include/kernel/signaldata/TcCommit.hpp b/ndb/include/kernel/signaldata/TcCommit.hpp
index b7f3fbbb361..dcbca0cb6f2 100644
--- a/ndb/include/kernel/signaldata/TcCommit.hpp
+++ b/ndb/include/kernel/signaldata/TcCommit.hpp
@@ -33,10 +33,10 @@ class TcCommitConf {
* Reciver(s)
*/
friend class Ndb;
- friend class NdbConnection;
+ friend class NdbTransaction;
public:
- STATIC_CONST( SignalLength = 3 );
+ STATIC_CONST( SignalLength = 4 );
private:
/**
@@ -49,6 +49,7 @@ private:
Uint32 transId1;
Uint32 transId2;
+ Uint32 gci;
};
class TcCommitRef {
@@ -60,7 +61,7 @@ class TcCommitRef {
/**
* Reciver(s)
*/
- friend class NdbConnection;
+ friend class NdbTransaction;
public:
STATIC_CONST( SignalLength = 4 );
diff --git a/ndb/include/kernel/signaldata/TcHbRep.hpp b/ndb/include/kernel/signaldata/TcHbRep.hpp
index 58ab015917a..7e701b510f9 100644
--- a/ndb/include/kernel/signaldata/TcHbRep.hpp
+++ b/ndb/include/kernel/signaldata/TcHbRep.hpp
@@ -36,7 +36,7 @@ class TcHbRep {
/**
* Sender(s)
*/
- friend class NdbConnection;
+ friend class NdbTransaction;
/**
* For printing
diff --git a/ndb/include/kernel/signaldata/TcIndx.hpp b/ndb/include/kernel/signaldata/TcIndx.hpp
index 764d4e9fcd7..c5e7d2489ba 100644
--- a/ndb/include/kernel/signaldata/TcIndx.hpp
+++ b/ndb/include/kernel/signaldata/TcIndx.hpp
@@ -18,379 +18,7 @@
#define TC_INDX_H
#include "SignalData.hpp"
-
-class TcIndxReq {
- /**
- * Reciver(s)
- */
- friend class Dbtc; // Reciver
-
- /**
- * Sender(s)
- */
- friend class NdbIndexOperation;
-
- /**
- * For printing
- */
- friend bool printTCINDXREQ(FILE *, const Uint32 *, Uint32, Uint16);
-
-public:
- /**
- * Length of signal
- */
- STATIC_CONST( StaticLength = 8 );
- STATIC_CONST( SignalLength = 25 );
- STATIC_CONST( MaxKeyInfo = 8 );
- STATIC_CONST( MaxAttrInfo = 5 );
-
-private:
-
- enum CommitType {
- CommitIfFailFree = 0,
- TryCommit = 1,
- CommitAsMuchAsPossible = 2
- };
-
- /**
- * DATA VARIABLES
- */
-//-------------------------------------------------------------
-// Unconditional part. First 8 words
-//-------------------------------------------------------------
- UintR apiConnectPtr; // DATA 0
- UintR senderData; // DATA 1
- UintR attrLen; // DATA 2 (including API Version)
- UintR indexId; // DATA 3
- UintR requestInfo; // DATA 4
- UintR indexSchemaVersion; // DATA 5
- UintR transId1; // DATA 6
- UintR transId2; // DATA 7
-//-------------------------------------------------------------
-// Conditional part. Those four words will be sent only if their
-// indicator is set.
-//-------------------------------------------------------------
- UintR scanInfo; // DATA 8
- UintR distrGroupHashValue; // DATA 9
- UintR distributionKeySize; // DATA 10
- UintR storedProcId; // DATA 11
-
-//-------------------------------------------------------------
-// Variable sized key and attrinfo part. Those will be placed to
-// pack the signal in an appropriate manner.
-//-------------------------------------------------------------
- UintR keyInfo[MaxKeyInfo]; // DATA 12 - 19
- UintR attrInfo[MaxAttrInfo]; // DATA 20 - 24
-
- static Uint8 getAPIVersion(const UintR & attrLen);
-
- /**
- * Get:ers for requestInfo
- */
- static Uint8 getCommitFlag(const UintR & requestInfo);
- static Uint8 getCommitType(const UintR & requestInfo);
- static Uint8 getStartFlag(const UintR & requestInfo);
- static Uint8 getSimpleFlag(const UintR & requestInfo);
- static Uint8 getDirtyFlag(const UintR & requestInfo);
- static Uint8 getInterpretedFlag(const UintR & requestInfo);
- static Uint8 getDistributionGroupFlag(const UintR & requestInfo);
- static Uint8 getDistributionGroupTypeFlag(const UintR & requestInfo);
- static Uint8 getDistributionKeyFlag(const UintR & requestInfo);
- static Uint8 getScanIndFlag(const UintR & requestInfo);
-
- static Uint8 getOperationType(const UintR & requestInfo);
-
- static Uint16 getIndexLength(const UintR & requestInfo);
- static Uint8 getAIInTcIndxReq(const UintR & requestInfo);
-
- /**
- * Get:ers for scanInfo
- */
-
- static void setAPIVersion(UintR & attrLen, Uint16 apiVersion);
-
- /**
- * Set:ers for requestInfo
- */
- static void clearRequestInfo(UintR & requestInfo);
- static void setCommitType(UintR & requestInfo, Uint32 type);
- static void setCommitFlag(UintR & requestInfo, Uint32 flag);
- static void setStartFlag(UintR & requestInfo, Uint32 flag);
- static void setSimpleFlag(UintR & requestInfo, Uint32 flag);
- static void setDirtyFlag(UintR & requestInfo, Uint32 flag);
- static void setInterpretedFlag(UintR & requestInfo, Uint32 flag);
- static void setDistributionGroupFlag(UintR & requestInfo, Uint32 flag);
- static void setDistributionGroupTypeFlag(UintR & requestInfo, Uint32 flag);
- static void setDistributionKeyFlag(UintR & requestInfo, Uint32 flag);
- static void setScanIndFlag(UintR & requestInfo, Uint32 flag);
-
- static void setOperationType(UintR & requestInfo, Uint32 type);
-
- static void setIndexLength(UintR & requestInfo, Uint32 len);
- static void setAIInTcIndxReq(UintR & requestInfo, Uint32 len);
-
- /**
- * Set:ers for scanInfo
- */
-
-};
-
-#define API_VER_NO_SHIFT (16)
-#define API_VER_NO_MASK (65535)
-
-/**
- * Request Info
- *
- a = Attr Info in TCINDXREQ - 3 Bits -> Max 7 (Bit 16-18)
- b = Distribution Key Ind - 1 Bit 2
- c = Commit Indicator - 1 Bit 4
- d = Dirty Indicator - 1 Bit 0
- e = Scan Indicator - 1 Bit 14
- g = Distribution Group Ind - 1 Bit 1
- i = Interpreted Indicator - 1 Bit 15
- k = Index lengt - 12 Bits -> Max 4095 (Bit 20 - 31)
- o = Operation Type - 3 Bits -> Max 7 (Bit 5-7)
- p = Simple Indicator - 1 Bit 8
- s = Start Indicator - 1 Bit 11
- t = Distribution GroupType - 1 Bit 3
- y = Commit Type - 2 Bit 12-13
- x = Last Op in execute - 1 Bit 19
-
- 1111111111222222222233
- 01234567890123456789012345678901
- dgbtcooop syyeiaaa-kkkkkkkkkkkk
-*/
-
-#define COMMIT_SHIFT (4)
-#define START_SHIFT (11)
-#define SIMPLE_SHIFT (8)
-#define DIRTY_SHIFT (0)
-#define INTERPRETED_SHIFT (15)
-#define DISTR_GROUP_SHIFT (1)
-#define DISTR_GROUP_TYPE_SHIFT (3)
-#define DISTR_KEY_SHIFT (2)
-#define SCAN_SHIFT (14)
-
-#define OPERATION_SHIFT (5)
-#define OPERATION_MASK (7)
-
-#define AINFO_SHIFT (16)
-#define AINFO_MASK (7)
-
-#define INDEX_LEN_SHIFT (20)
-#define INDEX_LEN_MASK (4095)
-
-#define COMMIT_TYPE_SHIFT (12)
-#define COMMIT_TYPE_MASK (3)
-
-#define LAST_OP_IN_EXEC_SHIFT (19)
-
-/**
- * Scan Info
- *
-
-
- 1111111111222222222233
- 01234567890123456789012345678901
-
-*/
-
-inline
-Uint8
-TcIndxReq::getCommitFlag(const UintR & requestInfo){
- return (Uint8)((requestInfo >> COMMIT_SHIFT) & 1);
-}
-
-inline
-Uint8
-TcIndxReq::getCommitType(const UintR & requestInfo){
- return (Uint8)((requestInfo >> COMMIT_TYPE_SHIFT) & COMMIT_TYPE_MASK);
-}
-
-inline
-Uint8
-TcIndxReq::getStartFlag(const UintR & requestInfo){
- return (Uint8)((requestInfo >> START_SHIFT) & 1);
-}
-
-inline
-Uint8
-TcIndxReq::getSimpleFlag(const UintR & requestInfo){
- return (Uint8)((requestInfo >> SIMPLE_SHIFT) & 1);
-}
-
-inline
-Uint8
-TcIndxReq::getDirtyFlag(const UintR & requestInfo){
- return (Uint8)((requestInfo >> DIRTY_SHIFT) & 1);
-}
-
-inline
-Uint8
-TcIndxReq::getInterpretedFlag(const UintR & requestInfo){
- return (Uint8)((requestInfo >> INTERPRETED_SHIFT) & 1);
-}
-
-inline
-Uint8
-TcIndxReq::getDistributionGroupFlag(const UintR & requestInfo){
- return (Uint8)((requestInfo >> DISTR_GROUP_SHIFT) & 1);
-}
-
-inline
-Uint8
-TcIndxReq::getDistributionGroupTypeFlag(const UintR & requestInfo){
- return (Uint8)((requestInfo >> DISTR_GROUP_TYPE_SHIFT) & 1);
-}
-
-inline
-Uint8
-TcIndxReq::getDistributionKeyFlag(const UintR & requestInfo){
- return (Uint8)((requestInfo >> DISTR_KEY_SHIFT) & 1);
-}
-
-inline
-Uint8
-TcIndxReq::getScanIndFlag(const UintR & requestInfo){
- return (Uint8)((requestInfo >> SCAN_SHIFT) & 1);
-}
-
-inline
-Uint8
-TcIndxReq::getOperationType(const UintR & requestInfo){
- return (Uint8)((requestInfo >> OPERATION_SHIFT) & OPERATION_MASK);
-}
-
-inline
-Uint16
-TcIndxReq::getIndexLength(const UintR & requestInfo){
- return (Uint16)((requestInfo >> INDEX_LEN_SHIFT) & INDEX_LEN_MASK);
-}
-
-inline
-Uint8
-TcIndxReq::getAIInTcIndxReq(const UintR & requestInfo){
- return (Uint8)((requestInfo >> AINFO_SHIFT) & AINFO_MASK);
-}
-
-inline
-void
-TcIndxReq::clearRequestInfo(UintR & requestInfo){
- requestInfo = 0;
-}
-
-inline
-void
-TcIndxReq::setCommitType(UintR & requestInfo, Uint32 type){
- ASSERT_MAX(type, COMMIT_TYPE_MASK, "TcIndxReq::setCommitType");
- requestInfo |= (type << COMMIT_TYPE_SHIFT);
-}
-
-inline
-void
-TcIndxReq::setCommitFlag(UintR & requestInfo, Uint32 flag){
- ASSERT_BOOL(flag, "TcIndxReq::setCommitFlag");
- requestInfo &= ~(1 << COMMIT_SHIFT);
- requestInfo |= (flag << COMMIT_SHIFT);
-}
-
-inline
-void
-TcIndxReq::setStartFlag(UintR & requestInfo, Uint32 flag){
- ASSERT_BOOL(flag, "TcIndxReq::setStartFlag");
- requestInfo &= ~(1 << START_SHIFT);
- requestInfo |= (flag << START_SHIFT);
-}
-
-inline
-void
-TcIndxReq::setSimpleFlag(UintR & requestInfo, Uint32 flag){
- ASSERT_BOOL(flag, "TcIndxReq::setSimpleFlag");
- requestInfo &= ~(1 << SIMPLE_SHIFT);
- requestInfo |= (flag << SIMPLE_SHIFT);
-}
-
-inline
-void
-TcIndxReq::setDirtyFlag(UintR & requestInfo, Uint32 flag){
- ASSERT_BOOL(flag, "TcIndxReq::setDirtyFlag");
- requestInfo &= ~(1 << DIRTY_SHIFT);
- requestInfo |= (flag << DIRTY_SHIFT);
-}
-
-inline
-void
-TcIndxReq::setInterpretedFlag(UintR & requestInfo, Uint32 flag){
- ASSERT_BOOL(flag, "TcIndxReq::setInterpretedFlag");
- requestInfo &= ~(1 << INTERPRETED_SHIFT);
- requestInfo |= (flag << INTERPRETED_SHIFT);
-}
-
-inline
-void
-TcIndxReq::setDistributionGroupTypeFlag(UintR & requestInfo, Uint32 flag){
- ASSERT_BOOL(flag, "TcIndxReq::setDistributionGroupTypeFlag");
- requestInfo &= ~(1 << DISTR_GROUP_TYPE_SHIFT);
- requestInfo |= (flag << DISTR_GROUP_TYPE_SHIFT);
-}
-
-inline
-void
-TcIndxReq::setDistributionGroupFlag(UintR & requestInfo, Uint32 flag){
- ASSERT_BOOL(flag, "TcIndxReq::setDistributionGroupFlag");
- requestInfo &= ~(1 << DISTR_GROUP_SHIFT);
- requestInfo |= (flag << DISTR_GROUP_SHIFT);
-}
-
-inline
-void
-TcIndxReq::setDistributionKeyFlag(UintR & requestInfo, Uint32 flag){
- ASSERT_BOOL(flag, "TcIndxReq::setDistributionKeyFlag");
- requestInfo &= ~(1 << DISTR_KEY_SHIFT);
- requestInfo |= (flag << DISTR_KEY_SHIFT);
-}
-
-inline
-void
-TcIndxReq::setScanIndFlag(UintR & requestInfo, Uint32 flag){
- ASSERT_BOOL(flag, "TcIndxReq::setScanIndFlag");
- requestInfo &= ~(1 << SCAN_SHIFT);
- requestInfo |= (flag << SCAN_SHIFT);
-}
-
-inline
-void
-TcIndxReq::setOperationType(UintR & requestInfo, Uint32 type){
- ASSERT_MAX(type, OPERATION_MASK, "TcIndxReq::setOperationType");
- requestInfo |= (type << OPERATION_SHIFT);
-}
-
-inline
-void
-TcIndxReq::setIndexLength(UintR & requestInfo, Uint32 len){
- ASSERT_MAX(len, INDEX_LEN_MASK, "TcIndxReq::setKeyLength");
- requestInfo |= (len << INDEX_LEN_SHIFT);
-}
-
-inline
-void
-TcIndxReq::setAIInTcIndxReq(UintR & requestInfo, Uint32 len){
- ASSERT_MAX(len, AINFO_MASK, "TcIndxReq::setAIInTcIndxReq");
- requestInfo |= (len << AINFO_SHIFT);
-}
-
-inline
-Uint8
-TcIndxReq::getAPIVersion(const UintR & anAttrLen){
- return (Uint16)((anAttrLen >> API_VER_NO_SHIFT) & API_VER_NO_MASK);
-}
-
-inline
-void
-TcIndxReq::setAPIVersion(UintR & anAttrLen, Uint16 apiVersion){
-// ASSERT_MAX(apiVersion, API_VER_NO_MASK, "TcIndxReq::setAPIVersion");
- anAttrLen |= (apiVersion << API_VER_NO_SHIFT);
-}
+#include "TcKeyReq.hpp"
class TcIndxConf {
@@ -398,7 +26,7 @@ class TcIndxConf {
* Reciver(s)
*/
friend class Ndb;
- friend class NdbConnection;
+ friend class NdbTransaction;
/**
* Sender(s)
@@ -495,34 +123,4 @@ TcIndxConf::setMarkerFlag(Uint32 & confInfo, Uint32 flag){
confInfo |= (flag << 17);
}
-class TcIndxRef {
-
- /**
- * Reciver(s)
- */
- friend class NdbIndexOperation;
-
- /**
- * Sender(s)
- */
- friend class Dbtc;
-
- /**
- * For printing
- */
- friend bool printTCINDXREF(FILE *, const Uint32 *, Uint32, Uint16);
-
-public:
- /**
- * Length of signal
- */
-public:
- STATIC_CONST( SignalLength = 4 );
-
-private:
- Uint32 connectPtr;
- Uint32 transId[2];
- Uint32 errorCode;
-};
-
#endif
diff --git a/ndb/include/kernel/signaldata/TcKeyConf.hpp b/ndb/include/kernel/signaldata/TcKeyConf.hpp
index 277872b990b..c23e94951dc 100644
--- a/ndb/include/kernel/signaldata/TcKeyConf.hpp
+++ b/ndb/include/kernel/signaldata/TcKeyConf.hpp
@@ -27,7 +27,7 @@ class TcKeyConf {
* Reciver(s)
*/
friend class Ndb;
- friend class NdbConnection;
+ friend class NdbTransaction;
friend class Ndbcntr;
friend class DbUtil;
diff --git a/ndb/include/kernel/signaldata/TcKeyFailConf.hpp b/ndb/include/kernel/signaldata/TcKeyFailConf.hpp
index d8207b63262..7c0a766df40 100644
--- a/ndb/include/kernel/signaldata/TcKeyFailConf.hpp
+++ b/ndb/include/kernel/signaldata/TcKeyFailConf.hpp
@@ -33,7 +33,7 @@ class TcKeyFailConf {
* Reciver(s)
*/
friend class Ndb;
- friend class NdbConnection;
+ friend class NdbTransaction;
public:
STATIC_CONST( SignalLength = 3 );
diff --git a/ndb/include/kernel/signaldata/TcKeyReq.hpp b/ndb/include/kernel/signaldata/TcKeyReq.hpp
index 9e42f2a70d5..d7c11ca773c 100644
--- a/ndb/include/kernel/signaldata/TcKeyReq.hpp
+++ b/ndb/include/kernel/signaldata/TcKeyReq.hpp
@@ -142,7 +142,7 @@ private:
* Get:ers for scanInfo
*/
static Uint8 getTakeOverScanFlag(const UintR & scanInfo);
- static Uint16 getTakeOverScanNode(const UintR & scanInfo);
+ static Uint16 getTakeOverScanFragment(const UintR & scanInfo);
static Uint32 getTakeOverScanInfo(const UintR & scanInfo);
@@ -171,7 +171,7 @@ private:
* Set:ers for scanInfo
*/
static void setTakeOverScanFlag(UintR & scanInfo, Uint8 flag);
- static void setTakeOverScanNode(UintR & scanInfo, Uint16 node);
+ static void setTakeOverScanFragment(UintR & scanInfo, Uint16 fragment);
static void setTakeOverScanInfo(UintR & scanInfo, Uint32 aScanInfo);
};
@@ -238,8 +238,8 @@ private:
#define TAKE_OVER_SHIFT (0)
-#define TAKE_OVER_NODE_SHIFT (20)
-#define TAKE_OVER_NODE_MASK (4095)
+#define TAKE_OVER_FRAG_SHIFT (20)
+#define TAKE_OVER_FRAG_MASK (4095)
#define SCAN_INFO_SHIFT (1)
#define SCAN_INFO_MASK (262143)
@@ -485,8 +485,8 @@ TcKeyReq::getTakeOverScanFlag(const UintR & scanInfo){
inline
Uint16
-TcKeyReq::getTakeOverScanNode(const UintR & scanInfo){
- return (Uint16)((scanInfo >> TAKE_OVER_NODE_SHIFT) & TAKE_OVER_NODE_MASK);
+TcKeyReq::getTakeOverScanFragment(const UintR & scanInfo){
+ return (Uint16)((scanInfo >> TAKE_OVER_FRAG_SHIFT) & TAKE_OVER_FRAG_MASK);
}
inline
@@ -505,9 +505,9 @@ TcKeyReq::setTakeOverScanFlag(UintR & scanInfo, Uint8 flag){
inline
void
-TcKeyReq::setTakeOverScanNode(UintR & scanInfo, Uint16 node){
+TcKeyReq::setTakeOverScanFragment(UintR & scanInfo, Uint16 node){
// ASSERT_MAX(node, TAKE_OVER_NODE_MASK, "TcKeyReq::setTakeOverScanNode");
- scanInfo |= (node << TAKE_OVER_NODE_SHIFT);
+ scanInfo |= (node << TAKE_OVER_FRAG_SHIFT);
}
inline
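The TcKeyReq.hpp rename above changes names only: the take-over target is still a 12-bit field starting at bit 20 of scanInfo. A minimal stand-alone illustration of the extraction, under that assumption:

    #include <cstdint>

    // Illustration of getTakeOverScanFragment(): 12-bit field at bit 20 of scanInfo.
    static inline unsigned takeOverScanFragment(uint32_t scanInfo)
    {
      return (scanInfo >> 20) & 4095;   // TAKE_OVER_FRAG_SHIFT / TAKE_OVER_FRAG_MASK
    }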
diff --git a/ndb/include/kernel/signaldata/TcRollbackRep.hpp b/ndb/include/kernel/signaldata/TcRollbackRep.hpp
index b00731a04a6..febbd4f86b1 100644
--- a/ndb/include/kernel/signaldata/TcRollbackRep.hpp
+++ b/ndb/include/kernel/signaldata/TcRollbackRep.hpp
@@ -23,7 +23,7 @@ class TcRollbackRep {
/**
* Sender(s)
*/
- friend class NdbConnection;
+ friend class NdbTransaction;
friend class DbUtil;
/**
diff --git a/ndb/include/kernel/signaldata/TransIdAI.hpp b/ndb/include/kernel/signaldata/TransIdAI.hpp
index 4df7bf2a126..5beaf6eba4b 100755
--- a/ndb/include/kernel/signaldata/TransIdAI.hpp
+++ b/ndb/include/kernel/signaldata/TransIdAI.hpp
@@ -28,7 +28,7 @@ class TransIdAI {
/**
* Receiver(s)
*/
- friend class NdbConnection;
+ friend class NdbTransaction;
friend class Dbtc;
friend class Dbutil;
friend class Dblqh;
diff --git a/ndb/include/kernel/signaldata/TupFrag.hpp b/ndb/include/kernel/signaldata/TupFrag.hpp
index c132b19c50a..8acb3d28bd6 100644
--- a/ndb/include/kernel/signaldata/TupFrag.hpp
+++ b/ndb/include/kernel/signaldata/TupFrag.hpp
@@ -104,9 +104,9 @@ public:
STATIC_CONST( SignalLength = 2 );
enum ErrorCode {
NoError = 0,
- InvalidRequest = 800,
- NoFreeFragment = 604,
- NoFreeAttributes = 827
+ InvalidRequest = 903,
+ NoFreeFragment = 904,
+ NoFreeAttributes = 905
};
private:
Uint32 userPtr;
@@ -145,7 +145,8 @@ public:
STATIC_CONST( SignalLength = 2 );
enum ErrorCode {
NoError = 0,
- InvalidCharset = 743
+ InvalidCharset = 743,
+ TooManyBitsUsed = 831
};
private:
Uint32 userPtr;
@@ -185,9 +186,9 @@ public:
STATIC_CONST( SignalLength = 2 );
enum ErrorCode {
NoError = 0,
- InvalidAttributeType = 742,
- InvalidCharset = 743,
- InvalidNodeSize = 832
+ InvalidAttributeType = 906,
+ InvalidCharset = 907,
+ InvalidNodeSize = 908
};
private:
Uint32 userPtr;
diff --git a/ndb/include/kernel/signaldata/TuxBound.hpp b/ndb/include/kernel/signaldata/TuxBound.hpp
index 87ce3c3c098..7e12897407b 100644
--- a/ndb/include/kernel/signaldata/TuxBound.hpp
+++ b/ndb/include/kernel/signaldata/TuxBound.hpp
@@ -34,7 +34,9 @@ public:
enum ErrorCode {
InvalidAttrInfo = 4110,
InvalidBounds = 4259,
- OutOfBuffers = 873
+ OutOfBuffers = 873,
+ InvalidCharFormat = 744,
+ TooMuchAttrInfo = 823
};
STATIC_CONST( SignalLength = 3 );
private:
@@ -50,6 +52,8 @@ private:
* Number of words of bound info included after fixed signal data.
*/
Uint32 boundAiLength;
+
+ Uint32 data[1];
};
#endif
diff --git a/ndb/include/kernel/trigger_definitions.h b/ndb/include/kernel/trigger_definitions.h
index 7ce74877de4..11410654a15 100644
--- a/ndb/include/kernel/trigger_definitions.h
+++ b/ndb/include/kernel/trigger_definitions.h
@@ -56,6 +56,7 @@ struct TriggerActionTime {
};
struct TriggerEvent {
+ /** TableEvent must match 1 << TriggerEvent */
enum Value {
TE_INSERT = 0,
TE_DELETE = 1,
diff --git a/ndb/include/mgmapi/mgmapi.h b/ndb/include/mgmapi/mgmapi.h
index 26b9dc65947..924d65c2847 100644
--- a/ndb/include/mgmapi/mgmapi.h
+++ b/ndb/include/mgmapi/mgmapi.h
@@ -18,31 +18,121 @@
#define MGMAPI_H
/**
- * @mainpage NDB Cluster Management API
+ * @mainpage MySQL Cluster Management API
*
- * The NDB Cluster Management API (MGM API) is a C API
- * that is used to:
- * - Start/stop database nodes (DB nodes)
- * - Start/stop NDB Cluster backups
- * - Control the NDB Cluster log
- * - Other administrative tasks
+ * The MySQL Cluster Management API (MGM API) is a C language API
+ * that is used for:
+ * - Starting and stopping database nodes (ndbd processes)
+ * - Starting and stopping Cluster backups
+ * - Controlling the NDB Cluster log
+ * - Performing other administrative tasks
*
- * @section General Concepts
+ * @section secMgmApiGeneral General Concepts
*
- * Each MGM API function needs a management server handle
- * (of type Mgm_C_Api::NdbMgmHandle).
- * This handle is initally is created by calling the
- * function ndb_mgm_create_handle().
+ * Each MGM API function needs a management server handle
+ * of type @ref NdbMgmHandle.
+ * This handle is created by calling the function
+ * ndb_mgm_create_handle() and freed by calling
+ * ndb_mgm_destroy_handle().
*
- * A function can return:
- * -# An integer value.
- * If it returns -1 then this indicates an error, and then
- * -# A pointer value. If it returns NULL then check the latest error.
- * If it didn't return NULL, then a "something" is returned.
- * This "something" has to be free:ed by the user of the MGM API.
+ * A function can return any of the following:
+ * -# An integer value, with
+ * a value of <b>-1</b> indicating an error.
+ * -# A non-constant pointer value. A <var>NULL</var> value indicates an error;
+ * otherwise, the return value must be freed
+ * by the programmer
+ * -# A constant pointer value, with a <var>NULL</var> value indicating an error.
+ * The returned value should <em>not</em> be freed.
*
- * If there are an error, then the get latest error functions
- * can be used to check what the error was.
+ * Error conditions can be identified by using the appropriate
+ * error-reporting functions ndb_mgm_get_latest_error() and
+ * @ref ndb_mgm_error.
+ *
+ * Here is an example using the MGM API (without error handling for brevity's sake).
+ * @code
+ * NdbMgmHandle handle= ndb_mgm_create_handle();
+ * ndb_mgm_connect(handle,0,0,0);
+ * struct ndb_mgm_cluster_state *state= ndb_mgm_get_status(handle);
+ * for(int i=0; i < state->no_of_nodes; i++)
+ * {
+ * struct ndb_mgm_node_state *node_state= &state->node_states[i];
+ * printf("node with ID=%d ", node_state->node_id);
+ * if(node_state->version != 0)
+ * printf("connected\n");
+ * else
+ * printf("not connected\n");
+ * }
+ * free((void*)state);
+ * ndb_mgm_destroy_handle(&handle);
+ * @endcode
+ *
+ * @section secLogEvents Log Events
+ *
+ * The database nodes and management server(s) regularly, and on specific
+ * occasions, report on various log events that occur in the cluster. These
+ * log events are written to the cluster log. Optionally, an MGM API client
+ * may listen to these events by using the function ndb_mgm_listen_event().
+ * Each log event belongs to a category, @ref ndb_mgm_event_category, and
+ * has a severity, @ref ndb_mgm_event_severity, associated with it. Each
+ * log event also has a level (0-15) associated with it.
+ *
+ * Which log events are reported is controlled with ndb_mgm_listen_event(),
+ * ndb_mgm_set_clusterlog_loglevel(), and
+ * ndb_mgm_set_clusterlog_severity_filter().
+ *
+ * Below is an example of how to listen to events related to backup.
+ *
+ * @code
+ * int filter[] = { 15, NDB_MGM_EVENT_CATEGORY_BACKUP, 0 };
+ * int fd = ndb_mgm_listen_event(handle, filter);
+ * @endcode
+ *
+ *
+ * @section secSLogEvents Structured Log Events
+ *
+ * The following steps are involved:
+ * - Create an NdbLogEventHandle using ndb_mgm_create_logevent_handle()
+ * - Wait and store log events using ndb_logevent_get_next()
+ * - The log event data is available in the struct ndb_logevent. The
+ *    data that is specific to a particular event is stored in a union
+ *    of structs, so use ndb_logevent::type to decide which struct
+ * is valid.
+ *
+ * Sample code for listening to backup-related events. The available log
+ * events are listed in @ref ndb_logevent.h
+ *
+ * @code
+ * int filter[] = { 15, NDB_MGM_EVENT_CATEGORY_BACKUP, 0 };
+ * NdbEventLogHandle le_handle= ndb_mgm_create_logevent_handle(handle, filter);
+ * struct ndb_logevent le;
+ * int r= ndb_logevent_get_next(le_handle,&le,0);
+ * if (r < 0) error
+ * else if (r == 0) no event
+ *
+ * switch (le.type)
+ * {
+ * case NDB_LE_BackupStarted:
+ * ... le.BackupStarted.starting_node;
+ * ... le.BackupStarted.backup_id;
+ * break;
+ * case NDB_LE_BackupFailedToStart:
+ * ... le.BackupFailedToStart.error;
+ * break;
+ * case NDB_LE_BackupCompleted:
+ * ... le.BackupCompleted.stop_gci;
+ * break;
+ * case NDB_LE_BackupAborted:
+ * ... le.BackupStarted.backup_id;
+ * break;
+ * default:
+ * break;
+ * }
+ * @endcode
+ */
+
+/*
+ * @page ndb_logevent.h ndb_logevent.h
+ * @include ndb_logevent.h
*/
/** @addtogroup MGM_C_API
@@ -51,6 +141,7 @@
#include <stdio.h>
#include <ndb_types.h>
+#include "ndb_logevent.h"
#include "mgmapi_config_parameters.h"
#ifdef __cplusplus
@@ -66,82 +157,116 @@ extern "C" {
* NDB Cluster node types
*/
enum ndb_mgm_node_type {
- NDB_MGM_NODE_TYPE_UNKNOWN = -1, /*< Node type not known*/
- NDB_MGM_NODE_TYPE_API = NODE_TYPE_API,/*< An application node (API)*/
- NDB_MGM_NODE_TYPE_NDB = NODE_TYPE_DB, /*< A database node (DB)*/
- NDB_MGM_NODE_TYPE_MGM = NODE_TYPE_MGM,/*< A mgmt server node (MGM)*/
- NDB_MGM_NODE_TYPE_REP = NODE_TYPE_REP,/*< A replication node */
-
- NDB_MGM_NODE_TYPE_MIN = 0, /*< Min valid value*/
- NDB_MGM_NODE_TYPE_MAX = 3 /*< Max valid value*/
+ NDB_MGM_NODE_TYPE_UNKNOWN = -1 /** Node type not known*/
+ ,NDB_MGM_NODE_TYPE_API /** An application (NdbApi) node */
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
+ = NODE_TYPE_API
+#endif
+ ,NDB_MGM_NODE_TYPE_NDB /** A database node */
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
+ = NODE_TYPE_DB
+#endif
+ ,NDB_MGM_NODE_TYPE_MGM /** A management server node */
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
+ = NODE_TYPE_MGM
+#endif
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
+ ,NDB_MGM_NODE_TYPE_REP = NODE_TYPE_REP /** A replication node */
+ ,NDB_MGM_NODE_TYPE_MIN = 0 /** Min valid value*/
+ ,NDB_MGM_NODE_TYPE_MAX = 3 /** Max valid value*/
+#endif
};
/**
* Database node status
*/
enum ndb_mgm_node_status {
- NDB_MGM_NODE_STATUS_UNKNOWN = 0, /*< Node status not known*/
- NDB_MGM_NODE_STATUS_NO_CONTACT = 1, /*< No contact with node*/
- NDB_MGM_NODE_STATUS_NOT_STARTED = 2, /*< Has not run starting protocol*/
- NDB_MGM_NODE_STATUS_STARTING = 3, /*< Is running starting protocol*/
- NDB_MGM_NODE_STATUS_STARTED = 4, /*< Running*/
- NDB_MGM_NODE_STATUS_SHUTTING_DOWN = 5, /*< Is shutting down*/
- NDB_MGM_NODE_STATUS_RESTARTING = 6, /*< Is restarting*/
- NDB_MGM_NODE_STATUS_SINGLEUSER = 7, /*< Maintenance mode*/
- NDB_MGM_NODE_STATUS_RESUME = 8, /*< Resume mode*/
-
- NDB_MGM_NODE_STATUS_MIN = 0, /*< Min valid value*/
- NDB_MGM_NODE_STATUS_MAX = 6 /*< Max valid value*/
+ /** Node status not known*/
+ NDB_MGM_NODE_STATUS_UNKNOWN = 0,
+ /** No contact with node*/
+ NDB_MGM_NODE_STATUS_NO_CONTACT = 1,
+ /** Has not run starting protocol*/
+ NDB_MGM_NODE_STATUS_NOT_STARTED = 2,
+ /** Is running starting protocol*/
+ NDB_MGM_NODE_STATUS_STARTING = 3,
+ /** Running*/
+ NDB_MGM_NODE_STATUS_STARTED = 4,
+ /** Is shutting down*/
+ NDB_MGM_NODE_STATUS_SHUTTING_DOWN = 5,
+ /** Is restarting*/
+ NDB_MGM_NODE_STATUS_RESTARTING = 6,
+ /** Maintenance mode*/
+ NDB_MGM_NODE_STATUS_SINGLEUSER = 7,
+ /** Resume mode*/
+ NDB_MGM_NODE_STATUS_RESUME = 8,
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
+ /** Min valid value*/
+ NDB_MGM_NODE_STATUS_MIN = 0,
+ /** Max valid value*/
+ NDB_MGM_NODE_STATUS_MAX = 8
+#endif
};
/**
* Error codes
*/
enum ndb_mgm_error {
+ /** Not an error */
NDB_MGM_NO_ERROR = 0,
/* Request for service errors */
+ /** Supplied connectstring is illegal */
NDB_MGM_ILLEGAL_CONNECT_STRING = 1001,
- NDB_MGM_ILLEGAL_PORT_NUMBER = 1002,
- NDB_MGM_ILLEGAL_SOCKET = 1003,
- NDB_MGM_ILLEGAL_IP_ADDRESS = 1004,
+ /** Supplied NdbMgmHandle is illegal */
NDB_MGM_ILLEGAL_SERVER_HANDLE = 1005,
+ /** Illegal reply from server */
NDB_MGM_ILLEGAL_SERVER_REPLY = 1006,
+ /** Illegal number of nodes */
NDB_MGM_ILLEGAL_NUMBER_OF_NODES = 1007,
+ /** Illegal node status */
NDB_MGM_ILLEGAL_NODE_STATUS = 1008,
+ /** Memory allocation error */
NDB_MGM_OUT_OF_MEMORY = 1009,
+ /** Management server not connected */
NDB_MGM_SERVER_NOT_CONNECTED = 1010,
+ /** Could not connect to socket */
NDB_MGM_COULD_NOT_CONNECT_TO_SOCKET = 1011,
/* Service errors - Start/Stop Node or System */
+ /** Start failed */
NDB_MGM_START_FAILED = 2001,
+ /** Stop failed */
NDB_MGM_STOP_FAILED = 2002,
+ /** Restart failed */
NDB_MGM_RESTART_FAILED = 2003,
/* Service errors - Backup */
+ /** Unable to start backup */
NDB_MGM_COULD_NOT_START_BACKUP = 3001,
+ /** Unable to abort backup */
NDB_MGM_COULD_NOT_ABORT_BACKUP = 3002,
/* Service errors - Single User Mode */
+ /** Unable to enter single user mode */
NDB_MGM_COULD_NOT_ENTER_SINGLE_USER_MODE = 4001,
+ /** Unable to exit single user mode */
NDB_MGM_COULD_NOT_EXIT_SINGLE_USER_MODE = 4002,
/* Usage errors */
+ /** Usage error */
NDB_MGM_USAGE_ERROR = 5001
};
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
struct Ndb_Mgm_Error_Msg {
enum ndb_mgm_error code;
- const char * msg;
+ const char * msg;
};
-
const struct Ndb_Mgm_Error_Msg ndb_mgm_error_msgs[] = {
{ NDB_MGM_NO_ERROR, "No error" },
+ /* Request for service errors */
{ NDB_MGM_ILLEGAL_CONNECT_STRING, "Illegal connect string" },
- { NDB_MGM_ILLEGAL_PORT_NUMBER, "Illegal port number" },
- { NDB_MGM_ILLEGAL_SOCKET, "Illegal socket" },
- { NDB_MGM_ILLEGAL_IP_ADDRESS, "Illegal IP address" },
{ NDB_MGM_ILLEGAL_SERVER_HANDLE, "Illegal server handle" },
{ NDB_MGM_ILLEGAL_SERVER_REPLY, "Illegal reply from server" },
{ NDB_MGM_ILLEGAL_NUMBER_OF_NODES, "Illegal number of nodes" },
@@ -158,77 +283,102 @@ extern "C" {
/* Service errors - Backup */
{ NDB_MGM_COULD_NOT_START_BACKUP, "Could not start backup" },
{ NDB_MGM_COULD_NOT_ABORT_BACKUP, "Could not abort backup" },
-
+
/* Service errors - Single User Mode */
- { NDB_MGM_COULD_NOT_ENTER_SINGLE_USER_MODE,
+ { NDB_MGM_COULD_NOT_ENTER_SINGLE_USER_MODE,
"Could not enter single user mode" },
- { NDB_MGM_COULD_NOT_EXIT_SINGLE_USER_MODE,
+ { NDB_MGM_COULD_NOT_EXIT_SINGLE_USER_MODE,
"Could not exit single user mode" },
/* Usage errors */
{ NDB_MGM_USAGE_ERROR,
"Usage error" }
};
-
- const int ndb_mgm_noOfErrorMsgs =
+ const int ndb_mgm_noOfErrorMsgs =
sizeof(ndb_mgm_error_msgs)/sizeof(struct Ndb_Mgm_Error_Msg);
+#endif
/**
- * Structure returned by ndb_mgm_get_status
+ * Status of a node in the cluster.
+ *
+ * Sub-structure in enum ndb_mgm_cluster_state
+ * returned by ndb_mgm_get_status().
+ *
+ * @note <var>node_status</var>, <var>start_phase</var>,
+ * <var>dynamic_id</var>
+ * and <var>node_group</var> are relevant only for database nodes,
+ * i.e. <var>node_type</var> == @ref NDB_MGM_NODE_TYPE_NDB.
*/
struct ndb_mgm_node_state {
- int node_id; /*< NDB Cluster node id*/
- enum ndb_mgm_node_type node_type; /*< Type of NDB Cluster node*/
- enum ndb_mgm_node_status node_status; /*< State of node*/
- int start_phase; /*< Start phase.
- *< @note Start phase is only
- *< valid if
- *< node_type is
- *< NDB_MGM_NODE_TYPE_NDB and
- *< node_status is
- *< NDB_MGM_NODE_STATUS_STARTING
- */
- int dynamic_id; /*< Id for heartbeats and
- *< master take-over
- *< (only valid for DB nodes)
- */
- int node_group; /*< Node group of node
- *< (only valid for DB nodes)*/
- int version; /*< Internal version number*/
- int connect_count; /*< No of times node has connected
- *< or disconnected to the mgm srv
- */
- char connect_address[sizeof("000.000.000.000")+1];
+ /** NDB Cluster node ID*/
+ int node_id;
+ /** Type of NDB Cluster node*/
+ enum ndb_mgm_node_type node_type;
+ /** State of node*/
+ enum ndb_mgm_node_status node_status;
+ /** Start phase.
+ *
+ * @note Start phase is only valid if the <var>node_type</var> is
+ * NDB_MGM_NODE_TYPE_NDB and the <var>node_status</var> is
+ * NDB_MGM_NODE_STATUS_STARTING
+ */
+ int start_phase;
+ /** ID for heartbeats and master take-over (only valid for DB nodes)
+ */
+ int dynamic_id;
+ /** Node group of node (only valid for DB nodes)*/
+ int node_group;
+ /** Internal version number*/
+ int version;
+ /** Number of times the node has connected to or disconnected from the
+ * management server
+ */
+ int connect_count;
+ /** IP address of node when it connected to the management server.
+ * @note This value will be empty if the management server has restarted
+ * since the node last connected.
+ */
+ char connect_address[
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
+ sizeof("000.000.000.000")+1
+#endif
+ ];
};
/**
- * Cluster status
+ * State of all nodes in the cluster; returned from
+ * ndb_mgm_get_status()
*/
struct ndb_mgm_cluster_state {
- int no_of_nodes; /*< No of entries in the
- *< node_states array
- */
- struct ndb_mgm_node_state /*< An array with node_states*/
- node_states[1];
- const char *hostname;
+ /** Number of entries in the node_states array */
+ int no_of_nodes;
+ /** An array with node_states*/
+ struct ndb_mgm_node_state node_states[
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
+ 1
+#endif
+ ];
};
/**
- * Default reply from the server
+ * Default reply from the server (reserved for future use)
*/
struct ndb_mgm_reply {
- int return_code; /*< 0 if successful,
- *< otherwise error code.
- */
- char message[256]; /*< Error or reply message.*/
+ /** 0 if successful, otherwise error code. */
+ int return_code;
+ /** Error or reply message.*/
+ char message[256];
};
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
/**
* Default information types
*/
enum ndb_mgm_info {
- NDB_MGM_INFO_CLUSTER, /*< ?*/
- NDB_MGM_INFO_CLUSTERLOG /*< Cluster log*/
+ /** ?*/
+ NDB_MGM_INFO_CLUSTER,
+ /** Cluster log*/
+ NDB_MGM_INFO_CLUSTERLOG
};
/**
@@ -236,86 +386,26 @@ extern "C" {
* (Used only in the development of NDB Cluster.)
*/
enum ndb_mgm_signal_log_mode {
- NDB_MGM_SIGNAL_LOG_MODE_IN, /*< Log receiving signals */
- NDB_MGM_SIGNAL_LOG_MODE_OUT, /*< Log sending signals*/
- NDB_MGM_SIGNAL_LOG_MODE_INOUT, /*< Log both sending/receiving*/
- NDB_MGM_SIGNAL_LOG_MODE_OFF /*< Log off*/
+ /** Log receiving signals */
+ NDB_MGM_SIGNAL_LOG_MODE_IN,
+ /** Log sending signals*/
+ NDB_MGM_SIGNAL_LOG_MODE_OUT,
+ /** Log both sending/receiving*/
+ NDB_MGM_SIGNAL_LOG_MODE_INOUT,
+ /** Log off*/
+ NDB_MGM_SIGNAL_LOG_MODE_OFF
};
+#endif
- /**
- * Log severities (used to filter the cluster log)
- */
- enum ndb_mgm_clusterlog_level {
- NDB_MGM_ILLEGAL_CLUSTERLOG_LEVEL = -1,
- /* must range from 0 and up, indexes into an array */
- NDB_MGM_CLUSTERLOG_ON = 0, /*< Cluster log on*/
- NDB_MGM_CLUSTERLOG_DEBUG = 1, /*< Used in NDB Cluster
- *< developement
- */
- NDB_MGM_CLUSTERLOG_INFO = 2, /*< Informational messages*/
- NDB_MGM_CLUSTERLOG_WARNING = 3, /*< Conditions that are not
- *< error condition, but
- *< might require handling
- */
- NDB_MGM_CLUSTERLOG_ERROR = 4, /*< Conditions that should be
- *< corrected
- */
- NDB_MGM_CLUSTERLOG_CRITICAL = 5, /*< Critical conditions, like
- *< device errors or out of
- *< resources
- */
- NDB_MGM_CLUSTERLOG_ALERT = 6, /*< A condition that should be
- *< corrected immediately,
- *< such as a corrupted system
- */
- /* must be next number, works as bound in loop */
- NDB_MGM_CLUSTERLOG_ALL = 7 /*< All severities */
- };
-
- /**
- * Log categories
- */
- enum ndb_mgm_event_category {
- /**
- * Invalid
- */
- NDB_MGM_ILLEGAL_EVENT_CATEGORY = -1,
- /**
- * Events during all kinds of startups
- */
- NDB_MGM_EVENT_CATEGORY_STARTUP = CFG_LOGLEVEL_STARTUP,
-
- /**
- * Events during shutdown
- */
- NDB_MGM_EVENT_CATEGORY_SHUTDOWN = CFG_LOGLEVEL_SHUTDOWN,
-
- /**
- * Transaction statistics (Job level, TCP/IP speed)
- */
- NDB_MGM_EVENT_CATEGORY_STATISTIC = CFG_LOGLEVEL_STATISTICS,
- NDB_MGM_EVENT_CATEGORY_CHECKPOINT = CFG_LOGLEVEL_CHECKPOINT,
- NDB_MGM_EVENT_CATEGORY_NODE_RESTART = CFG_LOGLEVEL_NODERESTART,
- NDB_MGM_EVENT_CATEGORY_CONNECTION = CFG_LOGLEVEL_CONNECTION,
- NDB_MGM_EVENT_CATEGORY_DEBUG = CFG_LOGLEVEL_DEBUG,
- NDB_MGM_EVENT_CATEGORY_INFO = CFG_LOGLEVEL_INFO,
- NDB_MGM_EVENT_CATEGORY_WARNING = CFG_LOGLEVEL_WARNING,
- NDB_MGM_EVENT_CATEGORY_ERROR = CFG_LOGLEVEL_ERROR,
- NDB_MGM_EVENT_CATEGORY_GREP = CFG_LOGLEVEL_GREP,
- NDB_MGM_EVENT_CATEGORY_BACKUP = CFG_LOGLEVEL_BACKUP,
-
- NDB_MGM_MIN_EVENT_CATEGORY = CFG_MIN_LOGLEVEL,
- NDB_MGM_MAX_EVENT_CATEGORY = CFG_MAX_LOGLEVEL
- };
-
/***************************************************************************/
- /**
+ /**
* @name Functions: Error Handling
* @{
*/
/**
- * Get latest error associated with a management server handle
+ * Get the most recent error associated with the management server
+ * referred to by <var>handle</var>.
*
* @param handle Management handle
* @return Latest error code
@@ -323,7 +413,7 @@ extern "C" {
int ndb_mgm_get_latest_error(const NdbMgmHandle handle);
/**
- * Get latest main error message associated with a handle
+ * Get the most recent general error message associated with a handle
*
* @param handle Management handle.
* @return Latest error message
@@ -331,9 +421,9 @@ extern "C" {
const char * ndb_mgm_get_latest_error_msg(const NdbMgmHandle handle);
/**
- * Get latest error description associated with a handle
+ * Get the most recent error description associated with a handle
*
- * The error description gives some additional information to
+ * The error description gives some additional information regarding
* the error message.
*
* @param handle Management handle.
@@ -343,11 +433,11 @@ extern "C" {
#ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED
/**
- * Get latest internal source code error line associated with a handle
+ * Get the most recent internal source code error line associated with a handle
*
* @param handle Management handle.
* @return Latest internal source code line of latest error
- * @deprecated
+ * @deprecated
*/
int ndb_mgm_get_latest_error_line(const NdbMgmHandle handle);
#endif
@@ -359,26 +449,48 @@ extern "C" {
/** @} *********************************************************************/
- /**
+ /**
* @name Functions: Create/Destroy Management Server Handles
* @{
*/
- /**
- * Create a handle to a management server
+ /**
+ * Create a handle to a management server.
*
* @return A management handle<br>
- * or NULL if no management handle could be created.
+ * or <var>NULL</var> if no management handle could be created.
*/
NdbMgmHandle ndb_mgm_create_handle();
-
- /**
- * Set connecst string to management server
+
+ /**
+ * Destroy a management server handle.
+ *
+ * @param handle Management handle
+ */
+ void ndb_mgm_destroy_handle(NdbMgmHandle * handle);
+
+ /** @} *********************************************************************/
+ /**
+ * @name Functions: Connect/Disconnect Management Server
+ * @{
+ */
+
+ /**
+ * Sets the connectstring for a management server
*
* @param handle Management handle
- * @param connect_string Connect string to the management server,
+ * @param connect_string Connect string to the management server.
*
* @return -1 on error.
+ *
+ * @code
+ * <connectstring> := [<nodeid-specification>,]<host-specification>[,<host-specification>]
+ * <nodeid-specification> := nodeid=<id>
+ * <host-specification> := <host>[:<port>]
+ * <id> is an integer greater than 0 identifying a node in config.ini
+ * <port> is an integer referring to a regular unix port
+ * <host> is a string containing a valid network host address
+ * @endcode
*/
int ndb_mgm_set_connectstring(NdbMgmHandle handle,
const char *connect_string);
@@ -390,43 +502,88 @@ extern "C" {
const char *ndb_mgm_get_connectstring(NdbMgmHandle handle, char *buf, int buf_sz);
/**
- * Destroy a management server handle
+ * Gets the connectstring used for a connection
*
- * @param handle Management handle
- */
- void ndb_mgm_destroy_handle(NdbMgmHandle * handle);
-
- /** @} *********************************************************************/
- /**
- * @name Functions: Connect/Disconnect Management Server
- * @{
+ * @note This function returns the default connectstring if no call to
+ * ndb_mgm_set_connectstring() has been performed. Also, the
+ * returned connectstring may be formatted differently.
+ *
+ * @param handle Management handle
+ * @param buf Buffer to hold result
+ * @param buf_sz Size of buffer.
+ *
+ * @return connectstring (same as <var>buf</var>)
*/
+ const char *ndb_mgm_get_connectstring(NdbMgmHandle handle, char *buf, int buf_sz);
/**
- * Connect to a management server
+ * Connects to a management server. The connectstring is set with
+ * ndb_mgm_set_connectstring().
*
* @param handle Management handle.
+ * @param no_retries Number of retries to connect
+ * (0 means connect once).
+ * @param retry_delay_in_seconds
+ * How long to wait until retry is performed.
+ * @param verbose Print a message for each connect retry.
+ *
* @return -1 on error.
*/
int ndb_mgm_connect(NdbMgmHandle handle, int no_retries,
int retry_delay_in_seconds, int verbose);
-
/**
- * Disconnect from a management server
+ * Check whether the handle is connected to a management server.
+ *
+ * @param handle Management handle
+ * @return 0 if not connected, non-zero if connected.
+ */
+ int ndb_mgm_is_connected(NdbMgmHandle handle);
+
+ /**
+ * Disconnects from a management server
*
* @param handle Management handle.
* @return -1 on error.
*/
int ndb_mgm_disconnect(NdbMgmHandle handle);
-
+
+ /**
+ * Gets connection node ID
+ *
+ * @param handle Management handle
+ *
+ * @return Node ID; 0 indicates that no node ID has been
+ * specified
+ */
+ int ndb_mgm_get_configuration_nodeid(NdbMgmHandle handle);
+
+ /**
+ * Gets connection port
+ *
+ * @param handle Management handle
+ *
+ * @return port
+ */
+ int ndb_mgm_get_connected_port(NdbMgmHandle handle);
+
+ /**
+ * Gets connection host
+ *
+ * @param handle Management handle
+ *
+ * @return hostname
+ */
+ const char *ndb_mgm_get_connected_host(NdbMgmHandle handle);
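Taken together, the handle and connection functions above support a simple connect / inspect / disconnect cycle. The following is a minimal sketch (C-style, compiles as C++) using only functions declared in this header; the connectstring "nodeid=4,mgmhost:1186", the retry counts, and the assumption that mgmapi.h is on the include path are illustrative, not mandated by the API.

    #include <cstdio>
    #include <mgmapi.h>   // assumed include path

    static int connect_example()
    {
      NdbMgmHandle h = ndb_mgm_create_handle();
      if (h == NULL)
        return -1;

      // hypothetical connectstring: node id 4, host "mgmhost", port 1186
      if (ndb_mgm_set_connectstring(h, "nodeid=4,mgmhost:1186") == -1 ||
          ndb_mgm_connect(h, 5 /*retries*/, 3 /*seconds between retries*/, 1) == -1)
      {
        std::fprintf(stderr, "connect failed: %s\n",
                     ndb_mgm_get_latest_error_msg(h));
        ndb_mgm_destroy_handle(&h);
        return -1;
      }

      std::printf("connected to %s:%d (configured node id %d)\n",
                  ndb_mgm_get_connected_host(h),
                  ndb_mgm_get_connected_port(h),
                  ndb_mgm_get_configuration_nodeid(h));

      ndb_mgm_disconnect(h);
      ndb_mgm_destroy_handle(&h);
      return 0;
    }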
+
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
/** @} *********************************************************************/
- /**
- * @name Functions: Convert between different data formats
+ /**
+ * @name Functions: Used to convert between different data formats
* @{
*/
/**
- * Convert a string to a ndb_mgm_node_type
+ * Converts a string to an <var>ndb_mgm_node_type</var> value
*
* @param type Node type as string.
* @return NDB_MGM_NODE_TYPE_UNKNOWN if invalid string.
@@ -434,23 +591,24 @@ extern "C" {
enum ndb_mgm_node_type ndb_mgm_match_node_type(const char * type);
/**
- * Convert an ndb_mgm_node_type to a string
+ * Converts an ndb_mgm_node_type to a string
*
* @param type Node type.
- * @return NULL if invalid id.
+ * @return <var>NULL</var> if invalid ID.
*/
const char * ndb_mgm_get_node_type_string(enum ndb_mgm_node_type type);
/**
- * Convert an ndb_mgm_node_type to a alias string
+ * Converts an ndb_mgm_node_type to an alias string
*
* @param type Node type.
- * @return NULL if invalid id.
+ * @return <var>NULL</var> if the ID is invalid.
*/
- const char * ndb_mgm_get_node_type_alias_string(enum ndb_mgm_node_type type, const char **str);
+ const char * ndb_mgm_get_node_type_alias_string(enum ndb_mgm_node_type type,
+ const char **str);
/**
- * Convert a string to a ndb_mgm_node_status
+ * Converts a string to an <var>ndb_mgm_node_status</var> value
*
* @param status NDB node status string.
* @return NDB_MGM_NODE_STATUS_UNKNOWN if invalid string.
@@ -458,67 +616,72 @@ extern "C" {
enum ndb_mgm_node_status ndb_mgm_match_node_status(const char * status);
/**
- * Convert an id to a string
+ * Converts a node status to a string
*
* @param status NDB node status.
- * @return NULL if invalid id.
+ * @return <var>NULL</var> if invalid ID.
*/
const char * ndb_mgm_get_node_status_string(enum ndb_mgm_node_status status);
+ const char * ndb_mgm_get_event_severity_string(enum ndb_mgm_event_severity);
ndb_mgm_event_category ndb_mgm_match_event_category(const char *);
const char * ndb_mgm_get_event_category_string(enum ndb_mgm_event_category);
+#endif
/** @} *********************************************************************/
- /**
- * @name Functions: State of cluster
+ /**
+ * @name Functions: Cluster status
* @{
*/
/**
- * Get status of the nodes in an NDB Cluster
+ * Gets status of the nodes in an NDB Cluster
*
- * Note the caller must free the pointer returned.
+ * @note The caller must free the pointer returned by this function.
*
* @param handle Management handle.
- * @return Cluster state (or NULL on error).
+ *
+ * @return Cluster state (or <var>NULL</var> on error).
*/
struct ndb_mgm_cluster_state * ndb_mgm_get_status(NdbMgmHandle handle);
/** @} *********************************************************************/
- /**
- * @name Functions: Start/stop nodes
+ /**
+ * @name Functions: Start/stop nodes
* @{
*/
/**
- * Stop database nodes
+ * Stops database nodes
*
* @param handle Management handle.
- * @param no_of_nodes no of database nodes<br>
- * 0 - means all database nodes in cluster<br>
- * n - Means stop n node(s) specified in the
+ * @param no_of_nodes Number of database nodes to be stopped<br>
+ * 0: All database nodes in cluster<br>
+ * n: Stop the <var>n</var> node(s) specified in the
* array node_list
- * @param node_list List of node ids of database nodes to be stopped
- * @return No of nodes stopped (or -1 on error)
+ * @param node_list List of node IDs for database nodes to be stopped
+ *
+ * @return Number of nodes stopped (-1 on error)
*
- * @note The function is equivalent
- * to ndb_mgm_stop2(handle, no_of_nodes, node_list, 0)
+ * @note This function is equivalent
+ * to calling ndb_mgm_stop2(handle, no_of_nodes, node_list, 0)
*/
- int ndb_mgm_stop(NdbMgmHandle handle, int no_of_nodes,
+ int ndb_mgm_stop(NdbMgmHandle handle, int no_of_nodes,
const int * node_list);
/**
- * Stop database nodes
+ * Stops database nodes
*
* @param handle Management handle.
- * @param no_of_nodes No of database nodes<br>
- * 0 - means all database nodes in cluster<br>
- * n - Means stop n node(s) specified in
+ * @param no_of_nodes Number of database nodes to stop<br>
+ * 0: All database nodes in cluster<br>
+ * n: Stop the <var>n</var> node(s) specified in
* the array node_list
- * @param node_list List of node ids of database nodes to be stopped
- * @param abort Don't perform gracefull stop,
- * but rather stop immediatly
- * @return No of nodes stopped (or -1 on error).
+ * @param node_list List of node IDs of database nodes to be stopped
+ * @param abort Don't perform graceful stop,
+ * but rather stop immediately
+ *
+ * @return Number of nodes stopped (-1 on error).
*/
int ndb_mgm_stop2(NdbMgmHandle handle, int no_of_nodes,
const int * node_list, int abort);
@@ -527,123 +690,142 @@ extern "C" {
* Restart database nodes
*
* @param handle Management handle.
- * @param no_of_nodes No of database nodes<br>
- * 0 - means all database nodes in cluster<br>
- * n - Means stop n node(s) specified in the
+ * @param no_of_nodes Number of database nodes to restart<br>
+ * 0: All database nodes in cluster<br>
+ * n: Restart the <var>n</var> node(s) specified in the
* array node_list
- * @param node_list List of node ids of database nodes to be stopped
- * @return No of nodes stopped (or -1 on error).
+ * @param node_list List of node IDs of database nodes to be restarted
+ *
+ * @return Number of nodes restarted (-1 on error).
*
- * @note The function is equivalent to
+ * @note This function is equivalent to calling
* ndb_mgm_restart2(handle, no_of_nodes, node_list, 0, 0, 0);
*/
- int ndb_mgm_restart(NdbMgmHandle handle, int no_of_nodes,
+ int ndb_mgm_restart(NdbMgmHandle handle, int no_of_nodes,
const int * node_list);
/**
* Restart database nodes
*
* @param handle Management handle.
- * @param no_of_nodes No of database nodes<br>
- * 0 - means all database nodes in cluster<br>
- * n - Means stop n node(s) specified in the
+ * @param no_of_nodes Number of database nodes to be restarted:<br>
+ * 0: Restart all database nodes in the cluster<br>
+ * n: Restart the <var>n</var> node(s) specified in the
* array node_list
- * @param node_list List of node ids of database nodes to be stopped
- * @param initial Remove filesystem from node(s) restarting
- * @param nostart Don't actually start node(s) but leave them
+ * @param node_list List of node IDs of database nodes to be restarted
+ * @param initial Remove filesystem from restarting node(s)
+ * @param nostart Don't actually start node(s) but leave them
* waiting for start command
- * @param abort Don't perform gracefull restart,
- * but rather restart immediatly
- * @return No of nodes stopped (or -1 on error).
+ * @param abort Don't perform graceful restart,
+ * but rather restart immediately
+ *
+ * @return Number of nodes restarted (-1 on error).
*/
int ndb_mgm_restart2(NdbMgmHandle handle, int no_of_nodes,
const int * node_list, int initial,
int nostart, int abort);
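As a usage sketch for the start/stop/restart family above (node IDs 2 and 3 are hypothetical, and the handle is assumed to be connected as in the earlier connection sketch):

    #include <cstdio>
    #include <mgmapi.h>

    static void restart_some_nodes(NdbMgmHandle h)
    {
      int nodes[2] = { 2, 3 };   // hypothetical data node ids

      // graceful restart of nodes 2 and 3, keeping their file systems
      // (initial = 0), starting them again afterwards (nostart = 0), no abort
      int restarted = ndb_mgm_restart2(h, 2, nodes, 0, 0, 0);

      // graceful stop of the same two nodes
      int stopped = ndb_mgm_stop(h, 2, nodes);

      if (restarted == -1 || stopped == -1)
        std::fprintf(stderr, "error: %s\n", ndb_mgm_get_latest_error_msg(h));
    }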
-
+
/**
* Start database nodes
*
* @param handle Management handle.
- * @param no_of_nodes No of database nodes<br>
- * 0 - means all database nodes in cluster<br>
- * n - Means start n node(s) specified in
+ * @param no_of_nodes Number of database nodes to be started<br>
+ * 0: Start all database nodes in the cluster<br>
+ * n: Start the <var>n</var> node(s) specified in
* the array node_list
- * @param node_list List of node ids of database nodes to be started
- * @return No of nodes started (or -1 on error).
+ * @param node_list List of node IDs of database nodes to be started
+ *
+ * @return Number of nodes actually started (-1 on error).
*
- * @note The nodes to start must have been started with nostart(-n)
+ * @note The nodes to be started must have been started with the nostart(-n)
* argument.
- * This means that the database node binary is started and
- * waiting for a START management command which will
- * actually start the database node functionality
+ * This means that the database node binary is started and
+ * waiting for a START management command which will
+ * actually enable the database node.
*/
int ndb_mgm_start(NdbMgmHandle handle,
int no_of_nodes,
const int * node_list);
/** @} *********************************************************************/
- /**
- * @name Functions: Logging and Statistics
+ /**
+ * @name Functions: Controlling Clusterlog output
* @{
*/
/**
- * Filter cluster log
+ * Filter cluster log severities
*
* @param handle NDB management handle.
- * @param level A cluster log level to filter.
- * @param enable set 1=enable 0=disable
+ * @param severity A cluster log severity to filter.
+ * @param enable set 1=enable, 0=disable
* @param reply Reply message.
+ *
* @return -1 on error.
*/
- int ndb_mgm_filter_clusterlog(NdbMgmHandle handle,
- enum ndb_mgm_clusterlog_level level,
- int enable,
- struct ndb_mgm_reply* reply);
-
+ int ndb_mgm_set_clusterlog_severity_filter(NdbMgmHandle handle,
+ enum ndb_mgm_event_severity severity,
+ int enable,
+ struct ndb_mgm_reply* reply);
/**
- * Get log filter
- *
+ * Get clusterlog severity filter
+ *
* @param handle NDB management handle
- * @return A vector of seven elements,
+ *
+ * @return A vector of seven elements,
* where each element contains
- * 1 if a severity is enabled and 0 if not.
- * A severity is stored at position
- * ndb_mgm_clusterlog_level,
+ * 1 if a severity indicator is enabled and 0 if not.
+ * A severity level is stored at the position given by its
+ * ndb_mgm_event_severity value;
* for example the "error" level is stored in position
- * [NDB_MGM_CLUSTERLOG_ERROR-1].
- * The first element in the vector signals
- * whether the clusterlog
+ * [NDB_MGM_EVENT_SEVERITY_ERROR].
+ * The first element [NDB_MGM_EVENT_SEVERITY_ON] in
+ * the vector signals
+ * whether the cluster log
* is disabled or enabled.
*/
- unsigned int *ndb_mgm_get_logfilter(NdbMgmHandle handle);
+ const unsigned int *ndb_mgm_get_clusterlog_severity_filter(NdbMgmHandle handle);
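A small sketch of the severity-filter pair above. It assumes the NDB_MGM_EVENT_SEVERITY_* constants come from ndb_logevent.h (which this header now includes) and that the handle is already connected.

    #include <cstdio>
    #include <mgmapi.h>

    static void show_severity_filter(NdbMgmHandle h)
    {
      struct ndb_mgm_reply reply;

      // turn on reporting of error-severity messages in the cluster log
      ndb_mgm_set_clusterlog_severity_filter(h, NDB_MGM_EVENT_SEVERITY_ERROR,
                                             1 /*enable*/, &reply);

      // inspect the seven-element vector described above
      const unsigned int *sev = ndb_mgm_get_clusterlog_severity_filter(h);
      if (sev != NULL)
        std::printf("cluster log %s, error severity %s\n",
                    sev[NDB_MGM_EVENT_SEVERITY_ON]    ? "enabled" : "disabled",
                    sev[NDB_MGM_EVENT_SEVERITY_ERROR] ? "on" : "off");
    }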
/**
* Set log category and levels for the cluster log
*
* @param handle NDB management handle.
- * @param nodeId Node id.
+ * @param nodeId Node ID.
* @param category Event category.
* @param level Log level (0-15).
* @param reply Reply message.
* @return -1 on error.
*/
- int ndb_mgm_set_loglevel_clusterlog(NdbMgmHandle handle,
+ int ndb_mgm_set_clusterlog_loglevel(NdbMgmHandle handle,
int nodeId,
enum ndb_mgm_event_category category,
int level,
struct ndb_mgm_reply* reply);
- ndb_mgm_clusterlog_level
- ndb_mgm_match_clusterlog_level(const char * name);
- const char *
- ndb_mgm_get_clusterlog_level_string(enum ndb_mgm_clusterlog_level level);
+ /** @} *********************************************************************/
+ /**
+ * @name Functions: Listening to log events
+ * @{
+ */
+
+ /**
+ * Listen to log events. They are read from the returned file descriptor,
+ * and the format is textual, the same as in the cluster log.
+ *
+ * @param handle NDB management handle.
+ * @param filter pairs of { level, ndb_mgm_event_category } that will be
+ *                 pushed to fd; a level of 0 ends the list.
+ *
+ * @return File descriptor to read events from
+ */
+ int ndb_mgm_listen_event(NdbMgmHandle handle, const int filter[]);
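A sketch of consuming the plain-text event stream described above. The level 15 / backup-category filter follows the example on the main page; reading the descriptor with a raw read() is an assumption about how the textual stream is consumed, not a documented requirement.

    #include <cstdio>
    #include <unistd.h>
    #include <mgmapi.h>

    static void tail_backup_events(NdbMgmHandle h)
    {
      int filter[] = { 15, NDB_MGM_EVENT_CATEGORY_BACKUP, 0 };  // level 0 ends list
      int fd = ndb_mgm_listen_event(h, filter);
      if (fd < 0)
        return;

      char buf[512];
      ssize_t n = read(fd, buf, sizeof(buf) - 1);   // blocking read of one chunk
      if (n > 0)
      {
        buf[n] = '\0';
        std::printf("%s", buf);
      }
      close(fd);
    }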
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
/**
* Set log category and levels for the Node
*
* @param handle NDB management handle.
- * @param nodeId Node id.
+ * @param nodeId Node ID.
* @param category Event category.
* @param level Log level (0-15).
* @param reply Reply message.
@@ -664,9 +846,68 @@ extern "C" {
*/
int ndb_mgm_get_stat_port(NdbMgmHandle handle,
struct ndb_mgm_reply* reply);
+#endif
+
+ /**
+ * The NdbLogEventHandle
+ */
+ typedef struct ndb_logevent_handle * NdbLogEventHandle;
+
+ /**
+ * Listen to log events.
+ *
+ * @param handle NDB management handle.
+ * @param filter Pairs of { level, ndb_mgm_event_category } of events to
+ *               subscribe to; a level of 0 ends the list.
+ *
+ * @return NdbLogEventHandle
+ */
+ NdbLogEventHandle ndb_mgm_create_logevent_handle(NdbMgmHandle,
+ const int filter[]);
+ void ndb_mgm_destroy_logevent_handle(NdbLogEventHandle*);
+
+ /**
+ * Retrieve the file descriptor from an NdbLogEventHandle. It may be used,
+ * for example, in an application select() call.
+ *
+ * @note Do not attempt to read from it directly; doing so will corrupt the parsing.
+ *
+ * @return filedescriptor, -1 on failure.
+ */
+ int ndb_logevent_get_fd(const NdbLogEventHandle);
+
+ /**
+ * Attempt to retrieve the next log event, filling in the supplied
+ * struct dst.
+ *
+ * @param dst Pointer to struct to fill in event information
+ * @param timeout_in_milliseconds Timeout for waiting for event
+ *
+ * @return >0 if an event exists, 0 if no event (timed out), or -1 on error.
+ *
+ * @note A return value <= 0 leaves dst untouched.
+ */
+ int ndb_logevent_get_next(const NdbLogEventHandle,
+ struct ndb_logevent *dst,
+ unsigned timeout_in_milliseconds);
+
+ /**
+ * Retrieve the latest error code
+ *
+ * @return error code
+ */
+ int ndb_logevent_get_latest_error(const NdbLogEventHandle);
+
+ /**
+ * Retrieve the latest error message
+ *
+ * @return error message
+ */
+ const char *ndb_logevent_get_latest_error_msg(const NdbLogEventHandle);
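Combining the structured log event functions above, and following the note that the descriptor from ndb_logevent_get_fd() may be used with select(): a hedged sketch. The zero timeout mirrors the example on the main page; the exact timeout semantics are not claimed here.

    #include <cstdio>
    #include <sys/select.h>
    #include <mgmapi.h>

    static void wait_for_backup_start(NdbMgmHandle h)
    {
      int filter[] = { 15, NDB_MGM_EVENT_CATEGORY_BACKUP, 0 };
      NdbLogEventHandle lh = ndb_mgm_create_logevent_handle(h, filter);
      int fd = ndb_logevent_get_fd(lh);

      fd_set rd;
      FD_ZERO(&rd);
      FD_SET(fd, &rd);
      if (select(fd + 1, &rd, NULL, NULL, NULL) > 0)   // wait for data on the handle
      {
        struct ndb_logevent ev;
        int r = ndb_logevent_get_next(lh, &ev, 0);     // 0 as in the page example
        if (r > 0 && ev.type == NDB_LE_BackupStarted)
          std::printf("backup %u started\n", (unsigned) ev.BackupStarted.backup_id);
        else if (r < 0)
          std::fprintf(stderr, "%s\n", ndb_logevent_get_latest_error_msg(lh));
      }
      ndb_mgm_destroy_logevent_handle(&lh);
    }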
+
/** @} *********************************************************************/
- /**
+ /**
* @name Functions: Backup
* @{
*/
@@ -674,13 +915,15 @@ extern "C" {
/**
* Start backup
*
- * @param handle NDB management handle.
- * @param wait_completed 0=don't wait for confirmation
- 1=wait for backup started
- 2=wait for backup completed
- * @param backup_id Backup id is returned from function.
- * @param reply Reply message.
- * @return -1 on error.
+ * @param handle NDB management handle.
+ * @param wait_completed 0: Don't wait for confirmation<br>
+ * 1: Wait for backup to be started<br>
+ * 2: Wait for backup to be completed
+ * @param backup_id Backup ID is returned from function.
+ * @param reply Reply message.
+ * @return -1 on error.
+ * @note backup_id will not be returned if
+ * wait_completed == 0
*/
int ndb_mgm_start_backup(NdbMgmHandle handle, int wait_completed,
unsigned int* backup_id,
@@ -690,7 +933,7 @@ extern "C" {
* Abort backup
*
* @param handle NDB management handle.
- * @param backup_id Backup Id.
+ * @param backup_id Backup ID.
* @param reply Reply message.
* @return -1 on error.
*/
@@ -699,48 +942,49 @@ extern "C" {
/** @} *********************************************************************/
- /**
+ /**
* @name Functions: Single User Mode
* @{
*/
/**
- * Enter Single user mode
+ * Enter Single user mode
*
* @param handle NDB management handle.
- * @param nodeId Node Id of the single user node
+ * @param nodeId Node ID of the single user node
* @param reply Reply message.
* @return -1 on error.
*/
int ndb_mgm_enter_single_user(NdbMgmHandle handle, unsigned int nodeId,
struct ndb_mgm_reply* reply);
-
+
/**
- * Exit Single user mode
+ * Exit Single user mode
*
* @param handle NDB management handle.
- * @param nodeId Node Id of the single user node
* @param reply Reply message.
+ *
* @return -1 on error.
*/
- int ndb_mgm_exit_single_user(NdbMgmHandle handle,
+ int ndb_mgm_exit_single_user(NdbMgmHandle handle,
struct ndb_mgm_reply* reply);
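And a sketch for the single user mode pair above; API node id 10 is a hypothetical example, and success is assumed to be any return value other than -1, as documented.

    #include <mgmapi.h>

    static void maintenance_window(NdbMgmHandle h)
    {
      struct ndb_mgm_reply reply;

      // let only API node 10 (hypothetical) access the cluster
      if (ndb_mgm_enter_single_user(h, 10, &reply) != -1)
      {
        /* ... perform maintenance through API node 10 ... */
        ndb_mgm_exit_single_user(h, &reply);
      }
    }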
-
+
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
+ /** @} *********************************************************************/
/**
- * Listen event
- *
- * @param filter pairs of { level, category } that will be
- * pushed to fd, level=0 ends lists
- * @return fd which events will be pushed to
+ * @name Configuration handling
+ * @{
*/
- int ndb_mgm_listen_event(NdbMgmHandle handle, int filter[]);
-
+
/**
* Get configuration
* @param handle NDB management handle.
* @param version Version of configuration, 0 means latest
- * @see MAKE_VERSION
- * @Note the caller must call ndb_mgm_detroy_configuration
+ * (Currently this is the only supported value for this parameter)
+ *
+ * @return configuration
+ *
+ * @note The caller is responsible for calling ndb_mgm_destroy_configuration()
*/
struct ndb_mgm_configuration * ndb_mgm_get_configuration(NdbMgmHandle handle,
unsigned version);
@@ -748,6 +992,13 @@ extern "C" {
int ndb_mgm_alloc_nodeid(NdbMgmHandle handle,
unsigned version, int nodetype);
+
+
+ /**
+ * Get the node ID of the management server we are connected to
+ */
+ Uint32 ndb_mgm_get_mgmd_nodeid(NdbMgmHandle handle);
+
/**
* Config iterator
*/
@@ -756,14 +1007,14 @@ extern "C" {
ndb_mgm_configuration_iterator* ndb_mgm_create_configuration_iterator
(struct ndb_mgm_configuration *, unsigned type_of_section);
void ndb_mgm_destroy_iterator(ndb_mgm_configuration_iterator*);
-
+
int ndb_mgm_first(ndb_mgm_configuration_iterator*);
int ndb_mgm_next(ndb_mgm_configuration_iterator*);
int ndb_mgm_valid(const ndb_mgm_configuration_iterator*);
- int ndb_mgm_find(ndb_mgm_configuration_iterator*,
+ int ndb_mgm_find(ndb_mgm_configuration_iterator*,
int param, unsigned value);
-
- int ndb_mgm_get_int_parameter(const ndb_mgm_configuration_iterator*,
+
+ int ndb_mgm_get_int_parameter(const ndb_mgm_configuration_iterator*,
int param, unsigned * value);
int ndb_mgm_get_int64_parameter(const ndb_mgm_configuration_iterator*,
int param, Uint64 * value);
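+
+ /*
+  * Example (a sketch): walk the sections of one type in a fetched
+  * configuration. SECTION_TYPE and PARAM_ID stand in for constants from
+  * mgmapi_config_parameters.h, and the return-value conventions assumed
+  * here (ndb_mgm_valid() non-zero while the iterator points at a section)
+  * should be verified against the implementation.
+  *
+  *   ndb_mgm_configuration_iterator *it=
+  *     ndb_mgm_create_configuration_iterator(conf, SECTION_TYPE);
+  *   for (ndb_mgm_first(it); ndb_mgm_valid(it); ndb_mgm_next(it))
+  *   {
+  *     unsigned value;
+  *     ndb_mgm_get_int_parameter(it, PARAM_ID, &value);
+  *   }
+  *   ndb_mgm_destroy_iterator(it);
+  */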
@@ -771,6 +1022,38 @@ extern "C" {
int param, const char ** value);
int ndb_mgm_purge_stale_sessions(NdbMgmHandle handle, char **);
int ndb_mgm_check_connection(NdbMgmHandle handle);
+#endif
+
+#ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED
+ enum ndb_mgm_clusterlog_level {
+ NDB_MGM_ILLEGAL_CLUSTERLOG_LEVEL = -1,
+ NDB_MGM_CLUSTERLOG_ON = 0,
+ NDB_MGM_CLUSTERLOG_DEBUG = 1,
+ NDB_MGM_CLUSTERLOG_INFO = 2,
+ NDB_MGM_CLUSTERLOG_WARNING = 3,
+ NDB_MGM_CLUSTERLOG_ERROR = 4,
+ NDB_MGM_CLUSTERLOG_CRITICAL = 5,
+ NDB_MGM_CLUSTERLOG_ALERT = 6,
+ NDB_MGM_CLUSTERLOG_ALL = 7
+ };
+ inline
+ int ndb_mgm_filter_clusterlog(NdbMgmHandle h,
+ enum ndb_mgm_clusterlog_level s,
+ int e, struct ndb_mgm_reply* r)
+ { return ndb_mgm_set_clusterlog_severity_filter(h,(ndb_mgm_event_severity)s,
+ e,r); }
+
+ inline
+ const unsigned int *ndb_mgm_get_logfilter(NdbMgmHandle h)
+ { return ndb_mgm_get_clusterlog_severity_filter(h); }
+
+ inline
+ int ndb_mgm_set_loglevel_clusterlog(NdbMgmHandle h, int n,
+ enum ndb_mgm_event_category c,
+ int l, struct ndb_mgm_reply* r)
+ { return ndb_mgm_set_clusterlog_loglevel(h,n,c,l,r); }
+#endif
+
#ifdef __cplusplus
}
#endif
diff --git a/ndb/include/mgmapi/mgmapi_config_parameters.h b/ndb/include/mgmapi/mgmapi_config_parameters.h
index 2e3b47eb42e..8f95e159b38 100644
--- a/ndb/include/mgmapi/mgmapi_config_parameters.h
+++ b/ndb/include/mgmapi/mgmapi_config_parameters.h
@@ -99,7 +99,7 @@
#define CFG_LOGLEVEL_INFO 256
#define CFG_LOGLEVEL_WARNING 257
#define CFG_LOGLEVEL_ERROR 258
-#define CFG_LOGLEVEL_GREP 259
+#define CFG_LOGLEVEL_CONGESTION 259
#define CFG_LOGLEVEL_DEBUG 260
#define CFG_LOGLEVEL_BACKUP 261
#define CFG_MAX_LOGLEVEL 261
diff --git a/ndb/include/mgmapi/mgmapi_debug.h b/ndb/include/mgmapi/mgmapi_debug.h
index 1c562cd164f..e86d9d4b768 100644
--- a/ndb/include/mgmapi/mgmapi_debug.h
+++ b/ndb/include/mgmapi/mgmapi_debug.h
@@ -131,6 +131,7 @@ extern "C" {
int param,
const char * value,
struct ndb_mgm_reply* reply);
+
#ifdef __cplusplus
}
#endif
diff --git a/ndb/include/mgmapi/ndb_logevent.h b/ndb/include/mgmapi/ndb_logevent.h
new file mode 100644
index 00000000000..b69379545fc
--- /dev/null
+++ b/ndb/include/mgmapi/ndb_logevent.h
@@ -0,0 +1,623 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#ifndef NDB_LOGEVENT_H
+#define NDB_LOGEVENT_H
+
+/** @addtogroup MGM_C_API
+ * @{
+ */
+
+#include "mgmapi_config_parameters.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+ /**
+ * Available log events grouped by @ref ndb_mgm_event_category
+ */
+
+ enum Ndb_logevent_type {
+
+ NDB_LE_ILLEGAL_TYPE = -1,
+
+ /** NDB_MGM_EVENT_CATEGORY_CONNECTION */
+ NDB_LE_Connected = 0,
+ /** NDB_MGM_EVENT_CATEGORY_CONNECTION */
+ NDB_LE_Disconnected = 1,
+ /** NDB_MGM_EVENT_CATEGORY_CONNECTION */
+ NDB_LE_CommunicationClosed = 2,
+ /** NDB_MGM_EVENT_CATEGORY_CONNECTION */
+ NDB_LE_CommunicationOpened = 3,
+ /** NDB_MGM_EVENT_CATEGORY_CONNECTION */
+ NDB_LE_ConnectedApiVersion = 51,
+
+ /** NDB_MGM_EVENT_CATEGORY_CHECKPOINT */
+ NDB_LE_GlobalCheckpointStarted = 4,
+ /** NDB_MGM_EVENT_CATEGORY_CHECKPOINT */
+ NDB_LE_GlobalCheckpointCompleted = 5,
+ /** NDB_MGM_EVENT_CATEGORY_CHECKPOINT */
+ NDB_LE_LocalCheckpointStarted = 6,
+ /** NDB_MGM_EVENT_CATEGORY_CHECKPOINT */
+ NDB_LE_LocalCheckpointCompleted = 7,
+ /** NDB_MGM_EVENT_CATEGORY_CHECKPOINT */
+ NDB_LE_LCPStoppedInCalcKeepGci = 8,
+ /** NDB_MGM_EVENT_CATEGORY_CHECKPOINT */
+ NDB_LE_LCPFragmentCompleted = 9,
+
+ /** NDB_MGM_EVENT_CATEGORY_STARTUP */
+ NDB_LE_NDBStartStarted = 10,
+ /** NDB_MGM_EVENT_CATEGORY_STARTUP */
+ NDB_LE_NDBStartCompleted = 11,
+ /** NDB_MGM_EVENT_CATEGORY_STARTUP */
+ NDB_LE_STTORRYRecieved = 12,
+ /** NDB_MGM_EVENT_CATEGORY_STARTUP */
+ NDB_LE_StartPhaseCompleted = 13,
+ /** NDB_MGM_EVENT_CATEGORY_STARTUP */
+ NDB_LE_CM_REGCONF = 14,
+ /** NDB_MGM_EVENT_CATEGORY_STARTUP */
+ NDB_LE_CM_REGREF = 15,
+ /** NDB_MGM_EVENT_CATEGORY_STARTUP */
+ NDB_LE_FIND_NEIGHBOURS = 16,
+ /** NDB_MGM_EVENT_CATEGORY_STARTUP */
+ NDB_LE_NDBStopStarted = 17,
+ /** NDB_MGM_EVENT_CATEGORY_STARTUP */
+ NDB_LE_NDBStopAborted = 18,
+ /** NDB_MGM_EVENT_CATEGORY_STARTUP */
+ NDB_LE_StartREDOLog = 19,
+ /** NDB_MGM_EVENT_CATEGORY_STARTUP */
+ NDB_LE_StartLog = 20,
+ /** NDB_MGM_EVENT_CATEGORY_STARTUP */
+ NDB_LE_UNDORecordsExecuted = 21,
+
+ /** NDB_MGM_EVENT_CATEGORY_NODE_RESTART */
+ NDB_LE_NR_CopyDict = 22,
+ /** NDB_MGM_EVENT_CATEGORY_NODE_RESTART */
+ NDB_LE_NR_CopyDistr = 23,
+ /** NDB_MGM_EVENT_CATEGORY_NODE_RESTART */
+ NDB_LE_NR_CopyFragsStarted = 24,
+ /** NDB_MGM_EVENT_CATEGORY_NODE_RESTART */
+ NDB_LE_NR_CopyFragDone = 25,
+ /** NDB_MGM_EVENT_CATEGORY_NODE_RESTART */
+ NDB_LE_NR_CopyFragsCompleted = 26,
+
+ /* NODEFAIL */
+ /** NDB_MGM_EVENT_CATEGORY_NODE_RESTART */
+ NDB_LE_NodeFailCompleted = 27,
+ /** NDB_MGM_EVENT_CATEGORY_NODE_RESTART */
+ NDB_LE_NODE_FAILREP = 28,
+ /** NDB_MGM_EVENT_CATEGORY_NODE_RESTART */
+ NDB_LE_ArbitState = 29,
+ /** NDB_MGM_EVENT_CATEGORY_NODE_RESTART */
+ NDB_LE_ArbitResult = 30,
+ /** NDB_MGM_EVENT_CATEGORY_NODE_RESTART */
+ NDB_LE_GCP_TakeoverStarted = 31,
+ /** NDB_MGM_EVENT_CATEGORY_NODE_RESTART */
+ NDB_LE_GCP_TakeoverCompleted = 32,
+ /** NDB_MGM_EVENT_CATEGORY_NODE_RESTART */
+ NDB_LE_LCP_TakeoverStarted = 33,
+ /** NDB_MGM_EVENT_CATEGORY_NODE_RESTART */
+ NDB_LE_LCP_TakeoverCompleted = 34,
+
+ /** NDB_MGM_EVENT_CATEGORY_STATISTIC */
+ NDB_LE_TransReportCounters = 35,
+ /** NDB_MGM_EVENT_CATEGORY_STATISTIC */
+ NDB_LE_OperationReportCounters = 36,
+ /** NDB_MGM_EVENT_CATEGORY_STATISTIC */
+ NDB_LE_TableCreated = 37,
+ /** NDB_MGM_EVENT_CATEGORY_STATISTIC */
+ NDB_LE_UndoLogBlocked = 38,
+ /** NDB_MGM_EVENT_CATEGORY_STATISTIC */
+ NDB_LE_JobStatistic = 39,
+ /** NDB_MGM_EVENT_CATEGORY_STATISTIC */
+ NDB_LE_SendBytesStatistic = 40,
+ /** NDB_MGM_EVENT_CATEGORY_STATISTIC */
+ NDB_LE_ReceiveBytesStatistic = 41,
+ /** NDB_MGM_EVENT_CATEGORY_STATISTIC */
+ NDB_LE_MemoryUsage = 50,
+
+ /** NDB_MGM_EVENT_CATEGORY_ERROR */
+ NDB_LE_TransporterError = 42,
+ /** NDB_MGM_EVENT_CATEGORY_ERROR */
+ NDB_LE_TransporterWarning = 43,
+ /** NDB_MGM_EVENT_CATEGORY_ERROR */
+ NDB_LE_MissedHeartbeat = 44,
+ /** NDB_MGM_EVENT_CATEGORY_ERROR */
+ NDB_LE_DeadDueToHeartbeat = 45,
+ /** NDB_MGM_EVENT_CATEGORY_ERROR */
+ NDB_LE_WarningEvent = 46,
+
+ /** NDB_MGM_EVENT_CATEGORY_INFO */
+ NDB_LE_SentHeartbeat = 47,
+ /** NDB_MGM_EVENT_CATEGORY_INFO */
+ NDB_LE_CreateLogBytes = 48,
+ /** NDB_MGM_EVENT_CATEGORY_INFO */
+ NDB_LE_InfoEvent = 49,
+
+ /* SINGLE USER */
+ NDB_LE_SingleUser = 52,
+ /* NDB_LE_ UNUSED = 53, */
+
+ /** NDB_MGM_EVENT_CATEGORY_BACKUP */
+ NDB_LE_BackupStarted = 54,
+ /** NDB_MGM_EVENT_CATEGORY_BACKUP */
+ NDB_LE_BackupFailedToStart = 55,
+ /** NDB_MGM_EVENT_CATEGORY_BACKUP */
+ NDB_LE_BackupCompleted = 56,
+ /** NDB_MGM_EVENT_CATEGORY_BACKUP */
+ NDB_LE_BackupAborted = 57
+ };
+
+ /**
+ * Log event severities (used to filter the cluster log,
+ * ndb_mgm_set_clusterlog_severity_filter(), and filter listening to events
+ * ndb_mgm_listen_event())
+ */
+ enum ndb_mgm_event_severity {
+ NDB_MGM_ILLEGAL_EVENT_SEVERITY = -1,
+ /* Must be a nonnegative integer (used for array indexing) */
+ /** Cluster log on */
+ NDB_MGM_EVENT_SEVERITY_ON = 0,
+ /** Used in NDB Cluster development */
+ NDB_MGM_EVENT_SEVERITY_DEBUG = 1,
+ /** Informational messages*/
+ NDB_MGM_EVENT_SEVERITY_INFO = 2,
+ /** Conditions that are not error conditions, but that might require handling.
+ */
+ NDB_MGM_EVENT_SEVERITY_WARNING = 3,
+ /** Conditions that, while not fatal, should be corrected. */
+ NDB_MGM_EVENT_SEVERITY_ERROR = 4,
+ /** Critical conditions, like device errors or out of resources */
+ NDB_MGM_EVENT_SEVERITY_CRITICAL = 5,
+ /** A condition that should be corrected immediately,
+ * such as a corrupted system
+ */
+ NDB_MGM_EVENT_SEVERITY_ALERT = 6,
+ /* must be next number, works as bound in loop */
+ /** All severities */
+ NDB_MGM_EVENT_SEVERITY_ALL = 7
+ };
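+
+ /*
+  * Example (a sketch): enable WARNING messages in the cluster log.
+  * ndb_mgm_set_clusterlog_severity_filter() is declared in mgmapi.h; the
+  * argument order shown here follows the deprecated wrapper in that header
+  * and should be treated as an assumption.
+  *
+  *   struct ndb_mgm_reply reply;
+  *   ndb_mgm_set_clusterlog_severity_filter(handle,
+  *                                          NDB_MGM_EVENT_SEVERITY_WARNING,
+  *                                          1, &reply);
+  */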
+
+ /**
+ * Log event categories, used to set filter level on the log events using
+ * ndb_mgm_set_clusterlog_loglevel() and ndb_mgm_listen_event()
+ */
+ enum ndb_mgm_event_category {
+ /**
+ * Invalid log event category
+ */
+ NDB_MGM_ILLEGAL_EVENT_CATEGORY = -1,
+ /**
+ * Log events during all kinds of startups
+ */
+ NDB_MGM_EVENT_CATEGORY_STARTUP = CFG_LOGLEVEL_STARTUP,
+ /**
+ * Log events during shutdown
+ */
+ NDB_MGM_EVENT_CATEGORY_SHUTDOWN = CFG_LOGLEVEL_SHUTDOWN,
+ /**
+ * Statistics log events
+ */
+ NDB_MGM_EVENT_CATEGORY_STATISTIC = CFG_LOGLEVEL_STATISTICS,
+ /**
+ * Log events related to checkpoints
+ */
+ NDB_MGM_EVENT_CATEGORY_CHECKPOINT = CFG_LOGLEVEL_CHECKPOINT,
+ /**
+ * Log events during node restart
+ */
+ NDB_MGM_EVENT_CATEGORY_NODE_RESTART = CFG_LOGLEVEL_NODERESTART,
+ /**
+ * Log events related to connections between cluster nodes
+ */
+ NDB_MGM_EVENT_CATEGORY_CONNECTION = CFG_LOGLEVEL_CONNECTION,
+ /**
+ * Backup related log events
+ */
+ NDB_MGM_EVENT_CATEGORY_BACKUP = CFG_LOGLEVEL_BACKUP,
+ /**
+ * Congestion related log events
+ */
+ NDB_MGM_EVENT_CATEGORY_CONGESTION = CFG_LOGLEVEL_CONGESTION,
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
+ /**
+ * Loglevel debug
+ */
+ NDB_MGM_EVENT_CATEGORY_DEBUG = CFG_LOGLEVEL_DEBUG,
+#endif
+ /**
+ * Uncategorized log events (severity info)
+ */
+ NDB_MGM_EVENT_CATEGORY_INFO = CFG_LOGLEVEL_INFO,
+ /**
+ * Uncategorized log events (severity warning or higher)
+ */
+ NDB_MGM_EVENT_CATEGORY_ERROR = CFG_LOGLEVEL_ERROR,
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
+ NDB_MGM_MIN_EVENT_CATEGORY = CFG_MIN_LOGLEVEL,
+ NDB_MGM_MAX_EVENT_CATEGORY = CFG_MAX_LOGLEVEL
+#endif
+ };
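+
+ /*
+  * Example (a sketch): report checkpoint events from node 2 at log level 15
+  * to the cluster log. ndb_mgm_set_clusterlog_loglevel() is declared in
+  * mgmapi.h; the argument order shown here follows the deprecated wrapper
+  * in that header and should be treated as an assumption.
+  *
+  *   struct ndb_mgm_reply reply;
+  *   ndb_mgm_set_clusterlog_loglevel(handle, 2,
+  *                                   NDB_MGM_EVENT_CATEGORY_CHECKPOINT,
+  *                                   15, &reply);
+  */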
+
+ /**
+ * Structure to store and retrieve log event information.
+ * @see @ref secSLogEvents
+ */
+ struct ndb_logevent {
+ /** NdbLogEventHandle (to be used for comparison only),
+ * set in ndb_logevent_get_next()
+ */
+ void *handle;
+
+ /** Which event */
+ enum Ndb_logevent_type type;
+
+ /** Time when the log event was registered at the management server */
+ unsigned time;
+
+ /** Category of log event */
+ enum ndb_mgm_event_category category;
+
+ /** Severity of log event */
+ enum ndb_mgm_event_severity severity;
+
+ /** Level (0-15) of log event */
+ unsigned level;
+
+ /** Node ID of the node that reported the log event */
+ unsigned source_nodeid;
+
+ /** Union of log event specific data. Use @ref type to decide
+ * which struct to use
+ */
+ union {
+ /* CONNECT */
+ /** Log event specific data for the corresponding NDB_LE_ log event */
+ struct {
+ unsigned node;
+ } Connected;
+
+ /** Log event specific data for the corresponding NDB_LE_ log event */
+ struct {
+ unsigned node;
+ } Disconnected;
+
+ /** Log event specific data for the corresponding NDB_LE_ log event */
+ struct {
+ unsigned node;
+ } CommunicationClosed;
+
+ /** Log event specific data for the corresponding NDB_LE_ log event */
+ struct {
+ unsigned node;
+ } CommunicationOpened;
+
+ /** Log event specific data for the corresponding NDB_LE_ log event */
+ struct {
+ unsigned node;
+ unsigned version;
+ } ConnectedApiVersion;
+
+ /* CHECKPOINT */
+ /** Log event specific data for the corresponding NDB_LE_ log event */
+ struct {
+ unsigned gci;
+ } GlobalCheckpointStarted;
+ /** Log event specific data for the corresponding NDB_LE_ log event */
+ struct {
+ unsigned gci;
+ } GlobalCheckpointCompleted;
+ /** Log event specific data for the corresponding NDB_LE_ log event */
+ struct {
+ unsigned lci;
+ unsigned keep_gci;
+ unsigned restore_gci;
+ } LocalCheckpointStarted;
+ /** Log event specific data for the corresponding NDB_LE_ log event */
+ struct {
+ unsigned lci;
+ } LocalCheckpointCompleted;
+ /** Log event specific data for the corresponding NDB_LE_ log event */
+ struct {
+ unsigned data;
+ } LCPStoppedInCalcKeepGci;
+ /** Log event specific data for the corresponding NDB_LE_ log event */
+ struct {
+ unsigned node;
+ unsigned table_id;
+ unsigned fragment_id;
+ } LCPFragmentCompleted;
+ /** Log event specific data for the corresponding NDB_LE_ log event */
+ struct {
+ unsigned acc_count;
+ unsigned tup_count;
+ } UndoLogBlocked;
+
+ /* STARTUP */
+ /** Log event specific data for the corresponding NDB_LE_ log event */
+ struct {
+ unsigned version;
+ } NDBStartStarted;
+ /** Log event specific data for the corresponding NDB_LE_ log event */
+ struct {
+ unsigned version;
+ } NDBStartCompleted;
+ /** Log event specific data for the corresponding NDB_LE_ log event */
+ struct {
+ } STTORRYRecieved;
+ /** Log event specific data for the corresponding NDB_LE_ log event */
+ struct {
+ unsigned phase;
+ unsigned starttype;
+ } StartPhaseCompleted;
+ /** Log event specific data for the corresponding NDB_LE_ log event */
+ struct {
+ unsigned own_id;
+ unsigned president_id;
+ unsigned dynamic_id;
+ } CM_REGCONF;
+ /** Log event specific data for the corresponding NDB_LE_ log event */
+ struct {
+ unsigned own_id;
+ unsigned other_id;
+ unsigned cause;
+ } CM_REGREF;
+ /** Log event specific data for the corresponding NDB_LE_ log event */
+ struct {
+ unsigned own_id;
+ unsigned left_id;
+ unsigned right_id;
+ unsigned dynamic_id;
+ } FIND_NEIGHBOURS;
+ /** Log event specific data for the corresponding NDB_LE_ log event */
+ struct {
+ unsigned stoptype;
+ } NDBStopStarted;
+ /** Log event specific data for the corresponding NDB_LE_ log event */
+ struct {
+ } NDBStopAborted;
+ /** Log event specific data for the corresponding NDB_LE_ log event */
+ struct {
+ unsigned node;
+ unsigned keep_gci;
+ unsigned completed_gci;
+ unsigned restorable_gci;
+ } StartREDOLog;
+ /** Log event specific data for the corresponding NDB_LE_ log event */
+ struct {
+ unsigned log_part;
+ unsigned start_mb;
+ unsigned stop_mb;
+ unsigned gci;
+ } StartLog;
+ /** Log event specific data for the corresponding NDB_LE_ log event */
+ struct {
+ unsigned block;
+ unsigned data1;
+ unsigned data2;
+ unsigned data3;
+ unsigned data4;
+ unsigned data5;
+ unsigned data6;
+ unsigned data7;
+ unsigned data8;
+ unsigned data9;
+ unsigned data10;
+ } UNDORecordsExecuted;
+
+ /* NODERESTART */
+ /** Log event specific data for the corresponding NDB_LE_ log event */
+ struct {
+ } NR_CopyDict;
+ /** Log event specific data for the corresponding NDB_LE_ log event */
+ struct {
+ } NR_CopyDistr;
+ /** Log event specific data for the corresponding NDB_LE_ log event */
+ struct {
+ unsigned dest_node;
+ } NR_CopyFragsStarted;
+ /** Log event specific data for the corresponding NDB_LE_ log event */
+ struct {
+ unsigned dest_node;
+ unsigned table_id;
+ unsigned fragment_id;
+ } NR_CopyFragDone;
+ /** Log event specific data for the corresponding NDB_LE_ log event */
+ struct {
+ unsigned dest_node;
+ } NR_CopyFragsCompleted;
+
+ /** Log event specific data for the corresponding NDB_LE_ log event */
+ struct {
+ unsigned block; /* 0 = all */
+ unsigned failed_node;
+ unsigned completing_node; /* 0 = all */
+ } NodeFailCompleted;
+ /** Log event specific data for the corresponding NDB_LE_ log event */
+ struct {
+ unsigned failed_node;
+ unsigned failure_state;
+ } NODE_FAILREP;
+ /** Log event specific data for the corresponding NDB_LE_ log event */
+ struct {
+ unsigned code; /* code & state << 16 */
+ unsigned arbit_node;
+ unsigned ticket_0;
+ unsigned ticket_1;
+ /* TODO */
+ } ArbitState;
+ /** Log event specific data for the corresponding NDB_LE_ log event */
+ struct {
+ unsigned code; /* code & state << 16 */
+ unsigned arbit_node;
+ unsigned ticket_0;
+ unsigned ticket_1;
+ /* TODO */
+ } ArbitResult;
+ /** Log event specific data for the corresponding NDB_LE_ log event */
+ struct {
+ } GCP_TakeoverStarted;
+ /** Log event specific data for the corresponding NDB_LE_ log event */
+ struct {
+ } GCP_TakeoverCompleted;
+ /** Log event specific data for the corresponding NDB_LE_ log event */
+ struct {
+ } LCP_TakeoverStarted;
+ /** Log event specific data for the corresponding NDB_LE_ log event */
+ struct {
+ unsigned state;
+ } LCP_TakeoverCompleted;
+
+ /* STATISTIC */
+ /** Log event specific data for the corresponding NDB_LE_ log event */
+ struct {
+ unsigned trans_count;
+ unsigned commit_count;
+ unsigned read_count;
+ unsigned simple_read_count;
+ unsigned write_count;
+ unsigned attrinfo_count;
+ unsigned conc_op_count;
+ unsigned abort_count;
+ unsigned scan_count;
+ unsigned range_scan_count;
+ } TransReportCounters;
+ /** Log event specific data for the corresponding NDB_LE_ log event */
+ struct {
+ unsigned ops;
+ } OperationReportCounters;
+ /** Log event specific data for the corresponding NDB_LE_ log event */
+ struct {
+ unsigned table_id;
+ } TableCreated;
+ /** Log event specific data for the corresponding NDB_LE_ log event */
+ struct {
+ unsigned mean_loop_count;
+ } JobStatistic;
+ /** Log event specific data for the corresponding NDB_LE_ log event */
+ struct {
+ unsigned to_node;
+ unsigned mean_sent_bytes;
+ } SendBytesStatistic;
+ /** Log event specific data for the corresponding NDB_LE_ log event */
+ struct {
+ unsigned from_node;
+ unsigned mean_received_bytes;
+ } ReceiveBytesStatistic;
+ /** Log event specific data for the corresponding NDB_LE_ log event */
+ struct {
+ int gth;
+ unsigned page_size_kb;
+ unsigned pages_used;
+ unsigned pages_total;
+ unsigned block;
+ } MemoryUsage;
+
+ /* ERROR */
+ /** Log event specific data for the corresponding NDB_LE_ log event */
+ struct {
+ unsigned to_node;
+ unsigned code;
+ } TransporterError;
+ /** Log event specific data for the corresponding NDB_LE_ log event */
+ struct {
+ unsigned to_node;
+ unsigned code;
+ } TransporterWarning;
+ /** Log event specific data for the corresponding NDB_LE_ log event */
+ struct {
+ unsigned node;
+ unsigned count;
+ } MissedHeartbeat;
+ /** Log event specific data for the corresponding NDB_LE_ log event */
+ struct {
+ unsigned node;
+ } DeadDueToHeartbeat;
+ /** Log event specific data for the corresponding NDB_LE_ log event */
+ struct {
+ /* TODO */
+ } WarningEvent;
+
+ /* INFO */
+ /** Log event specific data for the corresponding NDB_LE_ log event */
+ struct {
+ unsigned node;
+ } SentHeartbeat;
+ /** Log event specific data for the corresponding NDB_LE_ log event */
+ struct {
+ unsigned node;
+ } CreateLogBytes;
+ /** Log event specific data for the corresponding NDB_LE_ log event */
+ struct {
+ /* TODO */
+ } InfoEvent;
+
+ /** Log event data for @ref NDB_LE_BackupStarted */
+ struct {
+ unsigned starting_node;
+ unsigned backup_id;
+ } BackupStarted;
+ /** Log event data for @ref NDB_LE_BackupFailedToStart */
+ struct {
+ unsigned starting_node;
+ unsigned error;
+ } BackupFailedToStart;
+ /** Log event data for @ref NDB_LE_BackupCompleted */
+ struct {
+ unsigned starting_node;
+ unsigned backup_id;
+ unsigned start_gci;
+ unsigned stop_gci;
+ unsigned n_records;
+ unsigned n_log_records;
+ unsigned n_bytes;
+ unsigned n_log_bytes;
+ } BackupCompleted;
+ /** Log event data for @ref NDB_LE_BackupAborted */
+ struct {
+ unsigned starting_node;
+ unsigned backup_id;
+ unsigned error;
+ } BackupAborted;
+ /** Log event data for @ref NDB_LE_SingleUser */
+ struct {
+ unsigned type;
+ unsigned node_id;
+ } SingleUser;
+#ifndef DOXYGEN_FIX
+ };
+#else
+ } <union>;
+#endif
+ };
+
+enum ndb_logevent_handle_error {
+ NDB_LEH_NO_ERROR,
+ NDB_LEH_READ_ERROR,
+ NDB_LEH_MISSING_EVENT_SPECIFIER,
+ NDB_LEH_UNKNOWN_EVENT_TYPE,
+ NDB_LEH_UNKNOWN_EVENT_VARIABLE,
+ NDB_LEH_INTERNAL_ERROR
+};
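+
+/*
+ * Example (a sketch): wait for a backup-related log event.
+ * ndb_mgm_create_logevent_handle() and ndb_logevent_get_next() are part of
+ * the MGM API; the exact signatures and the millisecond timeout assumed
+ * here should be checked against the headers.
+ *
+ *   int filter[]= { 15, NDB_MGM_EVENT_CATEGORY_BACKUP, 0 };
+ *   NdbLogEventHandle le= ndb_mgm_create_logevent_handle(handle, filter);
+ *   struct ndb_logevent event;
+ *   if (ndb_logevent_get_next(le, &event, 2000) > 0 &&
+ *       event.type == NDB_LE_BackupCompleted)
+ *     printf("backup %u completed\n", event.BackupCompleted.backup_id);
+ */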
+
+#ifdef __cplusplus
+}
+#endif
+
+/** @} */
+
+#endif
diff --git a/ndb/include/mgmcommon/ConfigRetriever.hpp b/ndb/include/mgmcommon/ConfigRetriever.hpp
index be6d656e1a5..c0b877af07d 100644
--- a/ndb/include/mgmcommon/ConfigRetriever.hpp
+++ b/ndb/include/mgmcommon/ConfigRetriever.hpp
@@ -32,6 +32,7 @@ public:
~ConfigRetriever();
int do_connect(int no_retries, int retry_delay_in_seconds, int verbose);
+ int disconnect();
/**
* Get configuration for current node.
@@ -75,6 +76,8 @@ public:
Uint32 get_mgmd_port() const;
const char *get_mgmd_host() const;
const char *get_connectstring(char *buf, int buf_sz) const;
+ NdbMgmHandle get_mgmHandle() { return m_handle; };
+ NdbMgmHandle* get_mgmHandlePtr() { return &m_handle; };
Uint32 get_configuration_nodeid() const;
private:
diff --git a/ndb/include/ndb_constants.h b/ndb/include/ndb_constants.h
new file mode 100644
index 00000000000..c292880749b
--- /dev/null
+++ b/ndb/include/ndb_constants.h
@@ -0,0 +1,72 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+/**
+ * @file ndb_constants.h
+ *
+ * Constants common to NDB API and NDB kernel.
+ * Changing the values makes database upgrade impossible.
+ *
+ * New or removed definitions must be replicated to
+ * NdbDictionary.hpp and NdbSqlUtil.hpp.
+ *
+ * Not for use by application programs.
+ * Use the enums provided by NdbDictionary instead.
+ */
+
+#ifndef NDB_CONSTANTS_H
+#define NDB_CONSTANTS_H
+
+/*
+ * Data type constants.
+ */
+
+#define NDB_TYPE_UNDEFINED 0
+
+#define NDB_TYPE_TINYINT 1
+#define NDB_TYPE_TINYUNSIGNED 2
+#define NDB_TYPE_SMALLINT 3
+#define NDB_TYPE_SMALLUNSIGNED 4
+#define NDB_TYPE_MEDIUMINT 5
+#define NDB_TYPE_MEDIUMUNSIGNED 6
+#define NDB_TYPE_INT 7
+#define NDB_TYPE_UNSIGNED 8
+#define NDB_TYPE_BIGINT 9
+#define NDB_TYPE_BIGUNSIGNED 10
+#define NDB_TYPE_FLOAT 11
+#define NDB_TYPE_DOUBLE 12
+#define NDB_TYPE_OLDDECIMAL 13
+#define NDB_TYPE_CHAR 14
+#define NDB_TYPE_VARCHAR 15
+#define NDB_TYPE_BINARY 16
+#define NDB_TYPE_VARBINARY 17
+#define NDB_TYPE_DATETIME 18
+#define NDB_TYPE_DATE 19
+#define NDB_TYPE_BLOB 20
+#define NDB_TYPE_TEXT 21
+#define NDB_TYPE_BIT 22
+#define NDB_TYPE_LONGVARCHAR 23
+#define NDB_TYPE_LONGVARBINARY 24
+#define NDB_TYPE_TIME 25
+#define NDB_TYPE_YEAR 26
+#define NDB_TYPE_TIMESTAMP 27
+#define NDB_TYPE_OLDDECIMALUNSIGNED 28
+#define NDB_TYPE_DECIMAL 29
+#define NDB_TYPE_DECIMALUNSIGNED 30
+
+#define NDB_TYPE_MAX 31
+
+#endif
diff --git a/ndb/include/ndb_global.h.in b/ndb/include/ndb_global.h.in
index eadd5e37b9b..43f90e1f8b5 100644
--- a/ndb/include/ndb_global.h.in
+++ b/ndb/include/ndb_global.h.in
@@ -14,8 +14,8 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-#ifndef NDBGLOBAL_H
-#define NDBGLOBAL_H
+#ifndef NDB_GLOBAL_H
+#define NDB_GLOBAL_H
#include <ndb_types.h>
@@ -31,45 +31,22 @@
#define HAVE_STRCASECMP
#define strcasecmp _strcmpi
#pragma warning(disable: 4503 4786)
-typedef unsigned __int64 Uint64;
-typedef signed __int64 Int64;
#else
#undef NDB_WIN32
#define DIR_SEPARATOR "/"
-typedef unsigned long long Uint64;
-typedef signed long long Int64;
#endif
#include <my_global.h>
-typedef signed char Int8;
-typedef unsigned char Uint8;
-typedef signed short Int16;
-typedef unsigned short Uint16;
-typedef signed int Int32;
-typedef unsigned int Uint32;
-
-typedef unsigned int UintR;
-
-#ifdef __SIZE_TYPE__
-typedef __SIZE_TYPE__ UintPtr;
-#elif SIZEOF_CHARP == 4
-typedef Uint32 UintPtr;
-#elif SIZEOF_CHARP == 8
-typedef Uint64 UintPtr;
-#else
-#error "Unknown size of (char *)"
-#endif
-
-#if ! (SIZEOF_CHAR == 1)
+#if ! (NDB_SIZEOF_CHAR == SIZEOF_CHAR)
#error "Invalid define for Uint8"
#endif
-#if ! (SIZEOF_INT == 4)
+#if ! (NDB_SIZEOF_INT == SIZEOF_INT)
#error "Invalid define for Uint32"
#endif
-#if ! (SIZEOF_LONG_LONG == 8)
+#if ! (NDB_SIZEOF_LONG_LONG == SIZEOF_LONG_LONG)
#error "Invalid define for Uint64"
#endif
diff --git a/ndb/include/ndb_types.h.in b/ndb/include/ndb_types.h.in
new file mode 100644
index 00000000000..2a5d576ffea
--- /dev/null
+++ b/ndb/include/ndb_types.h.in
@@ -0,0 +1,81 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+/**
+ * @file ndb_types.h
+ */
+
+#ifndef NDB_TYPES_H
+#define NDB_TYPES_H
+
+#if defined(_WIN32) || defined(__WIN32__) || defined(WIN32) || defined(_WIN64)
+#define NDB_SIZEOF_CHARP SIZEOF_CHARP
+#define NDB_SIZEOF_CHAR SIZEOF_CHAR
+#define NDB_SIZEOF_SHORT 2
+#define NDB_SIZEOF_INT SIZEOF_INT
+#define NDB_SIZEOF_LONG SIZEOF_LONG
+#define NDB_SIZEOF_LONG_LONG SIZEOF_LONG_LONG
+typedef unsigned __int64 Uint64;
+typedef signed __int64 Int64;
+#else
+#define NDB_SIZEOF_CHARP @NDB_SIZEOF_CHARP@
+#define NDB_SIZEOF_CHAR @NDB_SIZEOF_CHAR@
+#define NDB_SIZEOF_INT @NDB_SIZEOF_INT@
+#define NDB_SIZEOF_SHORT @NDB_SIZEOF_SHORT@
+#define NDB_SIZEOF_LONG @NDB_SIZEOF_LONG@
+#define NDB_SIZEOF_LONG_LONG @NDB_SIZEOF_LONG_LONG@
+typedef unsigned long long Uint64;
+typedef signed long long Int64;
+#endif
+
+typedef signed char Int8;
+typedef unsigned char Uint8;
+typedef signed short Int16;
+typedef unsigned short Uint16;
+typedef signed int Int32;
+typedef unsigned int Uint32;
+
+typedef unsigned int UintR;
+
+#ifdef __SIZE_TYPE__
+ typedef __SIZE_TYPE__ UintPtr;
+#elif NDB_SIZEOF_CHARP == 4
+ typedef Uint32 UintPtr;
+#elif NDB_SIZEOF_CHARP == 8
+ typedef Uint64 UintPtr;
+#else
+ #error "Unknown size of (char *)"
+#endif
+
+#if ! (NDB_SIZEOF_CHAR == 1)
+#error "Invalid define for Uint8"
+#endif
+
+#if ! (NDB_SIZEOF_SHORT == 2)
+#error "Invalid define for Uint16"
+#endif
+
+#if ! (NDB_SIZEOF_INT == 4)
+#error "Invalid define for Uint32"
+#endif
+
+#if ! (NDB_SIZEOF_LONG_LONG == 8)
+#error "Invalid define for Uint64"
+#endif
+
+#include "ndb_constants.h"
+
+#endif
diff --git a/ndb/include/ndbapi/Ndb.hpp b/ndb/include/ndbapi/Ndb.hpp
index e905a304c97..7e2e9037a34 100644
--- a/ndb/include/ndbapi/Ndb.hpp
+++ b/ndb/include/ndbapi/Ndb.hpp
@@ -17,245 +17,700 @@
/**
@mainpage NDB API Programmers' Guide
- This guide assumes a basic familiarity with NDB Cluster concepts.
- Some of the fundamental ones are described in section @ref secConcepts.
-
- The <em>NDB API</em> is an NDB Cluster application interface
- that implements both synchronous and asynchronous transactions.
+ This guide assumes a basic familiarity with MySQL Cluster concepts found
+ on http://dev.mysql.com/doc/mysql/en/NDBCluster.html .
+ Some of the fundamental ones are also described in section @ref secConcepts.
+
+ The NDB API is a MySQL Cluster application interface
+ that implements transactions.
The NDB API consists of the following fundamental classes:
- - Ndb is the main class representing the database,
- - NdbConnection represents a transaction,
- - NdbOperation represents a transaction operation using primary key,
- - NdbIndexOperation represents a transaction operation using a secondary
- index,
- - NdbRecAttr represents the value of an attribute, and
+ - Ndb_cluster_connection, representing a connection to a cluster,
+ - Ndb is the main class, representing a connection to a database,
+ - NdbTransaction represents a transaction,
+ - NdbOperation represents an operation using a primary key,
+ - NdbScanOperation represents an operation performing a full table scan,
+ - NdbIndexOperation represents an operation using a unique hash index,
+ - NdbIndexScanOperation represents an operation performing a scan using
+ an ordered index,
+ - NdbRecAttr represents an attribute value, and
- NdbDictionary represents meta information about tables and attributes.
- - NdbError represents an error condition
- There are also some auxiliary classes.
+
+ In addition, the NDB API defines a structure NdbError, which contains the
+ specification for an error.
+
+ It is also possible to receive "events" triggered when data in the database is changed.
+ This is done through the NdbEventOperation class.
+
+ There are also some auxiliary classes, which are listed in the class hierarchy.
The main structure of an application program is as follows:
- -# Construct and initialize Ndb object(s).
- -# Define and execute (synchronous or asynchronous) transactions.
- -# Delete Ndb objects
-
- The main structure of a transaction is as follows:
- -# Start transaction
- -# Add and define operations (associated with the transaction)
- -# Execute transaction
-
- The execute can be of two different types,
- <em>Commit</em> or <em>NoCommit</em>.
- (The execute can also be divided into three
- steps: prepare, send, and poll to get asynchronous
- transactions. More about this later.)
-
- If the execute is of type NoCommit,
- then the application program executes part of a transaction,
- but without committing the transaction.
- After a NoCommit type of execute, the program can continue
+ -# Connect to a cluster using the Ndb_cluster_connection
+ object.
+ -# Initiate a database connection by constructing and initialising one or more Ndb objects.
+ -# Define and execute transactions using the NdbTransaction class.
+ -# Delete Ndb objects.
+ -# Terminate the connection to the cluster (terminate instance of Ndb_cluster_connection).
+
+ The procedure for using transactions is as follows:
+ -# Start transaction (instantiate an NdbTransaction object)
+ -# Add and define operations associated with the transaction using instances of one or more of the
+ NdbOperation, NdbScanOperation, NdbIndexOperation, and NdbIndexScanOperation classes
+ -# Execute transaction (call NdbTransaction::execute())
+
+ The operation can be of two different types,
+ <var>Commit</var> or <var>NoCommit</var>.
+ If the operation is of type <var>NoCommit</var>,
+ then the application program executes the operation part of a transaction,
+ but without actually committing the transaction.
+ After executing a <var>NoCommit</var> operation, the program can continue
to add and define more operations to the transaction
for later execution.
- If the execute is of type Commit, then the transaction is
- committed and no further adding and defining of operations
- is allowed.
-
+ If the operation is of type <var>Commit</var>, then the transaction is
+ immediately committed. The transaction <em>must</em> be closed after it has been
+ committed (even if the commit fails), and no further addition or definition of
+ operations for this transaction is allowed.
@section secSync Synchronous Transactions
- Synchronous transactions are defined and executed in the following way.
+ Synchronous transactions are defined and executed as follows:
- -# Start (create) transaction (the transaction will be
- referred to by an NdbConnection object,
- typically created by Ndb::startTransaction).
- At this step the transaction is being defined.
- It is not yet sent to the NDB kernel.
- -# Add and define operations to the transaction
- (using NdbConnection::getNdbOperation and
- methods from class NdbOperation).
- The transaction is still not sent to the NDB kernel.
- -# Execute the transaction (using NdbConnection::execute).
- -# Close the transaction (using Ndb::closeTransaction).
+ -# Start (create) the transaction, which is
+ referenced by an NdbTransaction object
+ (typically created using Ndb::startTransaction()).
+ At this point, the transaction is only being defined,
+ and is not yet sent to the NDB kernel.
+ -# Define operations and add them to the transaction, using one or more of
+ - NdbTransaction::getNdbOperation()
+ - NdbTransaction::getNdbScanOperation()
+ - NdbTransaction::getNdbIndexOperation()
+ - NdbTransaction::getNdbIndexScanOperation()
+ along with the appropriate methods of the respective NdbOperation class
+ (or possibly one or more of its subclasses).
+ Note that the transaction has still not been sent to the NDB kernel.
+ -# Execute the transaction, using the NdbTransaction::execute() method.
+ -# Close the transaction (call Ndb::closeTransaction()).
- See example program in section @ref ndbapi_example1.cpp.
+ For an example of this process, see the program listing in
+ @ref ndbapi_simple.cpp.
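+
+ A minimal sketch of these four steps follows (error handling omitted;
+ names such as <code>myNdb</code>, <code>myTable</code> and the column
+ names are illustrative only, and <var>Commit</var> is spelled as in the
+ other examples on this page):
+ @code
+ NdbTransaction *myTransaction= myNdb->startTransaction();
+ NdbOperation *myOperation= myTransaction->getNdbOperation(myTable);
+ myOperation->readTuple(NdbOperation::LM_Read);
+ myOperation->equal("ATTR1", i);
+ NdbRecAttr *myRecAttr= myOperation->getValue("ATTR2", NULL);
+ myTransaction->execute(Commit);
+ myNdb->closeTransaction(myTransaction);
+ @endcode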
To execute several parallel synchronous transactions, one can either
- use multiple Ndb objects in several threads or start multiple
- applications programs.
- Another way to execute several parallel transactions is to use
- asynchronous transactions.
-
-
+ use multiple Ndb objects in several threads, or start multiple
+ application programs.
+
@section secNdbOperations Operations
- Each transaction (NdbConnection object) consist of a list of
- operations (NdbOperation or NdbIndexOperation objects.
- NdbIndexOperation is used for accessing tables through secondary indexes).
- Operations are of two different kinds:
- -# standard operations, and
- -# interpreted program operations.
+ A NdbTransaction consists of a list of operations, each of which is represented
+ by an instance of NdbOperation, NdbScanOperation, NdbIndexOperation, or
+ NdbIndexScanOperation.
- <h3>Standard Operations</h3>
- After the operation is created using NdbConnection::getNdbOperation
- (or NdbConnection::getNdbIndexOperation),
- it is defined in the following three steps:
- -# Defining standard operation type
- (e.g. using NdbOperation::readTuple)
- -# Specifying search conditions
- (e.g. using NdbOperation::equal)
- -# Specify attribute actions
- (e.g. using NdbOperation::getValue)
-
- Example code (using an NdbOperation):
+ <h3>Single row operations</h3>
+ After the operation is created using NdbTransaction::getNdbOperation()
+ (or NdbTransaction::getNdbIndexOperation()), it is defined in the following
+ three steps:
+ -# Define the standard operation type, using NdbOperation::readTuple()
+ -# Specify search conditions, using NdbOperation::equal()
+ -# Specify attribute actions, using NdbOperation::getValue()
+
+ Here are two brief examples illustrating this process. For the sake of
+ brevity, we omit error handling.
+
+ This first example uses an NdbOperation:
@code
- MyOperation = MyConnection->getNdbOperation("MYTABLENAME"); // 1. Create
- if (MyOperation == NULL) APIERROR(MyConnection->getNdbError());
+ // 1. Retrieve table object
+ myTable= myDict->getTable("MYTABLENAME");
+
+ // 2. Create
+ myOperation= myTransaction->getNdbOperation(myTable);
- MyOperation->readTuple(); // 2. Define type of operation
- MyOperation->equal("ATTR1", i); // 3. Specify Search Conditions
+ // 3. Define type of operation and lock mode
+ myOperation->readTuple(NdbOperation::LM_Read);
+
+ // 4. Specify Search Conditions
+ myOperation->equal("ATTR1", i);
- MyRecAttr = MyOperation->getValue("ATTR2", NULL); // 4. Attribute Actions
- if (MyRecAttr == NULL) APIERROR(MyConnection->getNdbError());
+ // 5. Attribute Actions
+ myRecAttr= myOperation->getValue("ATTR2", NULL);
@endcode
- For more examples, see @ref ndbapi_example1.cpp and @ref ndbapi_example2.cpp.
+ For additional examples of this sort, see @ref ndbapi_simple.cpp.
- Example code using an NdbIndexOperation:
+ The second example uses an NdbIndexOperation:
@code
- MyOperation = // 1. Create
- MyConnection->getNdbIndexOperation("MYINDEX", "MYTABLENAME");
- if (MyOperation == NULL) APIERROR(MyConnection->getNdbError());
+ // 1. Retrieve index object
+ myIndex= myDict->getIndex("MYINDEX", "MYTABLENAME");
+
+ // 2. Create
+ myOperation= myTransaction->getNdbIndexOperation(myIndex);
+
+ // 3. Define type of operation and lock mode
+ myOperation->readTuple(NdbOperation::LM_Read);
- MyOperation->readTuple(); // 2. Define type of operation
- MyOperation->equal("ATTR1", i); // 3. Specify Search Conditions
+ // 4. Specify Search Conditions
+ myOperation->equal("ATTR1", i);
- MyRecAttr = MyOperation->getValue("ATTR2", NULL); // 4. Attribute Actions
- if (MyRecAttr == NULL) APIERROR(MyConnection->getNdbError());
+ // 5. Attribute Actions
+ myRecAttr = myOperation->getValue("ATTR2", NULL);
@endcode
- For more examples, see @ref ndbapi_example4.cpp.
+ Another example of this second type can be found in
+ @ref ndbapi_simple_index.cpp.
+ We will now discuss in somewhat greater detail each step involved in the
+ creation and use of synchronous transactions.
- <h4>Step 1: Define Standard Operation Type</h4>
- The following types of standard operations exist:
- -# NdbOperation::insertTuple :
+ <h4>Step 1: Define single row operation type</h4>
+ The following operation types are supported:
+ -# NdbOperation::insertTuple() :
inserts a non-existing tuple
- -# NdbOperation::writeTuple :
+ -# NdbOperation::writeTuple() :
updates an existing tuple if is exists,
otherwise inserts a new tuple
- -# NdbOperation::updateTuple :
+ -# NdbOperation::updateTuple() :
updates an existing tuple
- -# NdbOperation::deleteTuple :
+ -# NdbOperation::deleteTuple() :
deletes an existing tuple
- -# NdbOperation::readTuple :
- reads an existing tuple
- -# NdbOperation::readTupleExclusive :
- reads an existing tuple using an exclusive lock
- -# NdbOperation::simpleRead :
- reads an existing tuple (using shared read lock),
- but releases lock immediately after read
- -# NdbOperation::committedRead :
- reads committed tuple
- -# NdbOperation::dirtyUpdate :
- updates an existing tuple, but releases lock immediately
- after read (uses dirty lock)
- -# NdbOperation::dirtyWrite :
- updates or writes a tuple, but releases lock immediately
- after read (uses dirty lock)
+ -# NdbOperation::readTuple() :
+ reads an existing tuple with specified lock mode
All of these operations operate on the unique tuple key.
(When NdbIndexOperation is used then all of these operations
- operate on a defined secondary index.)
-
-
- Some comments:
- - NdbOperation::simpleRead and
- NdbOperation::committedRead can execute on the same transaction
- as the above operations but will release its locks immediately
- after reading the tuple.
- NdbOperation::simpleRead will always read the latest version
- of the tuple.
- Thus it will wait until it can acquire a shared read lock on
- the tuple.
- NdbOperation::committedRead will read the latest committed
- version of the tuple.
- <br>
- Both NdbOperation::simpleRead and NdbOperation::committedRead
- are examples of consistent reads which are not repeatable.
- All reads read the latest version if updates were made by the same
- transaction.
- Errors on simple read are only reported by the NdbOperation object.
- These error codes are not transferred to the NdbConnection object.
- - NdbOperation::dirtyUpdate and NdbOperation::dirtyWrite
- will execute in the same transaction
- but will release the lock immediately after updating the
- tuple.
- It will wait on the lock until it can acquire an exclusive
- write lock.
- In a replicated version of NDB Cluster NdbOperation::dirtyUpdate
- can lead to inconsistency between the replicas.
- Examples of when it could be used is
- to update statistical counters on tuples which are "hot-spots".
+ operate on a defined unique hash index.)
@note If you want to define multiple operations within the same transaction,
- then you need to call NdbConnection::getNdbOperation
- (or NdbConnection::getNdbIndexOperation) for each
- operation.
-
+ then you need to call NdbTransaction::getNdbOperation() or
+ NdbTransaction::getNdbIndexOperation() for each operation.
<h4>Step 2: Specify Search Conditions</h4>
- The search condition is used to select tuples.
- (In the current NdbIndexOperation implementation
- this means setting the value of
- the secondary index attributes of the wanted tuple.)
-
- If a tuple identity is used, then NdbOperation::setTupleId
- is used to define the search key when inserting new tuples.
- Otherwise, NdbOperation::equal is used.
-
- For NdbOperation::insertTuple it is also allowed to define the
- search key by using NdbOperation::setValue.
- The NDB API will automatically detect that it is
- supposed to use NdbOperation::equal instead.
- For NdbOperation::insertTuple it is not necessary to use
- NdbOperation::setValue on key attributes before other attributes.
-
+ The search condition is used to select tuples. Search conditions are set using NdbOperation::equal().
<h4>Step 3: Specify Attribute Actions</h4>
- Now it is time to define which attributes should be read or updated.
- Deletes can neither read nor set values, read can only read values and
- updates can only set values.
- Normally the attribute is defined by its name but it is
- also possible to use the attribute identity to define the
+ Next, it is necessary to determine which attributes should be read or updated.
+ It is important to remember that:
+ - Deletes can neither read nor set values, but only delete them
+ - Reads can only read values
+ - Updates can only set values
+ Normally the attribute is identified by name, but it is
+ also possible to use the attribute's identity to determine the
attribute.
- The mapping from name to identity is performed by the Table object.
- NdbIndexOperation::getValue returns an NdbRecAttr object
+ NdbOperation::getValue() returns an NdbRecAttr object
containing the read value.
- To get the value, there is actually two methods.
- The application can either
+ To obtain the actual value, one of two methods can be used;
+ the application can either
- use its own memory (passed through a pointer aValue) to
- NdbIndexOperation::getValue, or
+ NdbOperation::getValue(), or
- receive the attribute value in an NdbRecAttr object allocated
by the NDB API.
- The NdbRecAttr object is released when Ndb::closeTransaction
+ The NdbRecAttr object is released when Ndb::closeTransaction()
is called.
- Thus, the application can not reference this object after
- Ndb::closeTransaction have been called.
- The result of reading data from an NdbRecAttr object before
- calling NdbConnection::execute is undefined.
+ Thus, the application cannot reference this object following
+ any subsequent call to Ndb::closeTransaction().
+ Attempting to read data from an NdbRecAttr object before
+ calling NdbTransaction::execute() yields an undefined result.
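+
+ For example (a sketch only; <code>u_32_value()</code> is assumed here to be
+ the appropriate NdbRecAttr accessor for an unsigned 32-bit column):
+ @code
+ NdbRecAttr *myRecAttr= myOperation->getValue("ATTR2", NULL);
+ myTransaction->execute(NoCommit);
+ Uint32 myValue= myRecAttr->u_32_value();
+ @endcode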
+
+
+ @subsection secScan Scan Operations
+
+ Scans are roughly the equivalent of SQL cursors, providing a means to
+ perform high-speed row processing. A scan can be performed
+ on either a table (using @ref NdbScanOperation) or
+ an ordered index (by means of an @ref NdbIndexScanOperation).
+
+ Scan operations are characterised by the following:
+ - They can perform only reads (shared, exclusive or dirty)
+ - They can potentially work with multiple rows
+ - They can be used to update or delete multiple rows
+ - They can operate on several nodes in parallel
+
+ After the operation is created using NdbTransaction::getNdbScanOperation()
+ (or NdbTransaction::getNdbIndexScanOperation()),
+ it is carried out in the following steps:
+ -# Define the standard operation type, using NdbScanOperation::readTuples()
+ -# Specify search conditions, using @ref NdbScanFilter and/or
+ @ref NdbIndexScanOperation::setBound()
+ -# Specify attribute actions, using NdbOperation::getValue()
+ -# Execute the transaction, using NdbTransaction::execute()
+ -# Traverse the result set by means of successive calls to
+ NdbScanOperation::nextResult()
+
+ Here are two brief examples illustrating this process. Once again, in order
+ to keep things relatively short and simple, we will forego any error handling.
+
+ This first example performs a table scan, using an NdbScanOperation:
+ @code
+ // 1. Retrieve table object
+ myTable= myDict->getTable("MYTABLENAME");
+
+ // 2. Create
+ myOperation= myTransaction->getNdbScanOperation(myTable);
+
+ // 3. Define type of operation and lock mode
+ myOperation->readTuples(NdbOperation::LM_Read);
+
+ // 4. Specify Search Conditions
+ NdbScanFilter sf(myOperation);
+ sf.begin(NdbScanFilter::OR);
+ sf.eq(0, i); // Return rows with column 0 equal to i or
+ sf.eq(1, i+1); // column 1 equal to (i+1)
+ sf.end();
+
+ // 5. Attribute Actions
+ myRecAttr= myOperation->getValue("ATTR2", NULL);
+ @endcode
+
+ Our second example uses an NdbIndexScanOperation to perform an index scan:
+ @code
+ // 1. Retrieve index object
+ myIndex= myDict->getIndex("MYORDEREDINDEX", "MYTABLENAME");
+
+ // 2. Create
+ myOperation= myTransaction->getNdbIndexScanOperation(myIndex);
+ // 3. Define type of operation and lock mode
+ myOperation->readTuples(NdbOperation::LM_Read);
+ // 4. Specify Search Conditions
+ // All rows with ATTR1 between i and (i+1)
+ myOperation->setBound("ATTR1", NdbIndexScanOperation::BoundGE, i);
+ myOperation->setBound("ATTR1", NdbIndexScanOperation::BoundLE, i+1);
+
+ // 5. Attribute Actions
+ myRecAttr= myOperation->getValue("ATTR2", NULL);
+ @endcode
+
+ Some additional discussion of each step required to perform a scan follows:
+
+ <h4>Step 1: Define Scan Operation Type</h4>
+ It is important to remember that only a single operation is supported for each scan operation
+ (@ref NdbScanOperation::readTuples() or @ref NdbIndexScanOperation::readTuples()).
+
+ @note If you want to define multiple scan operations within the same
+ transaction, then you need to call
+ NdbTransaction::getNdbScanOperation() or
+ NdbTransaction::getNdbIndexScanOperation() separately for <b>each</b> operation.
+
+ <h4>Step 2: Specify Search Conditions</h4>
+ The search condition is used to select tuples.
+ If no search condition is specified, the scan will return all rows
+ in the table.
+
+ The search condition can be an @ref NdbScanFilter (which can be used on both
+ @ref NdbScanOperation and @ref NdbIndexScanOperation) or bounds which
+ can only be used on index scans (@ref NdbIndexScanOperation::setBound()).
+ An index scan can use both NdbScanFilter and bounds.
+
+ @note When NdbScanFilter is used, each row is examined, whether or not it is
+ actually returned. However, when using bounds, only rows within the bounds will be examined.
+
+ <h4>Step 3: Specify Attribute Actions</h4>
+
+ Next, it is necessary to define which attributes should be read.
+ As with transaction attributes, scan attributes are defined by name but it is
+ also possible to use the attributes' identities to define attributes.
+
+ As previously discussed (see @ref secSync), the value read is returned as
+ an NdbRecAttr object by the NdbOperation::getValue() method.
+
+ <h3>Using Scan to Update/Delete</h3>
+ Scanning can also be used to update or delete rows.
+ This is performed by
+ -# Scanning using exclusive locks (using NdbOperation::LM_Exclusive)
+ -# When iterating through the result set, for each row optionally calling
+ either NdbScanOperation::updateCurrentTuple() or
+ NdbScanOperation::deleteCurrentTuple()
+ -# (If performing NdbScanOperation::updateCurrentTuple():)
+ Setting new values for records simply by using @ref NdbOperation::setValue().
+ NdbOperation::equal() should <em>not</em> be called in such cases, as the primary
+ key is retrieved from the scan.
+
+ @note The actual update or delete will not be performed until the next
+ call to NdbTransaction::execute(), just as with single row operations.
+ NdbTransaction::execute() also must be called before any locks are released;
+ see @ref secScanLocks for more information.
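+
+ A brief sketch of the update variant of this pattern is shown below
+ (error handling omitted; <code>newValue</code> is illustrative only):
+ @code
+ while (MyScanOperation->nextResult(true) == 0)
+ {
+   do
+   {
+     NdbOperation *update= MyScanOperation->updateCurrentTuple();
+     update->setValue("ATTR2", newValue);
+   } while (MyScanOperation->nextResult(false) == 0);
+   MyTransaction->execute(NoCommit);
+ }
+ @endcode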
+
+ <h4>Features Specific to Index Scans</h4>
+
+ When performing an index scan, it is possible to
+ scan only a subset of a table using @ref NdbIndexScanOperation::setBound().
+ In addition, result sets can be sorted in either ascending or descending order, using
+ @ref NdbIndexScanOperation::readTuples(). Note that rows are returned unordered
+ by default, that is, unless <var>sorted</var> is set to <b>true</b>.
+ It is also important to note that, when using NdbIndexScanOperation::BoundEQ
+ on a partition key, only fragments containing rows will actually be scanned.
+
+ @note When performing a sorted scan, any value passed as the
+ NdbIndexScanOperation::readTuples() method's <code>parallel</code> argument
+ will be ignored and maximum parallelism will be used instead. In other words, all
+ fragments which it is possible to scan will be scanned simultaneously and in parallel
+ in such cases.
+
+ @subsection secScanLocks Lock handling with scans
+
+ Performing scans on either a table or an index has the potential to
+ return a great many records; however, Ndb will lock only a predetermined
+ number of rows per fragment at a time.
+ How many rows will be locked per fragment is controlled by the
+ <var>batch</var> parameter passed to NdbScanOperation::readTuples().
+
+ In order to allow the application to handle how locks are released,
+ NdbScanOperation::nextResult() has a Boolean parameter <var>fetch_allow</var>.
+ If NdbScanOperation::nextResult() is called with <var>fetch_allow</var> equal to
+ <b>false</b>, then no locks may be released as result of the function call.
+ Otherwise the locks for the current batch may be released.
+
+ This next example shows a scan delete that handles locks in an efficient manner.
+ For the sake of brevity, we omit error-handling.
+ @code
+ int check;
+
+ // Outer loop for each batch of rows
+ while((check = MyScanOperation->nextResult(true)) == 0)
+ {
+ do
+ {
+ // Inner loop for each row within batch
+ MyScanOperation->deleteCurrentTuple();
+ } while((check = MyScanOperation->nextResult(false)) == 0);
+
+ // When no more rows in the batch, execute all defined deletes
+ MyTransaction->execute(NoCommit);
+ }
+ @endcode
+
+ See @ref ndbapi_scan.cpp for a more complete example of a scan.
+
+ @section secError Error Handling
+
+ Errors can occur either when operations making up a transaction are being
+ defined, or when the transaction is actually being executed. Catching and
+ handling either sort of error requires testing the value returned by
+ NdbTransaction::execute(), and then, if an error is indicated (that is,
+ if this value is equal to -1), using the following two methods in order to
+ identify the error's type and location:
+
+ - NdbTransaction::getNdbErrorOperation() returns a reference to the
+ operation causing the most recent error.
+ - NdbTransaction::getNdbErrorLine() yields the method number of the
+ erroneous method in the operation.
+
+ This short example illustrates how to detect an error and to use these
+ two methods to identify it:
+
+ @code
+ theTransaction = theNdb->startTransaction();
+ theOperation = theTransaction->getNdbOperation("TEST_TABLE");
+ if (theOperation == NULL) goto error;
+ theOperation->readTuple(NdbOperation::LM_Read);
+ theOperation->setValue("ATTR_1", at1);
+ theOperation->setValue("ATTR_2", at1); // Error occurs here
+ theOperation->setValue("ATTR_3", at1);
+ theOperation->setValue("ATTR_4", at1);
+
+ if (theTransaction->execute(Commit) == -1) {
+ errorLine = theTransaction->getNdbErrorLine();
+ errorOperation = theTransaction->getNdbErrorOperation();
+ }
+ @endcode
+
+ Here <code>errorLine</code> will be 3, as the error occurred in the
+ third method called on the NdbOperation object (in this case,
+ <code>theOperation</code>); if the result of
+ NdbTransaction::getNdbErrorLine() is 0, this means that the error
+ occurred when the operations were executed. In this example,
+ <code>errorOperation</code> will be a pointer to the <code>theOperation</code>
+ object. The NdbTransaction::getNdbError() method returns an NdbError
+ object providing information about the error.
+
+ @note Transactions are <b>not</b> automatically closed when an error occurs. Call
+ Ndb::closeTransaction() to close the transaction.
+
+ One recommended way to handle a transaction failure
+ (i.e. an error is reported) is to:
+ -# Rollback transaction (call NdbTransaction::execute() with a special parameter)
+ -# Close transaction (call NdbTransaction::closeTransaction())
+ -# If the error was temporary, attempt to restart the transaction
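+
+ A sketch of the first two steps (error handling omitted; <var>Rollback</var>
+ is spelled as in the other examples on this page, and the transaction is
+ closed through the Ndb object as described in the note above):
+ @code
+ theTransaction->execute(Rollback);
+ theNdb->closeTransaction(theTransaction);
+ @endcode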
+
+ Several errors can occur when a transaction contains multiple
+ operations which are simultaneously executed.
+ In this case the application has to go through all operations
+ and query their NdbError objects to find out what really happened.
+
+ It is also important to note that errors can occur even when a commit is
+ reported as successful. In order to handle such situations, the NDB API
+ provides an additional NdbTransaction::commitStatus() method to check the
+ transaction's commit status.
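+
+ For example (a sketch only, assuming <code>NdbTransaction::Committed</code>
+ is the commit status value indicating success):
+ @code
+ if (theTransaction->commitStatus() != NdbTransaction::Committed)
+ {
+   // Inspect theTransaction->getNdbError() and handle the failure
+ }
+ @endcode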
+
+******************************************************************************/
+
+/**
+ * @page ndbapi_simple.cpp ndbapi_simple.cpp
+ * @include ndbapi_simple.cpp
+ */
+
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
+/**
+ * @page ndbapi_async.cpp ndbapi_async.cpp
+ * @include ndbapi_async.cpp
+ */
+/**
+ * @page ndbapi_async1.cpp ndbapi_async1.cpp
+ * @include ndbapi_async1.cpp
+ */
+#endif
+
+/**
+ * @page ndbapi_retries.cpp ndbapi_retries.cpp
+ * @include ndbapi_retries.cpp
+ */
+
+/**
+ * @page ndbapi_simple_index.cpp ndbapi_simple_index.cpp
+ * @include ndbapi_simple_index.cpp
+ */
+
+/**
+ * @page ndbapi_scan.cpp ndbapi_scan.cpp
+ * @include ndbapi_scan.cpp
+ */
+/**
+ * @page ndbapi_event.cpp ndbapi_event.cpp
+ * @include ndbapi_event.cpp
+ */
+
+
+/**
+ @page secAdapt Adaptive Send Algorithm
+
+ At the time of "sending" a transaction
+ (using NdbTransaction::execute()), the transactions
+ are in reality <em>not</em> immediately transferred to the NDB Kernel.
+ Instead, the "sent" transactions are only kept in a
+ special send list (buffer) in the Ndb object to which they belong.
+ The adaptive send algorithm decides when transactions should
+ actually be transferred to the NDB kernel.
+
+ The NDB API is designed as a multi-threaded interface and so
+ it is often desirable to transfer database operations from more than
+ one thread at a time.
+ The NDB API keeps track of which Ndb objects are active in transferring
+ information to the NDB kernel and the expected amount of threads to
+ interact with the NDB kernel.
+ Note that a given instance of Ndb should be used in at most one thread;
+ different threads should <em>not</em> use the same Ndb object.
+
+ There are four conditions leading to the transfer of database
+ operations from Ndb object buffers to the NDB kernel:
+ -# The NDB Transporter (TCP/IP, OSE, SCI or shared memory)
+ decides that a buffer is full and sends it off.
+ The buffer size is implementation-dependent and
+ may change between MySQL Cluster releases.
+ On TCP/IP the buffer size is usually around 64 KB;
+ on OSE/Delta it is usually less than 2000 bytes.
+ Since each Ndb object provides a single buffer per storage node,
+ the notion of a "full" buffer is local to this storage node.
+ -# The accumulation of statistical data on transferred information
+ may force sending of buffers to all storage nodes.
+ -# Every 10 ms, a special transmission thread checks whether or not
+ any send activity has occurred. If not, then the thread will
+ force transmission to all nodes.
+ This means that 20 ms is the maximum time database operations
+ are kept waiting before being sent off. The 10-millisecond limit
+ is likely to become a configuration parameter in
+ future releases of MySQL Cluster; however, for checks that
+ are more frequent than each 10 ms,
+ additional support from the operating system is required.
+ -# For methods that are affected by the adaptive send algorithm
+ (such as NdbTransaction::execute()), there is a <var>force</var>
+ parameter
+ that overrides its default behaviour in this regard and forces
+ immediate transmission to all nodes. See the inidvidual NDB API class
+ listings for more information.
+
+ @note The conditions listed above are subject to change in future releases
+ of MySQL Cluster.
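+
+  The sketch below illustrates the <var>force</var> parameter from the last
+  condition above. It assumes the three-argument form of
+  NdbTransaction::execute() (execution type, abort option, force send) and
+  that <code>trans</code> is a pointer to an NdbTransaction; check the class
+  listing for the exact signature in your release.
+
+  @code
+  // Commit and flush the buffered signals to the storage nodes immediately,
+  // instead of waiting for the adaptive send algorithm to transmit them.
+  int ret = trans->execute(NdbTransaction::Commit,
+                           NdbTransaction::AbortOnError,
+                           1 /* force send */);
+  if (ret == -1)
+    printf("execute failed: %s\n", trans->getNdbError().message);
+  @endcode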
+*/
+
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
+/**
+
+ For each of these "sent" transactions, there are three
+ possible states:
+ -# Waiting to be transferred to NDB Kernel.
+ -# Has been transferred to the NDB Kernel and is currently
+ being processed.
+ -# Has been transferred to the NDB Kernel and has
+ finished processing.
+ Now it is waiting for a call to a poll method.
+ (When the poll method is invoked,
+ then the transaction callback method will be executed.)
+
+ The poll method invoked (either Ndb::pollNdb() or Ndb::sendPollNdb())
+ will return when:
+ -# at least 'minNoOfEventsToWakeup' of the transactions
+ in the send list have transitioned to state 3 as described above, and
+ -# all of these transactions have executed their callback methods.
+*/
+#endif
+
+/**
+ @page secConcepts MySQL Cluster Concepts
+
+ The <em>NDB Kernel</em> is the collection of storage nodes
+ belonging to a MySQL Cluster.
+ The application programmer can for most purposes view the
+ set of all storage nodes as a single entity.
+ Each storage node is made up of three main components:
+ - TC : The transaction co-ordinator
+ - ACC : Index storage component
+ - TUP : Data storage component
+
+ When an application program executes a transaction,
+ it connects to one transaction co-ordinator on one storage node.
+ Usually, the programmer does not need to specify which TC should be used,
+ but in some cases when performance is important, the programmer can
+ provide "hints" to use a certain TC.
+ (If the node with the desired transaction co-ordinator is down, then another TC will
+ automatically take over the work.)
+
+ Every storage node has an ACC and a TUP which store
+ the indexes and data portions of the database table fragment.
+ Even though one TC is responsible for the transaction,
+ several ACCs and TUPs on other storage nodes might be involved in the
+ execution of the transaction.
+
+
+ @section secNdbKernelConnection Selecting a Transaction Co-ordinator
+
+ The default method is to select the transaction co-ordinator (TC) determined to be
+ the "closest" storage node, using a heuristic for proximity based on
+ the type of transporter connection. In order of closest to most distant, these are
+ - SCI
+ - SHM
+ - TCP/IP (localhost)
+ - TCP/IP (remote host)
+
+  If several connections are available at the same proximity level, selection
+  rotates among them in round-robin fashion, one connection per transaction.
+  Optionally, the method of TC selection can be set to round-robin mode, in
+  which each new set of transactions is placed on the next DB node. The pool
+  of connections from which this selection is made consists of all available
+  connections.
+
+ As noted previously, the application programmer can provide hints to the NDB API as to
+ which transaction co-ordinator it should use. This is done by
+ providing a <em>table</em> and <em>partition key</em>
+ (usually the primary key).
+ By using the primary key as the partition key,
+ the transaction will be placed on the node where the primary replica
+ of that record resides.
+ Note that this is only a hint; the system can be
+ reconfigured at any time, in which case the NDB API will choose a transaction
+ co-ordinator without using the hint.
+ For more information, see NdbDictionary::Column::getPartitionKey() and
+ Ndb::startTransaction(). The application programmer can specify
+ the partition key from SQL by using the construct,
+ <code>CREATE TABLE ... ENGINE=NDB PARTITION BY KEY (<var>attribute-list</var>);</code>.
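+
+  The following sketch shows one way to inspect which columns of a table form
+  its partition key before constructing such a hint. The table name "MYTAB"
+  and the initialized Ndb pointer <code>myNdb</code> are assumptions; only
+  dictionary calls referred to in this documentation are used.
+
+  @code
+  NdbDictionary::Dictionary* dict = myNdb->getDictionary();
+  const NdbDictionary::Table* tab = dict->getTable("MYTAB");
+  if (tab != NULL) {
+    for (int i = 0; i < tab->getNoOfColumns(); i++) {
+      const NdbDictionary::Column* col = tab->getColumn(i);
+      if (col->getPartitionKey())
+        printf("partition key column: %s\n", col->getName());
+    }
+  }
+  @endcode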
+
+
+ @section secRecordStruct NDB Record Structure
+ The NDB Cluster engine used by MySQL Cluster is a relational database engine
+ storing records in tables just as with any other RDBMS.
+ Table rows represent records as tuples of relational data.
+ When a new table is created, its attribute schema is specified for the table as a whole,
+ and thus each record of the table has the same structure. Again, this is typical
+ of relational databases, and NDB is no different in this regard.
+
+
+ @subsection secKeys Primary Keys
+ Each record has from 1 up to 32 attributes which belong
+ to the primary key of the table.
+
+ @section secTrans Transactions
+
+ Transactions are committed first to main memory,
+ and then to disk after a global checkpoint (GCP) is issued.
+ Since all data is (in most NDB Cluster configurations)
+ synchronously replicated and stored on multiple NDB nodes,
+ the system can still handle processor failures without loss
+ of data.
+  However, in the case of a system failure (e.g. the whole system goes down),
+  all transactions (committed or not) occurring since the latest GCP are lost.
+
+
+ @subsection secConcur Concurrency Control
+ NDB Cluster uses pessimistic concurrency control based on locking.
+ If a requested lock (implicit and depending on database operation)
+ cannot be attained within a specified time,
+ then a timeout error occurs.
+
+  Concurrent transactions, as issued by parallel application programs or by
+  multiple application threads, can sometimes deadlock when they try to access
+  the same information simultaneously.
+  Applications therefore need to be written so that timeout errors
+  caused by such deadlocks are handled gracefully. This generally
+  means that the transaction encountering a timeout should be rolled back
+  and restarted, as in the sketch below.
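+
+  A rough sketch of such a retry loop follows. It assumes that
+  NdbError::status distinguishes temporary errors (such as deadlock timeouts)
+  from permanent ones, and that define_operations() is a hypothetical
+  application function that adds the operations to the transaction; check the
+  class listings for the exact names.
+
+  @code
+  int retries = 5;
+  while (retries-- > 0) {
+    NdbTransaction* trans = myNdb->startTransaction();
+    if (trans == NULL) break;                  // could not start a transaction
+    define_operations(trans);                  // application-specific (assumed)
+    if (trans->execute(NdbTransaction::Commit) == 0) {
+      myNdb->closeTransaction(trans);          // success
+      break;
+    }
+    bool temporary =
+      (trans->getNdbError().status == NdbError::TemporaryError);
+    myNdb->closeTransaction(trans);            // aborts if not committed
+    if (!temporary) break;                     // give up on permanent errors
+    // back off briefly (e.g. sleep a few milliseconds) before retrying
+  }
+  @endcode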
+
+
+ @section secHint Hints and Performance
+
+ Placing the transaction co-ordinator in close proximity
+ to the actual data used in the transaction can in many cases
+ improve performance significantly. This is particularly true for
+ systems using TCP/IP. For example, a Solaris system using a single 500 MHz processor
+ has a cost model for TCP/IP communication which can be represented by the formula
+
+ <code>[30 microseconds] + ([100 nanoseconds] * [<var>number of bytes</var>])</code>
+
+  This means that if we can ensure that we use "popular" links, more data is
+  buffered into each message, and the fixed per-message cost is amortized,
+  drastically reducing the communication cost.
+ The same system using SCI has a different cost model:
+
+ <code>[5 microseconds] + ([10 nanoseconds] * [<var>number of bytes</var>])</code>
+
+ Thus, the efficiency of an SCI system is much less dependent on selection of
+ transaction co-ordinators.
+ Typically, TCP/IP systems spend 30-60% of their working time on communication,
+ whereas for SCI systems this figure is closer to 5-10%.
+ Thus, employing SCI for data transport means that less care from the NDB API
+ programmer is required and greater scalability can be achieved, even for
+ applications using data from many different parts of the database.
+
+  A simple example is an application that performs many small updates, where
+  each transaction needs to update one record.
+  This record has a 32-bit primary key,
+  which is also the partition key.
+  In this case, keyData is the address of the integer holding
+  the primary key and keyLen is 4, as in the sketch below.
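+
+  A minimal sketch of this case; it assumes a <code>tab</code> pointer to the
+  table object previously fetched from the dictionary, and uses the
+  Ndb::startTransaction() signature declared later in this file.
+
+  @code
+  Uint32 key_value = 42;                        // primary/partition key value
+  NdbTransaction* trans =
+    myNdb->startTransaction(tab,                // const NdbDictionary::Table*
+                            (const char*) &key_value,
+                            sizeof(key_value)); // keyLen == 4
+  @endcode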
+*/
+
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
+/**
+ (A transaction's execution can also be divided into three
+ steps: prepare, send, and poll. This allows us to perform asynchronous
+ transactions. More about this later.)
+*/
+#endif
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
+/**
+ Another way to execute several parallel transactions is to use
+ asynchronous transactions.
+*/
+#endif
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
+/**
+ Operations are of two different kinds:
+ -# standard operations, and
+ -# interpreted program operations.
+*/
+#endif
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
+/**
<h3>Interpreted Program Operations</h3>
The following types of interpreted program operations exist:
-# NdbOperation::interpretedUpdateTuple :
updates a tuple using an interpreted program
-# NdbOperation::interpretedDeleteTuple :
delete a tuple using an interpreted program
- -# NdbOperation::openScanRead :
- scans a table with read lock on each tuple
- -# NdbOperation::openScanExclusive :
- scans a table with exclusive update lock on each tuple
The operations interpretedUpdateTuple and interpretedDeleteTuple both
work using the unique tuple key.
@@ -306,124 +761,10 @@
There might be zero NdbOperation::getValue calls.
-# The fifth step is possible subroutine definitions using
NdbOperation::def_subroutine and NdbOperation::ret_sub.
-
-
- @subsection secScan Scanning
- The most common use of interpreted programs is for scanning
- tables. Scanning is a search of all tuples in a table.
- Tuples which satisfy conditions (a search filter)
- stated in the interpreted program
- are sent to the application.
-
- Reasons for using scan transactions include
- need to use a search key different from the primary key
- and any secondary index.
- Or that the query needs to access so many tuples so that
- it is more efficient to scan the entire table.
-
- Scanning can also be used to update information.
- The scanning transaction itself is however
- not allowed to update any tuples.
- To do updates via scanning transactions, the tuples
- need to be handed over to another transaction which is
- executing the actual update.
-
- Even though a scan operation is part of a transaction,
- the scan transaction is not a normal transaction.
- The locks are <em>not</em> kept throughout the entire
- scan transaction, since this would imply non-optimal performance.
- <em>
- A transaction containing a scan operation can only
- contain that operation.
- No other operations are allowed in the same transaction.
- </em>
-
- The NdbOperation::openScanRead operation
- only sets a temporary read lock while
- reading the tuple.
- The tuple lock is released already when the
- result of the read reaches the application.
- The NdbOperation::openScanExclusive operation sets an
- exclusive lock on the tuple
- and sends the result to the application.
- Thus when the application reads the data it is still
- locked with the exclusive lock.
-
- If the application desires to update the tuple it may transfer
- the tuple to another transaction which updates the tuple.
- The updating transaction can consist of a combination of tuples
- received from the scan and normal operations.
-
- For transferred operations it is not necessary to provide the
- primary key. It is part of the transfer.
- You only need to give the operation type and the
- actions to perform on the tuple.
-
- The scan transaction starts like a usual transaction,
- but is of the following form:
- -# Start transaction
- -# Get NdbOperation for the table to be scanned
- -# Set the operation type using NdbOperation::openScanRead or
- NdbOperation::openScanExclusive
- -# Search conditions are defined by an interpreted program
- (setValue and write_attr are not allowed, since scan transactions
- are only allowed to read information).
- The instruction interpret_exit_nok does in this case
- not abort the transaction, it only skips the tuple and
- proceeds with the next.
- The skipped tuple will not be reported to the application.
- -# Call NdbConnection::executeScan to define (and start) the scan.
- -# Call NdbConnection::nextScanResult to proceed with next tuple.
- When calling NdbConnection::nextScanResult, the lock on any
- previous tuples are released.
- <br>
- If the tuple should be updated then it must be transferred over
- to another updating transaction.
- This is performed by calling
- NdbOperation::takeOverForUpdate or takeOverForDelete on
- the scanning transactions NdbOperation object with the updating
- transactions NdbConnection object as parameter.
- <p>
- If NdbOperation::takeOverFor* returns NULL then the
- operation was not successful, otherwise it returns a reference
- to the NdbOperation which the updating transaction has received
- -# Use Ndb::closeTransaction as usual to close the transaction.
- This can be performed even if there are more tuples to scan.
-
- See also example program in section @ref select_all.cpp.
-
- However, a new scan api is under development, using NdbScanOperation
- and NdbScanFilter. NdbScanFilter makes it easier to define a search
- criteria and is recommended instead of using Interpreted Programs.
-
- The scan transaction starts like a usual transaction,
- but is of the following form:
- -# Start transaction
- -# Get NdbScanOperation for the table to be scanned
- -# NdbScanOperation::readTuplesExclusive returns a handle to a
- NdbResultSet.
- -# Search conditions are defined by NdbScanFilter
- -# Call NdbConnection::execute(NoCommit) to start the scan.
- -# Call NdbResultSet::nextResult to proceed with next tuple.
- When calling NdbResultSet::nextResult(false), the lock on any
- previous tuples are released and the next tuple cached in the API
- is fetched.
- <br>
- If the tuple should be updated then define a new update operation
- (NdbOperation) using NdbResultSet::updateTuple().
- The new update operation can the be used to modify the tuple.
- When nextResult(false) returns != 0, then no more tuples
- are cached in the API. Updated tuples is now commit using
- NdbConnection::execute(Commit).
- After the commit, more tuples are fetched from NDB using
- nextResult(true).
- -# Use Ndb::closeTransaction as usual to close the transaction.
- This can be performed even if there are more tuples to scan.
-
- See the scan example program in @ref ndbapi_scan.cppn for example
- usage of the new scan api.
-
-
+*/
+#endif
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
+/**
<h3>Interpreted Programs</h3>
Interpretation programs are executed in a
register-based virtual machine.
@@ -494,8 +835,11 @@
The parameter used by NdbOperation::def_subroutine
should match the automatic numbering to make it easier to
debug the interpreted program.
+*/
+#endif
-
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
+/**
@section secAsync Asynchronous Transactions
The asynchronous interface is used to increase the speed of
transaction executing by better utilizing the connection
@@ -508,7 +852,7 @@
chunks of data are sent when actually sending and thus decreasing
the operating system overhead.
- The synchronous call to NdbConnection::execute
+ The synchronous call to NdbTransaction::execute
normally performs three main steps:<br>
-# <b>Prepare</b>
Check transaction status
@@ -520,7 +864,7 @@
-# <b>Poll</b>
Wait for response from NDB kernel.
- The asynchronous method NdbConnection::executeAsynchPrepare
+ The asynchronous method NdbTransaction::executeAsynchPrepare
only perform step 1.
(The abort part in step 1 is only prepared for. The actual
aborting of the transaction is performed in a later step.)
@@ -531,11 +875,11 @@
synchronous transactions)
-# Add and define operations (also as in the synchronous case)
-# <b>Prepare</b> transactions
- (using NdbConnection::executeAsynchPrepare or
- NdbConnection::executeAsynch)
+ (using NdbTransaction::executeAsynchPrepare or
+ NdbTransaction::executeAsynch)
-# <b>Send</b> transactions to NDB Kernel
(using Ndb::sendPreparedTransactions,
- NdbConnection::executeAsynch, or Ndb::sendPollNdb)
+ NdbTransaction::executeAsynch, or Ndb::sendPollNdb)
-# <b>Poll</b> NDB kernel to find completed transactions
(using Ndb::pollNdb or Ndb::sendPollNdb)
-# Close transactions (same way as for the synchronous transactions)
@@ -546,24 +890,24 @@
- (Prepare-Send-Poll). This is the one-step variant provided
by synchronous transactions.
- (Prepare-Send)-Poll. This is the two-step variant using
- NdbConnection::executeAsynch and Ndb::pollNdb.
+ NdbTransaction::executeAsynch and Ndb::pollNdb.
- Prepare-(Send-Poll). This is the two-step variant using
- NdbConnection::executeAsynchPrepare and Ndb::sendPollNdb.
+ NdbTransaction::executeAsynchPrepare and Ndb::sendPollNdb.
- Prepare-Send-Poll. This is the three-step variant using
- NdbConnection::executeAsynchPrepare, Ndb::sendPreparedTransactions, and
+ NdbTransaction::executeAsynchPrepare, Ndb::sendPreparedTransactions, and
Ndb::pollNdb.
Transactions first has to be prepared by using method
- NdbConnection::executeAsynchPrepare or NdbConnection::executeAsynch.
+ NdbTransaction::executeAsynchPrepare or NdbTransaction::executeAsynch.
The difference between these is that
- NdbConnection::executeAsynch also sends the transaction to
+ NdbTransaction::executeAsynch also sends the transaction to
the NDB kernel.
One of the arguments to these methods is a callback method.
The callback method is executed during polling (item 5 above).
- Note that NdbConnection::executeAsynchPrepare does not
+ Note that NdbTransaction::executeAsynchPrepare does not
send the transaction to the NDB kernel. When using
- NdbConnection::executeAsynchPrepare, you either have to call
+ NdbTransaction::executeAsynchPrepare, you either have to call
Ndb::sendPreparedTransactions or Ndb::sendPollNdb to send the
database operations.
(Ndb::sendPollNdb also polls Ndb for completed transactions.)
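+
+  A rough sketch of the Prepare-(Send-Poll) variant is given below. It
+  assumes an application-managed array trans[] of n started transactions
+  whose operations are already defined, a callback with the signature
+  (int result, NdbTransaction*, void*), and the
+  executeAsynchPrepare(ExecType, callback, void*) form; the exact typedef
+  and signatures should be taken from the class listings.
+
+  @code
+  // Callback executed while polling; 'result' is 0 on success.
+  static void trans_done(int result, NdbTransaction* trans, void* arg)
+  {
+    if (result == -1)
+      printf("transaction failed: %s\n", trans->getNdbError().message);
+  }
+
+  // Prepare a number of transactions...
+  for (int i = 0; i < n; i++)
+    trans[i]->executeAsynchPrepare(NdbTransaction::Commit, trans_done, 0);
+
+  // ...then send them and poll until all of them have completed.
+  int completed = myNdb->sendPollNdb(WAITFOR_RESPONSE_TIMEOUT, n);
+  @endcode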
@@ -583,276 +927,34 @@
The poll method returns the number of transactions that
have finished processing and executed their callback methods.
-
+
@note When an asynchronous transaction has been started and sent to
the NDB kernel, it is not allowed to execute any methods on
objects belonging to this transaction until the transaction
callback method have been executed.
(The transaction is stated and sent by either
- NdbConnection::executeAsynch or through the combination of
- NdbConnection::executeAsynchPrepare and either
+ NdbTransaction::executeAsynch or through the combination of
+ NdbTransaction::executeAsynchPrepare and either
Ndb::sendPreparedTransactions or Ndb::sendPollNdb).
- More about how transactions are send the NDB Kernel is
+  More about how transactions are sent to the NDB Kernel is
available in section @ref secAdapt.
-
-
- @section secError Error Handling
-
- Errors can occur when
- -# operations are being defined, or when the
- -# transaction is being executed.
-
- One recommended way to handle a transaction failure
- (i.e. an error is reported) is to:
- -# Rollback transaction (NdbConnection::execute with a special parameter)
- -# Close transaction
- -# Restart transaction (if the error was temporary)
-
- @note Transaction are not automatically closed when an error occur.
-
- Several errors can occur when a transaction holds multiple
- operations which are simultaneously executed.
- In this case the application has to go through the operation
- objects and query for their NdbError objects to find out what really
- happened.
-
- NdbConnection::getNdbErrorOperation returns a reference to the
- operation causing the latest error.
- NdbConnection::getNdbErrorLine delivers the method number of the
- erroneous method in the operation.
-
- @code
- theConnection = theNdb->startTransaction();
- theOperation = theConnection->getNdbOperation("TEST_TABLE");
- if (theOperation == NULL) goto error;
- theOperation->readTuple();
- theOperation->setValue("ATTR_1", at1);
- theOperation->setValue("ATTR_2", at1); //Here an error occurs
- theOperation->setValue("ATTR_3", at1);
- theOperation->setValue("ATTR_4", at1);
-
- if (theConnection->execute(Commit) == -1) {
- errorLine = theConnection->getNdbErrorLine();
- errorOperation = theConnection->getNdbErrorOperation();
- @endcode
-
- Here errorLine will be 3 as the error occurred in the third method
- on the operation object.
- Getting errorLine == 0 means that the error occurred when executing the
- operations.
- Here errorOperation will be a pointer to the theOperation object.
- NdbConnection::getNdbError will return the NdbError object
- including holding information about the error.
-
- Since errors could have occurred even when a commit was reported,
- there is also a special method, NdbConnection::commitStatus,
- to check the commit status of the transaction.
-
-*******************************************************************************/
-
-/**
- * @page ndbapi_example1.cpp ndbapi_example1.cpp
- * @include ndbapi_example1.cpp
- */
-
-/**
- * @page ndbapi_example2.cpp ndbapi_example2.cpp
- * @include ndbapi_example2.cpp
- */
-
-/**
- * @page ndbapi_example3.cpp ndbapi_example3.cpp
- * @include ndbapi_example3.cpp
- */
-
-/**
- * @page ndbapi_example4.cpp ndbapi_example4.cpp
- * @include ndbapi_example4.cpp
- */
-
-/**
- * @page select_all.cpp select_all.cpp
- * @include select_all.cpp
- */
-
-/**
- * @page ndbapi_async.cpp ndbapi_async.cpp
- * @include ndbapi_async.cpp
- */
-
-/**
- * @page ndbapi_scan.cpp ndbapi_scan.cpp
- * @include ndbapi_scan.cpp
- */
-
-
-/**
- @page secAdapt Adaptive Send Algorithm
-
- At the time of "sending" the transaction
- (using NdbConnection::execute, NdbConnection::executeAsynch,
- Ndb::sendPreparedTransactions, or Ndb::sendPollNdb), the transactions
- are in reality <em>not</em> immediately transfered to the NDB Kernel.
- Instead, the "sent" transactions are only kept in a
- special send list (buffer) in the Ndb object to which they belong.
- The adaptive send algorithm decides when transactions should
- be transfered to the NDB kernel.
-
- For each of these "sent" transactions, there are three
- possible states:
- -# Waiting to be transfered to NDB Kernel.
- -# Has been transfered to the NDB Kernel and is currently
- being processed.
- -# Has been transfered to the NDB Kernel and has
- finished processing.
- Now it is waiting for a call to a poll method.
- (When the poll method is invoked,
- then the transaction callback method will be executed.)
-
- The poll method invoked (either Ndb::pollNdb or Ndb::sendPollNdb)
- will return when:
- -# at least 'minNoOfEventsToWakeup' of the transactions
- in the send list have transitioned to state 3 as described above, and
- -# all of these transactions have executed their callback methods.
-
-
- Since the NDB API is designed as a multi-threaded interface,
- it is desirable to transfer database operations from more than
- one thread at a time.
- The NDB API keeps track of which Ndb objects are active in transfering
- information to the NDB kernel and the expected amount of threads to
- interact with the NDB kernel.
- Note that an Ndb object should be used in at most one thread.
- Two different threads should <em>not</em> use the same Ndb object.
-
- There are four reasons leading to transfering of database
- operations:
- -# The NDB Transporter (TCP/IP, OSE, SCI or shared memory)
- decides that a buffer is full and sends it off.
- The buffer size is implementation dependent and
- might change between NDB Cluster releases.
- On TCP/IP the buffer size is usually around 64 kByte and
- on OSE/Delta it is usually less than 2000 bytes.
- In each Ndb object there is one buffer per DB node,
- so this criteria of a full buffer is only
- local to the connection to one DB node.
- -# Statistical information on the transfered information
- may force sending of buffers to all DB nodes.
- -# Every 10 ms a special send-thread checks whether
- any send activity has occurred. If not, then the thread will
- force sending to all nodes.
- This means that 20 ms is the maximum time database operations
- are waiting before being sent off. The 10 millisecond limit
- is likely to become a configuration parameter in
- later releases of NDB Cluster.
- However, to support faster than 10 ms checks,
- there has to be support from the operating system.
- -# When calling NdbConnection::execute synchronously or calling any
- of the poll-methods, there is a force parameter that overrides the
- adaptive algorithm and forces the send to all nodes.
-
- @note The times mentioned above are examples. These might
- change in later releases of NDB Cluster.
*/
-
-/**
- @page secConcepts NDB Cluster Concepts
-
- The <em>NDB Kernel</em> is the collection of database (DB) nodes
- belonging to an NDB Cluster.
- The application programmer can for most purposes view the
- set of all DB nodes as one entity.
- Each DB node has three main components:
- - TC : The transaction coordinator
- - ACC : The index storage
- - TUP : The data storage
-
- When the application program executes a transaction,
- it connects to one TC on one DB node.
- Usually, the programmer does not need to specify which TC to use,
- but some cases when performance is important,
- transactions can be hinted to use a certain TC.
- (If the node with the TC is down, then another TC will
- automatically take over the work.)
-
- Every DB node has an ACC and a TUP which stores
- the index and the data part of the database.
- Even though one TC is responsible for the transaction,
- several ACCs and TUPs on other DB nodes might be involved in the
- execution of the transaction.
-
-
- @section secNdbKernelConnection Selecting Transaction Coordinator
- The default method is round robin,
- where each new set of transactions
- is placed on the next DB node.
- The application chooses a TC for a number of transactions
- and then lets the next TC (on the next DB node) carry out
- the next set of transactions.
-
- The application programmer can however hint the NDB API which
- transaction coordinator to use
- by providing a <em>distribution key</em> (usually the primary key).
- By using the primary key as distribution key,
- the transaction will be placed on the node where the primary replica
- of that record resides.
- Note that this is only a hint, the system can be
- reconfigured and then the NDB API will choose a transaction
- coordinator without using the hint.
- For more information, see NdbDictionary::Column::setDistributionKey.
+#endif
- @section secRecordStruct Record Structure
- NDB Cluster is a relational database with tables of records.
- Table rows represent tuples of relational data stored as records.
- When created, the attribute schema of the table is specified,
- and thus each record of the table has the same schema.
+/**
-
- @subsection secKeys Tuple Keys
- Each record has from zero up to four attributes which belong
- to the primary key of the table.
- If no attribute belongs to the primary key, then
- the NDB Cluster creates an attribute named <em>NDB$TID</em>
- which stores a tuple identity.
- The <em>tuple key</em> of a table is thus either
- the primary key attributes or the special NDB$TID attribute.
-
+ Put this back when real array ops are supported
+ i.e. get/setValue("kalle[3]");
@subsection secArrays Array Attributes
- A table attribute in NDB Cluster can be of <em>array type</em>.
- This means that the attribute consists of an array of
- <em>elements</em>. The <em>attribute size</em> is the size
- of one element of the array (expressed in bits) and the
- <em>array size</em> is the number of elements of the array.
-
-
- @section secTrans Transactions
+ A table attribute in NDB Cluster can be of type <var>Array</var>,
+ meaning that the attribute consists of an ordered sequence of
+ elements. In such cases, <var>attribute size</var> is the size
+ (expressed in bits) of any one element making up the array; the
+ <var>array size</var> is the number of elements in the array.
- Transactions are committed to main memory,
- and are committed to disk after a global checkpoint, GCP.
- Since all data is (in most NDB Cluster configurations)
- synchronously replicated and stored on multiple NDB nodes,
- the system can still handle processor failures without loss
- of data.
- However, in the case of a system failure (e.g. the whole system goes down),
- then all (committed or not) transactions after the latest GCP are lost.
-
-
- @subsection secConcur Concurrency Control
- NDB Cluster uses pessimistic concurrency control based on locking.
- If a requested lock (implicit and depending on database operation)
- cannot be attained within a specified time,
- then a timeout error occurs.
-
- Concurrent transactions (parallel application programs, thread-based
- applications, or applications with asynchronous transactions)
- sometimes deadlock when they try to access the same information.
- Applications need to be programmed so that timeout errors
- occurring due to deadlocks are handled. This generally
- means that the transaction encountering timeout
- should be rolled back and restarted.
*/
#ifndef Ndb_H
@@ -870,7 +972,7 @@ class NdbEventOperationImpl;
class NdbScanOperation;
class NdbIndexScanOperation;
class NdbIndexOperation;
-class NdbConnection;
+class NdbTransaction;
class NdbApiSignal;
class NdbRecAttr;
class NdbLabel;
@@ -897,10 +999,6 @@ typedef void (* NdbEventCallback)(NdbEventOperation*, Ndb*, void*);
#define WAITFOR_RESPONSE_TIMEOUT 120000 // Milliseconds
#endif
-#define NDB_MAX_INTERNAL_TABLE_LENGTH NDB_MAX_DATABASE_NAME_SIZE + \
- NDB_MAX_SCHEMA_NAME_SIZE + \
- NDB_MAX_TAB_NAME_SIZE*2
-
/**
* @class Ndb
* @brief Represents the NDB kernel and is the main class of the NDB API.
@@ -928,6 +1026,11 @@ typedef void (* NdbEventCallback)(NdbEventOperation*, Ndb*, void*);
* Semaphores, mutexes and so forth are easy ways of issuing memory
* barriers without having to bother about the memory barrier concept.
*
+ */
+
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
+// to be documented later
+/*
* If one Ndb object is used to handle parallel transactions through the
* asynchronous programming interface, please read the notes regarding
* asynchronous transactions (Section @ref secAsync).
@@ -938,12 +1041,15 @@ typedef void (* NdbEventCallback)(NdbEventOperation*, Ndb*, void*);
* asynchronous transaction or the methods for
* synchronous transactions but not both.
*/
+#endif
+
class Ndb
{
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
friend class NdbReceiver;
friend class NdbOperation;
friend class NdbEventOperationImpl;
- friend class NdbConnection;
+ friend class NdbTransaction;
friend class Table;
friend class NdbApiSignal;
friend class NdbIndexOperation;
@@ -952,6 +1058,7 @@ class Ndb
friend class NdbDictionaryImpl;
friend class NdbDictInterface;
friend class NdbBlob;
+#endif
public:
/**
@@ -959,29 +1066,26 @@ public:
* @{
*/
/**
- * The starting point of your application code is to create an
- * Ndb object.
- * This object represents the NDB kernel and is the main
- * object used in interaction with the NDB kernel.
+ * The Ndb object represents a connection to a database.
*
- * @param aCatalogName is the name of the catalog you want to use.
- * @note The catalog name provides a name space for the tables and
+ * @note The init() method must be called before the Ndb object may actually be used.
+ *
+ * @param ndb_cluster_connection is a connection to the cluster containing
+ * the database to be used
+ * @param aCatalogName is the name of the catalog to be used.
+ * @note The catalog name provides a namespace for the tables and
* indexes created in any connection from the Ndb object.
* @param aSchemaName is the name of the schema you
- * want to use. It is optional and defaults to the "def" schema.
- * @note The schema name provides an additional name space
+ * want to use.
+ * @note The schema name provides an additional namespace
* for the tables and indexes created in a given catalog.
- * @note The methods get/setDatabaseName and get/setDatabaseSchemaName
- * are equivalent to get/setCatalogName and get/setSchemaName.
- * The get/setDatabaseName and get/setDatabaseSchemaName are
- * deprecated.
*/
- Ndb(const char* aCatalogName = "", const char* aSchemaName = "def");
Ndb(Ndb_cluster_connection *ndb_cluster_connection,
const char* aCatalogName = "", const char* aSchemaName = "def");
~Ndb();
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
/**
* The current catalog name can be fetched by getCatalogName.
*
@@ -1009,7 +1113,7 @@ public:
* @param aSchemaName is the new name of the current schema
*/
void setSchemaName(const char * aSchemaName);
-
+#endif
/**
* The current database name can be fetched by getDatabaseName.
@@ -1040,22 +1144,21 @@ public:
void setDatabaseSchemaName(const char * aDatabaseSchemaName);
/**
- * Before anything else it is necessary to initialize (start)
- * the Ndb object.
+ * Initializes the Ndb object
*
* @param maxNoOfTransactions
* Maximum number of parallel
- * NdbConnection objects that should be handled by the Ndb object.
- * A value larger than 1024 will be downgraded to 1024.
- * This means that one Ndb object can handle at most 1024 parallel
- * transactions.
- * @return 0 if successful, -1 otherwise.
+ * NdbTransaction objects that can be handled by the Ndb object.
+ * Maximum value is 1024.
+ *
+ * @note each scan or index scan operation uses one extra
+ * NdbTransaction object
*
- * @note The internal implementation multiplies this value
- * with 3.
+ * @return 0 if successful, -1 otherwise.
*/
int init(int maxNoOfTransactions = 4);
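+
+  /* A minimal construction sketch; the catalog name "test_db" and the
+   * already-connected Ndb_cluster_connection object 'connection' are
+   * assumptions made for illustration only:
+   *
+   * @code
+   * Ndb myNdb(&connection, "test_db"); // catalog (database) name "test_db"
+   * if (myNdb.init(1024) == -1)        // allow up to 1024 parallel transactions
+   *   return -1;                       // initialization failed
+   * @endcode
+   */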
+#ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED
/**
* Wait for Ndb object to successfully set-up connections to
* the NDB kernel.
@@ -1068,8 +1171,8 @@ public:
* @return 0: Ndb is ready and timeout has not occurred.<br>
* -1: Timeout has expired
*/
-
int waitUntilReady(int timeout = 60);
+#endif
/** @} *********************************************************************/
@@ -1079,30 +1182,55 @@ public:
*/
/**
- * Query the database for schema information
- * (without performing any transaction).
+ * Get an object for retrieving or manipulating database schema information
+ *
+ * @note this object operates outside any transaction
*
* @return Object containing meta information about all tables
* in NDB Cluster.
*/
class NdbDictionary::Dictionary* getDictionary() const;
+
+ /** @} *********************************************************************/
+
+ /**
+ * @name Event subscriptions
+ * @{
+ */
+
+ /**
+   * Create a subscription to an event defined in the database
+ *
+ * @param eventName
+ * unique identifier of the event
+ * @param bufferLength
+ * circular buffer size for storing event data
+ *
+ * @return Object representing an event, NULL on failure
+ */
NdbEventOperation* createEventOperation(const char* eventName,
const int bufferLength);
- int dropEventOperation(NdbEventOperation*);
- void monitorEvent(NdbEventOperation *, NdbEventCallback, void*);
- int pollEvents(int aMillisecondNumber);
+ /**
+ * Drop a subscription to an event
+ *
+ * @param eventOp
+ * Event operation
+ *
+ * @return 0 on success
+ */
+ int dropEventOperation(NdbEventOperation* eventOp);
/**
- * Get the application node identity.
+ * Wait for an event to occur. Will return as soon as an event
+ * is detected on any of the created events.
*
- * Each node (DB nodes, Applications, and Management Servers)
- * has its own node identity in the NDB Cluster.
- * See documentation for the management server configuration file.
+ * @param aMillisecondNumber
+ * maximum time to wait
*
- * @return Node id of this application.
+   * @return the number of events that have occurred, -1 on failure
*/
- int getNodeId();
+ int pollEvents(int aMillisecondNumber);
/** @} *********************************************************************/
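+
+  /* A minimal subscription sketch using only the calls above; the event name
+   * "MY_EVENT" and the buffer length are assumptions, and setting up the
+   * NdbEventOperation itself (which attribute values to receive) is omitted:
+   *
+   * @code
+   * NdbEventOperation* op = myNdb->createEventOperation("MY_EVENT", 100);
+   * if (op != NULL) {
+   *   while (myNdb->pollEvents(1000) > 0) {
+   *     // an event has been detected; consume the data delivered on 'op'
+   *   }
+   *   myNdb->dropEventOperation(op);
+   * }
+   * @endcode
+   */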
@@ -1112,79 +1240,33 @@ public:
*/
/**
- * This method returns an NdbConnection which caters for the transaction.
- * When the transaction is completed it must be closed.
- * The Ndb::closeTransaction also return the NdbConnection object
- * and all other memory related to the transaction.
- * Failure to close the transaction will lead to memory leakage.
- * The transaction must be closed independent of its outcome, i.e.
- * even if there is an error.
- *
- * NDB API can be hinted to select a particular transaction coordinator.
- * The default method is round robin where each set of new transactions
- * is placed on the next NDB kernel node.
- * By providing a distribution key (usually the primary key
- * of the mostly used table of the transaction) for a record
- * the transaction will be placed on the node where the primary replica
- * of that record resides.
- * Note that this is only a hint, the system can
- * be under reconfiguration and then the NDB API
- * will use select the transaction coordinator without using
- * this hint.
- *
- * Placing the transaction coordinator close
- * to the actual data used in the transaction can in many cases
- * improve performance significantly. This is particularly true for
- * systems using TCP/IP. A system using Solaris and a 500 MHz processor
- * has a cost model for TCP/IP communication which is:
- *
- * 30 microseconds + (100 nanoseconds * no of Bytes)
- *
- * This means that if we can ensure that we use "popular" links we increase
- * buffering and thus drastically reduce the communication cost.
- * Systems using SCI has a different cost model which is:
+ * Start a transaction
*
- * 5 microseconds + (10 nanoseconds * no of Bytes)
+ * @note When the transaction is completed it must be closed using
+ * Ndb::closeTransaction or NdbTransaction::close.
+ * The transaction must be closed independent of its outcome, i.e.
+ * even if there is an error.
*
- * Thus SCI systems are much less dependent on selection of
- * transaction coordinators.
- * Typically TCP/IP systems spend 30-60% of the time during communication,
- * whereas SCI systems typically spend 5-10% of the time during
- * communication.
- * Thus SCI means that less care from the NDB API programmer is
- * needed and great scalability can be achieved even for applications using
- * data from many parts of the database.
- *
- * A simple example is an application that uses many simple updates where
- * a transaction needs to update one record.
- * This record has a 32 bit primary key,
- * which is also the distribution key.
- * Then the keyData will be the address of the integer
- * of the primary key and keyLen will be 4.
- *
- * @note Transaction priorities are not yet supported.
- *
- * @param prio The priority of the transaction.<br>
- * Priority 0 is the highest priority and is used
- * for short transactions with requirements on low delay.<br>
- * Priority 1 is a medium priority for short transactions.
- * <br>
- * Priority 2 is a medium priority for long transactions.<br>
- * Priority 3 is a low priority for long transactions.<br>
- * <em>This parameter is not currently used,
- * and can be set to any value</em>
- * @param keyData Pointer to distribution key
- * @param keyLen Length of distribution key expressed in bytes
+ * @param table Pointer to table object used for deciding
+ * which node to run the Transaction Coordinator on
+ * @param keyData Pointer to partition key corresponding to
+ * <var>table</var>
+ * @param keyLen Length of partition key expressed in bytes
*
- * @return NdbConnection object, or NULL if method failed.
+ * @return NdbTransaction object, or NULL on failure.
*/
- NdbConnection* startTransaction(Uint32 prio = 0,
- const char * keyData = 0,
- Uint32 keyLen = 0);
+ NdbTransaction* startTransaction(const NdbDictionary::Table *table= 0,
+ const char *keyData = 0,
+ Uint32 keyLen = 0);
/**
- * When a transactions is completed, the transaction has to be closed.
+ * Close a transaction.
*
+ * @note should be called after the transaction has completed, irrespective
+ * of success or failure
+ */
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
+ /**
* @note It is not allowed to call Ndb::closeTransaction after sending the
* transaction asynchronously with either
* Ndb::sendPreparedTransactions or
@@ -1194,11 +1276,13 @@ public:
* has completed before calling Ndb::closeTransaction).
* If the transaction is not committed it will be aborted.
*/
- void closeTransaction(NdbConnection* aConnection);
-
+#endif
+ void closeTransaction(NdbTransaction*);
/** @} *********************************************************************/
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
+ // to be documented later
/**
* @name Asynchronous Transactions
* @{
@@ -1209,11 +1293,10 @@ public:
* Will return as soon as at least 'minNoOfEventsToWakeUp'
* of them have completed, or the maximum time given as timeout has passed.
*
- * @param aMillisecondNumber Maximum time to wait for transactions
- * to complete.
- * Polling without wait is achieved by setting the
- * timer to zero.
- * Time is expressed in milliseconds.
+ * @param aMillisecondNumber
+ * Maximum time to wait for transactions to complete. Polling
+ * without wait is achieved by setting the timer to zero.
+ * Time is expressed in milliseconds.
* @param minNoOfEventsToWakeup Minimum number of transactions
* which has to wake up before the poll-call will return.
* If minNoOfEventsToWakeup is
@@ -1275,9 +1358,9 @@ public:
int sendPollNdb(int aMillisecondNumber = WAITFOR_RESPONSE_TIMEOUT,
int minNoOfEventsToWakeup = 1,
int forceSend = 0);
-
/** @} *********************************************************************/
-
+#endif
+
/**
* @name Error Handling
* @{
@@ -1286,7 +1369,7 @@ public:
/**
* Get the NdbError object
*
- * The NdbError object is valid until you call a new NDB API method.
+ * @note The NdbError object is valid until a new NDB API method is called.
*/
const NdbError & getNdbError() const;
@@ -1298,37 +1381,29 @@ public:
const NdbError & getNdbError(int errorCode);
+ /** @} *********************************************************************/
+
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
/**
- * setConnectString
- * @param connectString - the connectString has the following format:
- * @code
- * "nodeid=<ID>;host=host://<HOSTNAME>:<PORT>;
- * host=host://<HOSTNAME2>:<PORT>;..."
- * @endcode
- * or
- * @code
- * "nodeid=<ID>;host=<HOSTNAME>:<PORT>;host=<HOSTNAME2>:<PORT>;..."
- * @endcode
+ * Get the application node identity.
+ *
+ * @return Node id of this application.
*/
- static void setConnectString(const char * connectString);
+ int getNodeId();
bool usingFullyQualifiedNames();
- /** @} *********************************************************************/
-
-#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
-
/**
* Different types of tampering with the NDB Cluster.
* <b>Only for debugging purposes only.</b>
*/
enum TamperType {
- LockGlbChp = 1, ///< Lock GCP
- UnlockGlbChp, ///< Unlock GCP
- CrashNode, ///< Crash an NDB node
- ReadRestartGCI, ///< Request the restart GCI id from NDB Cluster
- InsertError ///< Execute an error in NDB Cluster
- ///< (may crash system)
+ LockGlbChp = 1, ///< Lock GCP
+ UnlockGlbChp, ///< Unlock GCP
+ CrashNode, ///< Crash an NDB node
+ ReadRestartGCI, ///< Request the restart GCI id from NDB Cluster
+ InsertError ///< Execute an error in NDB Cluster
+ ///< (may crash system)
};
/**
@@ -1347,9 +1422,7 @@ public:
* on type of tampering.
*/
int NdbTamper(TamperType aAction, int aNode);
-#endif
-#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
/**
* Return a unique tuple id for a table. The id sequence is
* ascending but may contain gaps.
@@ -1379,12 +1452,10 @@ public:
bool increase);
bool setTupleIdInNdb(Uint32 aTableId, Uint64 val, bool increase);
Uint64 opTupleIdOnNdb(Uint32 aTableId, Uint64 opValue, Uint32 op);
-#endif
-#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
/**
*/
- NdbConnection* hupp( NdbConnection* );
+ NdbTransaction* hupp( NdbTransaction* );
Uint32 getReference() const { return theMyRef;}
struct Free_list_usage
@@ -1411,11 +1482,11 @@ private:
void connected(Uint32 block_reference);
- NdbConnection* startTransactionLocal(Uint32 aPrio, Uint32 aFragmentId);
+ NdbTransaction* startTransactionLocal(Uint32 aPrio, Uint32 aFragmentId);
// Connect the connection object to the Database.
int NDB_connect(Uint32 tNode);
- NdbConnection* doConnect(Uint32 nodeId);
+ NdbTransaction* doConnect(Uint32 nodeId);
void doDisconnect();
NdbReceiver* getNdbScanRec();// Get a NdbScanReceiver from idle list
@@ -1447,8 +1518,8 @@ private:
void check_send_timeout();
void remove_sent_list(Uint32);
- Uint32 insert_completed_list(NdbConnection*);
- Uint32 insert_sent_list(NdbConnection*);
+ Uint32 insert_completed_list(NdbTransaction*);
+ Uint32 insert_sent_list(NdbTransaction*);
// Handle a received signal. Used by both
// synchronous and asynchronous interface
@@ -1488,20 +1559,20 @@ private:
void freeNdbScanRec(); // Free the first idle NdbScanRec obj
void freeNdbBlob(); // Free the first etc
- NdbConnection* getNdbCon(); // Get a connection from idle list
+ NdbTransaction* getNdbCon(); // Get a connection from idle list
/**
- * Get a connected NdbConnection to nodeId
+ * Get a connected NdbTransaction to nodeId
* Returns NULL if none found
*/
- NdbConnection* getConnectedNdbConnection(Uint32 nodeId);
+ NdbTransaction* getConnectedNdbTransaction(Uint32 nodeId);
// Release and disconnect from DBTC a connection
// and seize it to theConIdleList
- void releaseConnectToNdb (NdbConnection* aConnectConnection);
+ void releaseConnectToNdb (NdbTransaction*);
// Release a connection to idle list
- void releaseNdbCon (NdbConnection* aConnection);
+ void releaseNdbCon (NdbTransaction*);
int checkInitState(); // Check that we are initialized
void report_node_failure(Uint32 node_id); // Report Failed node
@@ -1511,28 +1582,30 @@ private:
int NDB_connect(); // Perform connect towards NDB Kernel
- // Release arrays of NdbConnection pointers
+ // Release arrays of NdbTransaction pointers
void releaseTransactionArrays();
- Uint32 pollCompleted(NdbConnection** aCopyArray);
+ Uint32 pollCompleted(NdbTransaction** aCopyArray);
void sendPrepTrans(int forceSend);
- void reportCallback(NdbConnection** aCopyArray, Uint32 aNoOfComplTrans);
+ void reportCallback(NdbTransaction** aCopyArray, Uint32 aNoOfComplTrans);
void waitCompletedTransactions(int milliSecs, int noOfEventsToWaitFor);
- void completedTransaction(NdbConnection* aTransaction);
- void completedScanTransaction(NdbConnection* aTransaction);
+ void completedTransaction(NdbTransaction* aTransaction);
+ void completedScanTransaction(NdbTransaction* aTransaction);
void abortTransactionsAfterNodeFailure(Uint16 aNodeId);
static
- const char * externalizeTableName(const char * internalTableName, bool fullyQualifiedNames);
+ const char * externalizeTableName(const char * internalTableName,
+ bool fullyQualifiedNames);
const char * externalizeTableName(const char * internalTableName);
- const char * internalizeTableName(const char * externalTableName);
+ const BaseString internalize_table_name(const char * external_name) const;
static
- const char * externalizeIndexName(const char * internalIndexName, bool fullyQualifiedNames);
+ const char * externalizeIndexName(const char * internalIndexName,
+ bool fullyQualifiedNames);
const char * externalizeIndexName(const char * internalIndexName);
- const char * internalizeIndexName(const NdbTableImpl * table,
- const char * externalIndexName);
+ const BaseString internalize_index_name(const NdbTableImpl * table,
+ const char * external_name) const;
static
const BaseString getDatabaseFromInternalName(const char * internalName);
@@ -1541,21 +1614,21 @@ private:
void* int2void (Uint32 val);
NdbReceiver* void2rec (void* val);
- NdbConnection* void2con (void* val);
+ NdbTransaction* void2con (void* val);
NdbOperation* void2rec_op (void* val);
NdbIndexOperation* void2rec_iop (void* val);
/******************************************************************************
* These are the private variables in this class.
*****************************************************************************/
- NdbConnection** thePreparedTransactionsArray;
- NdbConnection** theSentTransactionsArray;
- NdbConnection** theCompletedTransactionsArray;
+ NdbTransaction** thePreparedTransactionsArray;
+ NdbTransaction** theSentTransactionsArray;
+ NdbTransaction** theCompletedTransactionsArray;
Uint32 theNoOfPreparedTransactions;
Uint32 theNoOfSentTransactions;
Uint32 theNoOfCompletedTransactions;
- Uint32 theNoOfAllocatedTransactions;
+ Uint32 theRemainingStartTransactions;
Uint32 theMaxNoOfTransactions;
Uint32 theMinNoOfEventsToWakeUp;
@@ -1563,19 +1636,14 @@ private:
bool fullyQualifiedNames;
- // Ndb database name.
- char theDataBase[NDB_MAX_DATABASE_NAME_SIZE];
- // Ndb database schema name.
- char theDataBaseSchema[NDB_MAX_SCHEMA_NAME_SIZE];
- char prefixName[NDB_MAX_INTERNAL_TABLE_LENGTH];
- char * prefixEnd;
+
class NdbImpl * theImpl;
class NdbDictionaryImpl* theDictionary;
class NdbGlobalEventBufferHandle* theGlobalEventBufferHandle;
- NdbConnection* theTransactionList;
- NdbConnection** theConnectionArray;
+ NdbTransaction* theTransactionList;
+ NdbTransaction** theConnectionArray;
Uint32 theMyRef; // My block reference
Uint32 theNode; // The node number of our node
diff --git a/ndb/include/ndbapi/NdbApi.hpp b/ndb/include/ndbapi/NdbApi.hpp
index ae7025f560a..aed4d5efbd7 100644
--- a/ndb/include/ndbapi/NdbApi.hpp
+++ b/ndb/include/ndbapi/NdbApi.hpp
@@ -21,14 +21,13 @@
#include "ndb_cluster_connection.hpp"
#include "ndbapi_limits.h"
#include "Ndb.hpp"
-#include "NdbConnection.hpp"
+#include "NdbTransaction.hpp"
#include "NdbOperation.hpp"
#include "NdbScanOperation.hpp"
#include "NdbIndexOperation.hpp"
#include "NdbIndexScanOperation.hpp"
#include "NdbScanFilter.hpp"
#include "NdbRecAttr.hpp"
-#include "NdbResultSet.hpp"
#include "NdbDictionary.hpp"
#include "NdbEventOperation.hpp"
#include "NdbPool.hpp"
diff --git a/ndb/include/ndbapi/NdbBlob.hpp b/ndb/include/ndbapi/NdbBlob.hpp
index a04f4f72bc9..cb0caafe34f 100644
--- a/ndb/include/ndbapi/NdbBlob.hpp
+++ b/ndb/include/ndbapi/NdbBlob.hpp
@@ -19,11 +19,11 @@
#include <ndb_types.h>
#include <NdbDictionary.hpp>
-#include <NdbConnection.hpp>
+#include <NdbTransaction.hpp>
#include <NdbError.hpp>
class Ndb;
-class NdbConnection;
+class NdbTransaction;
class NdbOperation;
class NdbRecAttr;
class NdbTableImpl;
@@ -67,7 +67,7 @@ class NdbColumnImpl;
* cases NdbBlob is forced to do implicit executes. To avoid this,
* operate on complete blob parts.
*
- * Use NdbConnection::executePendingBlobOps to flush your reads and
+ * Use NdbTransaction::executePendingBlobOps to flush your reads and
* writes. It avoids execute penalty if nothing is pending. It is not
* needed after execute (obviously) or after next scan result.
*
@@ -88,8 +88,13 @@ class NdbColumnImpl;
* - lock mode vs allowed operation is not checked
* - too many pending blob ops can blow up i/o buffers
* - table and its blob part tables are not created atomically
+ */
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
+/**
* - there is no support for an asynchronous interface
*/
+#endif
+
class NdbBlob {
public:
/**
@@ -102,6 +107,9 @@ public:
Closed = 3,
Invalid = 9
};
+ /**
+ * Get the state of a NdbBlob object.
+ */
State getState();
/**
* Inline blob header.
@@ -111,7 +119,7 @@ public:
};
/**
* Prepare to read blob value. The value is available after execute.
- * Use getNull to check for NULL and getLength to get the real length
+ * Use getNull() to check for NULL and getLength() to get the real length
* and to check for truncation. Sets current read/write position to
* after the data read.
*/
@@ -124,10 +132,10 @@ public:
*/
int setValue(const void* data, Uint32 bytes);
/**
- * Callback for setActiveHook. Invoked immediately when the prepared
- * operation has been executed (but not committed). Any getValue or
- * setValue is done first. The blob handle is active so readData or
- * writeData etc can be used to manipulate blob value. A user-defined
+ * Callback for setActiveHook(). Invoked immediately when the prepared
+ * operation has been executed (but not committed). Any getValue() or
+ * setValue() is done first. The blob handle is active so readData or
+ * writeData() etc can be used to manipulate blob value. A user-defined
* argument is passed along. Returns non-zero on error.
*/
typedef int ActiveHook(NdbBlob* me, void* arg);
@@ -190,19 +198,26 @@ public:
const NdbError& getNdbError() const;
/**
* Return info about all blobs in this operation.
+ *
+ * Get first blob in list.
*/
- // Get first blob in list
NdbBlob* blobsFirstBlob();
- // Get next blob in list after this one
+ /**
+ * Return info about all blobs in this operation.
+ *
+ * Get next blob in list. Initialize with blobsFirstBlob().
+ */
NdbBlob* blobsNextBlob();
private:
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
friend class Ndb;
- friend class NdbConnection;
+ friend class NdbTransaction;
friend class NdbOperation;
friend class NdbScanOperation;
friend class NdbDictionaryImpl;
friend class NdbResultSet; // atNextResult
+#endif
// state
State theState;
void setState(State newState);
@@ -211,7 +226,7 @@ private:
static void getBlobTable(NdbTableImpl& bt, const NdbTableImpl* t, const NdbColumnImpl* c);
// ndb api stuff
Ndb* theNdb;
- NdbConnection* theNdbCon;
+ NdbTransaction* theNdbCon;
NdbOperation* theNdbOp;
const NdbTableImpl* theTable;
const NdbTableImpl* theAccessTable;
@@ -301,15 +316,15 @@ private:
// callbacks
int invokeActiveHook();
// blob handle maintenance
- int atPrepare(NdbConnection* aCon, NdbOperation* anOp, const NdbColumnImpl* aColumn);
- int preExecute(ExecType anExecType, bool& batch);
- int postExecute(ExecType anExecType);
+ int atPrepare(NdbTransaction* aCon, NdbOperation* anOp, const NdbColumnImpl* aColumn);
+ int preExecute(NdbTransaction::ExecType anExecType, bool& batch);
+ int postExecute(NdbTransaction::ExecType anExecType);
int preCommit();
int atNextResult();
// errors
void setErrorCode(int anErrorCode, bool invalidFlag = true);
void setErrorCode(NdbOperation* anOp, bool invalidFlag = true);
- void setErrorCode(NdbConnection* aCon, bool invalidFlag = true);
+ void setErrorCode(NdbTransaction* aCon, bool invalidFlag = true);
#ifdef VM_TRACE
int getOperationType() const;
friend class NdbOut& operator<<(NdbOut&, const NdbBlob&);
diff --git a/ndb/include/ndbapi/NdbDictionary.hpp b/ndb/include/ndbapi/NdbDictionary.hpp
index a541cd5190e..69ce6616ca1 100644
--- a/ndb/include/ndbapi/NdbDictionary.hpp
+++ b/ndb/include/ndbapi/NdbDictionary.hpp
@@ -14,18 +14,6 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-/*****************************************************************************
- * Name: NdbDictionary.hpp
- * Include:
- * Link:
- * Author: Jonas Oreland
- * Date: 2003-05-14
- * Version: 0.1
- * Description: Data dictionary support
- * Documentation:
- * Adjust: 2003-05-14 Jonas Oreland First version.
- ****************************************************************************/
-
#ifndef NdbDictionary_H
#define NdbDictionary_H
@@ -39,11 +27,17 @@ typedef struct charset_info_st CHARSET_INFO;
* @class NdbDictionary
* @brief Data dictionary class
*
- * This class supports all schema data definition and enquiry such as:
- * -# Creating tables (Dictionary::createTable) and table columns
- * -# Dropping tables (Dictionary::dropTable)
- * -# Creating secondary indexes (Dictionary::createIndex)
- * -# Dropping secondary indexes (Dictionary::dropIndex)
+ * The preferred and supported way to create and drop tables and indexes
+ * in NDB is through the
+ * MySQL Server (see the MySQL Reference Manual, section MySQL Cluster).
+ *
+ * Tables and indexes that are created directly through the
+ * NdbDictionary class
+ * cannot be viewed from the MySQL Server.
+ * Dropping indexes directly via the NDB API will cause inconsistencies
+ * if they were originally created from the MySQL Server.
+ *
+ * This class supports schema data enquiries such as:
* -# Enquiries about tables
* (Dictionary::getTable, Table::getNoOfColumns,
* Table::getPrimaryKey, and Table::getNoOfPrimaryKeys)
@@ -51,12 +45,19 @@ typedef struct charset_info_st CHARSET_INFO;
* (Dictionary::getIndex, Index::getNoOfColumns,
* and Index::getColumn)
*
- * NdbDictionary has several help (inner) classes:
+ * This class supports schema data definition such as:
+ * -# Creating tables (Dictionary::createTable) and table columns
+ * -# Dropping tables (Dictionary::dropTable)
+ * -# Creating secondary indexes (Dictionary::createIndex)
+ * -# Dropping secondary indexes (Dictionary::dropIndex)
+ *
+ * NdbDictionary has several helper (inner) classes to support this:
+ * -# NdbDictionary::Dictionary, the dictionary used to manage schema objects
* -# NdbDictionary::Table for creating tables
* -# NdbDictionary::Column for creating table columns
* -# NdbDictionary::Index for creating secondary indexes
- *
- * See @ref ndbapi_example4.cpp for details of usage.
+ *
+ * See @ref ndbapi_simple_index.cpp for details of usage.
*/
class NdbDictionary {
public:
@@ -101,8 +102,6 @@ public:
SystemTable = 1, ///< System table
UserTable = 2, ///< User table (may be temporary)
UniqueHashIndex = 3, ///< Unique un-ordered hash index
- HashIndex = 4, ///< Non-unique un-ordered hash index
- UniqueOrderedIndex = 5, ///< Unique ordered index
OrderedIndex = 6, ///< Non-unique ordered index
HashIndexTrigger = 7, ///< Index maintenance, internal
IndexTrigger = 8, ///< Index maintenance, internal
@@ -156,14 +155,20 @@ public:
/**
* @class Column
- * @brief Represents an column in an NDB Cluster table
+ * @brief Represents a column in an NDB Cluster table
*
- * Each column has a type. The type of a column is determind by a number
+ * Each column has a type. The type of a column is determined by a number
* of type specifiers.
* The type specifiers are:
* - Builtin type
* - Array length or max length
- * - Precision and scale
+ * - Precision and scale (not used yet)
+ * - Character set for string types
+ * - Inline and part sizes for blobs
+ *
+   * Types in general correspond to MySQL types and their variants.
+   * Data formats are the same as in MySQL. The NDB API provides no support
+   * for constructing such formats; the NDB kernel does, however, check them.
*/
class Column {
public:
@@ -171,78 +176,56 @@ public:
* The builtin column types
*/
enum Type {
- Undefined=0,///< Undefined
- Tinyint, ///< 8 bit. 1 byte signed integer, can be used in array
- Tinyunsigned, ///< 8 bit. 1 byte unsigned integer, can be used in array
- Smallint, ///< 16 bit. 2 byte signed integer, can be used in array
- Smallunsigned, ///< 16 bit. 2 byte unsigned integer, can be used in array
- Mediumint, ///< 24 bit. 3 byte signed integer, can be used in array
- Mediumunsigned,///< 24 bit. 3 byte unsigned integer, can be used in array
- Int, ///< 32 bit. 4 byte signed integer, can be used in array
- Unsigned, ///< 32 bit. 4 byte unsigned integer, can be used in array
- Bigint, ///< 64 bit. 8 byte signed integer, can be used in array
- Bigunsigned, ///< 64 Bit. 8 byte signed integer, can be used in array
- Float, ///< 32-bit float. 4 bytes float, can be used in array
- Double, ///< 64-bit float. 8 byte float, can be used in array
- Olddecimal, ///< MySQL < 5.0 signed decimal, Precision, Scale
- Char, ///< Len. A fixed array of 1-byte chars
- Varchar, ///< Max len
- Binary, ///< Len
- Varbinary, ///< Max len
- Datetime, ///< Precision down to 1 sec (sizeof(Datetime) == 8 bytes )
- Date, ///< Precision down to 1 day(sizeof(Date) == 4 bytes )
- Blob, ///< Binary large object (see NdbBlob)
- Text, ///< Text blob
- Time = 25, ///< Time without date
- Year = 26, ///< Year 1901-2155 (1 byte)
- Timestamp = 27, ///< Unix time
- Olddecimalunsigned = 28
+ Undefined = NDB_TYPE_UNDEFINED, ///< Undefined
+ Tinyint = NDB_TYPE_TINYINT, ///< 8 bit. 1 byte signed integer, can be used in array
+ Tinyunsigned = NDB_TYPE_TINYUNSIGNED, ///< 8 bit. 1 byte unsigned integer, can be used in array
+ Smallint = NDB_TYPE_SMALLINT, ///< 16 bit. 2 byte signed integer, can be used in array
+ Smallunsigned = NDB_TYPE_SMALLUNSIGNED, ///< 16 bit. 2 byte unsigned integer, can be used in array
+ Mediumint = NDB_TYPE_MEDIUMINT, ///< 24 bit. 3 byte signed integer, can be used in array
+ Mediumunsigned = NDB_TYPE_MEDIUMUNSIGNED,///< 24 bit. 3 byte unsigned integer, can be used in array
+ Int = NDB_TYPE_INT, ///< 32 bit. 4 byte signed integer, can be used in array
+ Unsigned = NDB_TYPE_UNSIGNED, ///< 32 bit. 4 byte unsigned integer, can be used in array
+ Bigint = NDB_TYPE_BIGINT, ///< 64 bit. 8 byte signed integer, can be used in array
+    Bigunsigned = NDB_TYPE_BIGUNSIGNED,   ///< 64 bit. 8 byte unsigned integer, can be used in array
+ Float = NDB_TYPE_FLOAT, ///< 32-bit float. 4 bytes float, can be used in array
+ Double = NDB_TYPE_DOUBLE, ///< 64-bit float. 8 byte float, can be used in array
+ Olddecimal = NDB_TYPE_OLDDECIMAL, ///< MySQL < 5.0 signed decimal, Precision, Scale
+ Olddecimalunsigned = NDB_TYPE_OLDDECIMALUNSIGNED,
+ Decimal = NDB_TYPE_DECIMAL, ///< MySQL >= 5.0 signed decimal, Precision, Scale
+ Decimalunsigned = NDB_TYPE_DECIMALUNSIGNED,
+ Char = NDB_TYPE_CHAR, ///< Len. A fixed array of 1-byte chars
+ Varchar = NDB_TYPE_VARCHAR, ///< Length bytes: 1, Max: 255
+ Binary = NDB_TYPE_BINARY, ///< Len
+ Varbinary = NDB_TYPE_VARBINARY, ///< Length bytes: 1, Max: 255
+ Datetime = NDB_TYPE_DATETIME, ///< Precision down to 1 sec (sizeof(Datetime) == 8 bytes )
+ Date = NDB_TYPE_DATE, ///< Precision down to 1 day(sizeof(Date) == 4 bytes )
+ Blob = NDB_TYPE_BLOB, ///< Binary large object (see NdbBlob)
+ Text = NDB_TYPE_TEXT, ///< Text blob
+ Bit = NDB_TYPE_BIT, ///< Bit, length specifies no of bits
+ Longvarchar = NDB_TYPE_LONGVARCHAR, ///< Length bytes: 2, little-endian
+ Longvarbinary = NDB_TYPE_LONGVARBINARY, ///< Length bytes: 2, little-endian
+ Time = NDB_TYPE_TIME, ///< Time without date
+ Year = NDB_TYPE_YEAR, ///< Year 1901-2155 (1 byte)
+ Timestamp = NDB_TYPE_TIMESTAMP ///< Unix time
};
/**
* @name General
* @{
*/
- /**
- * Constructor
- * @param name Name of column
- */
- Column(const char * name = "");
- /**
- * Copy constructor
- * @param column Column to be copied
- */
- Column(const Column& column);
- ~Column();
/**
- * Set name of column
- * @param name Name of the column
- */
- void setName(const char * name);
-
- /**
* Get name of column
* @return Name of the column
*/
const char* getName() const;
/**
- * Set whether column is nullable or not
- */
- void setNullable(bool);
-
- /**
* Get if the column is nullable or not
*/
bool getNullable() const;
/**
- * Set that column is part of primary key
- */
- void setPrimaryKey(bool);
-
- /**
* Check if column is part of primary key
*/
bool getPrimaryKey() const;
@@ -259,92 +242,62 @@ public:
*/
bool equal(const Column& column) const;
+
/** @} *******************************************************************/
/**
- * @name Type Specifiers
+ * @name Get Type Specifiers
* @{
*/
/**
- * Set type of column
- * @param type Type of column
- *
- * @note setType resets <em>all</em> column attributes
- * to (type dependent) defaults and should be the first
- * method to call. Default type is Unsigned.
- */
- void setType(Type type);
-
- /**
* Get type of column
*/
Type getType() const;
/**
- * Set precision of column.
- * @note Only applicable for decimal types
- */
- void setPrecision(int);
-
- /**
* Get precision of column.
* @note Only applicable for decimal types
*/
int getPrecision() const;
/**
- * Set scale of column.
- * @note Only applicable for decimal types
- */
- void setScale(int);
-
- /**
* Get scale of column.
* @note Only applicable for decimal types
*/
int getScale() const;
/**
- * Set length for column
- * Array length for column or max length for variable length arrays.
- */
- void setLength(int length);
-
- /**
* Get length for column
* Array length for column or max length for variable length arrays.
*/
int getLength() const;
/**
- * For Char or Varchar or Text, set or get MySQL CHARSET_INFO. This
+ * For Char or Varchar or Text, get MySQL CHARSET_INFO. This
* specifies both character set and collation. See get_charset()
* etc in MySQL. (The cs is not "const" in MySQL).
*/
- void setCharset(CHARSET_INFO* cs);
CHARSET_INFO* getCharset() const;
+
/**
- * For blob, set or get "inline size" i.e. number of initial bytes
+ * For blob, get "inline size" i.e. number of initial bytes
* to store in table's blob attribute. This part is normally in
* main memory and can be indexed and interpreted.
*/
- void setInlineSize(int size);
int getInlineSize() const;
/**
- * For blob, set or get "part size" i.e. number of bytes to store in
+ * For blob, get "part size" i.e. number of bytes to store in
* each tuple of the "blob table". Can be set to zero to omit parts
* and to allow only inline bytes ("tinyblob").
*/
- void setPartSize(int size);
int getPartSize() const;
/**
     * For blob, get "stripe size" i.e. number of consecutive
* <em>parts</em> to store in each node group.
*/
- void setStripeSize(int size);
int getStripeSize() const;
/**
@@ -353,64 +306,162 @@ public:
int getSize() const;
/**
- * Set distribution key
+ * Check if column is part of partition key
*
- * A <em>distribution key</em> is a set of attributes which are used
+ * A <em>partition key</em> is a set of attributes which are used
* to distribute the tuples onto the NDB nodes.
- * The distribution key uses the NDB Cluster hashing function.
+ * The partition key uses the NDB Cluster hashing function.
*
* An example where this is useful is TPC-C where it might be
- * good to use the warehouse id and district id as the distribution key.
+ * good to use the warehouse id and district id as the partition key.
* This would place all data for a specific district and warehouse
* in the same database node.
*
* Locally in the fragments the full primary key
* will still be used with the hashing algorithm.
*
- * @param enable If set to true, then the column will be part of
- * the distribution key.
+   * @return true if the column is part of 
+ * the partition key.
+ */
+ bool getPartitionKey() const;
+#ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED
+ inline bool getDistributionKey() const { return getPartitionKey(); };
+#endif
+
+ /** @} *******************************************************************/
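
A small sketch of the TPC-C case mentioned above: hypothetical warehouse and
district id columns marked as the partition key; Table::addColumn(), declared
further down in this header, attaches them to a table definition.

    #include <NdbApi.hpp>

    // Both columns are part of the primary key and of the partition key, so
    // all rows of one (warehouse, district) pair hash to the same node group.
    static void add_tpcc_key_columns(NdbDictionary::Table& tab)
    {
      NdbDictionary::Column colWarehouse("W_ID");
      colWarehouse.setType(NdbDictionary::Column::Unsigned);
      colWarehouse.setPrimaryKey(true);
      colWarehouse.setPartitionKey(true);
      tab.addColumn(colWarehouse);

      NdbDictionary::Column colDistrict("D_ID");
      colDistrict.setType(NdbDictionary::Column::Unsigned);
      colDistrict.setPrimaryKey(true);
      colDistrict.setPartitionKey(true);
      tab.addColumn(colDistrict);
    }
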
+
+
+ /**
+ * @name Column creation
+ * @{
+ *
+   * These operations should normally not be performed in an NdbApi program
+   * as the results will not be visible in the MySQL Server.
+ *
*/
- void setDistributionKey(bool enable);
/**
- * Check if column is part of distribution key
- * @see setDistributionKey
+ * Constructor
+ * @param name Name of column
*/
- bool getDistributionKey() const;
- /** @} *******************************************************************/
+ Column(const char * name = "");
+ /**
+ * Copy constructor
+ * @param column Column to be copied
+ */
+ Column(const Column& column);
+ ~Column();
-#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
- void setTupleKey(bool);
- bool getTupleKey() const;
-
- void setDistributionGroup(bool, int bits = 16);
- bool getDistributionGroup() const;
- int getDistributionGroupBits() const;
-
- void setIndexOnlyStorage(bool);
- bool getIndexOnlyStorage() const;
+ /**
+ * Set name of column
+ * @param name Name of the column
+ */
+ void setName(const char * name);
- const Table * getBlobTable() const;
+ /**
+ * Set whether column is nullable or not
+ */
+ void setNullable(bool);
+
+ /**
+ * Set that column is part of primary key
+ */
+ void setPrimaryKey(bool);
+
+ /**
+ * Set type of column
+ * @param type Type of column
+ *
+ * @note setType resets <em>all</em> column attributes
+ * to (type dependent) defaults and should be the first
+ * method to call. Default type is Unsigned.
+ */
+ void setType(Type type);
+
+ /**
+ * Set precision of column.
+ * @note Only applicable for decimal types
+ */
+ void setPrecision(int);
+
+ /**
+ * Set scale of column.
+ * @note Only applicable for decimal types
+ */
+ void setScale(int);
+
+ /**
+ * Set length for column
+ * Array length for column or max length for variable length arrays.
+ */
+ void setLength(int length);
+
+ /**
+   * For Char or Varchar or Text, set MySQL CHARSET_INFO.  This
+ * specifies both character set and collation. See get_charset()
+ * etc in MySQL. (The cs is not "const" in MySQL).
+ */
+ void setCharset(CHARSET_INFO* cs);
+
+ /**
+   * For blob, set "inline size" i.e. number of initial bytes
+ * to store in table's blob attribute. This part is normally in
+ * main memory and can be indexed and interpreted.
+ */
+ void setInlineSize(int size);
+
+ /**
+   * For blob, set "part size" i.e. number of bytes to store in
+ * each tuple of the "blob table". Can be set to zero to omit parts
+ * and to allow only inline bytes ("tinyblob").
+ */
+ void setPartSize(int size);
+
+ /**
+   * For blob, set "stripe size" i.e. number of consecutive
+ * <em>parts</em> to store in each node group.
+ */
+ void setStripeSize(int size);
/**
- * @name ODBC Specific methods
- * @{
+ * Set partition key
+ * @see getPartitionKey
+ *
+ * @param enable If set to true, then the column will be part of
+ * the partition key.
*/
- void setAutoIncrement(bool);
+ void setPartitionKey(bool enable);
+#ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED
+ inline void setDistributionKey(bool enable)
+ { setPartitionKey(enable); };
+#endif
+
+ /** @} *******************************************************************/
+
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
+ const Table * getBlobTable() const;
+
+ void setAutoIncrement(bool);
bool getAutoIncrement() const;
void setAutoIncrementInitialValue(Uint64 val);
void setDefaultValue(const char*);
const char* getDefaultValue() const;
- /** @} *******************************************************************/
static const Column * FRAGMENT;
+ static const Column * FRAGMENT_MEMORY;
static const Column * ROW_COUNT;
static const Column * COMMIT_COUNT;
+ static const Column * ROW_SIZE;
+ static const Column * RANGE_NO;
+
+ int getSizeInBytes() const;
#endif
private:
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
friend class NdbRecAttr;
friend class NdbColumnImpl;
+#endif
class NdbColumnImpl & m_impl;
Column(NdbColumnImpl&);
Column& operator=(const Column&);
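
Putting the type specifiers and the creation methods above together, a sketch
of defining a Varchar column and a Text blob column; the names are invented,
and the CHARSET_INFO handle is assumed to be obtained elsewhere (for example
with get_charset_by_name() from the MySQL sources, which is not part of this
header).

    #include <NdbApi.hpp>

    // cs: character set/collation handle obtained from the MySQL sources
    // (an assumption, see the lead-in above).
    static void add_payload_columns(NdbDictionary::Table& tab, CHARSET_INFO* cs)
    {
      // setType() first: it resets all other attributes to type defaults.
      NdbDictionary::Column name("NAME");
      name.setType(NdbDictionary::Column::Varchar);
      name.setLength(64);             // max length of the variable-length array
      name.setCharset(cs);            // character set and collation
      name.setNullable(true);
      tab.addColumn(name);

      NdbDictionary::Column note("NOTE");
      note.setType(NdbDictionary::Column::Text);
      note.setCharset(cs);
      note.setInlineSize(256);        // first 256 bytes kept in the main table
      note.setPartSize(2000);         // remaining bytes in 2000-byte blob parts
      note.setNullable(true);
      tab.addColumn(note);
    }
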
@@ -448,30 +499,6 @@ public:
* @name General
* @{
*/
- /**
- * Constructor
- * @param name Name of table
- */
- Table(const char * name = "");
-
- /**
- * Copy constructor
- * @param table Table to be copied
- */
- Table(const Table& table);
- virtual ~Table();
-
- /**
- * Assignment operator, deep copy
- * @param table Table to be copied
- */
- Table& operator=(const Table&);
-
- /**
- * Name of table
- * @param name Name of table
- */
- void setName(const char * name);
/**
* Get table name
@@ -484,12 +511,6 @@ public:
int getTableId() const;
/**
- * Add a column definition to a table
- * @note creates a copy
- */
- void addColumn(const Column &);
-
- /**
* Get column definition via name.
* @return null if none existing name
*/
@@ -530,17 +551,7 @@ public:
* The default value is true and indicates a normal table
* with full checkpointing and logging activated.
*/
- void setLogging(bool);
-
- /**
- * @see NdbDictionary::Table::setLogging.
- */
bool getLogging() const;
-
- /**
- * Set fragmentation type
- */
- void setFragmentType(FragmentType);
/**
* Get fragmentation type
@@ -548,13 +559,6 @@ public:
FragmentType getFragmentType() const;
/**
- * Set KValue (Hash parameter.)
- * Only allowed value is 6.
- * Later implementations might add flexibility in this parameter.
- */
- void setKValue(int kValue);
-
- /**
* Get KValue (Hash parameter.)
* Only allowed value is 6.
* Later implementations might add flexibility in this parameter.
@@ -562,15 +566,6 @@ public:
int getKValue() const;
/**
- * Set MinLoadFactor (Hash parameter.)
- * This value specifies the load factor when starting to shrink
- * the hash table.
- * It must be smaller than MaxLoadFactor.
- * Both these factors are given in percentage.
- */
- void setMinLoadFactor(int);
-
- /**
* Get MinLoadFactor (Hash parameter.)
* This value specifies the load factor when starting to shrink
* the hash table.
@@ -580,16 +575,6 @@ public:
int getMinLoadFactor() const;
/**
- * Set MaxLoadFactor (Hash parameter.)
- * This value specifies the load factor when starting to split
- * the containers in the local hash tables.
- * 100 is the maximum which will optimize memory usage.
- * A lower figure will store less information in each container and thus
- * find the key faster but consume more memory.
- */
- void setMaxLoadFactor(int);
-
- /**
* Get MaxLoadFactor (Hash parameter.)
* This value specifies the load factor when starting to split
* the containers in the local hash tables.
@@ -631,15 +616,83 @@ public:
const void* getFrmData() const;
Uint32 getFrmLength() const;
+ /** @} *******************************************************************/
+
+ /**
+ * @name Table creation
+ * @{
+ *
+ * These methods should normally not be used in an application as
+ * the result is not accessible from the MySQL Server
+ *
+ */
+
/**
- * Set frm file to store with this table
- */
- void setFrm(const void* data, Uint32 len);
+ * Constructor
+ * @param name Name of table
+ */
+ Table(const char * name = "");
+ /**
+ * Copy constructor
+ * @param table Table to be copied
+ */
+ Table(const Table& table);
+ virtual ~Table();
+
/**
- * Set table object type
+ * Assignment operator, deep copy
+ * @param table Table to be copied
*/
- void setObjectType(Object::Type type);
+ Table& operator=(const Table& table);
+
+ /**
+ * Name of table
+ * @param name Name of table
+ */
+ void setName(const char * name);
+
+ /**
+ * Add a column definition to a table
+ * @note creates a copy
+ */
+ void addColumn(const Column &);
+
+ /**
+ * @see NdbDictionary::Table::getLogging.
+ */
+ void setLogging(bool);
+
+ /**
+ * Set fragmentation type
+ */
+ void setFragmentType(FragmentType);
+
+ /**
+ * Set KValue (Hash parameter.)
+ * Only allowed value is 6.
+ * Later implementations might add flexibility in this parameter.
+ */
+ void setKValue(int kValue);
+
+ /**
+ * Set MinLoadFactor (Hash parameter.)
+ * This value specifies the load factor when starting to shrink
+ * the hash table.
+ * It must be smaller than MaxLoadFactor.
+ * Both these factors are given in percentage.
+ */
+ void setMinLoadFactor(int);
+
+ /**
+ * Set MaxLoadFactor (Hash parameter.)
+ * This value specifies the load factor when starting to split
+ * the containers in the local hash tables.
+ * 100 is the maximum which will optimize memory usage.
+ * A lower figure will store less information in each container and thus
+ * find the key faster but consume more memory.
+ */
+ void setMaxLoadFactor(int);
/**
* Get table object type
@@ -656,18 +709,32 @@ public:
*/
virtual int getObjectVersion() const;
+ /**
+ * Set frm file to store with this table
+ */
+ void setFrm(const void* data, Uint32 len);
+
+ /**
+ * Set table object type
+ */
+ void setObjectType(Object::Type type);
+
/** @} *******************************************************************/
-#ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
void setStoredTable(bool x) { setLogging(x); }
bool getStoredTable() const { return getLogging(); }
int getRowSizeInBytes() const ;
int createTableInDb(Ndb*, bool existingEqualIsOk = true) const ;
+
+ int getReplicaCount() const ;
#endif
private:
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
friend class NdbTableImpl;
+#endif
class NdbTableImpl & m_impl;
Table(NdbTableImpl&);
};
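
Collecting the table-creation methods in one sketch; as noted above, such a
table is not visible from the MySQL Server.  The table and column names are
invented, and Ndb::getDictionary() is assumed from the Ndb class.

    #include <NdbApi.hpp>

    int create_demo_table(Ndb* ndb)
    {
      NdbDictionary::Dictionary* dict = ndb->getDictionary();

      NdbDictionary::Table tab("DEMO_TABLE");
      tab.setLogging(true);                    // checkpointing and logging on

      NdbDictionary::Column id("ID");
      id.setType(NdbDictionary::Column::Unsigned);
      id.setPrimaryKey(true);
      id.setNullable(false);
      tab.addColumn(id);                       // addColumn() stores a copy

      NdbDictionary::Column value("VALUE");
      value.setType(NdbDictionary::Column::Int);
      value.setNullable(true);
      tab.addColumn(value);

      if (dict->createTable(tab) == -1)
        return -1;                             // see dict->getNdbError()
      return 0;
    }

The table can be removed again with Dictionary::dropTable("DEMO_TABLE").
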
@@ -678,29 +745,18 @@ public:
*/
class Index : public Object {
public:
- /**
- * Constructor
- * @param name Name of index
+
+ /**
+ * @name Getting Index properties
+ * @{
*/
- Index(const char * name = "");
- virtual ~Index();
/**
- * Set the name of an index
- */
- void setName(const char * name);
-
- /**
* Get the name of an index
*/
const char * getName() const;
/**
- * Define the name of the table to be indexed
- */
- void setTable(const char * name);
-
- /**
* Get the name of the table being indexed
*/
const char * getTable() const;
@@ -710,22 +766,89 @@ public:
*/
unsigned getNoOfColumns() const;
+#ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED
/**
* Get the number of columns in the index
     * Deprecated, use getNoOfColumns instead.
*/
int getNoOfIndexColumns() const;
+#endif
/**
* Get a specific column in the index
*/
const Column * getColumn(unsigned no) const ;
+#ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED
/**
* Get a specific column name in the index
     * Deprecated, use getColumn instead.
*/
const char * getIndexColumn(int no) const ;
+#endif
+
+ /**
+ * Represents type of index
+ */
+ enum Type {
+ Undefined = 0, ///< Undefined object type (initial value)
+ UniqueHashIndex = 3, ///< Unique un-ordered hash index
+ ///< (only one currently supported)
+ OrderedIndex = 6 ///< Non-unique ordered index
+ };
+
+ /**
+ * Get index type of the index
+ */
+ Type getType() const;
+
+ /**
+ * Check if index is set to be stored on disk
+ *
+   * @return true if logging is enabled
+ *
+ * @note Non-logged indexes are rebuilt at system restart.
+ * @note Ordered index does not currently support logging.
+ */
+ bool getLogging() const;
+
+ /**
+ * Get object status
+ */
+ virtual Object::Status getObjectStatus() const;
+
+ /**
+ * Get object version
+ */
+ virtual int getObjectVersion() const;
+
+ /** @} *******************************************************************/
+
+ /**
+ * @name Index creation
+ * @{
+ *
+ * These methods should normally not be used in an application as
+ * the result will not be visible from the MySQL Server
+ *
+ */
+
+ /**
+ * Constructor
+ * @param name Name of index
+ */
+ Index(const char * name = "");
+ virtual ~Index();
+
+ /**
+ * Set the name of an index
+ */
+ void setName(const char * name);
+
+ /**
+ * Define the name of the table to be indexed
+ */
+ void setTable(const char * name);
/**
* Add a column to the index definition
@@ -741,6 +864,7 @@ public:
*/
void addColumnName(const char * name);
+#ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED
/**
* Add a column name to the index definition
* Note that the order of indexes will be in
@@ -748,6 +872,7 @@ public:
     * Deprecated, use addColumnName instead.
*/
void addIndexColumn(const char * name);
+#endif
/**
* Add several column names to the index definition
@@ -756,6 +881,7 @@ public:
*/
void addColumnNames(unsigned noOfNames, const char ** names);
+#ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED
/**
* Add several column names to the index definition
* Note that the order of indexes will be in
@@ -763,18 +889,7 @@ public:
     * Deprecated, use addColumnNames instead.
*/
void addIndexColumns(int noOfNames, const char ** names);
-
- /**
- * Represents type of index
- */
- enum Type {
- Undefined = 0, ///< Undefined object type (initial value)
- UniqueHashIndex = 3, ///< Unique un-ordered hash index
- ///< (only one currently supported)
- HashIndex = 4, ///< Non-unique un-ordered hash index
- UniqueOrderedIndex = 5, ///< Unique ordered index
- OrderedIndex = 6 ///< Non-unique ordered index
- };
+#endif
/**
* Set index type of the index
@@ -782,47 +897,25 @@ public:
void setType(Type type);
/**
- * Get index type of the index
- */
- Type getType() const;
-
- /**
* Enable/Disable index storage on disk
*
* @param enable If enable is set to true, then logging becomes enabled
*
- * @see NdbDictionary::Table::setLogging
- *
- * @note Non-logged indexes are rebuilt at system restart.
- * @note Ordered index does not currently support logging.
+ * @see NdbDictionary::Index::getLogging
*/
void setLogging(bool enable);
- /**
- * Check if index is set to be stored on disk
- *
- * @see NdbDictionary::Index::setLogging
- */
- bool getLogging() const;
-
#ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED
void setStoredIndex(bool x) { setLogging(x); }
bool getStoredIndex() const { return getLogging(); }
#endif
- /**
- * Get object status
- */
- virtual Object::Status getObjectStatus() const;
-
- /**
- * Get object version
- */
- virtual int getObjectVersion() const;
+ /** @} *******************************************************************/
private:
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
friend class NdbIndexImpl;
-
+#endif
class NdbIndexImpl & m_impl;
Index(NdbIndexImpl&);
};
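
A sketch of creating a non-logged ordered index on the hypothetical table from
the previous sketch, using only methods declared in this class.

    #include <NdbApi.hpp>

    int create_demo_index(NdbDictionary::Dictionary* dict)
    {
      NdbDictionary::Index idx("IDX_VALUE");
      idx.setTable("DEMO_TABLE");              // table to be indexed
      idx.setType(NdbDictionary::Index::OrderedIndex);
      idx.setLogging(false);                   // ordered indexes are not logged
      idx.addColumnName("VALUE");

      return dict->createIndex(idx);           // 0 on success, -1 otherwise
    }
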
@@ -833,35 +926,137 @@ public:
*/
class Event : public Object {
public:
- enum TableEvent { TE_INSERT=1, TE_DELETE=2, TE_UPDATE=4, TE_ALL=7 };
+ /**
+ * Specifies the type of database operations an Event listens to
+ */
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
+ /** TableEvent must match 1 << TriggerEvent */
+#endif
+ enum TableEvent {
+ TE_INSERT=1, ///< Insert event on table
+ TE_DELETE=2, ///< Delete event on table
+ TE_UPDATE=4, ///< Update event on table
+ TE_ALL=7 ///< Any/all event on table (not relevant when
+ ///< events are received)
+ };
+ /**
+ * Specifies the durability of an event
+ * (future version may supply other types)
+ */
enum EventDurability {
- ED_UNDEFINED = 0,
+ ED_UNDEFINED
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
+ = 0
+#endif
#if 0 // not supported
- ED_SESSION = 1,
+ ,ED_SESSION = 1,
// Only this API can use it
// and it's deleted after api has disconnected or ndb has restarted
- ED_TEMPORARY = 2,
+ ED_TEMPORARY = 2
// All API's can use it,
    // But it's removed when ndb is restarted
-#endif
- ED_PERMANENT = 3
- // All API's can use it,
- // It's still defined after a restart
+#endif
+    ,ED_PERMANENT  ///< All APIs can use it.
+ ///< It's still defined after a cluster system restart
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
+ = 3
+#endif
};
-
+
+ /**
+ * Constructor
+ * @param name Name of event
+ */
Event(const char *name);
+ /**
+ * Constructor
+ * @param name Name of event
+ * @param table Reference retrieved from NdbDictionary
+ */
+ Event(const char *name, const NdbDictionary::Table& table);
virtual ~Event();
- void setName(const char *);
- void setTable(const char *);
- void addTableEvent(const TableEvent);
- void setDurability(const EventDurability);
+ /**
+ * Set unique identifier for the event
+ */
+ void setName(const char *name);
+ /**
+ * Get unique identifier for the event
+ */
+ const char *getName() const;
+ /**
+ * Define table on which events should be detected
+ *
+   * @note calling this method will default to detection
+   *       of events on all columns. Subsequent calls to
+   *       addEventColumn() will override this.
+ *
+ * @param table reference retrieved from NdbDictionary
+ */
+ void setTable(const NdbDictionary::Table& table);
+ /**
+ * Set table for which events should be detected
+ *
+ * @note preferred way is using setTable(const NdbDictionary::Table&)
+ * or constructor with table object parameter
+ */
+ void setTable(const char *tableName);
+ /**
+ * Get table name for events
+ *
+ * @return table name
+ */
+ const char* getTableName() const;
+ /**
+ * Add type of event that should be detected
+ */
+ void addTableEvent(const TableEvent te);
+ /**
+ * Set durability of the event
+ */
+ void setDurability(EventDurability);
+ /**
+ * Get durability of the event
+ */
+ EventDurability getDurability() const;
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
void addColumn(const Column &c);
+#endif
+ /**
+ * Add a column on which events should be detected
+ *
+ * @param attrId Column id
+ *
+   * @note errors will not be detected until createEvent() is called
+ */
void addEventColumn(unsigned attrId);
+ /**
+ * Add a column on which events should be detected
+ *
+ * @param columnName Column name
+ *
+ * @note errors will not be detected until createEvent() is called
+ */
void addEventColumn(const char * columnName);
+ /**
+ * Add several columns on which events should be detected
+ *
+ * @param n Number of columns
+ * @param columnNames Column names
+ *
+   * @note errors will not be detected until
+ * NdbDictionary::Dictionary::createEvent() is called
+ */
void addEventColumns(int n, const char ** columnNames);
/**
+ * Get no of columns defined in an Event
+ *
+ * @return Number of columns, -1 on error
+ */
+ int getNoOfEventColumns() const;
+
+ /**
* Get object status
*/
virtual Object::Status getObjectStatus() const;
@@ -871,11 +1066,15 @@ public:
*/
virtual int getObjectVersion() const;
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
void print();
+#endif
private:
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
friend class NdbEventImpl;
friend class NdbEventOperationImpl;
+#endif
class NdbEventImpl & m_impl;
Event(NdbEventImpl&);
};
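
A sketch of defining and creating an event on the hypothetical DEMO_TABLE;
Ndb::getDictionary() is assumed from the Ndb class, and as noted above the
column names are only checked when createEvent() is called.

    #include <NdbApi.hpp>

    int create_demo_event(Ndb* ndb)
    {
      NdbDictionary::Dictionary* dict = ndb->getDictionary();

      const NdbDictionary::Table* tab = dict->getTable("DEMO_TABLE");
      if (tab == 0)
        return -1;

      NdbDictionary::Event ev("DEMO_EVENT", *tab);
      ev.addTableEvent(NdbDictionary::Event::TE_INSERT);
      ev.addTableEvent(NdbDictionary::Event::TE_UPDATE);
      ev.addEventColumn("ID");
      ev.addEventColumn("VALUE");

      return dict->createEvent(ev);            // 0 on success, -1 otherwise
    }
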
@@ -938,8 +1137,17 @@ public:
/**
* Fetch list of all objects, optionally restricted to given type.
+ *
+ * @param list List of objects returned in the dictionary
+ * @param type Restrict returned list to only contain objects of
+ * this type
+ *
+ * @return -1 if error.
+ *
*/
int listObjects(List & list, Object::Type type = Object::TypeUndefined);
+ int listObjects(List & list,
+ Object::Type type = Object::TypeUndefined) const;
/**
* Get the latest error
@@ -949,24 +1157,87 @@ public:
const struct NdbError & getNdbError() const;
/** @} *******************************************************************/
+
+ /**
+ * @name Retrieving references to Tables and Indexes
+ * @{
+ */
+
+ /**
+ * Get table with given name, NULL if undefined
+ * @param name Name of table to get
+ * @return table if successful otherwise NULL.
+ */
+ const Table * getTable(const char * name) const;
+
+ /**
+ * Get index with given name, NULL if undefined
+ * @param indexName Name of index to get.
+ * @param tableName Name of table that index belongs to.
+ * @return index if successful, otherwise 0.
+ */
+ const Index * getIndex(const char * indexName,
+ const char * tableName) const;
+
+ /**
+ * Fetch list of indexes of given table.
+ * @param list Reference to list where to store the listed indexes
+ * @param tableName Name of table that index belongs to.
+ * @return 0 if successful, otherwise -1
+ */
+ int listIndexes(List & list, const char * tableName);
+ int listIndexes(List & list, const char * tableName) const;
+
+ /** @} *******************************************************************/
+ /**
+ * @name Events
+ * @{
+ */
+
+ /**
+ * Create event given defined Event instance
+ * @param event Event to create
+ * @return 0 if successful otherwise -1.
+ */
+ int createEvent(const Event &event);
+
+ /**
+ * Drop event with given name
+ * @param eventName Name of event to drop.
+ * @return 0 if successful otherwise -1.
+ */
+ int dropEvent(const char * eventName);
+
+ /**
+ * Get event with given name.
+ * @param eventName Name of event to get.
+ * @return an Event if successful, otherwise NULL.
+ */
+ const Event * getEvent(const char * eventName);
+
+ /** @} *******************************************************************/
+
/**
- * @name Tables
+ * @name Table creation
* @{
+ *
+ * These methods should normally not be used in an application as
+ * the result will not be visible from the MySQL Server
*/
/**
* Create defined table given defined Table instance
- * @param Table Table to create
+ * @param table Table to create
* @return 0 if successful otherwise -1.
*/
- int createTable(const Table &);
+ int createTable(const Table &table);
/**
* Drop table given retrieved Table instance
- * @param Table Table to drop
+ * @param table Table to drop
* @return 0 if successful otherwise -1.
*/
- int dropTable(Table &);
+ int dropTable(Table & table);
/**
* Drop table given table name
@@ -975,23 +1246,16 @@ public:
*/
int dropTable(const char * name);
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
/**
* Alter defined table given defined Table instance
- * @param Table Table to alter
+ * @param table Table to alter
* @return -2 (incompatible version) <br>
* -1 general error <br>
* 0 success
*/
- int alterTable(const Table &);
-
- /**
- * Get table with given name, NULL if undefined
- * @param name Name of table to get
- * @return table if successful otherwise NULL.
- */
- const Table * getTable(const char * name);
+ int alterTable(const Table &table);
-#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
/**
* Invalidate cached table object
* @param name Name of table to invalidate
@@ -1000,24 +1264,31 @@ public:
#endif
/**
- * Remove table/index from local cache
+ * Remove table from local cache
*/
void removeCachedTable(const char * table);
+ /**
+ * Remove index from local cache
+ */
void removeCachedIndex(const char * index, const char * table);
/** @} *******************************************************************/
/**
- * @name Indexes
+ * @name Index creation
* @{
+ *
+ * These methods should normally not be used in an application as
+ * the result will not be visible from the MySQL Server
+ *
*/
/**
* Create index given defined Index instance
- * @param Index to create
+ * @param index Index to create
* @return 0 if successful otherwise -1.
*/
- int createIndex(const Index &);
+ int createIndex(const Index &index);
/**
* Drop index with given name
@@ -1028,15 +1299,6 @@ public:
int dropIndex(const char * indexName,
const char * tableName);
- /**
- * Get index with given name, NULL if undefined
- * @param indexName Name of index to get.
- * @param tableName Name of table that index belongs to.
- * @return index if successful, otherwise 0.
- */
- const Index * getIndex(const char * indexName,
- const char * tableName);
-
#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
/**
* Invalidate cached index object
@@ -1045,58 +1307,27 @@ public:
const char * tableName);
#endif
- /**
- * Fetch list of indexes of given table.
- * @param list Reference to list where to store the listed indexes
- * @param tableName Name of table that index belongs to.
- * @return 0 if successful, otherwise -1
- */
- int listIndexes(List & list, const char * tableName);
-
/** @} *******************************************************************/
- /**
- * @name Events
- * @{
- */
-
- /**
- * Create event given defined Event instance
- * @param Event to create
- * @return 0 if successful otherwise -1.
- */
- int createEvent(const Event &);
- /**
- * Drop event with given name
- * @param eventName Name of event to drop.
- * @return 0 if successful otherwise -1.
- */
- int dropEvent(const char * eventName);
-
- /**
- * Get event with given name.
- * @param eventName Name of event to get.
- * @return an Event if successful, otherwise NULL.
- */
- const Event * getEvent(const char * eventName);
-
- /** @} *******************************************************************/
-
protected:
Dictionary(Ndb & ndb);
~Dictionary();
private:
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
friend class NdbDictionaryImpl;
friend class UtilTransactions;
friend class NdbBlob;
+#endif
class NdbDictionaryImpl & m_impl;
Dictionary(NdbDictionaryImpl&);
const Table * getIndexTable(const char * indexName,
- const char * tableName);
+ const char * tableName) const;
public:
- const Table * getTable(const char * name, void **data);
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
+ const Table * getTable(const char * name, void **data) const;
void set_local_table_data_size(unsigned sz);
+#endif
};
};
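
A sketch of listObjects() in use; the layout of Dictionary::List (a count and
an elements array carrying a name per entry) is assumed from the full header,
since only the method declarations appear in the hunks above.

    #include <NdbApi.hpp>
    #include <cstdio>

    void list_user_tables(NdbDictionary::Dictionary* dict)
    {
      NdbDictionary::Dictionary::List list;
      if (dict->listObjects(list, NdbDictionary::Object::UserTable) == -1)
        return;                                // see dict->getNdbError()
      for (unsigned i = 0; i < list.count; i++)
        printf("table: %s\n", list.elements[i].name);
    }
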
diff --git a/ndb/include/ndbapi/NdbError.hpp b/ndb/include/ndbapi/NdbError.hpp
index 8cde2a8cf38..f67b3c4ccaa 100644
--- a/ndb/include/ndbapi/NdbError.hpp
+++ b/ndb/include/ndbapi/NdbError.hpp
@@ -41,7 +41,7 @@
* The <em>error messages</em> and <em>error details</em> may
* change without notice.
*
- * For example of use, see @ref ndbapi_example3.cpp.
+ * For example of use, see @ref ndbapi_retries.cpp.
*/
struct NdbError {
/**
@@ -168,7 +168,12 @@ struct NdbError {
/**
* Node shutdown
*/
- NodeShutdown = ndberror_cl_node_shutdown
+ NodeShutdown = ndberror_cl_node_shutdown,
+
+ /**
+ * Schema object already exists
+ */
+ SchemaObjectExists = ndberror_cl_schema_object_already_exists
};
/**
@@ -199,6 +204,7 @@ struct NdbError {
*/
char * details;
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
NdbError(){
status = UnknownResult;
classification = NoError;
@@ -222,6 +228,7 @@ struct NdbError {
ndberror.details = details;
return ndberror;
}
+#endif
};
class NdbOut& operator <<(class NdbOut&, const NdbError &);
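
A sketch of using the new SchemaObjectExists classification to make table
creation idempotent; the code and message members mentioned in the comment are
assumed from the full NdbError definition, which this hunk only touches in
part.

    #include <NdbApi.hpp>

    int create_if_missing(NdbDictionary::Dictionary* dict,
                          const NdbDictionary::Table& tab)
    {
      if (dict->createTable(tab) == 0)
        return 0;
      const NdbError& err = dict->getNdbError();
      if (err.classification == NdbError::SchemaObjectExists)
        return 0;                            // already there, treat as success
      // err.code and err.message describe any other failure
      return -1;
    }
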
diff --git a/ndb/include/ndbapi/NdbEventOperation.hpp b/ndb/include/ndbapi/NdbEventOperation.hpp
index 056e9a58c74..55ee96b3144 100644
--- a/ndb/include/ndbapi/NdbEventOperation.hpp
+++ b/ndb/include/ndbapi/NdbEventOperation.hpp
@@ -14,19 +14,6 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-/*****************************************************************************
- * Name: NdbEventOperation.hpp
- * Include:
- * Link:
- * Author: Tomas Ulin MySQL AB
- * Date: 2003-11-21
- * Version: 0.1
- * Description: Event support
- * Documentation:
- * Adjust: 2003-11-21 Tomas Ulin First version.
- * Adjust: 2003-12-11 Tomas Ulin Alpha Release.
- ****************************************************************************/
-
#ifndef NdbEventOperation_H
#define NdbEventOperation_H
@@ -37,80 +24,79 @@ class NdbEventOperationImpl;
* @class NdbEventOperation
* @brief Class of operations for getting change events from database.
*
- * An NdbEventOperation object is instantiated by
- * NdbEventOperation *Ndb::createEventOperation(const char *eventName,
- * int bufferLength)
- *
- * Prior to that an event must have been created in the Database through
- * int NdbDictionary::createEvent(NdbDictionary::Event)
- *
- * bufferLength indicates size of circular buffer to store event info as
- * they occur.
+ * Brief description on how to work with events:
*
- * The instance is removed by Ndb::dropEventOperation(NdbEventOperation*)
+ * - An event, represented by an NdbDictionary::Event, is created in the
+ * Database through
+ * NdbDictionary::Dictionary::createEvent() (note that this can be done
+ * by any application or thread and not necessarily by the "listener")
+ * - To listen to events, an NdbEventOperation object is instantiated by
+ * Ndb::createEventOperation()
+ * - execute() starts the event flow. Use Ndb::pollEvents() to wait
+ * for an event to occur. Use next() to iterate
+ *   through the events that have occurred.
+ * - The instance is removed by Ndb::dropEventOperation()
*
* For more info see:
- * ndbapi_example5.cpp
- * Ndb.hpp
- * NdbDictionary.hpp
+ * @ref ndbapi_event.cpp
*
* Known limitations:
*
- * Maximum number of active NdbEventOperations are now set at compile time.
+ * - The maximum number of active NdbEventOperations is set at compile time.
* Today 100. This will become a configuration parameter later.
- *
- * Maximum number of NdbEventOperations tied to same event are maximum 16
+ * - The maximum number of NdbEventOperations tied to the same event is 16
* per process.
*
* Known issues:
*
- * When several NdbEventOperation s are tied to the same event in the same
+ * - When several NdbEventOperations are tied to the same event in the same
* process they will share the circular buffer. The BufferLength will then
* be the same for all and decided by the first NdbEventOperation
* instantiation. Just make sure to instantiate the "largest" one first.
- *
- * Today all events INSERT/DELETE/UPDATE and all changed attributes are
+ * - Today all events INSERT/DELETE/UPDATE and all changed attributes are
* sent to the API, even if only specific attributes have been specified.
* These are however hidden from the user and only relevant data is shown
- * after next(). However false exits from pollEvents() may occur and thus
- * the subsequent next() will return zero, since there was no available
- * data. Just do pollEvents() again. Will be fixed in later versions.
- *
- * Event code does not check table schema version. Make sure to drop events
+ * after next().
+ * - "False" exits from Ndb::pollEvents() may occur and thus
+ * the subsequent next() will return zero,
+ * since there was no available data. Just do Ndb::pollEvents() again.
+ * - Event code does not check table schema version. Make sure to drop events
* after table is dropped. Will be fixed in later
* versions.
- *
- * On a replicated system one will receive each event 2 times, one for each
- * replica. If a node fails events will not be received twice anymore
- * for data in corresponding fragment. Will be optimized in later versions.
- *
- * If a nodefailiure has occured not all events will be recieved
+ * - If a node failure has occurred, not all events will be received
 *   anymore. Drop the NdbEventOperation and create it again after nodes are up
* again. Will be fixed in later versions.
*
* Test status:
- * Tests have been run on 1-node and 2-node systems
*
- * Known bugs:
- *
- * None, except if we can call some of the "isses" above bugs
+ * - Tests have been run on 1-node and 2-node systems
*
* Useful API programs:
*
- * select_all -d sys 'NDB$EVENTS_0'
- * Will show contents in the system table containing created events.
+ * - ndb_select_all -d sys 'NDB$EVENTS_0'
+ * shows contents in the system table containing created events.
*
+ * @note this is an interface for viewing events and is subject to change
*/
class NdbEventOperation {
public:
- enum State {CREATED,EXECUTING,ERROR};
-
+ /**
+ * State of the NdbEventOperation object
+ */
+ enum State {
+ EO_CREATED, ///< Created but execute() not called
+ EO_EXECUTING, ///< execute() called
+ EO_ERROR ///< An error has occurred. Object unusable.
+ };
+ /**
+ * Retrieve current state of the NdbEventOperation object
+ */
State getState();
/**
* Activates the NdbEventOperation to start receiving events. The
* changed attribute values may be retrieved after next() has returned
- * a value greater than zero. The getValue() methods below must be called
+ * a value greater than zero. The getValue() methods must be called
* prior to execute().
*
* @return 0 if successful otherwise -1.
@@ -132,21 +118,21 @@ public:
* aligned appropriately. The buffer is used directly
* (avoiding a copy penalty) only if it is aligned on a
* 4-byte boundary and the attribute size in bytes
- * (i.e. NdbRecAttr::attrSize times NdbRecAttr::arraySize is
+ * (i.e. NdbRecAttr::attrSize() times NdbRecAttr::arraySize() is
* a multiple of 4).
*
- * @note There are two versions, NdbOperation::getValue and
- * NdbOperation::getPreValue for retrieving the current and
+ * @note There are two versions, getValue() and
+ * getPreValue() for retrieving the current and
+ *       previous value respectively.
*
* @note This method does not fetch the attribute value from
* the database! The NdbRecAttr object returned by this method
* is <em>not</em> readable/printable before the
- * NdbEventConnection::execute has been made and
- * NdbEventConnection::next has returned a value greater than
+ * execute() has been made and
+ * next() has returned a value greater than
* zero. If a specific attribute has not changed the corresponding
* NdbRecAttr will be in state UNDEFINED. This is checked by
- * NdbRecAttr::isNull which then returns -1.
+ * NdbRecAttr::isNULL() which then returns -1.
*
* @param anAttrName Attribute name
* @param aValue If this is non-NULL, then the attribute value
@@ -158,19 +144,24 @@ public:
* (indicating error).
*/
NdbRecAttr *getValue(const char *anAttrName, char *aValue = 0);
+ /**
+ * See getValue().
+ */
NdbRecAttr *getPreValue(const char *anAttrName, char *aValue = 0);
/**
* Retrieves event resultset if available, inserted into the NdbRecAttrs
* specified in getValue() and getPreValue(). To avoid polling for
- * a resultset, one can use Ndb::pollEvents(int millisecond_timeout)
+ * a resultset, one can use Ndb::pollEvents()
* which will wait on a mutex until an event occurs or the specified
* timeout occurs.
*
- * @return >=0 if successful otherwise -1. Return value inicates number
+ * @return >=0 if successful otherwise -1. Return value indicates number
* of available events. By sending pOverRun one may query for buffer
* overflow and *pOverRun will indicate the number of events that have
* overwritten.
+ *
+ * @return number of available events, -1 on failure
*/
int next(int *pOverRun=0);
@@ -182,18 +173,47 @@ public:
/**
* Query for occured event type.
- * NdbDictionary::Event::{TE_INSERT,TE_UPDATE,TE_DELETE}
- * Only valid after next() has returned value >= 0
+ *
+ * @note Only valid after next() has been called and returned value >= 0
+ *
+ * @return type of event
*/
NdbDictionary::Event::TableEvent getEventType();
+ /**
+ * Retrieve the GCI of the latest retrieved event
+ *
+ * @return GCI number
+ */
Uint32 getGCI();
+
+ /**
+ * Retrieve the complete GCI in the cluster (not necessarily
+ * associated with an event)
+ *
+ * @return GCI number
+ */
Uint32 getLatestGCI();
+
+ /**
+ * Get the latest error
+ *
+ * @return Error object.
+ */
+ const struct NdbError & getNdbError() const;
+
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
+ /*
+ *
+ */
void print();
+#endif
private:
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
friend class NdbEventOperationImpl;
friend class Ndb;
+#endif
NdbEventOperation(Ndb *theNdb, const char* eventName,int bufferLength);
~NdbEventOperation();
static int wait(void *p, int aMillisecondNumber);
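
The event flow described above, put end to end for a hypothetical DEMO_EVENT
on an unsigned ID column and an integer VALUE column; createEventOperation(),
pollEvents() and dropEventOperation() belong to the Ndb class rather than this
header, and the buffer length of 100 is an arbitrary choice.

    #include <NdbApi.hpp>
    #include <cstdio>

    int listen_demo_events(Ndb* ndb)
    {
      NdbEventOperation* op = ndb->createEventOperation("DEMO_EVENT", 100);
      if (op == 0)
        return -1;

      // getValue()/getPreValue() must be called before execute().
      NdbRecAttr* id       = op->getValue("ID");
      NdbRecAttr* val_now  = op->getValue("VALUE");
      NdbRecAttr* val_prev = op->getPreValue("VALUE");
      if (op->execute() == -1)
        return -1;

      for (int i = 0; i < 60; i++) {           // poll for about a minute
        if (ndb->pollEvents(1000) <= 0)        // wait up to 1000 ms
          continue;
        int overrun = 0;
        while (op->next(&overrun) > 0) {       // 0 is possible on a false wakeup
          if (op->getEventType() == NdbDictionary::Event::TE_UPDATE &&
              id->isNULL() == 0 &&
              val_now->isNULL() == 0 && val_prev->isNULL() == 0)
            printf("ID %u: VALUE %d -> %d (gci %u)\n",
                   (unsigned)id->u_32_value(),
                   (int)val_prev->int32_value(),
                   (int)val_now->int32_value(),
                   (unsigned)op->getGCI());
        }
      }
      ndb->dropEventOperation(op);
      return 0;
    }
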
diff --git a/ndb/include/ndbapi/NdbIndexOperation.hpp b/ndb/include/ndbapi/NdbIndexOperation.hpp
index 2ab63cfc4f9..a8a15978568 100644
--- a/ndb/include/ndbapi/NdbIndexOperation.hpp
+++ b/ndb/include/ndbapi/NdbIndexOperation.hpp
@@ -14,18 +14,6 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-/*****************************************************************************
- * Name: NdbIndexOperation.hpp
- * Include:
- * Link:
- * Author: Martin Sköld
- * Date: 2002-04-01
- * Version: 0.1
- * Description: Secondary index support
- * Documentation:
- * Adjust: 2002-04-01 Martin Sköld First version.
- ****************************************************************************/
-
#ifndef NdbIndexOperation_H
#define NdbIndexOperation_H
@@ -40,8 +28,10 @@ class NdbResultSet;
*/
class NdbIndexOperation : public NdbOperation
{
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
friend class Ndb;
- friend class NdbConnection;
+ friend class NdbTransaction;
+#endif
public:
/**
@@ -54,16 +44,17 @@ public:
/**
* Define the NdbIndexOperation to be a standard operation of type readTuple.
- * When calling NdbConnection::execute, this operation
+ * When calling NdbTransaction::execute, this operation
* reads a tuple.
*
* @return 0 if successful otherwise -1.
*/
int readTuple(LockMode);
+#ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED
/**
* Define the NdbIndexOperation to be a standard operation of type readTuple.
- * When calling NdbConnection::execute, this operation
+ * When calling NdbTransaction::execute, this operation
* reads a tuple.
*
* @return 0 if successful otherwise -1.
@@ -73,7 +64,7 @@ public:
/**
* Define the NdbIndexOperation to be a standard operation of type
* readTupleExclusive.
- * When calling NdbConnection::execute, this operation
+ * When calling NdbTransaction::execute, this operation
* read a tuple using an exclusive lock.
*
* @return 0 if successful otherwise -1.
@@ -82,7 +73,7 @@ public:
/**
* Define the NdbIndexOperation to be a standard operation of type simpleRead.
- * When calling NdbConnection::execute, this operation
+ * When calling NdbTransaction::execute, this operation
* reads an existing tuple (using shared read lock),
* but releases lock immediately after read.
*
@@ -101,7 +92,7 @@ public:
/**
* Define the NdbOperation to be a standard operation of type committedRead.
- * When calling NdbConnection::execute, this operation
+ * When calling NdbTransaction::execute, this operation
* read latest committed value of the record.
*
* This means that if another transaction is updating the
@@ -113,7 +104,6 @@ public:
*/
int dirtyRead();
-#ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED
int committedRead();
#endif
@@ -121,7 +111,7 @@ public:
* Define the NdbIndexOperation to be a standard operation of type
* updateTuple.
*
- * When calling NdbConnection::execute, this operation
+ * When calling NdbTransaction::execute, this operation
* updates a tuple in the table.
*
* @return 0 if successful otherwise -1.
@@ -132,24 +122,27 @@ public:
* Define the NdbIndexOperation to be a standard operation of type
* deleteTuple.
*
- * When calling NdbConnection::execute, this operation
+ * When calling NdbTransaction::execute, this operation
* deletes a tuple.
*
* @return 0 if successful otherwise -1.
*/
int deleteTuple();
+#ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED
/**
* Define the NdbIndexOperation to be a standard operation of type
* dirtyUpdate.
*
- * When calling NdbConnection::execute, this operation
+ * When calling NdbTransaction::execute, this operation
* updates without two-phase commit.
*
* @return 0 if successful otherwise -1.
*/
int dirtyUpdate();
+#endif
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
/** @} *********************************************************************/
/**
* @name Define Interpreted Program Operation
@@ -169,6 +162,7 @@ public:
* @return 0 if successful otherwise -1.
*/
int interpretedDeleteTuple();
+#endif
/** @} *********************************************************************/
@@ -176,30 +170,17 @@ private:
NdbIndexOperation(Ndb* aNdb);
~NdbIndexOperation();
- void closeScan();
-
int receiveTCINDXREF(NdbApiSignal* aSignal);
- // Overloaded method from NdbOperation
- void setLastFlag(NdbApiSignal* signal, Uint32 lastFlag);
-
- // Overloaded methods from NdbCursorOperation
- int executeCursor(int ProcessorId);
-
// Overloaded methods from NdbCursorOperation
int indxInit(const class NdbIndexImpl* anIndex,
const class NdbTableImpl* aTable,
- NdbConnection* myConnection);
+ NdbTransaction*);
- int equal_impl(const class NdbColumnImpl*, const char* aValue, Uint32 len);
int prepareSend(Uint32 TC_ConnectPtr, Uint64 TransactionId);
// Private attributes
const NdbIndexImpl* m_theIndex;
- const NdbTableImpl* m_thePrimaryTable;
- Uint32 m_theIndexDefined[NDB_MAX_ATTRIBUTES_IN_INDEX][3];
- Uint32 m_theIndexLen; // Length of the index in words
- Uint32 m_theNoOfIndexDefined; // The number of index attributes
friend struct Ndb_free_list_t<NdbIndexOperation>;
};
diff --git a/ndb/include/ndbapi/NdbIndexScanOperation.hpp b/ndb/include/ndbapi/NdbIndexScanOperation.hpp
index 7cd2daea6a6..e9f92d84d1c 100644
--- a/ndb/include/ndbapi/NdbIndexScanOperation.hpp
+++ b/ndb/include/ndbapi/NdbIndexScanOperation.hpp
@@ -24,36 +24,51 @@
* @brief Class of scan operations for use to scan ordered index
*/
class NdbIndexScanOperation : public NdbScanOperation {
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
friend class Ndb;
- friend class NdbConnection;
+ friend class NdbTransaction;
friend class NdbResultSet;
friend class NdbOperation;
friend class NdbScanOperation;
+#endif
+
public:
/**
- * readTuples returns a NdbResultSet where tuples are stored.
- * Tuples are not stored in NdbResultSet until execute(NoCommit)
- * has been executed and nextResult has been called.
+ * readTuples using ordered index
+ *
+ * @param lock_mode Lock mode
+ * @param scan_flags see @ref ScanFlag
+ * @param parallel No of fragments to scan in parallel (0=max)
+ */
+ virtual int readTuples(LockMode lock_mode = LM_Read,
+ Uint32 scan_flags = 0, Uint32 parallel = 0);
+
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
+ /**
+ * readTuples using ordered index
*
- * @param parallel Scan parallelism
+ * @param lock_mode Lock mode
* @param batch No of rows to fetch from each fragment at a time
- * @param LockMode Scan lock handling
+ * @param parallel No of fragments to scan in parallel
* @param order_by Order result set in index order
- * @returns NdbResultSet.
+ * @param order_desc Order descending, ignored unless order_by
+ * @param read_range_no Enable reading of range no using @ref get_range_no
+ * @returns 0 for success and -1 for failure
* @see NdbScanOperation::readTuples
*/
- NdbResultSet* readTuples(LockMode = LM_Read,
- Uint32 batch = 0,
- Uint32 parallel = 0,
- bool order_by = false);
-
- inline NdbResultSet* readTuples(int parallell){
- return readTuples(LM_Read, 0, parallell, false);
- }
-
- inline NdbResultSet* readTuplesExclusive(int parallell = 0){
- return readTuples(LM_Exclusive, 0, parallell, false);
+ inline int readTuples(LockMode lock_mode,
+ Uint32 batch,
+ Uint32 parallel,
+ bool order_by,
+ bool order_desc = false,
+ bool read_range_no = false) {
+ Uint32 scan_flags =
+ (SF_OrderBy & -(Int32)order_by) |
+ (SF_Descending & -(Int32)order_desc) |
+ (SF_ReadRangeNo & -(Int32)read_range_no);
+ return readTuples(lock_mode, scan_flags, parallel);
}
+#endif
/**
* Type of ordered index key bound. The values (0-4) will not change
@@ -76,7 +91,7 @@ public:
*
* For equality, it is better to use BoundEQ instead of the equivalent
* pair of BoundLE and BoundGE. This is especially true when table
- * distribution key is an initial part of the index key.
+ * partition key is an initial part of the index key.
*
* The sets of lower and upper bounds must be on initial sequences of
* index keys. All but possibly the last bound must be non-strict.
@@ -93,15 +108,14 @@ public:
* An index stores also all-NULL keys. Doing index scan with empty
* bound set returns all table tuples.
*
- * @param attrName Attribute name, alternatively:
- * @param anAttrId Index column id (starting from 0)
+   * @param attr        Attribute name
* @param type Type of bound
* @param value Pointer to bound value, 0 for NULL
* @param len Value length in bytes.
* Fixed per datatype and can be omitted
* @return 0 if successful otherwise -1
*/
- int setBound(const char* attr, int type, const void* aValue, Uint32 len = 0);
+ int setBound(const char* attr, int type, const void* value, Uint32 len = 0);
/**
* Define bound on index key in range scan using index column id.
@@ -114,8 +128,27 @@ public:
* sent on next execute
*/
int reset_bounds(bool forceSend = false);
+
+ /**
+ * Marks end of a bound,
+ * used when batching index reads (multiple ranges)
+ */
+ int end_of_bound(Uint32 range_no);
+
+ /**
+ * Return range no for current row
+ */
+ int get_range_no();
+ /**
+ * Is current scan sorted
+ */
bool getSorted() const { return m_ordered; }
+
+ /**
+ * Is current scan sorted descending
+ */
+ bool getDescending() const { return m_descending; }
private:
NdbIndexScanOperation(Ndb* aNdb);
virtual ~NdbIndexScanOperation();
@@ -132,6 +165,8 @@ private:
int compare(Uint32 key, Uint32 cols, const NdbReceiver*, const NdbReceiver*);
Uint32 m_sort_columns;
+ Uint32 m_this_bound_start;
+ Uint32 * m_first_bound_word;
friend struct Ndb_free_list_t<NdbIndexScanOperation>;
};
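
A sketch of a bounded, ordered scan over a hypothetical index IDX_VALUE on
DEMO_TABLE; getNdbIndexScanOperation(), execute() and nextResult() come from
NdbTransaction/NdbScanOperation, and the bound-type constants are assumed from
the full header (BoundLE taken as the inclusive lower bound and BoundGE as the
inclusive upper bound, consistent with the BoundEQ remark above).

    #include <NdbApi.hpp>
    #include <cstdio>

    int scan_value_range(Ndb* ndb)
    {
      NdbTransaction* trans = ndb->startTransaction();
      if (trans == 0)
        return -1;

      NdbIndexScanOperation* scan =
        trans->getNdbIndexScanOperation("IDX_VALUE", "DEMO_TABLE");
      if (scan == 0 ||
          scan->readTuples(NdbOperation::LM_Read,
                           NdbIndexScanOperation::SF_OrderBy) == -1) {
        ndb->closeTransaction(trans);
        return -1;
      }

      Int32 low = 10, high = 100;
      scan->setBound("VALUE", NdbIndexScanOperation::BoundLE, &low);  // >= 10
      scan->setBound("VALUE", NdbIndexScanOperation::BoundGE, &high); // <= 100
      NdbRecAttr* value = scan->getValue("VALUE");

      if (trans->execute(NdbTransaction::NoCommit) == 0)
        while (scan->nextResult(true) == 0)    // 0 means a row was received
          printf("VALUE = %d\n", (int)value->int32_value());

      ndb->closeTransaction(trans);
      return 0;
    }
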
diff --git a/ndb/include/ndbapi/NdbOperation.hpp b/ndb/include/ndbapi/NdbOperation.hpp
index 46e44226e18..1035c642c97 100644
--- a/ndb/include/ndbapi/NdbOperation.hpp
+++ b/ndb/include/ndbapi/NdbOperation.hpp
@@ -28,7 +28,7 @@ class Ndb;
class NdbApiSignal;
class NdbRecAttr;
class NdbOperation;
-class NdbConnection;
+class NdbTransaction;
class NdbColumnImpl;
class NdbBlob;
@@ -38,14 +38,17 @@ class NdbBlob;
*/
class NdbOperation
{
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
friend class Ndb;
- friend class NdbConnection;
+ friend class NdbTransaction;
friend class NdbScanOperation;
friend class NdbScanReceiver;
friend class NdbScanFilter;
friend class NdbScanFilterImpl;
friend class NdbReceiver;
friend class NdbBlob;
+#endif
+
public:
/**
* @name Define Standard Operation Type
@@ -57,17 +60,24 @@ public:
*/
enum LockMode {
- LM_Read = 0,
- LM_Exclusive = 1,
- LM_CommittedRead = 2,
+ LM_Read ///< Read with shared lock
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
+ = 0
+#endif
+ ,LM_Exclusive ///< Read with exclusive lock
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
+ = 1
+#endif
+ ,LM_CommittedRead ///< Ignore locks, read last committed value
#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
+ = 2,
LM_Dirty = 2
#endif
};
/**
* Define the NdbOperation to be a standard operation of type insertTuple.
- * When calling NdbConnection::execute, this operation
+ * When calling NdbTransaction::execute, this operation
* adds a new tuple to the table.
*
* @return 0 if successful otherwise -1.
@@ -76,7 +86,7 @@ public:
/**
* Define the NdbOperation to be a standard operation of type updateTuple.
- * When calling NdbConnection::execute, this operation
+ * When calling NdbTransaction::execute, this operation
* updates a tuple in the table.
*
* @return 0 if successful otherwise -1.
@@ -85,7 +95,7 @@ public:
/**
* Define the NdbOperation to be a standard operation of type writeTuple.
- * When calling NdbConnection::execute, this operation
+ * When calling NdbTransaction::execute, this operation
* writes a tuple to the table.
* If the tuple exists, it updates it, otherwise an insert takes place.
*
@@ -95,7 +105,7 @@ public:
/**
* Define the NdbOperation to be a standard operation of type deleteTuple.
- * When calling NdbConnection::execute, this operation
+ * When calling NdbTransaction::execute, this operation
* delete a tuple.
*
* @return 0 if successful otherwise -1.
@@ -104,16 +114,17 @@ public:
/**
* Define the NdbOperation to be a standard operation of type readTuple.
- * When calling NdbConnection::execute, this operation
+ * When calling NdbTransaction::execute, this operation
* reads a tuple.
*
* @return 0 if successful otherwise -1.
*/
virtual int readTuple(LockMode);
+#ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED
/**
* Define the NdbOperation to be a standard operation of type readTuple.
- * When calling NdbConnection::execute, this operation
+ * When calling NdbTransaction::execute, this operation
* reads a tuple.
*
* @return 0 if successful otherwise -1.
@@ -123,7 +134,7 @@ public:
/**
* Define the NdbOperation to be a standard operation of type
* readTupleExclusive.
- * When calling NdbConnection::execute, this operation
+ * When calling NdbTransaction::execute, this operation
* read a tuple using an exclusive lock.
*
* @return 0 if successful otherwise -1.
@@ -133,7 +144,7 @@ public:
/**
* Define the NdbOperation to be a standard operation of type
* simpleRead.
- * When calling NdbConnection::execute, this operation
+ * When calling NdbTransaction::execute, this operation
* reads an existing tuple (using shared read lock),
* but releases lock immediately after read.
*
@@ -150,10 +161,9 @@ public:
*/
virtual int simpleRead();
-#ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED
/**
* Define the NdbOperation to be a standard operation of type committedRead.
- * When calling NdbConnection::execute, this operation
+ * When calling NdbTransaction::execute, this operation
* read latest committed value of the record.
*
* This means that if another transaction is updating the
@@ -166,11 +176,10 @@ public:
   * @deprecated
*/
virtual int dirtyRead();
-#endif
/**
* Define the NdbOperation to be a standard operation of type committedRead.
- * When calling NdbConnection::execute, this operation
+ * When calling NdbTransaction::execute, this operation
* read latest committed value of the record.
*
* This means that if another transaction is updating the
@@ -184,7 +193,7 @@ public:
/**
* Define the NdbOperation to be a standard operation of type dirtyUpdate.
- * When calling NdbConnection::execute, this operation
+ * When calling NdbTransaction::execute, this operation
* updates without two-phase commit.
*
* @return 0 if successful otherwise -1.
@@ -193,13 +202,15 @@ public:
/**
* Define the NdbOperation to be a standard operation of type dirtyWrite.
- * When calling NdbConnection::execute, this operation
+ * When calling NdbTransaction::execute, this operation
* writes without two-phase commit.
*
* @return 0 if successful otherwise -1.
*/
virtual int dirtyWrite();
+#endif
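As an illustrative sketch only (table and column names are hypothetical, and myTrans is assumed to be an NdbTransaction obtained from Ndb::startTransaction()), a single-row delete using the operation types above could look like this:
@code
// Delete one row identified by its primary key (sketch, not part of this patch).
NdbOperation* op = myTrans->getNdbOperation("MYTABLE");    // hypothetical table
if (op == NULL) goto error;
if (op->deleteTuple() == -1) goto error;                   // operation type
if (op->equal("ATTR1", (Uint32)42) == -1) goto error;      // primary key value
if (myTrans->execute(NdbTransaction::Commit) == -1) goto error;
@endcode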
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
/** @} *********************************************************************/
/**
* @name Define Interpreted Program Operation Type
@@ -219,6 +230,7 @@ public:
* @return 0 if successful otherwise -1.
*/
virtual int interpretedDeleteTuple();
+#endif
/** @} *********************************************************************/
@@ -233,10 +245,13 @@ public:
* use several equals (then all of them must be satisfied for the
* tuple to be selected).
*
- * @note There are 10 versions of NdbOperation::equal with
+ * @note For insertTuple() it is also allowed to define the
+ * search key by using setValue().
+ *
+ * @note There are 10 versions of equal() with
* slightly different parameters.
*
- * @note When using NdbOperation::equal with a string (char *) as
+ * @note When using equal() with a string (char *) as
* second argument, the string needs to be padded with
* zeros in the following sense:
* @code
@@ -244,6 +259,8 @@ public:
* strncpy(buf, str, sizeof(buf));
* NdbOperation->equal("Attr1", buf);
* @endcode
+ *
+ *
*
* @param anAttrName Attribute name
* @param aValue Attribute value.
@@ -261,21 +278,6 @@ public:
int equal(Uint32 anAttrId, Int64 aValue);
int equal(Uint32 anAttrId, Uint64 aValue);
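A hedged insert sketch combining the key-definition rules above (zero-padded char key via equal(), non-key columns via setValue()); all names are hypothetical and <string.h> is assumed to be included:
@code
// Insert one row; MYTABLE with a Char(12) key ATTR1 and an Uint32 column
// ATTR2 is hypothetical.  myTrans is a started NdbTransaction.
NdbOperation* op = myTrans->getNdbOperation("MYTABLE");
if (op == NULL || op->insertTuple() == -1) goto error;

char buf[12];
memset(buf, 0, sizeof(buf));
strncpy(buf, "a string", sizeof(buf));            // zero-padded as noted above
if (op->equal("ATTR1", buf) == -1) goto error;    // key attribute
if (op->setValue("ATTR2", (Uint32)37) == -1) goto error;

if (myTrans->execute(NdbTransaction::Commit) == -1) goto error;
@endcode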
- /**
- * Generate a tuple id and set it as search argument.
- *
- * The Tuple id has NDB$TID as attribute name and 0 as attribute id.
- *
- * The generated tuple id is returned by the method.
- * If zero is returned there is an error.
- *
- * This is mostly used for tables without any primary key
- * attributes.
- *
- * @return Generated tuple id if successful, otherwise 0.
- */
- Uint64 setTupleId();
-
/** @} *********************************************************************/
/**
* @name Specify Attribute Actions for Operations
@@ -302,7 +304,7 @@ public:
* @note This method does not fetch the attribute value from
* the database! The NdbRecAttr object returned by this method
* is <em>not</em> readable/printable before the
- * transaction has been executed with NdbConnection::execute.
+ * transaction has been executed with NdbTransaction::execute.
*
* @param anAttrName Attribute name
* @param aValue If this is non-NULL, then the attribute value
@@ -339,6 +341,12 @@ public:
* then the API will assume that the pointer
* is correct and not bother with checking it.
*
+ * @note For insertTuple() the NDB API will automatically detect that
+ * it is supposed to use equal() instead.
+ *
+ * @note For insertTuple() it is not necessary to use
+ * setValue() on key attributes before other attributes.
+ *
* @note There are 14 versions of NdbOperation::setValue with
* slightly different parameters.
*
@@ -375,6 +383,7 @@ public:
virtual NdbBlob* getBlobHandle(const char* anAttrName);
virtual NdbBlob* getBlobHandle(Uint32 anAttrId);
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
/** @} *********************************************************************/
/**
* @name Specify Interpreted Program Instructions
@@ -587,21 +596,21 @@ public:
* @param Label label to jump to
* @return -1 if unsuccessful
*/
- int branch_col_eq(Uint32 ColId, const char * val, Uint32 len,
+ int branch_col_eq(Uint32 ColId, const void * val, Uint32 len,
bool nopad, Uint32 Label);
- int branch_col_ne(Uint32 ColId, const char * val, Uint32 len,
+ int branch_col_ne(Uint32 ColId, const void * val, Uint32 len,
bool nopad, Uint32 Label);
- int branch_col_lt(Uint32 ColId, const char * val, Uint32 len,
+ int branch_col_lt(Uint32 ColId, const void * val, Uint32 len,
bool nopad, Uint32 Label);
- int branch_col_le(Uint32 ColId, const char * val, Uint32 len,
+ int branch_col_le(Uint32 ColId, const void * val, Uint32 len,
bool nopad, Uint32 Label);
- int branch_col_gt(Uint32 ColId, const char * val, Uint32 len,
+ int branch_col_gt(Uint32 ColId, const void * val, Uint32 len,
bool nopad, Uint32 Label);
- int branch_col_ge(Uint32 ColId, const char * val, Uint32 len,
+ int branch_col_ge(Uint32 ColId, const void * val, Uint32 len,
bool nopad, Uint32 Label);
- int branch_col_like(Uint32 ColId, const char *, Uint32 len,
+ int branch_col_like(Uint32 ColId, const void *, Uint32 len,
bool nopad, Uint32 Label);
- int branch_col_notlike(Uint32 ColId, const char *, Uint32 len,
+ int branch_col_notlike(Uint32 ColId, const void *, Uint32 len,
bool nopad, Uint32 Label);
/**
@@ -673,6 +682,7 @@ public:
* @return -1 if unsuccessful.
*/
int ret_sub();
+#endif
/** @} *********************************************************************/
@@ -700,8 +710,14 @@ public:
*/
const char* getTableName() const;
+ /**
+ * Get table object for this operation
+ */
+ const NdbDictionary::Table * getTable() const;
+
/** @} *********************************************************************/
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
/**
* Type of operation
*/
@@ -717,9 +733,26 @@ public:
NotDefined2, ///< Internal for debugging
NotDefined ///< Internal for debugging
};
+#endif
+ /**
+ * Return lock mode for operation
+ */
LockMode getLockMode() const { return theLockMode; }
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
+ void setAbortOption(Int8 ao) { m_abortOption = ao; }
+
+ /**
+ * Set/get partition key
+ */
+ void setPartitionId(Uint32 id);
+ void setPartitionHash(Uint32 key);
+ void setPartitionHash(const Uint64 *, Uint32 len);
+ Uint32 getPartitionId() const;
+#endif
+protected:
+ int handle_distribution_key(const Uint64 *, Uint32 len);
protected:
/******************************************************************************
* These are the methods used to create and delete the NdbOperation objects.
@@ -732,15 +765,22 @@ protected:
//--------------------------------------------------------------
// Initialise after allocating operation to a transaction
//--------------------------------------------------------------
- int init(const class NdbTableImpl*, NdbConnection* aCon);
+ int init(const class NdbTableImpl*, NdbTransaction* aCon);
void initInterpreter();
NdbOperation(Ndb* aNdb);
virtual ~NdbOperation();
void next(NdbOperation*); // Set next pointer
NdbOperation* next(); // Get next pointer
+public:
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
+ const NdbOperation* next() const;
+ const NdbRecAttr* getFirstRecAttr() const;
+#endif
+protected:
- enum OperationStatus{
+ enum OperationStatus
+ {
Init,
OperationDefined,
TupleKeyDefined,
@@ -761,7 +801,7 @@ protected:
void Status(OperationStatus); // Set the status information
- void NdbCon(NdbConnection*); // Set reference to connection
+ void NdbCon(NdbTransaction*); // Set reference to connection
// object.
virtual void release(); // Release all operations
@@ -798,7 +838,7 @@ protected:
virtual int equal_impl(const NdbColumnImpl*,const char* aValue, Uint32 len);
virtual NdbRecAttr* getValue_impl(const NdbColumnImpl*, char* aValue = 0);
int setValue(const NdbColumnImpl* anAttrObject, const char* aValue, Uint32 len);
- NdbBlob* getBlobHandle(NdbConnection* aCon, const NdbColumnImpl* anAttrObject);
+ NdbBlob* getBlobHandle(NdbTransaction* aCon, const NdbColumnImpl* anAttrObject);
int incValue(const NdbColumnImpl* anAttrObject, Uint32 aValue);
int incValue(const NdbColumnImpl* anAttrObject, Uint64 aValue);
int subValue(const NdbColumnImpl* anAttrObject, Uint32 aValue);
@@ -806,17 +846,16 @@ protected:
int read_attr(const NdbColumnImpl* anAttrObject, Uint32 RegDest);
int write_attr(const NdbColumnImpl* anAttrObject, Uint32 RegSource);
int branch_reg_reg(Uint32 type, Uint32, Uint32, Uint32);
- int branch_col(Uint32 type, Uint32, const char *, Uint32, bool, Uint32 Label);
+ int branch_col(Uint32 type, Uint32, const void *, Uint32, bool, Uint32 Label);
int branch_col_null(Uint32 type, Uint32 col, Uint32 Label);
// Handle ATTRINFO signals
- int insertATTRINFO(Uint32 aData);
- int insertATTRINFOloop(const Uint32* aDataPtr, Uint32 aLength);
-
- int insertKEYINFO(const char* aValue,
- Uint32 aStartPosition,
- Uint32 aKeyLenInByte,
- Uint32 anAttrBitsInLastWord);
+ int insertATTRINFO(Uint32 aData);
+ int insertATTRINFOloop(const Uint32* aDataPtr, Uint32 aLength);
+
+ int insertKEYINFO(const char* aValue,
+ Uint32 aStartPosition,
+ Uint32 aKeyLenInByte);
virtual void setErrorCode(int aErrorCode);
virtual void setErrorCodeAbort(int aErrorCode);
@@ -848,16 +887,20 @@ protected:
int theErrorLine; // Error line
Ndb* theNdb; // Point back to the Ndb object.
- NdbConnection* theNdbCon; // Point back to the connection object.
+ NdbTransaction* theNdbCon; // Point back to the connection object.
NdbOperation* theNext; // Next pointer to operation.
- NdbApiSignal* theTCREQ; // The TC[KEY/INDX]REQ signal object
+
+ union {
+ NdbApiSignal* theTCREQ; // The TC[KEY/INDX]REQ signal object
+ NdbApiSignal* theSCAN_TABREQ;
+ };
+
NdbApiSignal* theFirstATTRINFO; // The first ATTRINFO signal object
NdbApiSignal* theCurrentATTRINFO; // The current ATTRINFO signal object
Uint32 theTotalCurrAI_Len; // The total number of attribute info
// words currently defined
Uint32 theAI_LenInCurrAI; // The number of words defined in the
// current ATTRINFO signal
- NdbApiSignal* theFirstKEYINFO; // The first KEYINFO signal object
NdbApiSignal* theLastKEYINFO; // The first KEYINFO signal object
class NdbLabel* theFirstLabel;
@@ -874,8 +917,8 @@ protected:
Uint32* theKEYINFOptr; // Pointer to where to write KEYINFO
Uint32* theATTRINFOptr; // Pointer to where to write ATTRINFO
- const class NdbTableImpl* m_currentTable; // The current table
- const class NdbTableImpl* m_accessTable;
+ const class NdbTableImpl* m_currentTable; // The current table
+ const class NdbTableImpl* m_accessTable; // Index table (== current for pk)
// Set to TRUE when a tuple key attribute has been defined.
Uint32 theTupleKeyDefined[NDB_MAX_NO_OF_ATTRIBUTES_IN_KEY][3];
@@ -883,18 +926,18 @@ protected:
Uint32 theTotalNrOfKeyWordInSignal; // The total number of
// keyword in signal.
- Uint32 theTupKeyLen; // Length of the tuple key in words
- Uint32 theNoOfTupKeyDefined; // The number of tuple key attributes
- // currently defined
- OperationType theOperationType; // Read Request, Update Req......
-
+ Uint32 theTupKeyLen; // Length of the tuple key in words
+ // left until done
+ Uint8 theNoOfTupKeyLeft; // The number of tuple key attributes
+ OperationType theOperationType; // Read Request, Update Req......
+
LockMode theLockMode; // Can be set to WRITE if read operation
OperationStatus theStatus; // The status of the operation.
+
Uint32 theMagicNumber; // Magic number to verify that object
// is correct
Uint32 theScanInfo; // Scan info bits (take over flag etc)
- Uint32 theDistrKeySize; // Distribution Key size if used
- Uint32 theDistributionGroup; // Distribution Group if used
+ Uint32 theDistributionKey; // Distribution Key size if used
Uint32 theSubroutineSize; // Size of subroutines for interpretation
Uint32 theInitialReadSize; // Size of initial reads for interpretation
@@ -902,14 +945,12 @@ protected:
Uint32 theFinalUpdateSize; // Size of final updates for interpretation
Uint32 theFinalReadSize; // Size of final reads for interpretation
- Uint8 theStartIndicator; // Indicator of whether start operation
- Uint8 theCommitIndicator; // Indicator of whether commit operation
- Uint8 theSimpleIndicator; // Indicator of whether simple operation
- Uint8 theDirtyIndicator; // Indicator of whether dirty operation
- Uint8 theInterpretIndicator; // Indicator of whether interpreted operation
- Uint8 theDistrGroupIndicator; // Indicates whether distribution grp is used
- Uint8 theDistrGroupType; // Type of distribution group used
- Uint8 theDistrKeyIndicator; // Indicates whether distr. key is used
+ Uint8 theStartIndicator; // Indicator of whether start operation
+ Uint8 theCommitIndicator; // Indicator of whether commit operation
+ Uint8 theSimpleIndicator; // Indicator of whether simple operation
+ Uint8 theDirtyIndicator; // Indicator of whether dirty operation
+ Uint8 theInterpretIndicator; // Indicator of whether interpreted operation
+ Int8 theDistrKeyIndicator_; // Indicates whether distr. key is used
Uint16 m_tcReqGSN;
Uint16 m_keyInfoGSN;
@@ -933,6 +974,7 @@ protected:
#include <stdlib.h>
#endif
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
inline
int
@@ -987,6 +1029,20 @@ NdbOperation::next()
return theNext;
}
+inline
+const NdbOperation*
+NdbOperation::next() const
+{
+ return theNext;
+}
+
+inline
+const NdbRecAttr*
+NdbOperation::getFirstRecAttr() const
+{
+ return theReceiver.theFirstRecAttr;
+}
+
/******************************************************************************
OperationStatus Status();
@@ -1016,14 +1072,14 @@ NdbOperation::Status( OperationStatus aStatus )
}
/******************************************************************************
-void NdbCon(NdbConnection* aNdbCon);
+void NdbCon(NdbTransaction* aNdbCon);
-Parameters: aNdbCon: Pointers to NdbConnection object.
+Parameters: aNdbCon: Pointer to an NdbTransaction object.
Remark: Set the reference to the connection in the operation object.
******************************************************************************/
inline
void
-NdbOperation::NdbCon(NdbConnection* aNdbCon)
+NdbOperation::NdbCon(NdbTransaction* aNdbCon)
{
theNdbCon = aNdbCon;
}
@@ -1168,4 +1224,6 @@ NdbOperation::setValue(Uint32 anAttrId, double aPar)
return setValue(anAttrId, (const char*)&aPar, (Uint32)8);
}
+#endif // doxygen
+
#endif
diff --git a/ndb/include/ndbapi/NdbRecAttr.hpp b/ndb/include/ndbapi/NdbRecAttr.hpp
index 741ea3d52e2..3607a64f3b3 100644
--- a/ndb/include/ndbapi/NdbRecAttr.hpp
+++ b/ndb/include/ndbapi/NdbRecAttr.hpp
@@ -35,23 +35,22 @@ class NdbOperation;
* MyRecAttr = MyOperation->getValue("ATTR2", NULL);
* if (MyRecAttr == NULL) goto error;
*
- * if (MyConnection->execute(Commit) == -1) goto error;
+ * if (MyTransaction->execute(Commit) == -1) goto error;
*
* ndbout << MyRecAttr->u_32_value();
* @endcode
* For more examples, see
- * @ref ndbapi_example1.cpp and
- * @ref ndbapi_example2.cpp.
+ * @ref ndbapi_simple.cpp.
*
* @note The NdbRecAttr object is instantiated with its value when
- * NdbConnection::execute is called. Before this, the value is
+ * NdbTransaction::execute is called. Before this, the value is
* undefined. (NdbRecAttr::isNULL can be used to check
* if the value is defined or not.)
* This means that an NdbRecAttr object only has valid information
- * between the time of calling NdbConnection::execute and
+ * between the time of calling NdbTransaction::execute and
* the time of Ndb::closeTransaction.
* The value of the null indicator is -1 until the
- * NdbConnection::execute method have been called.
+ * NdbTransaction::execute method has been called.
*
 * For simple types, there are methods for getting the value directly
* from the NdbRecAttr object.
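As a sketch of the lifetime rule above (names hypothetical, myTrans assumed to be a started NdbTransaction), the NdbRecAttr is usable only after execute() has run:
@code
NdbOperation* op = myTrans->getNdbOperation("MYTABLE");
if (op == NULL || op->readTuple(NdbOperation::LM_Read) == -1) goto error;
if (op->equal("ATTR1", (Uint32)1) == -1) goto error;

NdbRecAttr* rec = op->getValue("ATTR2", NULL);
if (rec == NULL) goto error;
// Here rec is still undefined: isNULL() returns -1 until execute() has run.

if (myTrans->execute(NdbTransaction::Commit) == -1) goto error;
if (rec->isNULL() == 1)
  ndbout << "ATTR2 is NULL";
else
  ndbout << rec->u_32_value();
@endcode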
@@ -73,12 +72,14 @@ class NdbOperation;
*/
class NdbRecAttr
{
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
friend class NdbOperation;
friend class NdbIndexScanOperation;
friend class NdbEventOperationImpl;
friend class NdbReceiver;
friend class Ndb;
friend class NdbOut& operator<<(class NdbOut&, const class AttributeS&);
+#endif
public:
/**
@@ -125,7 +126,7 @@ public:
* Check if attribute value is NULL.
*
* @return -1 = Not defined (Failure or
- * NdbConnection::execute not yet called).<br>
+ * NdbTransaction::execute not yet called).<br>
* 0 = Attribute value is defined, but not equal to NULL.<br>
* 1 = Attribute value is defined and equal to NULL.
*/
@@ -242,10 +243,16 @@ public:
 * i.e. objects that have been cloned.
*/
~NdbRecAttr();
+
+public:
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
+ const NdbRecAttr* next() const;
+#endif
private:
Uint32 attrId() const; /* Get attribute id */
bool setNULL(); /* Set NULL indicator */
+ void setUNDEFINED(); /* Set UNDEFINED indicator */
bool receive_data(const Uint32*, Uint32);
void release(); /* Release memory if allocated */
@@ -253,7 +260,7 @@ private:
NdbRecAttr(Ndb*);
void next(NdbRecAttr* aRecAttr);
- NdbRecAttr* next() const;
+ NdbRecAttr* next();
int setup(const class NdbDictionary::Column* col, char* aValue);
int setup(const class NdbColumnImpl* anAttrInfo, char* aValue);
@@ -278,6 +285,8 @@ private:
friend struct Ndb_free_list_t<NdbRecAttr>;
};
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
+
inline
NdbDictionary::Column::Type
NdbRecAttr::getType() const {
@@ -376,6 +385,13 @@ NdbRecAttr::next(NdbRecAttr* aRecAttr)
inline
NdbRecAttr*
+NdbRecAttr::next()
+{
+ return theNext;
+}
+
+inline
+const NdbRecAttr*
NdbRecAttr::next() const
{
return theNext;
@@ -411,6 +427,13 @@ NdbRecAttr::setNULL()
}
inline
+void
+NdbRecAttr::setUNDEFINED()
+{
+ theNULLind = -1;
+}
+
+inline
int
NdbRecAttr::isNULL() const
{
@@ -419,5 +442,7 @@ NdbRecAttr::isNULL() const
class NdbOut& operator <<(class NdbOut&, const NdbRecAttr &);
+#endif // ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
+
#endif
diff --git a/ndb/include/ndbapi/NdbReceiver.hpp b/ndb/include/ndbapi/NdbReceiver.hpp
index af624f69bd3..ff6debc7fd3 100644
--- a/ndb/include/ndbapi/NdbReceiver.hpp
+++ b/ndb/include/ndbapi/NdbReceiver.hpp
@@ -21,7 +21,7 @@
#include <ndb_types.h>
class Ndb;
-class NdbConnection;
+class NdbTransaction;
class NdbReceiver
{
@@ -30,7 +30,7 @@ class NdbReceiver
friend class NdbScanOperation;
friend class NdbIndexOperation;
friend class NdbIndexScanOperation;
- friend class NdbConnection;
+ friend class NdbTransaction;
public:
enum ReceiverType { NDB_UNINITIALIZED,
NDB_OPERATION = 1,
@@ -51,7 +51,7 @@ public:
return m_type;
}
- inline NdbConnection * getTransaction();
+ inline NdbTransaction * getTransaction();
void* getOwner(){
return m_owner;
}
@@ -67,7 +67,7 @@ private:
Ndb* m_ndb;
Uint32 m_id;
Uint32 m_tcPtrI;
- Uint32 m_key_info;
+ Uint32 m_hidden_count;
ReceiverType m_type;
void* m_owner;
NdbReceiver* m_next;
@@ -76,7 +76,7 @@ private:
* At setup
*/
class NdbRecAttr * getValue(const class NdbColumnImpl*, char * user_dst_ptr);
- void do_get_value(NdbReceiver*, Uint32 rows, Uint32 key_size);
+ void do_get_value(NdbReceiver*, Uint32 rows, Uint32 key_size, Uint32 range);
void prepareSend();
void calculate_batch_size(Uint32, Uint32, Uint32&, Uint32&, Uint32&);
@@ -97,7 +97,7 @@ private:
Uint32 m_received_result_length;
bool nextResult() const { return m_current_row < m_result_rows; }
- void copyout(NdbReceiver&);
+ NdbRecAttr* copyout(NdbReceiver&);
};
#ifdef NDB_NO_DROPPED_SIGNAL
@@ -146,5 +146,5 @@ NdbReceiver::execSCANOPCONF(Uint32 tcPtrI, Uint32 len, Uint32 rows){
return (tmp == len ? 1 : 0);
}
-#endif
+#endif // DOXYGEN_SHOULD_SKIP_INTERNAL
#endif
diff --git a/ndb/include/ndbapi/NdbResultSet.hpp b/ndb/include/ndbapi/NdbResultSet.hpp
deleted file mode 100644
index dc0288a380c..00000000000
--- a/ndb/include/ndbapi/NdbResultSet.hpp
+++ /dev/null
@@ -1,162 +0,0 @@
-/* Copyright (C) 2003 MySQL AB
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-
-/*****************************************************************************
- * Name: NdbResultSet.hpp
- * Include:
- * Link:
- * Author: Martin Sköld
- * Date: 2002-04-01
- * Version: 0.1
- * Description: Cursor class
- * Documentation:
- * Adjust: 2002-04-01 Martin Sköld First version.
- ****************************************************************************/
-
-#ifndef NdbResultSet_H
-#define NdbResultSet_H
-
-
-#include <NdbScanOperation.hpp>
-
-/**
- * @class NdbResultSet
- * @brief NdbResultSet contains a NdbScanOperation.
- */
-class NdbResultSet
-{
- friend class NdbScanOperation;
-
-public:
-
- /**
- * Get the next tuple in a scan transaction.
- *
- * After each call to NdbResult::nextResult
- * the buffers and NdbRecAttr objects defined in
- * NdbOperation::getValue are updated with values
- * from the scanned tuple.
- *
- * @param fetchAllowed If set to false, then fetching is disabled
- *
- * The NDB API will contact the NDB Kernel for more tuples
- * when necessary to do so unless you set the fetchAllowed
- * to false.
- * This will force NDB to process any records it
- * already has in it's caches. When there are no more cached
- * records it will return 2. You must then call nextResult
- * with fetchAllowed = true in order to contact NDB for more
- * records.
- *
- * fetchAllowed = false is useful when you want to update or
- * delete all the records fetched in one transaction(This will save a
- * lot of round trip time and make updates or deletes of scanned
- * records a lot faster).
- * While nextResult(false)
- * returns 0 take over the record to another transaction. When
- * nextResult(false) returns 2 you must execute and commit the other
- * transaction. This will cause the locks to be transferred to the
- * other transaction, updates or deletes will be made and then the
- * locks will be released.
- * After that, call nextResult(true) which will fetch new records and
- * cache them in the NdbApi.
- *
- * @note If you don't take over the records to another transaction the
- * locks on those records will be released the next time NDB Kernel
- * is contacted for more records.
- *
- * @note Please contact for examples of efficient scan
- * updates and deletes.
- *
- * @note See ndb/examples/ndbapi_scan_example for usage.
- *
- * @return
- * - -1: if unsuccessful,<br>
- * - 0: if another tuple was received, and<br>
- * - 1: if there are no more tuples to scan.
- * - 2: if there are no more cached records in NdbApi
- */
- int nextResult(bool fetchAllowed = true, bool forceSend = false);
-
- /**
- * Close result set (scan)
- */
- void close(bool forceSend = false);
-
- /**
- * Restart
- */
- int restart(bool forceSend = false);
-
- /**
- * Transfer scan operation to an updating transaction. Use this function
- * when a scan has found a record that you want to update.
- * 1. Start a new transaction.
- * 2. Call the function takeOverForUpdate using your new transaction
- * as parameter, all the properties of the found record will be copied
- * to the new transaction.
- * 3. When you execute the new transaction, the lock held by the scan will
- * be transferred to the new transaction(it's taken over).
- *
- * @note You must have started the scan with openScanExclusive
- * to be able to update the found tuple.
- *
- * @param updateTrans the update transaction connection.
- * @return an NdbOperation or NULL.
- */
- NdbOperation* updateTuple();
- NdbOperation* updateTuple(NdbConnection* updateTrans);
-
- /**
- * Transfer scan operation to a deleting transaction. Use this function
- * when a scan has found a record that you want to delete.
- * 1. Start a new transaction.
- * 2. Call the function takeOverForDelete using your new transaction
- * as parameter, all the properties of the found record will be copied
- * to the new transaction.
- * 3. When you execute the new transaction, the lock held by the scan will
- * be transferred to the new transaction(its taken over).
- *
- * @note You must have started the scan with openScanExclusive
- * to be able to delete the found tuple.
- *
- * @param deleteTrans the delete transaction connection.
- * @return an NdbOperation or NULL.
- */
- int deleteTuple();
- int deleteTuple(NdbConnection* takeOverTransaction);
-
- /**
- * Get underlying operation
- */
- NdbOperation* getOperation();
-private:
- NdbResultSet(NdbScanOperation*);
-
- ~NdbResultSet();
-
- void init();
-
- NdbScanOperation* m_operation;
-};
-
-inline
-NdbOperation*
-NdbResultSet::getOperation(){
- return m_operation;
-}
-
-#endif
diff --git a/ndb/include/ndbapi/NdbScanFilter.hpp b/ndb/include/ndbapi/NdbScanFilter.hpp
index 9f8a01b1059..b5457bab99b 100644
--- a/ndb/include/ndbapi/NdbScanFilter.hpp
+++ b/ndb/include/ndbapi/NdbScanFilter.hpp
@@ -45,7 +45,19 @@ public:
NAND = 3, ///< NOT (x1 AND x2 AND x3)
NOR = 4 ///< NOT (x1 OR x2 OR x3)
};
-
+
+ enum BinaryCondition
+ {
+ COND_LE = 0, ///< lower bound
+ COND_LT = 1, ///< lower bound, strict
+ COND_GE = 2, ///< upper bound
+ COND_GT = 3, ///< upper bound, strict
+ COND_EQ = 4, ///< equality
+ COND_NE = 5, ///< not equal
+ COND_LIKE = 6, ///< like
+ COND_NOT_LIKE = 7 ///< not like
+ };
+
/**
* @name Grouping
* @{
@@ -74,7 +86,12 @@ public:
* <i>Explanation missing</i>
*/
int isfalse();
-
+
+ /**
+ * Compare column <b>ColId</b> with <b>val</b>
+ */
+ int cmp(BinaryCondition cond, int ColId, const void *val, Uint32 len = 0);
+
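For illustration only (column ids 0 and 1 are hypothetical, and scanOp is assumed to be an NdbScanOperation already prepared with readTuples()), cmp() and the convenience wrappers below combine with the grouping methods like this:
@code
NdbScanFilter filter(scanOp);
if (filter.begin(NdbScanFilter::AND) == -1) goto error;   // all conditions must hold
if (filter.ge(0, (Uint32)10) == -1) goto error;           // column 0 >= 10
Uint32 v = 7;
if (filter.cmp(NdbScanFilter::COND_EQ, 1, &v, 4) == -1) goto error; // column 1 == 7
if (filter.end() == -1) goto error;
@endcode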
/**
* @name Integer Comparators
* @{
@@ -82,80 +99,61 @@ public:
/** Compare column value with integer for equal
 * @return 0 if successful, -1 otherwise
*/
- int eq(int ColId, Uint32 value);
+ int eq(int ColId, Uint32 value) { return cmp(COND_EQ, ColId, &value, 4);}
+
/** Compare column value with integer for not equal.
 * @return 0 if successful, -1 otherwise
*/
- int ne(int ColId, Uint32 value);
+ int ne(int ColId, Uint32 value) { return cmp(COND_NE, ColId, &value, 4);}
/** Compare column value with integer for less than.
 * @return 0 if successful, -1 otherwise
*/
- int lt(int ColId, Uint32 value);
+ int lt(int ColId, Uint32 value) { return cmp(COND_LT, ColId, &value, 4);}
/** Compare column value with integer for less than or equal.
 * @return 0 if successful, -1 otherwise
*/
- int le(int ColId, Uint32 value);
+ int le(int ColId, Uint32 value) { return cmp(COND_LE, ColId, &value, 4);}
/** Compare column value with integer for greater than.
 * @return 0 if successful, -1 otherwise
*/
- int gt(int ColId, Uint32 value);
+ int gt(int ColId, Uint32 value) { return cmp(COND_GT, ColId, &value, 4);}
/** Compare column value with integer for greater than or equal.
 * @return 0 if successful, -1 otherwise
*/
- int ge(int ColId, Uint32 value);
+ int ge(int ColId, Uint32 value) { return cmp(COND_GE, ColId, &value, 4);}
/** Compare column value with integer for equal. 64-bit.
 * @return 0 if successful, -1 otherwise
*/
- int eq(int ColId, Uint64 value);
+ int eq(int ColId, Uint64 value) { return cmp(COND_EQ, ColId, &value, 8);}
/** Compare column value with integer for not equal. 64-bit.
 * @return 0 if successful, -1 otherwise
*/
- int ne(int ColId, Uint64 value);
+ int ne(int ColId, Uint64 value) { return cmp(COND_NE, ColId, &value, 8);}
/** Compare column value with integer for less than. 64-bit.
 * @return 0 if successful, -1 otherwise
*/
- int lt(int ColId, Uint64 value);
+ int lt(int ColId, Uint64 value) { return cmp(COND_LT, ColId, &value, 8);}
/** Compare column value with integer for less than or equal. 64-bit.
 * @return 0 if successful, -1 otherwise
*/
- int le(int ColId, Uint64 value);
+ int le(int ColId, Uint64 value) { return cmp(COND_LE, ColId, &value, 8);}
/** Compare column value with integer for greater than. 64-bit.
 * @return 0 if successful, -1 otherwise
*/
- int gt(int ColId, Uint64 value);
+ int gt(int ColId, Uint64 value) { return cmp(COND_GT, ColId, &value, 8);}
/** Compare column value with integer for greater than or equal. 64-bit.
 * @return 0 if successful, -1 otherwise
*/
- int ge(int ColId, Uint64 value);
+ int ge(int ColId, Uint64 value) { return cmp(COND_GE, ColId, &value, 8);}
/** @} *********************************************************************/
/** Check if column value is NULL */
int isnull(int ColId);
/** Check if column value is non-NULL */
int isnotnull(int ColId);
-
- /**
- * @name String Comparators
- * @{
- */
- /**
- * Compare string against a Char or Varchar column.
- *
- * By default Char comparison blank-pads both sides to common length.
- * Varchar comparison does not blank-pad.
- *
- * The extra <i>nopad</i> argument can be used to
- * force non-padded comparison for a Char column.
- * ®return 0 if successful, -1 otherwize
- */
- int eq(int ColId, const char * val, Uint32 len, bool nopad=false);
- int ne(int ColId, const char * val, Uint32 len, bool nopad=false);
- int lt(int ColId, const char * val, Uint32 len, bool nopad=false);
- int le(int ColId, const char * val, Uint32 len, bool nopad=false);
- int gt(int ColId, const char * val, Uint32 len, bool nopad=false);
- int ge(int ColId, const char * val, Uint32 len, bool nopad=false);
-
+
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
/**
* Like comparison operator.
 * @return 0 if successful, -1 otherwise
@@ -167,9 +165,12 @@ public:
*/
int notlike(int ColId, const char * val, Uint32 len, bool nopad=false);
/** @} *********************************************************************/
+#endif
private:
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
friend class NdbScanFilterImpl;
+#endif
class NdbScanFilterImpl & m_impl;
NdbScanFilter& operator=(const NdbScanFilter&); ///< Defined not implemented
};
diff --git a/ndb/include/ndbapi/NdbScanOperation.hpp b/ndb/include/ndbapi/NdbScanOperation.hpp
index f6e68dd4abe..bf8f362cefc 100644
--- a/ndb/include/ndbapi/NdbScanOperation.hpp
+++ b/ndb/include/ndbapi/NdbScanOperation.hpp
@@ -14,18 +14,6 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-/*****************************************************************************
- * Name: NdbScanOperation.hpp
- * Include:
- * Link:
- * Author: Martin Sköld
- * Date: 2002-04-01
- * Version: 0.1
- * Description: Table scan support
- * Documentation:
- * Adjust: 2002-04-01 Martin Sköld First version.
- ****************************************************************************/
-
#ifndef NdbScanOperation_H
#define NdbScanOperation_H
@@ -39,68 +27,169 @@ class NdbResultSet;
* @brief Class of scan operations for use in transactions.
*/
class NdbScanOperation : public NdbOperation {
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
friend class Ndb;
- friend class NdbConnection;
+ friend class NdbTransaction;
friend class NdbResultSet;
friend class NdbOperation;
friend class NdbBlob;
+#endif
+
public:
/**
- * Type of cursor
+ * Scan flags. OR-ed together and passed as second argument to
+ * readTuples.
*/
- enum CursorType {
- NoCursor = 0,
- ScanCursor = 1,
- IndexCursor = 2
+ enum ScanFlag {
+ SF_TupScan = (1 << 16), // scan TUP - only LM_CommittedRead
+ SF_OrderBy = (1 << 24), // index scan in order
+ SF_Descending = (2 << 24), // index scan in descending order
+ SF_ReadRangeNo = (4 << 24) // enable @ref get_range_no
};
/**
- * Type of cursor
- */
- CursorType get_cursor_type() const;
+ * readTuples
+ *
+ * @param lock_mode Lock mode
+ * @param scan_flags see @ref ScanFlag
+ * @param parallel No of fragments to scan in parallel (0=max)
+ */
+ virtual
+ int readTuples(LockMode lock_mode = LM_Read,
+ Uint32 scan_flags = 0, Uint32 parallel = 0);
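A minimal read-only scan sketch using readTuples() (table and column names hypothetical; myTrans assumed to be a started NdbTransaction):
@code
NdbScanOperation* scan = myTrans->getNdbScanOperation("MYTABLE");
if (scan == NULL) goto error;
if (scan->readTuples(NdbOperation::LM_CommittedRead) == -1) goto error;

NdbRecAttr* val = scan->getValue("ATTR2", NULL);
if (val == NULL) goto error;
if (myTrans->execute(NdbTransaction::NoCommit) == -1) goto error;

int check;
while ((check = scan->nextResult(true)) == 0) {
  ndbout << val->u_32_value();      // val refers to the current row
}
if (check == -1) goto error;        // 1 means end of scan
@endcode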
+#ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED
/**
- * readTuples returns a NdbResultSet where tuples are stored.
- * Tuples are not stored in NdbResultSet until execute(NoCommit)
- * has been executed and nextResult has been called.
+ * readTuples
*
- * @param parallel Scan parallelism
+ * @param lock_mode Lock mode
* @param batch No of rows to fetch from each fragment at a time
- * @param LockMode Scan lock handling
- * @returns NdbResultSet.
+ * @param parallel No of fragments to scan in parallel
 * @note specifying 0 for batch and parallel means max performance
*/
- NdbResultSet* readTuples(LockMode = LM_Read,
- Uint32 batch = 0, Uint32 parallel = 0);
+#ifdef ndb_readtuples_impossible_overload
+ int readTuples(LockMode lock_mode = LM_Read,
+ Uint32 batch = 0, Uint32 parallel = 0);
+#endif
- inline NdbResultSet* readTuples(int parallell){
+ inline int readTuples(int parallell){
return readTuples(LM_Read, 0, parallell);
}
- inline NdbResultSet* readTuplesExclusive(int parallell = 0){
+ inline int readTuplesExclusive(int parallell = 0){
return readTuples(LM_Exclusive, 0, parallell);
}
+#endif
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
NdbBlob* getBlobHandle(const char* anAttrName);
NdbBlob* getBlobHandle(Uint32 anAttrId);
+#endif
-protected:
- CursorType m_cursor_type;
+ /**
+ * Get the next tuple in a scan transaction.
+ *
+ * After each call to nextResult
+ * the buffers and NdbRecAttr objects defined in
+ * NdbOperation::getValue are updated with values
+ * from the scanned tuple.
+ *
+ * @param fetchAllowed If set to false, then fetching is disabled
+ * @param forceSend If true send will occur immediately (see @ref secAdapt)
+ *
+ * The NDB API will contact the NDB Kernel for more tuples
+ * when necessary to do so unless you set the fetchAllowed
+ * to false.
+ * This will force NDB to process any records it
+ * already has in its caches. When there are no more cached
+ * records it will return 2. You must then call nextResult
+ * with fetchAllowed = true in order to contact NDB for more
+ * records.
+ *
+ * fetchAllowed = false is useful when you want to update or
+ * delete all the records fetched in one transaction (This will save a
+ * lot of round trip time and make updates or deletes of scanned
+ * records a lot faster).
+ * While nextResult(false)
+ * returns 0, take over the record to another transaction. When
+ * nextResult(false) returns 2 you must execute and commit the other
+ * transaction. This will cause the locks to be transferred to the
+ * other transaction, updates or deletes will be made and then the
+ * locks will be released.
+ * After that, call nextResult(true) which will fetch new records and
+ * cache them in the NdbApi.
+ *
+ * @note If you don't take over the records to another transaction the
+ * locks on those records will be released the next time NDB Kernel
+ * is contacted for more records.
+ *
+ * @note Please contact for examples of efficient scan
+ * updates and deletes.
+ *
+ * @note See ndb/examples/ndbapi_scan_example for usage.
+ *
+ * @return
+ * - -1: if unsuccessful,<br>
+ * - 0: if another tuple was received, and<br>
+ * - 1: if there are no more tuples to scan.
+ * - 2: if there are no more cached records in NdbApi
+ */
+ int nextResult(bool fetchAllowed = true, bool forceSend = false);
+
+ /**
+ * Close scan
+ */
+ void close(bool forceSend = false, bool releaseOp = false);
+
+ /**
+ * Update current tuple
+ *
+ * @return an NdbOperation or NULL.
+ */
+ NdbOperation* updateCurrentTuple();
+ /**
+ * Update current tuple
+ *
+ * @param updateTrans Transaction that should perform the update
+ *
+ * @return an NdbOperation or NULL.
+ */
+ NdbOperation* updateCurrentTuple(NdbTransaction* updateTrans);
+ /**
+ * Delete current tuple
+ * @return 0 on success or -1 on failure
+ */
+ int deleteCurrentTuple();
+ /**
+ * Delete current tuple
+ *
+ * @param takeOverTransaction Transaction that should perform the delete
+ *
+ * @return 0 on success or -1 on failure
+ */
+ int deleteCurrentTuple(NdbTransaction* takeOverTransaction);
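A hedged sketch of the take-over pattern described for nextResult() above: scanned rows are updated in batches through a second transaction (myNdb, and a scan defined with readTuples(NdbOperation::LM_Exclusive) and executed with NoCommit, are assumptions):
@code
int check;
while ((check = scan->nextResult(true)) == 0) {
  NdbTransaction* updTrans = myNdb->startTransaction();
  if (updTrans == NULL) goto error;
  do {
    NdbOperation* upd = scan->updateCurrentTuple(updTrans);  // take over the lock
    if (upd == NULL) goto error;
    if (upd->setValue("ATTR2", (Uint32)0) == -1) goto error;
  } while ((check = scan->nextResult(false)) == 0);

  if (check == -1) goto error;
  // No more cached rows: commit the taken-over updates.
  if (updTrans->execute(NdbTransaction::Commit) == -1) goto error;
  myNdb->closeTransaction(updTrans);
  if (check == 1) break;            // scan finished
}
@endcode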
+
+ /**
+ * Restart scan with exactly the same
+ * getValues and search conditions
+ */
+ int restart(bool forceSend = false);
+
+protected:
NdbScanOperation(Ndb* aNdb);
virtual ~NdbScanOperation();
- int nextResult(bool fetchAllowed = true, bool forceSend = false);
+ int nextResultImpl(bool fetchAllowed = true, bool forceSend = false);
virtual void release();
- void closeScan(bool forceSend = false, bool releaseOp = false);
int close_impl(class TransporterFacade*, bool forceSend = false);
// Overloaded methods from NdbCursorOperation
int executeCursor(int ProcessorId);
// Overloaded private methods from NdbOperation
- int init(const NdbTableImpl* tab, NdbConnection* myConnection);
+ int init(const NdbTableImpl* tab, NdbTransaction*);
int prepareSend(Uint32 TC_ConnectPtr, Uint64 TransactionId);
int doSend(int ProcessorId);
void checkForceSend(bool forceSend);
@@ -108,14 +197,11 @@ protected:
virtual void setErrorCode(int aErrorCode);
virtual void setErrorCodeAbort(int aErrorCode);
- NdbResultSet * m_resultSet;
- NdbResultSet* getResultSet();
- NdbConnection *m_transConnection;
+ NdbTransaction *m_transConnection;
// Scan related variables
Uint32 theParallelism;
Uint32 m_keyInfo;
- NdbApiSignal* theSCAN_TABREQ;
int getFirstATTRINFOScan();
int doSendScan(int ProcessorId);
@@ -154,17 +240,41 @@ protected:
void execCLOSE_SCAN_REP();
int getKeyFromKEYINFO20(Uint32* data, unsigned size);
- NdbOperation* takeOverScanOp(OperationType opType, NdbConnection*);
+ NdbOperation* takeOverScanOp(OperationType opType, NdbTransaction*);
- Uint32 m_ordered;
-
- int restart(bool forceSend = false);
+ bool m_ordered;
+ bool m_descending;
+ Uint32 m_read_range_no;
+ NdbRecAttr *m_curr_row; // Pointer to last returned row
};
inline
-NdbScanOperation::CursorType
-NdbScanOperation::get_cursor_type() const {
- return m_cursor_type;
+NdbOperation*
+NdbScanOperation::updateCurrentTuple(){
+ return updateCurrentTuple(m_transConnection);
+}
+
+inline
+NdbOperation*
+NdbScanOperation::updateCurrentTuple(NdbTransaction* takeOverTrans){
+ return takeOverScanOp(NdbOperation::UpdateRequest,
+ takeOverTrans);
+}
+
+inline
+int
+NdbScanOperation::deleteCurrentTuple(){
+ return deleteCurrentTuple(m_transConnection);
+}
+
+inline
+int
+NdbScanOperation::deleteCurrentTuple(NdbTransaction * takeOverTrans){
+ void * res = takeOverScanOp(NdbOperation::DeleteRequest,
+ takeOverTrans);
+ if(res == 0)
+ return -1;
+ return 0;
}
#endif
diff --git a/ndb/include/ndbapi/NdbConnection.hpp b/ndb/include/ndbapi/NdbTransaction.hpp
index 75c3f80121d..a6ba6a11c4d 100644
--- a/ndb/include/ndbapi/NdbConnection.hpp
+++ b/ndb/include/ndbapi/NdbTransaction.hpp
@@ -14,15 +14,15 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-#ifndef NdbConnection_H
-#define NdbConnection_H
+#ifndef NdbTransaction_H
+#define NdbTransaction_H
#include <ndb_types.h>
#include "NdbError.hpp"
#include "NdbDictionary.hpp"
#include "Ndb.hpp"
-class NdbConnection;
+class NdbTransaction;
class NdbOperation;
class NdbScanOperation;
class NdbIndexScanOperation;
@@ -31,106 +31,81 @@ class NdbApiSignal;
class Ndb;
class NdbBlob;
-
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
+// to be documented later
/**
* NdbAsynchCallback functions are used when executing asynchronous
- * transactions (using NdbConnection::executeAsynchPrepare, or
- * NdbConnection::executeAsynch).
+ * transactions (using NdbTransaction::executeAsynchPrepare, or
+ * NdbTransaction::executeAsynch).
* The functions are called when the execute has finished.
* See @ref secAsync for more information.
*/
-typedef void (* NdbAsynchCallback)(int, NdbConnection*, void*);
+typedef void (* NdbAsynchCallback)(int, NdbTransaction*, void*);
+#endif
-/**
- * Commit type of transaction
- */
-enum AbortOption {
#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
- CommitIfFailFree = 0,
- CommitAsMuchAsPossible = 2, ///< Commit transaction with as many
- TryCommit = 0, ///< <i>Missing explanation</i>
-#endif
- AbortOnError = 0, ///< Abort transaction on failed operation
- AO_IgnoreError = 2 ///< Transaction continues on failed operation
+enum AbortOption {
+ CommitIfFailFree= 0,
+ TryCommit= 0,
+ AbortOnError= 0,
+ CommitAsMuchAsPossible= 2,
+ AO_IgnoreError= 2
};
-
-typedef AbortOption CommitType;
-
-
-/**
- * Execution type of transaction
- */
enum ExecType {
- NoExecTypeDef = -1, ///< Erroneous type (Used for debugging only)
- Prepare, ///< <i>Missing explanation</i>
- NoCommit, ///< Execute the transaction as far as it has
- ///< been defined, but do not yet commit it
- Commit, ///< Execute and try to commit the transaction
- Rollback ///< Rollback transaction
+ NoExecTypeDef = -1,
+ Prepare,
+ NoCommit,
+ Commit,
+ Rollback
};
-
+#endif
/**
- * @class NdbConnection
+ * @class NdbTransaction
* @brief Represents a transaction.
*
- * A transaction (represented by an NdbConnection object)
- * belongs to an Ndb object and is typically created using
- * Ndb::startTransaction.
+ * A transaction (represented by an NdbTransaction object)
+ * belongs to an Ndb object and is created using
+ * Ndb::startTransaction().
* A transaction consists of a list of operations
- * (represented by NdbOperation objects).
+ * (represented by NdbOperation, NdbScanOperation, NdbIndexOperation,
+ * and NdbIndexScanOperation objects).
 * Each operation accesses exactly one table.
*
- * After getting the NdbConnection object,
- * the first step is to get (allocate) an operation given the table name.
+ * After getting the NdbTransaction object,
+ * the first step is to get (allocate) an operation given the table name using
+ * one of the methods getNdbOperation(), getNdbScanOperation(),
+ * getNdbIndexOperation(), or getNdbIndexScanOperation().
* Then the operation is defined.
- * Several operations can be defined in parallel on the same
- * NdbConnection object.
- * When all operations are defined, the NdbConnection::execute
- * method sends them to the NDB kernel for execution.
+ * Several operations can be defined on the same
+ * NdbTransaction object; they will in that case be executed in parallel.
+ * When all operations are defined, the execute()
+ * method sends them to the NDB kernel for execution.
*
- * The NdbConnection::execute method returns when the NDB kernel has
+ * The execute() method returns when the NDB kernel has
* completed execution of all operations defined before the call to
- * NdbConnection::execute.
- * All allocated operations should be properly defined
- * before calling NdbConnection::execute.
+ * execute(). All allocated operations should be properly defined
+ * before calling execute().
*
- * A call to NdbConnection::execute uses one out of three types of execution:
- * -# ExecType::NoCommit Executes operations without committing them.
- * -# ExecType::Commit Executes remaining operation and commits the
+ * A call to execute() uses one out of three types of execution:
+ * -# NdbTransaction::NoCommit Executes operations without committing them.
+ * -# NdbTransaction::Commit Executes remaining operation and commits the
* complete transaction
- * -# ExecType::Rollback Rollbacks the entire transaction.
+ * -# NdbTransaction::Rollback Rolls back the entire transaction.
*
- * NdbConnection::execute is equipped with an extra error handling parameter
+ * execute() is equipped with an extra error handling parameter.
* There are two alternatives:
- * -# AbortOption::AbortOnError (default).
+ * -# NdbTransaction::AbortOnError (default).
 * The transaction is aborted if there is any error during the
 * execution.
- * -# AbortOption::IgnoreError
+ * -# NdbTransaction::AO_IgnoreError
* Continue execution of transaction even if operation fails
*
- * NdbConnection::execute can sometimes indicate an error
- * (return with -1) while the error code on the NdbConnection is 0.
- * This is an indication that one of the operations found a record
- * problem. The transaction is still ok and can continue as usual.
- * The NdbConnection::execute returns -1 together with error code
- * on NdbConnection object equal to 0 always means that an
- * operation was not successful but that the total transaction was OK.
- * By checking error codes on the individual operations it is possible
- * to find out which operation was not successful.
- *
- * NdbConnection::executeScan is used to setup a scan in the NDB kernel
- * after it has been defined.
- * NdbConnection::nextScanResult is used to iterate through the
- * scanned tuples.
- * After each call to NdbConnection::nextScanResult, the pointers
- * of NdbRecAttr objects defined in the NdbOperation::getValue
- * operations are updated with the values of the new the scanned tuple.
*/
/* FUTURE IMPLEMENTATION:
* Later a prepare mode will be added when Ndb supports Prepare-To-Commit
- * The NdbConnection can deliver the Transaction Id of the transaction.
+ * The NdbTransaction can deliver the Transaction Id of the transaction.
* After committing a transaction it is also possible to retrieve the
* global transaction checkpoint which the transaction was put in.
*
@@ -149,24 +124,66 @@ enum ExecType {
* not known the table of the tuple. As long as the table is
* derived from the known base class everything is ok.
* It is not possible to provide any primary key since it is
- * already supplied with the call to NdbConnection::getNdbOperation.
+ * already supplied with the call to NdbTransaction::getNdbOperation.
* -# The third method is used when a scanned tuple is to be transferred to
* another transaction. In this case it is not possible to define the
* primary key since it came along from the scanned tuple.
*
*/
-class NdbConnection
+
+class NdbTransaction
{
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
friend class Ndb;
friend class NdbOperation;
friend class NdbScanOperation;
friend class NdbIndexOperation;
friend class NdbIndexScanOperation;
friend class NdbBlob;
-
+#endif
+
public:
/**
+ * Commit type of transaction
+ */
+ enum AbortOption {
+ AbortOnError= ///< Abort transaction on failed operation
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
+ ::AbortOnError
+#endif
+ ,AO_IgnoreError= ///< Transaction continues on failed operation
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
+ ::AO_IgnoreError
+#endif
+ };
+
+ /**
+ * Execution type of transaction
+ */
+ enum ExecType {
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
+ NoExecTypeDef=
+ ::NoExecTypeDef, ///< Erroneous type (Used for debugging only)
+ Prepare= ::Prepare, ///< <i>Missing explanation</i>
+#endif
+ NoCommit= ///< Execute the transaction as far as it has
+ ///< been defined, but do not yet commit it
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
+ ::NoCommit
+#endif
+ ,Commit= ///< Execute and try to commit the transaction
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
+ ::Commit
+#endif
+ ,Rollback ///< Rollback transaction
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
+ = ::Rollback
+#endif
+ };
+
+#ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED
+ /**
* Get an NdbOperation for a table.
* Note that the operation has to be defined before it is executed.
*
@@ -177,41 +194,100 @@ public:
* @return Pointer to an NdbOperation object if successful, otherwise NULL.
*/
NdbOperation* getNdbOperation(const char* aTableName);
+#endif
/**
+ * Get an NdbOperation for a table.
+ * Note that the operation has to be defined before it is executed.
+ *
+ * @note All operations within the same transaction need to
+ * be initialized with this method.
+ *
+ * @param aTable
+ * A table object (fetched by NdbDictionary::Dictionary::getTable)
+ * @return Pointer to an NdbOperation object if successful, otherwise NULL.
+ */
+ NdbOperation* getNdbOperation(const NdbDictionary::Table * aTable);
+
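An end-to-end sketch of allocating an operation from a Table object rather than by name (all names hypothetical; error handling abbreviated; myNdb is an initialized Ndb object):
@code
const NdbDictionary::Table* tab =
    myNdb->getDictionary()->getTable("MYTABLE");        // hypothetical table
if (tab == NULL) goto error;

NdbTransaction* trans = myNdb->startTransaction();
if (trans == NULL) goto error;

NdbOperation* op = trans->getNdbOperation(tab);
if (op == NULL || op->readTuple(NdbOperation::LM_Read) == -1) goto error;
if (op->equal("ATTR1", (Uint32)1) == -1) goto error;
NdbRecAttr* rec = op->getValue("ATTR2", NULL);
if (rec == NULL) goto error;

if (trans->execute(NdbTransaction::Commit) == -1) goto error;
ndbout << rec->u_32_value();
myNdb->closeTransaction(trans);
@endcode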
+#ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED
+ /**
* Get an operation from NdbScanOperation idlelist and
- * get the NdbConnection object which
+ * get the NdbTransaction object which
* was fetched by startTransaction pointing to this operation.
*
- * @param aTableName a table name.
+ * @param aTableName The table name.
* @return pointer to an NdbOperation object if successful, otherwise NULL
*/
NdbScanOperation* getNdbScanOperation(const char* aTableName);
+#endif
/**
* Get an operation from NdbScanOperation idlelist and
- * get the NdbConnection object which
+ * get the NdbTransaction object which
+ * was fetched by startTransaction pointing to this operation.
+ *
+ * @param aTable
+ * A table object (fetched by NdbDictionary::Dictionary::getTable)
+ * @return pointer to an NdbOperation object if successful, otherwise NULL
+ */
+ NdbScanOperation* getNdbScanOperation(const NdbDictionary::Table * aTable);
+
+#ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED
+ /**
+ * Get an operation from NdbIndexScanOperation idlelist and
+ * get the NdbTransaction object which
* was fetched by startTransaction pointing to this operation.
*
* @param anIndexName The index name.
- * @param aTableName a table name.
+ * @param aTableName The table name.
* @return pointer to an NdbOperation object if successful, otherwise NULL
*/
NdbIndexScanOperation* getNdbIndexScanOperation(const char* anIndexName,
const char* aTableName);
+ NdbIndexScanOperation* getNdbIndexScanOperation
+ (const NdbDictionary::Index *anIndex, const NdbDictionary::Table *aTable);
+#endif
/**
+ * Get an operation from NdbIndexScanOperation idlelist and
+ * get the NdbTransaction object which
+ * was fetched by startTransaction pointing to this operation.
+ *
+ * @param anIndex
+ *        An index object (fetched by NdbDictionary::Dictionary::getIndex).
+ * @return pointer to an NdbOperation object if successful, otherwise NULL
+ */
+ NdbIndexScanOperation* getNdbIndexScanOperation
+ (const NdbDictionary::Index *anIndex);
+
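A sketch only: an ordered index scan allocated from an Index object. setBound() and BoundEQ are assumed from NdbIndexScanOperation and are not part of this hunk; the index, table, and column names are hypothetical.
@code
const NdbDictionary::Index* idx =
    myNdb->getDictionary()->getIndex("MYINDEX", "MYTABLE");
if (idx == NULL) goto error;

NdbIndexScanOperation* scan = myTrans->getNdbIndexScanOperation(idx);
if (scan == NULL) goto error;
if (scan->readTuples(NdbOperation::LM_Read,
                     NdbScanOperation::SF_OrderBy) == -1) goto error;

Uint32 key = 10;
if (scan->setBound("ATTR2", NdbIndexScanOperation::BoundEQ, &key) == -1)
  goto error;
if (myTrans->execute(NdbTransaction::NoCommit) == -1) goto error;
@endcode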
+#ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED
+ /**
* Get an operation from NdbIndexOperation idlelist and
- * get the NdbConnection object that
+ * get the NdbTransaction object that
* was fetched by startTransaction pointing to this operation.
*
- * @param indexName An index name (as created by createIndex).
- * @param tableName A table name.
+ * @param anIndexName The index name (as created by createIndex).
+ * @param aTableName The table name.
* @return Pointer to an NdbIndexOperation object if
* successful, otherwise NULL
*/
- NdbIndexOperation* getNdbIndexOperation(const char* indexName,
- const char* tableName);
+ NdbIndexOperation* getNdbIndexOperation(const char* anIndexName,
+ const char* aTableName);
+ NdbIndexOperation* getNdbIndexOperation(const NdbDictionary::Index *anIndex,
+ const NdbDictionary::Table *aTable);
+#endif
+
+ /**
+ * Get an operation from NdbIndexOperation idlelist and
+ * get the NdbTransaction object that
+ * was fetched by startTransaction pointing to this operation.
+ *
+ * @param anIndex
+ * An index object (fetched by NdbDictionary::Dictionary::getIndex).
+ * @return Pointer to an NdbIndexOperation object if
+ * successful, otherwise NULL
+ */
+ NdbIndexOperation* getNdbIndexOperation(const NdbDictionary::Index *anIndex);
/**
* @name Execute Transaction
@@ -239,10 +315,18 @@ public:
* the send.
* @return 0 if successful otherwise -1.
*/
- int execute(ExecType execType,
+ int execute(ExecType execType,
AbortOption abortOption = AbortOnError,
int force = 0 );
+#ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED
+ int execute(::ExecType execType,
+ ::AbortOption abortOption = ::AbortOnError,
+ int force = 0 )
+ { return execute ((ExecType)execType,(AbortOption)abortOption,force); }
+#endif
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
+ // to be documented later
/**
* Prepare an asynchronous transaction.
*
@@ -256,7 +340,7 @@ public:
* ExecType::Rollback rollbacks the entire transaction.
* @param callback A callback method. This method gets
* called when the transaction has been
- * executed. See @ref ndbapi_example2.cpp
+ * executed. See @ref ndbapi_async1.cpp
* for an example on how to specify and use
* a callback method.
* @param anyObject A void pointer. This pointer is forwarded to the
@@ -270,15 +354,23 @@ public:
NdbAsynchCallback callback,
void* anyObject,
AbortOption abortOption = AbortOnError);
+#ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED
+ void executeAsynchPrepare(::ExecType execType,
+ NdbAsynchCallback callback,
+ void* anyObject,
+ ::AbortOption abortOption = ::AbortOnError)
+ { executeAsynchPrepare((ExecType)execType, callback, anyObject,
+ (AbortOption)abortOption); }
+#endif
/**
* Prepare and send an asynchronous transaction.
*
* This method perform the same action as
- * NdbConnection::executeAsynchPrepare
+ * NdbTransaction::executeAsynchPrepare
* but also sends the operations to the NDB kernel.
*
- * See NdbConnection::executeAsynchPrepare for information
+ * See NdbTransaction::executeAsynchPrepare for information
* about the parameters of this method.
*
* See @ref secAsync for more information on
@@ -288,7 +380,15 @@ public:
NdbAsynchCallback aCallback,
void* anyObject,
AbortOption abortOption = AbortOnError);
-
+#ifndef DOXYGEN_SHOULD_SKIP_DEPRECATED
+ void executeAsynch(::ExecType aTypeOfExec,
+ NdbAsynchCallback aCallback,
+ void* anyObject,
+ ::AbortOption abortOption= ::AbortOnError)
+ { executeAsynch((ExecType)aTypeOfExec, aCallback, anyObject,
+ (AbortOption)abortOption); }
+#endif
+#endif
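"To be documented later" notwithstanding, a hedged sketch of the asynchronous path; the callback signature follows the NdbAsynchCallback typedef above, and Ndb::sendPollNdb() is assumed from Ndb.hpp rather than taken from this patch:
@code
// Called when the prepared transaction has completed; result is 0 on success.
static void myCallback(int result, NdbTransaction* trans, void* /*ctx*/)
{
  if (result == -1)
    ndbout << "transaction failed: " << trans->getNdbError().code;
}

// ... define operations on asyncTrans as usual, then:
asyncTrans->executeAsynchPrepare(NdbTransaction::Commit, myCallback, NULL);
myNdb->sendPollNdb();   // send prepared transactions and poll for callbacks
@endcode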
/**
* Refresh
* Update timeout counter of this transaction
@@ -303,25 +403,39 @@ public:
/**
* Close transaction
- * @note It is not allowed to call NdbConnection::close after sending the
+ *
+ * @note Equivalent to calling Ndb::closeTransaction()
+ */
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
+ /**
+ * @note It is not allowed to call NdbTransaction::close after sending the
* transaction asynchronously before the callback method has
* been called.
* (The application should keep track of the number of
* outstanding transactions and wait until all of them
- * has completed before calling NdbConnection::close).
+ * have completed before calling NdbTransaction::close).
* If the transaction is not committed it will be aborted.
*/
+#endif
void close();
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
/**
* Restart transaction
*
* Once a transaction has been completed successfully
 * it can be started again without calling closeTransaction/startTransaction
*
- * Note this method also releases completed operations
+ * @note This method also releases completed operations
+ *
+ * @note This method does not close open scans,
+ * c.f. NdbScanOperation::close()
+ *
+ * @note This method can only be called _directly_ after commit
+ * and only if commit is successful
*/
int restart();
+#endif
/** @} *********************************************************************/
@@ -348,23 +462,20 @@ public:
* (This is because no updates are performed in scan transactions.)
*
* @return GCI of transaction or -1 if GCI is not available.
- * (Note that there has to be an NdbConnection::execute call
+ * (Note that there has to be an NdbTransaction::execute call
* with Ndb::Commit for the GCI to be available.)
*/
- int getGCI();
+ int getGCI();
/**
* Get transaction identity.
*
* @return Transaction id.
*/
- Uint64 getTransactionId();
+ Uint64 getTransactionId();
/**
- * Returns the commit status of the transaction.
- *
- * @return The commit status of the transaction, i.e. one of
- * { NotStarted, Started, TimeOut, Committed, Aborted, NeedAbort }
+ * The commit status of the transaction.
*/
enum CommitStatusType {
NotStarted, ///< Transaction not yet started
@@ -374,6 +485,11 @@ public:
NeedAbort ///< <i>Missing explanation</i>
};
+ /**
+ * Get the commit status of the transaction.
+ *
+ * @return The commit status of the transaction
+ */
CommitStatusType commitStatus();
/** @} *********************************************************************/
@@ -392,10 +508,10 @@ public:
/**
* Get the latest NdbOperation which had an error.
- * This method is used on the NdbConnection object to find the
+ * This method is used on the NdbTransaction object to find the
* NdbOperation causing an error.
* To find more information about the
- * actual error, use method NdbOperation::getNdbError
+ * actual error, use method NdbOperation::getNdbError()
* on the returned NdbOperation object.
*
* @return The NdbOperation causing the latest error.
@@ -414,9 +530,9 @@ public:
*
* This method should only be used <em>after</em> a transaction
* has been executed.
- * - NdbConnection::getNextCompletedOperation(NULL) returns the
+ * - NdbTransaction::getNextCompletedOperation(NULL) returns the
* first NdbOperation object.
- * - NdbConnection::getNextCompletedOperation(op) returns the
+ * - NdbTransaction::getNextCompletedOperation(op) returns the
* NdbOperation object defined after the NdbOperation "op".
*
* This method is typically used to fetch all NdbOperation:s of
@@ -431,6 +547,10 @@ public:
*/
const NdbOperation * getNextCompletedOperation(const NdbOperation * op)const;
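A sketch of the error-inspection pattern this method supports, combined with AO_IgnoreError (hedged; trans is an NdbTransaction with its operations already defined):
@code
int ret = trans->execute(NdbTransaction::Commit,
                         NdbTransaction::AO_IgnoreError);
if (ret == -1 && trans->getNdbError().code != 0) goto error;  // whole transaction failed

// Walk the completed operations to see which ones, if any, failed.
const NdbOperation* op = trans->getNextCompletedOperation(NULL);
for (; op != NULL; op = trans->getNextCompletedOperation(op)) {
  if (op->getNdbError().code != 0)
    ndbout << "operation failed: " << op->getNdbError().code;
}
@endcode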
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
+ const NdbOperation* getFirstDefinedOperation()const{return theFirstOpInList;}
+ const NdbOperation* getLastDefinedOperation()const{return theLastOpInList;}
+
/** @} *********************************************************************/
/**
@@ -442,16 +562,12 @@ public:
*/
int executePendingBlobOps(Uint8 flags = 0xFF);
- // Fast path calls for MySQL ha_ndbcluster
- NdbOperation* getNdbOperation(const NdbDictionary::Table * table);
- NdbIndexOperation* getNdbIndexOperation(const NdbDictionary::Index *,
- const NdbDictionary::Table * table);
- NdbScanOperation* getNdbScanOperation(const NdbDictionary::Table * table);
- NdbIndexScanOperation* getNdbIndexScanOperation(const NdbDictionary::Index * index,
- const NdbDictionary::Table * table);
+ /**
+ * Get nodeId of TC for this transaction
+ */
+ Uint32 getConnectedNodeId(); // Get Connected node id
+#endif
- Uint32 getConnectedNodeId(); // Get Connected node id
-
private:
/**
* Release completed operations
@@ -466,10 +582,8 @@ private:
/**************************************************************************
* These are the create and delete methods of this class. *
**************************************************************************/
- NdbConnection(Ndb* aNdb);
- ~NdbConnection();
- NdbConnection* next(); // Returns the next pointer
- void next(NdbConnection*); // Sets the next pointer
+ NdbTransaction(Ndb* aNdb);
+ ~NdbTransaction();
void init(); // Initialize connection object for new transaction
@@ -488,6 +602,8 @@ private:
int getTC_ConnectPtr(); // Gets TC Connect pointer
void setBuddyConPtr(Uint32); // Sets Buddy Con Ptr
Uint32 getBuddyConPtr(); // Gets Buddy Con Ptr
+ NdbTransaction* next(); // Returns the next pointer
+ void next(NdbTransaction*); // Sets the next pointer
enum ConStatusType {
NotConnected,
@@ -599,7 +715,7 @@ private:
NdbOperation* theErrorOperation; // The NdbOperation where the error occurred
Ndb* theNdb; // Pointer to Ndb object
- NdbConnection* theNext; // Next pointer. Used in idle list.
+ NdbTransaction* theNext; // Next pointer. Used in idle list.
NdbOperation* theFirstOpInList; // First operation in defining list.
NdbOperation* theLastOpInList; // Last operation in defining list.
@@ -690,19 +806,21 @@ private:
void define_scan_op(NdbIndexScanOperation*);
friend class HugoOperations;
- friend struct Ndb_free_list_t<NdbConnection>;
+ friend struct Ndb_free_list_t<NdbTransaction>;
};
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
+
inline
Uint32
-NdbConnection::get_send_size()
+NdbTransaction::get_send_size()
{
return 0;
}
inline
void
-NdbConnection::set_send_size(Uint32 send_size)
+NdbTransaction::set_send_size(Uint32 send_size)
{
return;
}
@@ -713,7 +831,7 @@ NdbConnection::set_send_size(Uint32 send_size)
inline
int
-NdbConnection::checkMagicNumber()
+NdbTransaction::checkMagicNumber()
{
if (theMagicNumber == 0x37412619)
return 0;
@@ -727,7 +845,7 @@ NdbConnection::checkMagicNumber()
inline
bool
-NdbConnection::checkState_TransId(const Uint32 * transId) const {
+NdbTransaction::checkState_TransId(const Uint32 * transId) const {
const Uint32 tTmp1 = transId[0];
const Uint32 tTmp2 = transId[1];
Uint64 tRecTransId = (Uint64)tTmp1 + ((Uint64)tTmp2 << 32);
@@ -742,14 +860,14 @@ Remark: Set the transaction identity.
************************************************************************************************/
inline
void
-NdbConnection::setTransactionId(Uint64 aTransactionId)
+NdbTransaction::setTransactionId(Uint64 aTransactionId)
{
theTransactionId = aTransactionId;
}
inline
void
-NdbConnection::setConnectedNodeId(Uint32 aNode, Uint32 aSequenceNo)
+NdbTransaction::setConnectedNodeId(Uint32 aNode, Uint32 aSequenceNo)
{
theDBnode = aNode;
theNodeSequence = aSequenceNo;
@@ -762,7 +880,7 @@ Remark: Get Connected node id.
******************************************************************************/
inline
Uint32
-NdbConnection::getConnectedNodeId()
+NdbTransaction::getConnectedNodeId()
{
return theDBnode;
}
@@ -774,7 +892,7 @@ Remark: Set my block refrerence.
******************************************************************************/
inline
void
-NdbConnection::setMyBlockReference(int aBlockRef)
+NdbTransaction::setMyBlockReference(int aBlockRef)
{
theMyRef = aBlockRef;
}
@@ -786,7 +904,7 @@ Remark: Sets TC Connect pointer.
******************************************************************************/
inline
void
-NdbConnection::setTC_ConnectPtr(Uint32 aTCConPtr)
+NdbTransaction::setTC_ConnectPtr(Uint32 aTCConPtr)
{
theTCConPtr = aTCConPtr;
}
@@ -799,61 +917,61 @@ Remark: Gets TC Connect pointer.
******************************************************************************/
inline
int
-NdbConnection::getTC_ConnectPtr()
+NdbTransaction::getTC_ConnectPtr()
{
return theTCConPtr;
}
inline
void
-NdbConnection::setBuddyConPtr(Uint32 aBuddyConPtr)
+NdbTransaction::setBuddyConPtr(Uint32 aBuddyConPtr)
{
theBuddyConPtr = aBuddyConPtr;
}
inline
-Uint32 NdbConnection::getBuddyConPtr()
+Uint32 NdbTransaction::getBuddyConPtr()
{
return theBuddyConPtr;
}
/******************************************************************************
-NdbConnection* next();
+NdbTransaction* next();
-Return Value: Return next pointer to NdbConnection object.
+Return Value: Return next pointer to NdbTransaction object.
Remark: Get the next pointer.
******************************************************************************/
inline
-NdbConnection*
-NdbConnection::next()
+NdbTransaction*
+NdbTransaction::next()
{
return theNext;
}
/******************************************************************************
-void next(NdbConnection aConnection);
+void next(NdbTransaction aTransaction);
-Parameters: aConnection: The connection object.
+Parameters: aTransaction: The connection object.
Remark: Sets the next pointer.
******************************************************************************/
inline
void
-NdbConnection::next(NdbConnection* aConnection)
+NdbTransaction::next(NdbTransaction* aTransaction)
{
- theNext = aConnection;
+ theNext = aTransaction;
}
/******************************************************************************
@@ -864,8 +982,8 @@ Parameters: aStatus: The status.
Remark: Sets Connect status.
******************************************************************************/
inline
-NdbConnection::ConStatusType
-NdbConnection::Status()
+NdbTransaction::ConStatusType
+NdbTransaction::Status()
{
return theStatus;
}
@@ -878,7 +996,7 @@ Remark: Sets Connect status.
******************************************************************************/
inline
void
-NdbConnection::Status( ConStatusType aStatus )
+NdbTransaction::Status( ConStatusType aStatus )
{
theStatus = aStatus;
}
@@ -891,7 +1009,7 @@ Remark: Set global checkpoint identity of the transaction
******************************************************************************/
inline
void
-NdbConnection::setGCI(int aGlobalCheckpointId)
+NdbTransaction::setGCI(int aGlobalCheckpointId)
{
theGlobalCheckpointId = aGlobalCheckpointId;
}
@@ -903,7 +1021,7 @@ Remark: An operation was sent with success that expects a response.
******************************************************************************/
inline
void
-NdbConnection::OpSent()
+NdbTransaction::OpSent()
{
theNoOfOpSent++;
}
@@ -914,7 +1032,7 @@ void executePendingBlobOps();
#include <stdlib.h>
inline
int
-NdbConnection::executePendingBlobOps(Uint8 flags)
+NdbTransaction::executePendingBlobOps(Uint8 flags)
{
if (thePendingBlobOps & flags) {
// not executeNoBlobs because there can be new ops with blobs
@@ -925,8 +1043,12 @@ NdbConnection::executePendingBlobOps(Uint8 flags)
inline
Uint32
-NdbConnection::ptr2int(){
+NdbTransaction::ptr2int(){
return theId;
}
+typedef NdbTransaction NdbConnection;
+
+#endif // ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
+
#endif
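
Editorial sketch (not part of the patch): the renamed NdbTransaction methods documented above fit together roughly as follows. The Ndb object, table handle and the column names ATTR1/ATTR2 are assumptions borrowed from the updated ndbapi examples later in this patch.

// Editorial sketch only. Assumes an initialised Ndb object "myNdb" and a
// table handle "myTable" obtained via the dictionary, as in the examples below.
#include <NdbApi.hpp>
#include <iostream>

static void insert_and_inspect(Ndb *myNdb, const NdbDictionary::Table *myTable)
{
  NdbTransaction *trans = myNdb->startTransaction();
  if (trans == NULL) { std::cout << myNdb->getNdbError().message << std::endl; return; }

  NdbOperation *op = trans->getNdbOperation(myTable);
  if (op == NULL) { std::cout << trans->getNdbError().message << std::endl; return; }
  op->insertTuple();
  op->equal("ATTR1", 10);        // hypothetical column names
  op->setValue("ATTR2", 10);

  if (trans->execute(NdbTransaction::Commit) == -1)
    std::cout << trans->getNdbError().message << std::endl;

  // getGCI() is only meaningful after an execute with Commit (see above).
  std::cout << "GCI: " << trans->getGCI() << std::endl;

  // Walk the completed operations as documented for getNextCompletedOperation().
  const NdbOperation *done = trans->getNextCompletedOperation(NULL);
  while (done != NULL) {
    // per-operation errors are available through done->getNdbError()
    done = trans->getNextCompletedOperation(done);
  }

  myNdb->closeTransaction(trans);
}
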
diff --git a/ndb/include/ndbapi/ndb_cluster_connection.hpp b/ndb/include/ndbapi/ndb_cluster_connection.hpp
index 0e559700716..97db76563aa 100644
--- a/ndb/include/ndbapi/ndb_cluster_connection.hpp
+++ b/ndb/include/ndbapi/ndb_cluster_connection.hpp
@@ -20,39 +20,42 @@
/**
* @class Ndb_cluster_connection
- * @brief Represents a connection to a cluster of storage nodes
+ * @brief Represents a connection to a cluster of storage nodes.
*
- * Always start your application program by creating a
- * Ndb_cluster_connection object. Your application should contain
- * only one Ndb_cluster_connection. Your application connects to
- * a cluster management server when method connect() is called.
- * With the method wait_until_ready() it is possible to wait
- * for the connection to one or several storage nodes.
+ * Any NDB application program should begin with the creation of a
+ * single Ndb_cluster_connection object, and should make use of one
+ * and only one Ndb_cluster_connection. The application connects to
+ * a cluster management server when this object's connect() method is called.
+ * By using the wait_until_ready() method it is possible to wait
+ * for the connection to reach one or more storage nodes.
*/
class Ndb_cluster_connection {
public:
/**
* Create a connection to a cluster of storage nodes
*
- * @param specify the connectstring for where to find the
- * management server
+ * @param connectstring The connectstring for where to find the
+ * management server
*/
- Ndb_cluster_connection(const char * connect_string = 0);
+ Ndb_cluster_connection(const char * connectstring = 0);
~Ndb_cluster_connection();
/**
* Connect to a cluster management server
*
- * @param no_retries specifies the number of retries to perform
- * if the connect fails, negative number results in infinite
- * number of retries
+ * @param no_retries specifies the number of retries to attempt
+ * in the event of connection failure; a negative value
+ * will result in the attempt to connect being repeated
+ * indefinitely
+ *
* @param retry_delay_in_seconds specifies how often retries should
* be performed
- * @param verbose specifies if the method should print progess
*
- * @return 0 if success,
- * 1 if retriable error,
- * -1 if non-retriable error
+   * @param verbose specifies if the method should print a report of its progress
+ *
+ * @return 0 = success,
+ * 1 = recoverable error,
+ * -1 = non-recoverable error
*/
int connect(int no_retries=0, int retry_delay_in_seconds=1, int verbose=0);
@@ -61,15 +64,15 @@ public:
#endif
/**
- * Wait until one or several storage nodes are connected
+ * Wait until the requested connection with one or more storage nodes is successful
*
- * @param time_out_for_first_alive number of seconds to wait until
- * first alive node is detected
- * @param timeout_after_first_alive number of seconds to wait after
- * first alive node is detected
+ * @param timeout_for_first_alive Number of seconds to wait until
+ * first live node is detected
+ * @param timeout_after_first_alive Number of seconds to wait after
+ * first live node is detected
*
- * @return 0 all nodes alive,
- * > 0 at least one node alive,
+ * @return = 0 all nodes live,
+ * > 0 at least one node live,
* < 0 error
*/
int wait_until_ready(int timeout_for_first_alive,
@@ -83,6 +86,7 @@ public:
void set_optimized_node_selection(int val);
unsigned no_db_nodes();
+ unsigned node_id();
#endif
private:
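
Editorial sketch (not part of the patch): the documented return-value conventions for connect() and wait_until_ready() translate into the sequence below, which mirrors the updated ndbapi examples later in this patch. The connectstring and timeouts are placeholders.

// Editorial sketch only; placeholder connectstring and timeouts.
#include <NdbApi.hpp>
#include <iostream>
#include <stdlib.h>

int main()
{
  ndb_init();
  Ndb_cluster_connection cluster_connection("localhost:1186"); // assumed connectstring

  // 0 = success, 1 = recoverable error, -1 = non-recoverable error
  if (cluster_connection.connect(4 /* retries */,
                                 5 /* delay between retries (s) */,
                                 1 /* verbose */) != 0)
  {
    std::cout << "Could not connect to the management server." << std::endl;
    exit(-1);
  }

  // = 0 all nodes live, > 0 at least one node live, < 0 error
  if (cluster_connection.wait_until_ready(30, 0) < 0)
  {
    std::cout << "Cluster was not ready within 30 secs." << std::endl;
    exit(-1);
  }

  ndb_end(0);
  return 0;
}
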
diff --git a/ndb/include/ndbapi/ndb_opt_defaults.h b/ndb/include/ndbapi/ndb_opt_defaults.h
index 63b673ed60d..d03a9dcc36f 100644
--- a/ndb/include/ndbapi/ndb_opt_defaults.h
+++ b/ndb/include/ndbapi/ndb_opt_defaults.h
@@ -17,11 +17,7 @@
#ifndef NDB_OPT_DEFAULTS_H
#define NDB_OPT_DEFAULTS_H
-#ifdef SIGRTMIN
-#define OPT_NDB_SHM_SIGNUM_DEFAULT SIGRTMIN+2
-#else
#define OPT_NDB_SHM_SIGNUM_DEFAULT 0
-#endif
#define OPT_NDB_SHM_DEFAULT 0
#endif
diff --git a/ndb/include/ndbapi/ndbapi_limits.h b/ndb/include/ndbapi/ndbapi_limits.h
index d1cb135b39d..5c4db71b747 100644
--- a/ndb/include/ndbapi/ndbapi_limits.h
+++ b/ndb/include/ndbapi/ndbapi_limits.h
@@ -19,10 +19,6 @@
#define NDB_MAX_NO_OF_ATTRIBUTES_IN_KEY 32
#define NDB_MAX_ATTRIBUTES_IN_INDEX NDB_MAX_NO_OF_ATTRIBUTES_IN_KEY
-#define NDB_MAX_DATABASE_NAME_SIZE 128
-#define NDB_MAX_SCHEMA_NAME_SIZE 128
-#define NDB_MAX_TAB_NAME_SIZE 128
-#define NDB_MAX_ATTR_NAME_SIZE 32
#define NDB_MAX_ATTRIBUTES_IN_TABLE 128
#define NDB_MAX_TUPLE_SIZE_IN_WORDS 2013
diff --git a/ndb/include/ndbapi/ndberror.h b/ndb/include/ndbapi/ndberror.h
index ceb1881a4cc..2225f68f08d 100644
--- a/ndb/include/ndbapi/ndberror.h
+++ b/ndb/include/ndbapi/ndberror.h
@@ -21,6 +21,8 @@
extern "C" {
#endif
+#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
+
typedef enum
{
ndberror_st_success = 0,
@@ -47,7 +49,8 @@ typedef enum
ndberror_cl_function_not_implemented = 13,
ndberror_cl_unknown_error_code = 14,
ndberror_cl_node_shutdown = 15,
- ndberror_cl_configuration = 16
+ ndberror_cl_configuration = 16,
+ ndberror_cl_schema_object_already_exists = 17
} ndberror_classification_enum;
@@ -92,6 +95,8 @@ const char *ndberror_classification_message(ndberror_classification);
void ndberror_update(ndberror_struct *);
int ndb_error_string(int err_no, char *str, unsigned int size);
+#endif /* doxygen skip internal*/
+
#ifdef __cplusplus
}
#endif
diff --git a/ndb/include/portlib/NdbTCP.h b/ndb/include/portlib/NdbTCP.h
index 308a3833ffd..9ed5b5e7f96 100644
--- a/ndb/include/portlib/NdbTCP.h
+++ b/ndb/include/portlib/NdbTCP.h
@@ -95,6 +95,8 @@ int Ndb_getInAddr(struct in_addr * dst, const char *address);
int NDB_CLOSE_SOCKET(int fd);
#endif
+int Ndb_check_socket_hup(NDB_SOCKET_TYPE sock);
+
#ifdef __cplusplus
}
#endif
diff --git a/ndb/include/transporter/TransporterCallback.hpp b/ndb/include/transporter/TransporterCallback.hpp
index 9f910f31728..f2432edd394 100644
--- a/ndb/include/transporter/TransporterCallback.hpp
+++ b/ndb/include/transporter/TransporterCallback.hpp
@@ -341,5 +341,8 @@ enum TransporterError {
*/
void
reportError(void * callbackObj, NodeId nodeId, TransporterError errorCode);
+
+void
+transporter_recv_from(void* callbackObj, NodeId node);
#endif
diff --git a/ndb/include/transporter/TransporterDefinitions.hpp b/ndb/include/transporter/TransporterDefinitions.hpp
index d4763ba4c37..e9c5ffa2c80 100644
--- a/ndb/include/transporter/TransporterDefinitions.hpp
+++ b/ndb/include/transporter/TransporterDefinitions.hpp
@@ -49,74 +49,50 @@ enum SendStatus {
const Uint32 MAX_MESSAGE_SIZE = (12+4+4+(4*25)+(3*4)+4*4096);
/**
- * TCP Transporter Configuration
+ * TransporterConfiguration
+ *
+ * Used for setting up a transporter. The union member holds the
+ * information specific to each transporter type.
*/
-struct TCP_TransporterConfiguration {
- Uint32 port;
+struct TransporterConfiguration {
+ Int32 s_port; // negative port number implies dynamic port
const char *remoteHostName;
const char *localHostName;
NodeId remoteNodeId;
NodeId localNodeId;
- Uint32 sendBufferSize; // Size of SendBuffer of priority B
- Uint32 maxReceiveSize; // Maximum no of bytes to receive
+ NodeId serverNodeId;
bool checksum;
bool signalId;
-};
-
-/**
- * SHM Transporter Configuration
- */
-struct SHM_TransporterConfiguration {
- Uint32 port;
- const char *remoteHostName;
- const char *localHostName;
- NodeId remoteNodeId;
- NodeId localNodeId;
- bool checksum;
- bool signalId;
-
- Uint32 shmKey;
- Uint32 shmSize;
- int signum;
-};
-
-/**
- * OSE Transporter Configuration
- */
-struct OSE_TransporterConfiguration {
- const char *remoteHostName;
- const char *localHostName;
- NodeId remoteNodeId;
- NodeId localNodeId;
- bool checksum;
- bool signalId;
-
- Uint32 prioASignalSize;
- Uint32 prioBSignalSize;
- Uint32 receiveBufferSize; // In number of signals
-};
-
-/**
- * SCI Transporter Configuration
- */
-struct SCI_TransporterConfiguration {
- const char *remoteHostName;
- const char *localHostName;
- Uint32 port;
- Uint32 sendLimit; // Packet size
- Uint32 bufferSize; // Buffer size
-
- Uint32 nLocalAdapters; // 1 or 2, the number of adapters on local host
-
- Uint32 remoteSciNodeId0; // SCInodeId for adapter 1
- Uint32 remoteSciNodeId1; // SCInodeId for adapter 2
-
- NodeId localNodeId; // Local node Id
- NodeId remoteNodeId; // Remote node Id
-
- bool checksum;
- bool signalId;
-
+ bool isMgmConnection; // is a mgm connection, requires transforming
+
+ union { // Transporter specific configuration information
+
+ struct {
+ Uint32 sendBufferSize; // Size of SendBuffer of priority B
+ Uint32 maxReceiveSize; // Maximum no of bytes to receive
+ } tcp;
+
+ struct {
+ Uint32 shmKey;
+ Uint32 shmSize;
+ int signum;
+ } shm;
+
+ struct {
+ Uint32 prioASignalSize;
+ Uint32 prioBSignalSize;
+ } ose;
+
+ struct {
+ Uint32 sendLimit; // Packet size
+ Uint32 bufferSize; // Buffer size
+
+ Uint32 nLocalAdapters; // 1 or 2, the number of adapters on local host
+
+ Uint32 remoteSciNodeId0; // SCInodeId for adapter 1
+ Uint32 remoteSciNodeId1; // SCInodeId for adapter 2
+ } sci;
+ };
};
struct SignalHeader {
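
Editorial sketch (not part of the patch): how the unified TransporterConfiguration replaces the old TCP_TransporterConfiguration. Host names, node ids and buffer sizes below are placeholders.

// Editorial sketch only; fills the new structure for a TCP transporter.
#include "TransporterDefinitions.hpp"

static void fill_tcp_config(TransporterConfiguration &conf)
{
  conf.s_port          = -1;            // negative port => dynamic port
  conf.remoteHostName  = "ndb-host-2";
  conf.localHostName   = "ndb-host-1";
  conf.remoteNodeId    = 2;
  conf.localNodeId     = 1;
  conf.serverNodeId    = 2;
  conf.checksum        = true;
  conf.signalId        = true;
  conf.isMgmConnection = false;

  // Transporter-specific part of the union
  conf.tcp.sendBufferSize = 262144;     // priority B send buffer, bytes
  conf.tcp.maxReceiveSize = 65536;      // maximum no of bytes to receive
  // The filled struct would then be passed to
  // TransporterRegistry::createTCPTransporter(&conf).
}
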
diff --git a/ndb/include/transporter/TransporterRegistry.hpp b/ndb/include/transporter/TransporterRegistry.hpp
index 410f3e1dc12..0bb9733e8c4 100644
--- a/ndb/include/transporter/TransporterRegistry.hpp
+++ b/ndb/include/transporter/TransporterRegistry.hpp
@@ -30,9 +30,12 @@
#include "TransporterDefinitions.hpp"
#include <SocketServer.hpp>
+#include <SocketClient.hpp>
#include <NdbTCP.h>
+#include <mgmapi/mgmapi.h>
+
// A transporter is always in an IOState.
// NoHalt is used initially and as long as it is no restrictions on
// sending or receiving.
@@ -97,7 +100,15 @@ public:
TransporterRegistry(void * callback = 0 ,
unsigned maxTransporters = MAX_NTRANSPORTERS,
unsigned sizeOfLongSignalMemory = 100);
-
+
+ /**
+   * This handle is used in the client connect thread
+   * to fetch information on dynamic ports. The old handle
+   * (if set) is destroyed; the new one is destroyed by the destructor.
+ */
+ void set_mgm_handle(NdbMgmHandle h);
+ NdbMgmHandle get_mgm_handle(void) { return m_mgm_handle; };
+
bool init(NodeId localNodeId);
/**
@@ -105,6 +116,20 @@ public:
*/
bool connect_server(NDB_SOCKET_TYPE sockfd);
+ bool connect_client(NdbMgmHandle *h);
+
+ /**
+   * Given a SocketClient, creates an NdbMgmHandle, turns it into a transporter
+ * and returns the socket.
+ */
+ NDB_SOCKET_TYPE connect_ndb_mgmd(SocketClient *sc);
+
+ /**
+ * Given a connected NdbMgmHandle, turns it into a transporter
+ * and returns the socket.
+ */
+ NDB_SOCKET_TYPE connect_ndb_mgmd(NdbMgmHandle *h);
+
/**
* Remove all transporters
*/
@@ -174,10 +199,10 @@ public:
* started, startServer is called. A transporter of the selected kind
* is created and it is put in the transporter arrays.
*/
- bool createTransporter(struct TCP_TransporterConfiguration * config);
- bool createTransporter(struct SCI_TransporterConfiguration * config);
- bool createTransporter(struct SHM_TransporterConfiguration * config);
- bool createTransporter(struct OSE_TransporterConfiguration * config);
+ bool createTCPTransporter(struct TransporterConfiguration * config);
+ bool createSCITransporter(struct TransporterConfiguration * config);
+ bool createSHMTransporter(struct TransporterConfiguration * config);
+ bool createOSETransporter(struct TransporterConfiguration * config);
/**
* Get free buffer space
@@ -233,11 +258,16 @@ public:
class Transporter_interface {
public:
- unsigned short m_service_port;
+ NodeId m_remote_nodeId;
+ int m_s_service_port; // signed port number
const char *m_interface;
};
Vector<Transporter_interface> m_transporter_interface;
- void add_transporter_interface(const char *interf, unsigned short port);
+ void add_transporter_interface(NodeId remoteNodeId, const char *interf,
+ int s_port); // signed port. <0 is dynamic
+ Transporter* get_transporter(NodeId nodeId);
+ NodeId get_localNodeId() { return localNodeId; };
+
struct in_addr get_connect_address(NodeId node_id) const;
protected:
@@ -245,6 +275,8 @@ protected:
private:
void * callbackObj;
+ NdbMgmHandle m_mgm_handle;
+
struct NdbThread *m_start_clients_thread;
bool m_run_start_clients_thread;
diff --git a/ndb/include/util/Base64.hpp b/ndb/include/util/Base64.hpp
index 1156636eec8..f4b11ad9214 100644
--- a/ndb/include/util/Base64.hpp
+++ b/ndb/include/util/Base64.hpp
@@ -21,6 +21,7 @@
#include <BaseString.hpp>
int base64_encode(const UtilBuffer &src, BaseString &dst);
+int base64_encode(const void * s, size_t src_len, BaseString &dst);
int base64_decode(const BaseString &src, UtilBuffer &dst);
int base64_decode(const char * s, size_t len, UtilBuffer &dst);
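
Editorial note (not part of the patch): the new overload encodes a raw byte range without first wrapping it in a UtilBuffer; the 0-on-success return convention is assumed from the existing overload.

// Editorial sketch only.
#include "Base64.hpp"

static int encode_bytes(const void *buf, size_t len, BaseString &out)
{
  // returns 0 on success (assumed convention, matching the UtilBuffer overload)
  return base64_encode(buf, len, out);
}
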
diff --git a/ndb/include/util/Bitmask.hpp b/ndb/include/util/Bitmask.hpp
index 19aa604e4a1..ade57a5ee57 100644
--- a/ndb/include/util/Bitmask.hpp
+++ b/ndb/include/util/Bitmask.hpp
@@ -131,10 +131,26 @@ public:
static void setField(unsigned size, Uint32 data[],
unsigned pos, unsigned len, Uint32 val);
+
+ /**
+ * getField - Get bitfield at given position and length
+ */
+ static void getField(unsigned size, const Uint32 data[],
+ unsigned pos, unsigned len, Uint32 dst[]);
+
+ /**
+ * setField - Set bitfield at given position and length
+ */
+ static void setField(unsigned size, Uint32 data[],
+ unsigned pos, unsigned len, const Uint32 src[]);
+
/**
* getText - Return as hex-digits (only for debug routines).
*/
static char* getText(unsigned size, const Uint32 data[], char* buf);
+private:
+ static void getFieldImpl(const Uint32 data[], unsigned, unsigned, Uint32 []);
+ static void setFieldImpl(Uint32 data[], unsigned, unsigned, const Uint32 []);
};
inline bool
@@ -794,4 +810,45 @@ public:
Bitmask() { this->clear();}
};
+inline void
+BitmaskImpl::getField(unsigned size, const Uint32 src[],
+ unsigned pos, unsigned len, Uint32 dst[])
+{
+ assert(pos + len < (size << 5));
+
+ src += (pos >> 5);
+ Uint32 offset = pos & 31;
+ * dst = (* src >> offset) & (len >= 32 ? ~0 : (1 << len) - 1);
+
+ if(offset + len <= 32)
+ {
+ return;
+ }
+ Uint32 used = (32 - offset);
+ assert(len > used);
+ getFieldImpl(src+1, used & 31, len-used, dst+(used >> 5));
+}
+
+inline void
+BitmaskImpl::setField(unsigned size, Uint32 dst[],
+ unsigned pos, unsigned len, const Uint32 src[])
+{
+ assert(pos + len < (size << 5));
+
+ dst += (pos >> 5);
+ Uint32 offset = pos & 31;
+ Uint32 mask = (len >= 32 ? ~0 : (1 << len) - 1) << offset;
+
+ * dst = (* dst & ~mask) | ((*src << offset) & mask);
+
+ if(offset + len <= 32)
+ {
+ return;
+ }
+ Uint32 used = (32 - offset);
+ assert(len > used);
+ setFieldImpl(dst+1, used & 31, len-used, src+(used >> 5));
+}
+
+
#endif
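
Editorial sketch (not part of the patch): exercising the single-word path of the new array-valued getField/setField shown inline above. Positions and values are arbitrary.

// Editorial sketch only. Stores a 12-bit value at bit position 4 of a
// 4-word (128-bit) array and reads it back; stays within one 32-bit word.
#include "Bitmask.hpp"
#include <assert.h>

static void bitfield_roundtrip()
{
  Uint32 data[4] = { 0, 0, 0, 0 };
  Uint32 src[1]  = { 0xABC };     // 12 significant bits
  Uint32 dst[1]  = { 0 };

  BitmaskImpl::setField(4, data, 4, 12, src);
  BitmaskImpl::getField(4, data, 4, 12, dst);
  assert(dst[0] == 0xABC);
}
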
diff --git a/ndb/include/util/NdbSqlUtil.hpp b/ndb/include/util/NdbSqlUtil.hpp
index 5b27bd4e0c4..3e98dcd1805 100644
--- a/ndb/include/util/NdbSqlUtil.hpp
+++ b/ndb/include/util/NdbSqlUtil.hpp
@@ -20,34 +20,41 @@
#include <ndb_global.h>
#include <kernel/ndb_limits.h>
+struct charset_info_st;
+typedef struct charset_info_st CHARSET_INFO;
+
class NdbSqlUtil {
public:
/**
- * Compare strings, optionally with padded semantics. Returns
- * negative (less), zero (equal), or positive (greater).
- */
- static int char_compare(const char* s1, unsigned n1,
- const char* s2, unsigned n2, bool padded);
-
- /**
- * Like operator, optionally with padded semantics. Returns true or
- * false.
+ * Compare attribute values. Returns -1, 0, +1 for less, equal,
+ * greater, respectively. Parameters are pointers to values and their
+ * lengths in bytes. The lengths can differ.
+ *
+ * First value is a full value but second value can be partial. If
+ * the partial value is not enough to determine the result, CmpUnknown
+ * will be returned. A shorter second value is not necessarily
+ * partial. Partial values are allowed only for types where prefix
+ * comparison is possible (basically, binary strings).
+ *
+ * First parameter is a pointer to type specific extra info. Char
+ * types receive CHARSET_INFO in it.
+ *
+ * If a value cannot be parsed, it compares like NULL i.e. less than
+ * any valid value.
*/
- static bool char_like(const char* s1, unsigned n1,
- const char* s2, unsigned n2, bool padded);
+ typedef int Cmp(const void* info, const void* p1, unsigned n1, const void* p2, unsigned n2, bool full);
/**
- * Compare kernel attribute values. Returns -1, 0, +1 for less,
- * equal, greater, respectively. Parameters are pointers to values,
- * full attribute size in words, and size of available data in words.
- * There is also pointer to type specific extra info. Char types
- * receive CHARSET_INFO in it.
+ * Prototype for "like" comparison. Defined for string types. Second
+ * argument must have same type-specific format. Returns 0 on match,
+ * +1 on no match, and -1 on bad data.
+ *
+ * Uses default special chars ( \ % _ ).
*
- * If available size is less than full size, CmpUnknown may be
- * returned. If a value cannot be parsed, it compares like NULL i.e.
- * less than any valid value.
+ * TODO convert special chars to the cs so that ucs2 etc works
+ * TODO allow user-defined escape ( \ )
*/
- typedef int Cmp(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size);
+ typedef int Like(const void* info, const void* p1, unsigned n1, const void* p2, unsigned n2);
enum CmpResult {
CmpLess = -1,
@@ -56,41 +63,43 @@ public:
CmpUnknown = 2 // insufficient partial data
};
- /**
- * Kernel data types. Must match m_typeList in NdbSqlUtil.cpp.
- * Now also must match types in NdbDictionary.
- */
struct Type {
enum Enum {
- Undefined = 0, // Undefined
- Tinyint, // 8 bit
- Tinyunsigned, // 8 bit
- Smallint, // 16 bit
- Smallunsigned, // 16 bit
- Mediumint, // 24 bit
- Mediumunsigned, // 24 bit
- Int, // 32 bit
- Unsigned, // 32 bit
- Bigint, // 64 bit
- Bigunsigned, // 64 Bit
- Float, // 32-bit float
- Double, // 64-bit float
- Olddecimal, // Precision, Scale
- Char, // Len
- Varchar, // Max len
- Binary, // Len
- Varbinary, // Max len
- Datetime, // Precision down to 1 sec (size 8 bytes)
- Date, // Precision down to 1 day (size 4 bytes)
- Blob, // Blob
- Text, // Text blob
- Time = 25, // Time without date
- Year = 26, // Year (size 1 byte)
- Timestamp = 27, // Unix seconds (uint32)
- Olddecimalunsigned = 28
+ Undefined = NDB_TYPE_UNDEFINED,
+ Tinyint = NDB_TYPE_TINYINT,
+ Tinyunsigned = NDB_TYPE_TINYUNSIGNED,
+ Smallint = NDB_TYPE_SMALLINT,
+ Smallunsigned = NDB_TYPE_SMALLUNSIGNED,
+ Mediumint = NDB_TYPE_MEDIUMINT,
+ Mediumunsigned = NDB_TYPE_MEDIUMUNSIGNED,
+ Int = NDB_TYPE_INT,
+ Unsigned = NDB_TYPE_UNSIGNED,
+ Bigint = NDB_TYPE_BIGINT,
+ Bigunsigned = NDB_TYPE_BIGUNSIGNED,
+ Float = NDB_TYPE_FLOAT,
+ Double = NDB_TYPE_DOUBLE,
+ Olddecimal = NDB_TYPE_OLDDECIMAL,
+ Char = NDB_TYPE_CHAR,
+ Varchar = NDB_TYPE_VARCHAR,
+ Binary = NDB_TYPE_BINARY,
+ Varbinary = NDB_TYPE_VARBINARY,
+ Datetime = NDB_TYPE_DATETIME,
+ Date = NDB_TYPE_DATE,
+ Blob = NDB_TYPE_BLOB,
+ Text = NDB_TYPE_TEXT,
+ Bit = NDB_TYPE_BIT,
+ Longvarchar = NDB_TYPE_LONGVARCHAR,
+ Longvarbinary = NDB_TYPE_LONGVARBINARY,
+ Time = NDB_TYPE_TIME,
+ Year = NDB_TYPE_YEAR,
+ Timestamp = NDB_TYPE_TIMESTAMP,
+ Olddecimalunsigned = NDB_TYPE_OLDDECIMALUNSIGNED,
+ Decimal = NDB_TYPE_DECIMAL,
+ Decimalunsigned = NDB_TYPE_DECIMALUNSIGNED
};
- Enum m_typeId;
+ Enum m_typeId; // redundant
Cmp* m_cmp; // comparison method
+ Like* m_like; // "like" comparison method
};
/**
@@ -99,16 +108,30 @@ public:
static const Type& getType(Uint32 typeId);
/**
- * Get type by id but replace char type by corresponding binary type.
+ * Get the normalized type used in hashing and key comparisons.
+ * Maps all string types to Binary. This includes Var* strings
+ * because strxfrm result is padded to fixed (maximum) length.
*/
static const Type& getTypeBinary(Uint32 typeId);
/**
* Check character set.
*/
- static bool usable_in_pk(Uint32 typeId, const void* cs);
- static bool usable_in_hash_index(Uint32 typeId, const void* cs);
- static bool usable_in_ordered_index(Uint32 typeId, const void* cs);
+ static bool usable_in_pk(Uint32 typeId, const void* info);
+ static bool usable_in_hash_index(Uint32 typeId, const void* info);
+ static bool usable_in_ordered_index(Uint32 typeId, const void* info);
+
+ /**
+ * Get number of length bytes and length from variable length string.
+ * Returns false on error (invalid data). For other types returns
+ * zero length bytes and the fixed attribute length.
+ */
+ static bool get_var_length(Uint32 typeId, const void* p, unsigned attrlen, Uint32& lb, Uint32& len);
+
+ /**
+ * Temporary workaround for bug#7284.
+ */
+ static int strnxfrm_bug7284(CHARSET_INFO* cs, unsigned char* dst, unsigned dstLen, const unsigned char*src, unsigned srcLen);
/**
* Compare decimal numbers.
@@ -144,10 +167,22 @@ private:
static Cmp cmpDate;
static Cmp cmpBlob;
static Cmp cmpText;
+ static Cmp cmpBit;
+ static Cmp cmpLongvarchar;
+ static Cmp cmpLongvarbinary;
static Cmp cmpTime;
static Cmp cmpYear;
static Cmp cmpTimestamp;
static Cmp cmpOlddecimalunsigned;
+ static Cmp cmpDecimal;
+ static Cmp cmpDecimalunsigned;
+ //
+ static Like likeChar;
+ static Like likeBinary;
+ static Like likeVarchar;
+ static Like likeVarbinary;
+ static Like likeLongvarchar;
+ static Like likeLongvarbinary;
};
#endif
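
Editorial sketch (not part of the patch): invoking the new value-pointer Cmp signature through the type table. Passing 0 as the type-specific info for integer types is an assumption based on the comment above, which reserves CHARSET_INFO for the char types.

// Editorial sketch only. Compares two Uint32 values through the
// table-driven entry point.
#include "NdbSqlUtil.hpp"

static int compare_unsigned(Uint32 a, Uint32 b)
{
  const NdbSqlUtil::Type &t = NdbSqlUtil::getType(NdbSqlUtil::Type::Unsigned);
  // both sides are full values, so CmpUnknown cannot be returned here
  return (*t.m_cmp)(0, &a, sizeof(a), &b, sizeof(b), true);
}
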
diff --git a/ndb/include/util/SimpleProperties.hpp b/ndb/include/util/SimpleProperties.hpp
index 356f3406f38..438426fb62b 100644
--- a/ndb/include/util/SimpleProperties.hpp
+++ b/ndb/include/util/SimpleProperties.hpp
@@ -172,6 +172,8 @@ public:
virtual bool reset() = 0;
virtual bool putWord(Uint32 val) = 0;
virtual bool putWords(const Uint32 * src, Uint32 len) = 0;
+ private:
+ bool add(const char* value, int len);
};
};
@@ -211,7 +213,7 @@ private:
};
/**
- * Writer for linear memory
+ * Writer for UtilBuffer
*/
class UtilBufferWriter : public SimpleProperties::Writer {
public:
diff --git a/ndb/include/util/SocketClient.hpp b/ndb/include/util/SocketClient.hpp
index de9a081464a..bf1ad7d45d6 100644
--- a/ndb/include/util/SocketClient.hpp
+++ b/ndb/include/util/SocketClient.hpp
@@ -31,6 +31,12 @@ public:
SocketClient(const char *server_name, unsigned short port, SocketAuthenticator *sa = 0);
~SocketClient();
bool init();
+ void set_port(unsigned short port) {
+ m_port = port;
+ m_servaddr.sin_port = htons(m_port);
+ };
+ unsigned short get_port() { return m_port; };
+ char *get_server_name() { return m_server_name; };
NDB_SOCKET_TYPE connect();
bool close();
};
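
Editorial sketch (not part of the patch): with the new accessors a SocketClient can be retargeted to a dynamically assigned port before connecting; the port value is a placeholder and would in practice come from the management server (see the TransporterRegistry changes above).

// Editorial sketch only.
#include "SocketClient.hpp"

static NDB_SOCKET_TYPE connect_on_port(SocketClient &sc, unsigned short dynamic_port)
{
  sc.set_port(dynamic_port);   // updates m_port and m_servaddr.sin_port
  return sc.connect();
}
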
diff --git a/ndb/include/util/SocketServer.hpp b/ndb/include/util/SocketServer.hpp
index ee2dd31c41f..4c37e63adf0 100644
--- a/ndb/include/util/SocketServer.hpp
+++ b/ndb/include/util/SocketServer.hpp
@@ -89,7 +89,7 @@ public:
* bind & listen
* Returns false if no success
*/
- bool setup(Service *, unsigned short port, const char * pinterface = 0);
+ bool setup(Service *, unsigned short *port, const char * pinterface = 0);
/**
* start/stop the server
diff --git a/ndb/include/util/Vector.hpp b/ndb/include/util/Vector.hpp
index f60817dab67..480dddf8243 100644
--- a/ndb/include/util/Vector.hpp
+++ b/ndb/include/util/Vector.hpp
@@ -61,6 +61,10 @@ Vector<T>::Vector(int i){
template<class T>
Vector<T>::~Vector(){
delete[] m_items;
+ // safety for placement new usage
+ m_items = 0;
+ m_size = 0;
+ m_arraySize = 0;
}
template<class T>
@@ -174,6 +178,10 @@ MutexVector<T>::MutexVector(int i){
template<class T>
MutexVector<T>::~MutexVector(){
delete[] m_items;
+ // safety for placement new usage
+ m_items = 0;
+ m_size = 0;
+ m_arraySize = 0;
}
template<class T>
diff --git a/ndb/include/util/md5_hash.hpp b/ndb/include/util/md5_hash.hpp
index 4c3cf239881..b79dce3b5a9 100644
--- a/ndb/include/util/md5_hash.hpp
+++ b/ndb/include/util/md5_hash.hpp
@@ -20,6 +20,15 @@
#include <ndb_types.h>
// External declaration of hash function
-Uint32 md5_hash(const Uint64* keybuf, Uint32 no_of_32_words);
+void md5_hash(Uint32 result[4], const Uint64* keybuf, Uint32 no_of_32_words);
+
+inline
+Uint32
+md5_hash(const Uint64* keybuf, Uint32 no_of_32_words)
+{
+ Uint32 result[4];
+ md5_hash(result, keybuf, no_of_32_words);
+ return result[0];
+}
#endif
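
Editorial sketch (not part of the patch) contrasting the new four-word result with the inline compatibility wrapper; the key contents are arbitrary.

// Editorial sketch only. Two Uint64 words = 16 bytes = 4 * 32-bit words.
#include "md5_hash.hpp"

static Uint32 hash_example()
{
  Uint64 keybuf[2] = { 0x0123456789abcdefULL, 0xfedcba9876543210ULL };

  Uint32 full[4];
  md5_hash(full, keybuf, 4);        // new interface: full 4-word result

  return md5_hash(keybuf, 4);       // wrapper: first result word only
}
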
diff --git a/ndb/include/util/ndb_opts.h b/ndb/include/util/ndb_opts.h
index 462d9996582..787c32f06fd 100644
--- a/ndb/include/util/ndb_opts.h
+++ b/ndb/include/util/ndb_opts.h
@@ -25,12 +25,21 @@
#include <ndb_opt_defaults.h>
#define NDB_STD_OPTS_VARS \
-const char *opt_connect_str= 0;\
my_bool opt_ndb_optimized_node_selection
+int opt_ndb_nodeid;
bool opt_endinfo= 0;
my_bool opt_ndb_shm;
my_bool opt_core;
+const char *opt_ndb_connectstring= 0;
+const char *opt_connect_str= 0;
+const char *opt_ndb_mgmd= 0;
+char opt_ndb_constrbuf[1024];
+unsigned opt_ndb_constrbuf_len= 0;
+
+#ifndef DBUG_OFF
+const char *opt_debug= 0;
+#endif
#define OPT_NDB_CONNECTSTRING 'c'
#if defined VM_TRACE && ( ! ( defined NDB_OSE || defined NDB_SOFTOSE) )
@@ -50,8 +59,17 @@ my_bool opt_core;
"Set connect string for connecting to ndb_mgmd. " \
"Syntax: \"[nodeid=<id>;][host=]<hostname>[:<port>]\". " \
"Overides specifying entries in NDB_CONNECTSTRING and Ndb.cfg", \
- (gptr*) &opt_connect_str, (gptr*) &opt_connect_str, 0, \
+ (gptr*) &opt_ndb_connectstring, (gptr*) &opt_ndb_connectstring, \
+ 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },\
+ { "ndb-mgmd-host", OPT_NDB_MGMD, \
+ "Set host and port for connecting to ndb_mgmd. " \
+ "Syntax: <hostname>[:<port>].", \
+ (gptr*) &opt_ndb_mgmd, (gptr*) &opt_ndb_mgmd, 0, \
GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },\
+ { "ndb-nodeid", OPT_NDB_NODEID, \
+ "Set node id for this node.", \
+ (gptr*) &opt_ndb_nodeid, (gptr*) &opt_ndb_nodeid, 0, \
+ GET_INT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },\
{ "ndb-shm", OPT_NDB_SHM,\
"Allow optimizing using shared memory connections when available",\
(gptr*) &opt_ndb_shm, (gptr*) &opt_ndb_shm, 0,\
@@ -62,8 +80,8 @@ my_bool opt_core;
(gptr*) &opt_ndb_optimized_node_selection, 0,\
GET_BOOL, OPT_ARG, 1, 0, 0, 0, 0, 0},\
{ "connect-string", OPT_NDB_CONNECTSTRING, "same as --ndb-connectstring",\
- (gptr*) &opt_connect_str, (gptr*) &opt_connect_str, 0,\
- GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },\
+ (gptr*) &opt_ndb_connectstring, (gptr*) &opt_ndb_connectstring, \
+ 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },\
{ "core-file", OPT_WANT_CORE, "Write core on errors.",\
(gptr*) &opt_core, (gptr*) &opt_core, 0,\
GET_BOOL, NO_ARG, OPT_WANT_CORE_DEFAULT, 0, 0, 0, 0, 0}
@@ -71,7 +89,8 @@ my_bool opt_core;
#ifndef DBUG_OFF
#define NDB_STD_OPTS(prog_name) \
{ "debug", '#', "Output debug log. Often this is 'd:t:o,filename'.", \
- 0, 0, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0 }, \
+ (gptr*) &opt_debug, (gptr*) &opt_debug, \
+ 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0 }, \
NDB_STD_OPTS_COMMON
#else
#define NDB_STD_OPTS(prog_name) NDB_STD_OPTS_COMMON
@@ -90,22 +109,30 @@ enum ndb_std_options {
OPT_NDB_SHM_SIGNUM,
OPT_NDB_OPTIMIZED_NODE_SELECTION,
OPT_WANT_CORE,
+ OPT_NDB_MGMD,
+ OPT_NDB_NODEID,
NDB_STD_OPTIONS_LAST /* should always be last in this enum */
};
static my_bool
ndb_std_get_one_option(int optid,
const struct my_option *opt __attribute__((unused)),
- const char *argument)
+ char *argument)
{
switch (optid) {
+#ifndef DBUG_OFF
case '#':
- if (argument)
+ if (opt_debug)
{
- DBUG_PUSH(argument);
+ DBUG_PUSH(opt_debug);
+ }
+ else
+ {
+ DBUG_PUSH("d:t");
}
opt_endinfo= 1;
break;
+#endif
case 'V':
ndb_std_print_version();
exit(0);
@@ -122,6 +149,29 @@ ndb_std_get_one_option(int optid,
#endif
}
break;
+ case OPT_NDB_MGMD:
+ case OPT_NDB_NODEID:
+ {
+ int len= my_snprintf(opt_ndb_constrbuf+opt_ndb_constrbuf_len,
+ sizeof(opt_ndb_constrbuf)-opt_ndb_constrbuf_len,
+ "%s%s%s",opt_ndb_constrbuf_len > 0 ? ",":"",
+ optid == OPT_NDB_NODEID ? "nodeid=" : "",
+ argument);
+ opt_ndb_constrbuf_len+= len;
+ }
+ /* fall through to add the connectstring to the end
+ * and set opt_ndbcluster_connectstring
+ */
+ case OPT_NDB_CONNECTSTRING:
+ if (opt_ndb_connectstring && opt_ndb_connectstring[0])
+ my_snprintf(opt_ndb_constrbuf+opt_ndb_constrbuf_len,
+ sizeof(opt_ndb_constrbuf)-opt_ndb_constrbuf_len,
+ "%s%s", opt_ndb_constrbuf_len > 0 ? ",":"",
+ opt_ndb_connectstring);
+ else
+ opt_ndb_constrbuf[opt_ndb_constrbuf_len]= 0;
+ opt_connect_str= opt_ndb_constrbuf;
+ break;
}
return 0;
}
diff --git a/ndb/examples/Makefile b/ndb/ndbapi-examples/Makefile
index 0294632e23d..965dc3ec29f 100644
--- a/ndb/examples/Makefile
+++ b/ndb/ndbapi-examples/Makefile
@@ -1,12 +1,11 @@
--include .defs.mk
-
-#ifneq ($(C++),)
-#OPTS = CC=$(CC) CXX=$(C++)
-#endif
-
-# XXX ndbapi_example4 commented out until fixed
-BIN_DIRS := ndbapi_example1 ndbapi_example2 ndbapi_example3 $(ndbapi_example4) \
- ndbapi_example5 select_all
+BIN_DIRS := ndbapi_simple_example \
+ ndbapi_async_example \
+ ndbapi_async_example1 \
+ ndbapi_retries_example \
+ ndbapi_simple_index_example \
+ ndbapi_event_example \
+ ndbapi_scan_example \
+ mgmapi_logevent_example
bins: $(patsubst %, _bins_%, $(BIN_DIRS))
@@ -24,4 +23,3 @@ clean_dep: clean
cleanall: clean
tidy: clean
distclean: clean
-
diff --git a/ndb/ndbapi-examples/mgmapi_logevent_example/Makefile b/ndb/ndbapi-examples/mgmapi_logevent_example/Makefile
new file mode 100644
index 00000000000..c1ca32dfe17
--- /dev/null
+++ b/ndb/ndbapi-examples/mgmapi_logevent_example/Makefile
@@ -0,0 +1,23 @@
+TARGET = mgmapi_logevent
+SRCS = $(TARGET).cpp
+OBJS = $(TARGET).o
+CXX = g++
+CFLAGS = -c -Wall -fno-rtti -fno-exceptions
+CXXFLAGS =
+DEBUG =
+LFLAGS = -Wall
+TOP_SRCDIR = ../../..
+INCLUDE_DIR = $(TOP_SRCDIR)
+LIB_DIR = -L$(TOP_SRCDIR)/ndb/src/.libs \
+ -L$(TOP_SRCDIR)/libmysql_r/.libs \
+ -L$(TOP_SRCDIR)/mysys
+SYS_LIB =
+
+$(TARGET): $(OBJS)
+ $(CXX) $(CXXFLAGS) $(LFLAGS) $(LIB_DIR) $(OBJS) -lndbclient -lmysqlclient_r -lmysys -lz $(SYS_LIB) -o $(TARGET)
+
+$(TARGET).o: $(SRCS)
+ $(CXX) $(CFLAGS) -I$(INCLUDE_DIR)/include -I$(INCLUDE_DIR)/ndb/include -I$(INCLUDE_DIR)/ndb/include/mgmapi -I$(INCLUDE_DIR)/ndb/include/ndbapi $(SRCS)
+
+clean:
+ rm -f *.o $(TARGET)
diff --git a/ndb/ndbapi-examples/mgmapi_logevent_example/mgmapi_logevent.cpp b/ndb/ndbapi-examples/mgmapi_logevent_example/mgmapi_logevent.cpp
new file mode 100644
index 00000000000..5ec1fba6314
--- /dev/null
+++ b/ndb/ndbapi-examples/mgmapi_logevent_example/mgmapi_logevent.cpp
@@ -0,0 +1,140 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#include <mysql.h>
+#include <ndbapi/NdbApi.hpp>
+#include <mgmapi.h>
+#include <stdio.h>
+
+/*
+ * export LD_LIBRARY_PATH=../../../libmysql_r/.libs:../../../ndb/src/.libs
+ */
+
+#define MGMERROR(h) \
+{ \
+ fprintf(stderr, "code: %d msg: %s\n", \
+ ndb_mgm_get_latest_error(h), \
+ ndb_mgm_get_latest_error_msg(h)); \
+ exit(-1); \
+}
+
+#define LOGEVENTERROR(h) \
+{ \
+ fprintf(stderr, "code: %d msg: %s\n", \
+ ndb_logevent_get_latest_error(h), \
+ ndb_logevent_get_latest_error_msg(h)); \
+ exit(-1); \
+}
+
+int main()
+{
+ NdbMgmHandle h;
+ NdbLogEventHandle le;
+ int filter[] = { 15, NDB_MGM_EVENT_CATEGORY_BACKUP,
+ 15, NDB_MGM_EVENT_CATEGORY_CONNECTION,
+ 15, NDB_MGM_EVENT_CATEGORY_NODE_RESTART,
+ 15, NDB_MGM_EVENT_CATEGORY_STARTUP,
+ 15, NDB_MGM_EVENT_CATEGORY_ERROR,
+ 0 };
+ struct ndb_logevent event;
+
+ ndb_init();
+
+ h= ndb_mgm_create_handle();
+ if ( h == 0)
+ {
+ printf("Unable to create handle\n");
+ exit(-1);
+ }
+ if (ndb_mgm_connect(h,0,0,0)) MGMERROR(h);
+
+ le= ndb_mgm_create_logevent_handle(h, filter);
+ if ( le == 0 ) MGMERROR(h);
+
+ while (1)
+ {
+ int timeout= 5000;
+ int r= ndb_logevent_get_next(le,&event,timeout);
+ if (r == 0)
+ printf("No event within %d milliseconds\n", timeout);
+ else if (r < 0)
+ LOGEVENTERROR(le)
+ else
+ {
+ switch (event.type) {
+ case NDB_LE_BackupStarted:
+ printf("Node %d: BackupStarted\n", event.source_nodeid);
+ printf(" Starting node ID: %d\n", event.BackupStarted.starting_node);
+ printf(" Backup ID: %d\n", event.BackupStarted.backup_id);
+ break;
+ case NDB_LE_BackupCompleted:
+ printf("Node %d: BackupCompleted\n", event.source_nodeid);
+ printf(" Backup ID: %d\n", event.BackupStarted.backup_id);
+ break;
+ case NDB_LE_BackupAborted:
+ printf("Node %d: BackupAborted\n", event.source_nodeid);
+ break;
+ case NDB_LE_BackupFailedToStart:
+ printf("Node %d: BackupFailedToStart\n", event.source_nodeid);
+ break;
+
+ case NDB_LE_NodeFailCompleted:
+ printf("Node %d: NodeFailCompleted\n", event.source_nodeid);
+ break;
+ case NDB_LE_ArbitResult:
+ printf("Node %d: ArbitResult\n", event.source_nodeid);
+ printf(" code %d, arbit_node %d\n",
+ event.ArbitResult.code & 0xffff,
+ event.ArbitResult.arbit_node);
+ break;
+ case NDB_LE_DeadDueToHeartbeat:
+ printf("Node %d: DeadDueToHeartbeat\n", event.source_nodeid);
+ printf(" node %d\n", event.DeadDueToHeartbeat.node);
+ break;
+
+ case NDB_LE_Connected:
+ printf("Node %d: Connected\n", event.source_nodeid);
+ printf(" node %d\n", event.Connected.node);
+ break;
+ case NDB_LE_Disconnected:
+ printf("Node %d: Disconnected\n", event.source_nodeid);
+ printf(" node %d\n", event.Disconnected.node);
+ break;
+ case NDB_LE_NDBStartCompleted:
+ printf("Node %d: StartCompleted\n", event.source_nodeid);
+ printf(" version %d.%d.%d\n",
+ event.NDBStartCompleted.version >> 16 & 0xff,
+ event.NDBStartCompleted.version >> 8 & 0xff,
+ event.NDBStartCompleted.version >> 0 & 0xff);
+ break;
+ case NDB_LE_ArbitState:
+ printf("Node %d: ArbitState\n", event.source_nodeid);
+ printf(" code %d, arbit_node %d\n",
+ event.ArbitState.code & 0xffff,
+            event.ArbitState.arbit_node);
+ break;
+
+ default:
+ break;
+ }
+ }
+ }
+
+ ndb_mgm_destroy_logevent_handle(&le);
+ ndb_mgm_destroy_handle(&h);
+ ndb_end(0);
+ return 0;
+}
diff --git a/ndb/ndbapi-examples/ndbapi_async_example/Makefile b/ndb/ndbapi-examples/ndbapi_async_example/Makefile
new file mode 100644
index 00000000000..55e4a13343f
--- /dev/null
+++ b/ndb/ndbapi-examples/ndbapi_async_example/Makefile
@@ -0,0 +1,23 @@
+TARGET = ndbapi_async
+SRCS = $(TARGET).cpp
+OBJS = $(TARGET).o
+CXX = g++
+CFLAGS = -g -c -Wall -fno-rtti -fno-exceptions
+CXXFLAGS = -g
+DEBUG =
+LFLAGS = -Wall
+TOP_SRCDIR = ../../..
+INCLUDE_DIR = $(TOP_SRCDIR)
+LIB_DIR = -L$(TOP_SRCDIR)/ndb/src/.libs \
+ -L$(TOP_SRCDIR)/libmysql_r/.libs \
+ -L$(TOP_SRCDIR)/mysys -L$(TOP_SRCDIR)/strings
+SYS_LIB =
+
+$(TARGET): $(OBJS)
+ $(CXX) $(CXXFLAGS) $(LFLAGS) $(LIB_DIR) $(OBJS) -lndbclient -lmysqlclient_r -lmysys -lmystrings -lz $(SYS_LIB) -o $(TARGET)
+
+$(TARGET).o: $(SRCS)
+ $(CXX) $(CFLAGS) -I$(INCLUDE_DIR)/include -I$(INCLUDE_DIR)/extra -I$(INCLUDE_DIR)/ndb/include -I$(INCLUDE_DIR)/ndb/include/ndbapi $(SRCS)
+
+clean:
+ rm -f *.o $(TARGET)
diff --git a/ndb/examples/ndbapi_async_example/ndbapi_async.cpp b/ndb/ndbapi-examples/ndbapi_async_example/ndbapi_async.cpp
index 76ce1a8efe3..aa745f4d28d 100644
--- a/ndb/examples/ndbapi_async_example/ndbapi_async.cpp
+++ b/ndb/ndbapi-examples/ndbapi_async_example/ndbapi_async.cpp
@@ -24,10 +24,12 @@
*
* Classes and methods in NDBAPI used in this example:
*
+ * Ndb_cluster_connection
+ * connect()
+ * wait_until_ready()
+ *
* Ndb
* init()
- * waitUntilRead()
- * getDictionary()
* startTransaction()
* closeTransaction()
* sendPollNdb()
@@ -38,23 +40,6 @@
* executeAsynchPrepare()
* getNdbError()
*
- * NdbDictionary::Dictionary
- * getTable()
- * dropTable()
- * createTable()
- * getNdbError()
- *
- * NdbDictionary::Column
- * setName()
- * setType()
- * setLength()
- * setPrimaryKey()
- * setNullable()
- *
- * NdbDictionary::Table
- * setName()
- * addColumn()
- *
* NdbOperation
* insertTuple()
* equal()
@@ -63,33 +48,37 @@
*/
-#include <ndb_global.h>
-
+#include <mysql.h>
+#include <mysqld_error.h>
#include <NdbApi.hpp>
-#include <NdbScanFilter.hpp>
+
#include <iostream> // Used for cout
/**
* Helper sleep function
*/
-int
+static void
milliSleep(int milliseconds){
- int result = 0;
- struct timespec sleeptime;
+ struct timeval sleeptime;
sleeptime.tv_sec = milliseconds / 1000;
- sleeptime.tv_nsec = (milliseconds - (sleeptime.tv_sec * 1000)) * 1000000;
- result = nanosleep(&sleeptime, NULL);
- return result;
+  sleeptime.tv_usec = (milliseconds - (sleeptime.tv_sec * 1000)) * 1000;
+ select(0, 0, 0, 0, &sleeptime);
}
+
/**
* error printout macro
*/
-#define APIERROR(error) \
- { std::cout << "Error in " << __FILE__ << ", line:" << __LINE__ << ", code:" \
- << error.code << ", msg: " << error.message << "." << std::endl; \
- exit(-1); }
-
+#define PRINT_ERROR(code,msg) \
+ std::cout << "Error in " << __FILE__ << ", line: " << __LINE__ \
+ << ", code: " << code \
+ << ", msg: " << msg << "." << std::endl
+#define MYSQLERROR(mysql) { \
+ PRINT_ERROR(mysql_errno(&mysql),mysql_error(&mysql)); \
+ exit(-1); }
+#define APIERROR(error) { \
+ PRINT_ERROR(error.code,error.message); \
+ exit(-1); }
#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL
/**
@@ -106,10 +95,10 @@ typedef struct {
} async_callback_t;
/**
- * Structure used in "free list" to a NdbConnection
+ * Structure used in "free list" to a NdbTransaction
*/
typedef struct {
- NdbConnection* conn;
+ NdbTransaction* conn;
int used;
} transaction_t;
@@ -132,7 +121,7 @@ int populate(Ndb * myNdb, int data, async_callback_t * cbData);
/**
* Error handler.
*/
-bool asynchErrorHandler(NdbConnection * trans, Ndb* ndb);
+bool asynchErrorHandler(NdbTransaction * trans, Ndb* ndb);
/**
* Exit function
@@ -155,9 +144,6 @@ int create_table(Ndb * myNdb);
int tempErrors = 0;
int permErrors = 0;
-/**
- * Helper function for callback(...)
- */
void
closeTransaction(Ndb * ndb , async_callback_t * cb)
{
@@ -171,7 +157,7 @@ closeTransaction(Ndb * ndb , async_callback_t * cb)
* Callback executed when transaction has return from NDB
*/
static void
-callback(int result, NdbConnection* trans, void* aObject)
+callback(int result, NdbTransaction* trans, void* aObject)
{
async_callback_t * cbData = (async_callback_t *)aObject;
if (result<0)
@@ -207,61 +193,26 @@ callback(int result, NdbConnection* trans, void* aObject)
/**
* Create table "GARAGE"
*/
-int create_table(Ndb * myNdb)
+int create_table(MYSQL &mysql)
{
- NdbDictionary::Table myTable;
- NdbDictionary::Column myColumn;
-
- NdbDictionary::Dictionary* myDict = myNdb->getDictionary();
-
- /*********************************************************
- * Create a table named GARAGE if it does not exist *
- *********************************************************/
- if (myDict->getTable("GARAGE") != NULL)
+ while (mysql_query(&mysql,
+ "CREATE TABLE"
+ " GARAGE"
+ " (REG_NO INT UNSIGNED NOT NULL,"
+ " BRAND CHAR(20) NOT NULL,"
+ " COLOR CHAR(20) NOT NULL,"
+ " PRIMARY KEY USING HASH (REG_NO))"
+ " ENGINE=NDB"))
{
- std::cout << "NDB already has example table: GARAGE. "
+ if (mysql_errno(&mysql) != ER_TABLE_EXISTS_ERROR)
+ MYSQLERROR(mysql);
+ std::cout << "MySQL Cluster already has example table: GARAGE. "
<< "Dropping it..." << std::endl;
- if(myDict->dropTable("GARAGE") == -1)
- {
- std::cout << "Failed to drop: GARAGE." << std::endl;
- exit(1);
- }
- }
-
- myTable.setName("GARAGE");
-
-/**
- * Column REG_NO
- */
- myColumn.setName("REG_NO");
- myColumn.setType(NdbDictionary::Column::Unsigned);
- myColumn.setLength(1);
- myColumn.setPrimaryKey(true);
- myColumn.setNullable(false);
- myTable.addColumn(myColumn);
-
-/**
- * Column BRAND
- */
- myColumn.setName("BRAND");
- myColumn.setType(NdbDictionary::Column::Char);
- myColumn.setLength(20);
- myColumn.setPrimaryKey(false);
- myColumn.setNullable(false);
- myTable.addColumn(myColumn);
-
-/**
- * Column COLOR
- */
- myColumn.setName("COLOR");
- myColumn.setType(NdbDictionary::Column::Char);
- myColumn.setLength(20);
- myColumn.setPrimaryKey(false);
- myColumn.setNullable(false);
- myTable.addColumn(myColumn);
-
- if (myDict->createTable(myTable) == -1) {
- APIERROR(myDict->getNdbError());
+ /**************
+ * Drop table *
+ **************/
+ if (mysql_query(&mysql, "DROP TABLE GARAGE"))
+ MYSQLERROR(mysql);
}
return 1;
}
@@ -276,7 +227,7 @@ void asynchExitHandler(Ndb * m_ndb)
/* returns true if is recoverable (temporary),
* false if it is an error that is permanent.
*/
-bool asynchErrorHandler(NdbConnection * trans, Ndb* ndb)
+bool asynchErrorHandler(NdbTransaction * trans, Ndb* ndb)
{
NdbError error = trans->getNdbError();
switch(error.status)
@@ -339,6 +290,10 @@ int populate(Ndb * myNdb, int data, async_callback_t * cbData)
{
NdbOperation* myNdbOperation; // For operations
+ const NdbDictionary::Dictionary* myDict= myNdb->getDictionary();
+ const NdbDictionary::Table *myTable= myDict->getTable("GARAGE");
+ if (myTable == NULL)
+ APIERROR(myDict->getNdbError());
async_callback_t * cb;
int retries = 0;
@@ -395,8 +350,7 @@ int populate(Ndb * myNdb, int data, async_callback_t * cbData)
}
asynchExitHandler(myNdb);
}
- // Error check. If error, then maybe table GARAGE is not in database
- myNdbOperation = transaction[current].conn->getNdbOperation("GARAGE");
+ myNdbOperation = transaction[current].conn->getNdbOperation(myTable);
if (myNdbOperation == NULL)
{
if (asynchErrorHandler(transaction[current].conn, myNdb))
@@ -426,7 +380,7 @@ int populate(Ndb * myNdb, int data, async_callback_t * cbData)
}
/*Prepare transaction (the transaction is NOT yet sent to NDB)*/
- transaction[current].conn->executeAsynchPrepare(Commit,
+ transaction[current].conn->executeAsynchPrepare(NdbTransaction::Commit,
&callback,
cb);
/**
@@ -455,22 +409,48 @@ int populate(Ndb * myNdb, int data, async_callback_t * cbData)
int main()
{
ndb_init();
- Ndb* myNdb = new Ndb( "TEST_DB" ); // Object representing the database
-
- /*******************************************
- * Initialize NDB and wait until its ready *
- *******************************************/
- if (myNdb->init(1024) == -1) { // Set max 1024 parallel transactions
- APIERROR(myNdb->getNdbError());
+ MYSQL mysql;
+
+ /**************************************************************
+ * Connect to mysql server and create table *
+ **************************************************************/
+ {
+ if ( !mysql_init(&mysql) ) {
+ std::cout << "mysql_init failed\n";
+ exit(-1);
+ }
+ if ( !mysql_real_connect(&mysql, "localhost", "root", "", "",
+ 3306, "/tmp/mysql.sock", 0) )
+ MYSQLERROR(mysql);
+
+ mysql_query(&mysql, "CREATE DATABASE TEST_DB");
+ if (mysql_query(&mysql, "USE TEST_DB") != 0) MYSQLERROR(mysql);
+
+ create_table(mysql);
}
- if (myNdb->waitUntilReady(30) != 0) {
- std::cout << "NDB was not ready within 30 secs." << std::endl;
+ /**************************************************************
+ * Connect to ndb cluster *
+ **************************************************************/
+ Ndb_cluster_connection cluster_connection;
+ if (cluster_connection.connect(4, 5, 1))
+ {
+ std::cout << "Unable to connect to cluster within 30 secs." << std::endl;
exit(-1);
}
- create_table(myNdb);
+ // Optionally connect and wait for the storage nodes (ndbd's)
+ if (cluster_connection.wait_until_ready(30,0) < 0)
+ {
+ std::cout << "Cluster was not ready within 30 secs.\n";
+ exit(-1);
+ }
+
+ Ndb* myNdb = new Ndb( &cluster_connection,
+ "TEST_DB" ); // Object representing the database
+ if (myNdb->init(1024) == -1) { // Set max 1024 parallel transactions
+ APIERROR(myNdb->getNdbError());
+ }
-
/**
* Initialise transaction array
*/
diff --git a/ndb/examples/ndbapi_async_example/readme.txt b/ndb/ndbapi-examples/ndbapi_async_example/readme.txt
index 47cb4bf9ffa..47cb4bf9ffa 100644
--- a/ndb/examples/ndbapi_async_example/readme.txt
+++ b/ndb/ndbapi-examples/ndbapi_async_example/readme.txt
diff --git a/ndb/ndbapi-examples/ndbapi_async_example1/Makefile b/ndb/ndbapi-examples/ndbapi_async_example1/Makefile
new file mode 100644
index 00000000000..7f6ea0b4d25
--- /dev/null
+++ b/ndb/ndbapi-examples/ndbapi_async_example1/Makefile
@@ -0,0 +1,21 @@
+TARGET = ndbapi_async1
+SRCS = ndbapi_async1.cpp
+OBJS = ndbapi_async1.o
+CXX = g++
+CFLAGS = -c -Wall -fno-rtti -fno-exceptions
+DEBUG =
+LFLAGS = -Wall
+INCLUDE_DIR = ../../include
+LIB_DIR = -L../../src/.libs \
+ -L../../../libmysql_r/.libs \
+ -L../../../mysys -L../../../strings
+SYS_LIB =
+
+$(TARGET): $(OBJS)
+ $(CXX) $(LFLAGS) $(LIB_DIR) $(OBJS) -lndbclient -lmysqlclient_r -lmysys -lmystrings -lz $(SYS_LIB) -o $(TARGET)
+
+$(TARGET).o: $(SRCS)
+ $(CXX) $(CFLAGS) -I$(INCLUDE_DIR) -I$(INCLUDE_DIR)/ndbapi $(SRCS)
+
+clean:
+ rm -f *.o $(TARGET)
diff --git a/ndb/examples/ndbapi_example2/ndbapi_example2.cpp b/ndb/ndbapi-examples/ndbapi_async_example1/ndbapi_async1.cpp
index 95a7bae66b8..e8bc19e267b 100644
--- a/ndb/examples/ndbapi_example2/ndbapi_example2.cpp
+++ b/ndb/ndbapi-examples/ndbapi_async_example1/ndbapi_async1.cpp
@@ -15,7 +15,7 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
//
-// ndbapi_example2.cpp: Using asynchronous transactions in NDB API
+// ndbapi_async1.cpp: Using asynchronous transactions in NDB API
//
// Execute ndbapi_example1 to create the table "MYTABLENAME"
// before executing this program.
@@ -35,46 +35,75 @@
<< error.code << ", msg: " << error.message << "." << std::endl; \
exit(-1); }
-static void callback(int result, NdbConnection* NdbObject, void* aObject);
+static void callback(int result, NdbTransaction* NdbObject, void* aObject);
int main()
{
ndb_init();
- Ndb* myNdb = new Ndb( "TEST_DB_2" ); // Object representing the database
- NdbConnection* myNdbConnection[2]; // For transactions
+ Ndb_cluster_connection *cluster_connection=
+ new Ndb_cluster_connection(); // Object representing the cluster
+
+ int r= cluster_connection->connect(5 /* retries */,
+ 3 /* delay between retries */,
+ 1 /* verbose */);
+ if (r > 0)
+ {
+ std::cout
+ << "Cluster connect failed, possibly resolved with more retries.\n";
+ exit(-1);
+ }
+ else if (r < 0)
+ {
+ std::cout
+ << "Cluster connect failed.\n";
+ exit(-1);
+ }
+
+ if (cluster_connection->wait_until_ready(30,30))
+ {
+ std::cout << "Cluster was not ready within 30 secs." << std::endl;
+ exit(-1);
+ }
+
+ Ndb* myNdb = new Ndb( cluster_connection,
+ "TEST_DB_2" ); // Object representing the database
+
+ NdbTransaction* myNdbTransaction[2]; // For transactions
NdbOperation* myNdbOperation; // For operations
- /*******************************************
- * Initialize NDB and wait until its ready *
- *******************************************/
if (myNdb->init(2) == -1) { // Want two parallel insert transactions
APIERROR(myNdb->getNdbError());
exit(-1);
}
- if (myNdb->waitUntilReady(30) != 0) {
- std::cout << "NDB was not ready within 30 secs." << std::endl;
- exit(-1);
- }
-
/******************************************************
* Insert (we do two insert transactions in parallel) *
******************************************************/
+ const NdbDictionary::Dictionary* myDict= myNdb->getDictionary();
+ const NdbDictionary::Table *myTable= myDict->getTable("MYTABLENAME");
+ if (myTable == NULL)
+ APIERROR(myDict->getNdbError());
for (int i = 0; i < 2; i++) {
- myNdbConnection[i] = myNdb->startTransaction();
- if (myNdbConnection[i] == NULL) APIERROR(myNdb->getNdbError());
+ myNdbTransaction[i] = myNdb->startTransaction();
+ if (myNdbTransaction[i] == NULL) APIERROR(myNdb->getNdbError());
- myNdbOperation = myNdbConnection[i]->getNdbOperation("MYTABLENAME");
- // Error check. If error, then maybe table MYTABLENAME is not in database
- if (myNdbOperation == NULL) APIERROR(myNdbConnection[i]->getNdbError());
+ myNdbOperation = myNdbTransaction[i]->getNdbOperation(myTable);
+ if (myNdbOperation == NULL) APIERROR(myNdbTransaction[i]->getNdbError());
myNdbOperation->insertTuple();
myNdbOperation->equal("ATTR1", 20 + i);
myNdbOperation->setValue("ATTR2", 20 + i);
// Prepare transaction (the transaction is NOT yet sent to NDB)
- myNdbConnection[i]->executeAsynchPrepare(Commit, &callback, NULL);
+ myNdbTransaction[i]->executeAsynchPrepare(NdbTransaction::Commit,
+ &callback, NULL);
}
// Send all transactions to NDB
@@ -85,9 +114,13 @@ int main()
// Close all transactions
for (int i = 0; i < 2; i++)
- myNdb->closeTransaction(myNdbConnection[i]);
+ myNdb->closeTransaction(myNdbTransaction[i]);
delete myNdb;
+ delete cluster_connection;
+
+ ndb_end(0);
+ return 0;
}
/*
@@ -95,12 +128,12 @@ int main()
*
* (This function must have three arguments:
* - The result of the transaction,
- * - The NdbConnection object, and
+ * - The NdbTransaction object, and
* - A pointer to an arbitrary object.)
*/
static void
-callback(int result, NdbConnection* myTrans, void* aObject)
+callback(int result, NdbTransaction* myTrans, void* aObject)
{
if (result == -1) {
std::cout << "Poll error: " << std::endl;
diff --git a/ndb/ndbapi-examples/ndbapi_event_example/Makefile b/ndb/ndbapi-examples/ndbapi_event_example/Makefile
new file mode 100644
index 00000000000..12e109c654f
--- /dev/null
+++ b/ndb/ndbapi-examples/ndbapi_event_example/Makefile
@@ -0,0 +1,23 @@
+TARGET = ndbapi_event
+SRCS = ndbapi_event.cpp
+OBJS = ndbapi_event.o
+CXX = g++
+CFLAGS = -c -Wall -fno-rtti -fno-exceptions
+CXXFLAGS =
+DEBUG =
+LFLAGS = -Wall
+TOP_SRCDIR = ../../..
+INCLUDE_DIR = $(TOP_SRCDIR)/ndb/include
+LIB_DIR = -L$(TOP_SRCDIR)/ndb/src/.libs \
+ -L$(TOP_SRCDIR)/libmysql_r/.libs \
+ -L$(TOP_SRCDIR)/mysys -L$(TOP_SRCDIR)/strings
+SYS_LIB =
+
+$(TARGET): $(OBJS)
+ $(CXX) $(CXXFLAGS) $(LFLAGS) $(LIB_DIR) $(OBJS) -lndbclient -lmysqlclient_r -lmysys -lmystrings -lz $(SYS_LIB) -o $(TARGET)
+
+$(TARGET).o: $(SRCS)
+ $(CXX) $(CFLAGS) -I$(INCLUDE_DIR) -I$(INCLUDE_DIR)/ndbapi $(SRCS)
+
+clean:
+ rm -f *.o $(TARGET)
diff --git a/ndb/examples/ndbapi_example5/ndbapi_example5.cpp b/ndb/ndbapi-examples/ndbapi_event_example/ndbapi_event.cpp
index 77f74e7bb63..286f6fafbab 100644
--- a/ndb/examples/ndbapi_example5/ndbapi_example5.cpp
+++ b/ndb/ndbapi-examples/ndbapi_event_example/ndbapi_event.cpp
@@ -15,11 +15,41 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
/**
- * ndbapi_example5.cpp: Using API level events in NDB API
+ * ndbapi_event.cpp: Using API level events in NDB API
+ *
+ * Classes and methods used in this example:
+ *
+ * Ndb_cluster_connection
+ * connect()
+ * wait_until_ready()
+ *
+ * Ndb
+ * init()
+ * getDictionary()
+ * createEventOperation()
+ * dropEventOperation()
+ * pollEvents()
+ *
+ * NdbDictionary
+ * createEvent()
+ * dropEvent()
+ *
+ * NdbDictionary::Event
+ * setTable()
+ * addTableEvent()
+ * addEventColumn()
+ *
+ * NdbEventOperation
+ * getValue()
+ * getPreValue()
+ * execute()
+ * next()
+ * isConsistent()
+ * getEventType()
+ *
*/
#include <NdbApi.hpp>
-#include <NdbEventOperation.hpp>
// Used for cout
#include <stdio.h>
@@ -33,19 +63,20 @@
* another process (e.g. flexBench -l 0 -stdtables).
 * We want to monitor what happens with columns COL0, COL1, COL11
*
- * or together with the mysqlcluster client;
+ * or together with the mysql client;
*
- * shell> mysqlcluster -u root
+ * shell> mysql -u root
* mysql> create database TEST_DB;
* mysql> use TEST_DB;
- * mysql> create table TAB0 (COL0 int primary key, COL1 int, COL11 int);
+ * mysql> create table TAB0 (COL0 int primary key, COL1 int, COL11 int) engine=ndb;
*
- * In another window start ndbapi_example5, wait until properly started
+ * In another window start ndbapi_event, wait until properly started
*
- * mysql> insert into TAB0 values (1,2,3);
- * mysql> insert into TAB0 values (2,2,3);
- * mysql> insert into TAB0 values (3,2,9);
- * mysql>
+ insert into TAB0 values (1,2,3);
+ insert into TAB0 values (2,2,3);
+ insert into TAB0 values (3,2,9);
+ update TAB0 set COL1=10 where COL0=1;
+ delete from TAB0 where COL0=1;
*
* you should see the data popping up in the example window
*
@@ -56,45 +87,69 @@
<< error.code << ", msg: " << error.message << "." << std::endl; \
exit(-1); }
-Ndb* myCreateNdb();
int myCreateEvent(Ndb* myNdb,
const char *eventName,
const char *eventTableName,
- const char **eventComlumnName,
- const int noEventComlumnName);
+ const char **eventColumnName,
+ const int noEventColumnName);
int main()
{
ndb_init();
- Ndb* myNdb = myCreateNdb();
- NdbDictionary::Dictionary *myDict;
- const char *eventName = "CHNG_IN_TAB0";
- const char *eventTableName = "TAB0";
- const int noEventColumnName = 3;
- const char *eventColumnName[noEventColumnName] =
+ Ndb_cluster_connection *cluster_connection=
+ new Ndb_cluster_connection(); // Object representing the cluster
+
+ int r= cluster_connection->connect(5 /* retries */,
+ 3 /* delay between retries */,
+ 1 /* verbose */);
+ if (r > 0)
+ {
+ std::cout
+ << "Cluster connect failed, possibly resolved with more retries.\n";
+ exit(-1);
+ }
+ else if (r < 0)
+ {
+ std::cout
+ << "Cluster connect failed.\n";
+ exit(-1);
+ }
+
+ if (cluster_connection->wait_until_ready(30,30))
+ {
+ std::cout << "Cluster was not ready within 30 secs." << std::endl;
+ exit(-1);
+ }
+
+ Ndb* myNdb= new Ndb(cluster_connection,
+ "TEST_DB"); // Object representing the database
+
+ if (myNdb->init() == -1) APIERROR(myNdb->getNdbError());
+
+ const char *eventName= "CHNG_IN_TAB0";
+ const char *eventTableName= "TAB0";
+ const int noEventColumnName= 3;
+ const char *eventColumnName[noEventColumnName]=
{"COL0",
"COL1",
"COL11"};
- myDict = myNdb->getDictionary();
-
// Create events
myCreateEvent(myNdb,
eventName,
eventTableName,
eventColumnName,
noEventColumnName);
- int j = 0;
+
+ int j= 0;
while (j < 5) {
// Start "transaction" for handling events
NdbEventOperation* op;
printf("create EventOperation\n");
- if ((op = myNdb->createEventOperation(eventName,100)) == NULL) {
- printf("Event operation creation failed\n");
- exit(-1);
- }
+ if ((op = myNdb->createEventOperation(eventName,100)) == NULL)
+ APIERROR(myNdb->getNdbError());
printf("get values\n");
NdbRecAttr* recAttr[noEventColumnName];
@@ -107,23 +162,21 @@ int main()
// set up the callbacks
printf("execute\n");
- if (op->execute()) { // This starts changes to "start flowing"
- printf("operationd execution failed\n");
- exit(-1);
- }
-
- int i = 0;
+    // This makes the changes start flowing
+ if (op->execute())
+ APIERROR(op->getNdbError());
+ int i= 0;
while(i < 40) {
- //printf("now waiting for event...\n");
- int r = myNdb->pollEvents(1000); // wait for event or 1000 ms
- if (r>0) {
- //printf("got data! %d\n", r);
+ // printf("now waiting for event...\n");
+ int r= myNdb->pollEvents(1000); // wait for event or 1000 ms
+ if (r > 0) {
+ // printf("got data! %d\n", r);
int overrun;
while (op->next(&overrun) > 0) {
i++;
if (!op->isConsistent())
- printf("A node failiure has occured and events might be missing\n");
+          printf("A node failure has occurred and events might be missing\n");
switch (op->getEventType()) {
case NdbDictionary::Event::TE_INSERT:
printf("%u INSERT: ", i);
@@ -134,6 +187,8 @@ int main()
case NdbDictionary::Event::TE_UPDATE:
printf("%u UPDATE: ", i);
break;
+ default:
+ abort(); // should not happen
}
printf("overrun %u pk %u: ", overrun, recAttr[0]->u_32_value());
for (int i = 1; i < noEventColumnName; i++) {
@@ -145,7 +200,7 @@ int main()
printf("NULL");
}
if (recAttrPre[i]->isNULL() >= 0) { // we have a value
- printf(" post[%u]=", i);
+ printf(" pre[%u]=", i);
if (recAttrPre[i]->isNULL() == 0) // we have a non-null value
printf("%u", recAttrPre[i]->u_32_value());
else // we have a null value
@@ -157,75 +212,58 @@ int main()
} else
;//printf("timed out\n");
}
- // don't want to listen to eventsanymore
- myNdb->dropEventOperation(op);
+ // don't want to listen to events anymore
+ if (myNdb->dropEventOperation(op)) APIERROR(myNdb->getNdbError());
j++;
}
- myDict->dropEvent(eventName); // remove event from database
-
- delete myNdb;
-}
-
-Ndb* myCreateNdb()
-{
- Ndb* myNdb = new Ndb("TEST_DB");
-
- /********************************************
- * Initialize NDB and wait until it's ready *
- ********************************************/
- if (myNdb->init() == -1) {
- APIERROR(myNdb->getNdbError());
- exit(-1);
- }
-
- if (myNdb->waitUntilReady(30) != 0) {
- std::cout << "NDB was not ready within 30 secs." << std::endl;
- exit(-1);
+ {
+ NdbDictionary::Dictionary *myDict = myNdb->getDictionary();
+ if (!myDict) APIERROR(myNdb->getNdbError());
+ // remove event from database
+ if (myDict->dropEvent(eventName)) APIERROR(myDict->getNdbError());
}
- return myNdb;
+ delete myNdb;
+ delete cluster_connection;
+ ndb_end(0);
+ return 0;
}
int myCreateEvent(Ndb* myNdb,
const char *eventName,
const char *eventTableName,
- const char **eventColumnName,
- const int noEventColumnName)
+ const char **eventColumnNames,
+ const int noEventColumnNames)
{
- NdbDictionary::Dictionary *myDict = myNdb->getDictionary();
+ NdbDictionary::Dictionary *myDict= myNdb->getDictionary();
+ if (!myDict) APIERROR(myNdb->getNdbError());
- if (!myDict) {
- printf("Event Creation failedDictionary not found");
- exit(-1);
- }
+ const NdbDictionary::Table *table= myDict->getTable(eventTableName);
+ if (!table) APIERROR(myDict->getNdbError());
- NdbDictionary::Event myEvent(eventName);
- myEvent.setTable(eventTableName);
+ NdbDictionary::Event myEvent(eventName, *table);
myEvent.addTableEvent(NdbDictionary::Event::TE_ALL);
// myEvent.addTableEvent(NdbDictionary::Event::TE_INSERT);
// myEvent.addTableEvent(NdbDictionary::Event::TE_UPDATE);
// myEvent.addTableEvent(NdbDictionary::Event::TE_DELETE);
- for (int i = 0; i < noEventColumnName; i++)
- myEvent.addEventColumn(eventColumnName[i]);
+ myEvent.addEventColumns(noEventColumnNames, eventColumnNames);
- int res = myDict->createEvent(myEvent); // Add event to database
-
- if (res == 0)
+ // Add event to database
+ if (myDict->createEvent(myEvent) == 0)
myEvent.print();
- else {
- printf("Event creation failed\n");
- printf("trying drop Event, maybe event exists\n");
- res = myDict->dropEvent(eventName);
- if (res)
- exit(-1);
+ else if (myDict->getNdbError().classification ==
+ NdbError::SchemaObjectExists) {
+ printf("Event creation failed, event exists\n");
+ printf("dropping Event...\n");
+ if (myDict->dropEvent(eventName)) APIERROR(myDict->getNdbError());
// try again
- res = myDict->createEvent(myEvent); // Add event to database
- if (res)
- exit(-1);
- }
+ // Add event to database
+    if (myDict->createEvent(myEvent)) APIERROR(myDict->getNdbError());
+ } else
+ APIERROR(myDict->getNdbError());
- return res;
+ return 0;
}
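For reference, the event-monitoring loop that the rewritten ndbapi_event.cpp builds up can be condensed to the sketch below. It is illustrative only: it assumes the CHNG_IN_TAB0 event on TAB0 created by myCreateEvent() above, watches a single column, and leaves out the error handling the real example performs.

#include <NdbApi.hpp>
#include <stdio.h>

static void watch_col0(Ndb *myNdb)
{
  // Subscribe to the previously created event
  NdbEventOperation *op = myNdb->createEventOperation("CHNG_IN_TAB0", 100);
  NdbRecAttr *cur = op->getValue("COL0");     // value after the change
  NdbRecAttr *pre = op->getPreValue("COL0");  // value before the change
  op->execute();                              // changes start flowing

  // Print events until one second passes without any
  while (myNdb->pollEvents(1000) > 0) {
    int overrun;
    while (op->next(&overrun) > 0) {
      if (cur->isNULL() == 0)
        printf("COL0 is now %u", cur->u_32_value());
      if (pre->isNULL() == 0)
        printf(" (was %u)", pre->u_32_value());
      printf("\n");
    }
  }
  myNdb->dropEventOperation(op);
}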
diff --git a/ndb/ndbapi-examples/ndbapi_retries_example/Makefile b/ndb/ndbapi-examples/ndbapi_retries_example/Makefile
new file mode 100644
index 00000000000..829a7009031
--- /dev/null
+++ b/ndb/ndbapi-examples/ndbapi_retries_example/Makefile
@@ -0,0 +1,21 @@
+TARGET = ndbapi_retries
+SRCS = ndbapi_retries.cpp
+OBJS = ndbapi_retries.o
+CXX = g++
+CFLAGS = -c -Wall -fno-rtti -fno-exceptions
+DEBUG =
+LFLAGS = -Wall
+INCLUDE_DIR = ../../include
+LIB_DIR = -L../../src/.libs \
+ -L../../../libmysql_r/.libs \
+ -L../../../mysys -L../../../strings
+SYS_LIB =
+
+$(TARGET): $(OBJS)
+ $(CXX) $(LFLAGS) $(LIB_DIR) $(OBJS) -lndbclient -lmysqlclient_r -lmysys -lmystrings -lz $(SYS_LIB) -o $(TARGET)
+
+$(TARGET).o: $(SRCS)
+ $(CXX) $(CFLAGS) -I$(INCLUDE_DIR) -I$(INCLUDE_DIR)/ndbapi $(SRCS)
+
+clean:
+ rm -f *.o $(TARGET)
diff --git a/ndb/examples/ndbapi_example3/ndbapi_example3.cpp b/ndb/ndbapi-examples/ndbapi_retries_example/ndbapi_retries.cpp
index 91d9ff122ba..8c29fe31446 100644
--- a/ndb/examples/ndbapi_example3/ndbapi_example3.cpp
+++ b/ndb/ndbapi-examples/ndbapi_retries_example/ndbapi_retries.cpp
@@ -15,14 +15,14 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
//
-// ndbapi_example3.cpp: Error handling and transaction retries
+// ndbapi_retries.cpp: Error handling and transaction retries
//
-// Execute ndbapi_example1 to create the table "MYTABLENAME"
+// Execute ndbapi_simple to create the table "MYTABLENAME"
// before executing this program.
//
// There are many ways to program using the NDB API. In this example
// we execute two inserts in the same transaction using
-// NdbConnection::Ndbexecute(NoCommit).
+// NdbTransaction::execute(NdbTransaction::NoCommit).
//
// Transaction failing is handled by re-executing the transaction
// in case of non-permanent transaction errors.
@@ -52,28 +52,28 @@
}
//
-// CONERROR prints all error info regarding an NdbConnection
+// TRANSERROR prints all error info regarding an NdbTransaction
//
-#define CONERROR(ndbConnection) \
- { NdbError error = ndbConnection->getNdbError(); \
- std::cout << "CON ERROR: " << error.code << " " << error.message \
+#define TRANSERROR(ndbTransaction) \
+ { NdbError error = ndbTransaction->getNdbError(); \
+ std::cout << "TRANS ERROR: " << error.code << " " << error.message \
<< std::endl \
<< " " << "Status: " << error.status \
<< ", Classification: " << error.classification << std::endl \
<< " " << "File: " << __FILE__ \
<< " (Line: " << __LINE__ << ")" << std::endl \
; \
- printTransactionError(ndbConnection); \
+ printTransactionError(ndbTransaction); \
}
-void printTransactionError(NdbConnection *ndbConnection) {
+void printTransactionError(NdbTransaction *ndbTransaction) {
const NdbOperation *ndbOp = NULL;
int i=0;
/****************************************************************
* Print NdbError object of every operations in the transaction *
****************************************************************/
- while ((ndbOp = ndbConnection->getNextCompletedOperation(ndbOp)) != NULL) {
+ while ((ndbOp = ndbTransaction->getNextCompletedOperation(ndbOp)) != NULL) {
NdbError error = ndbOp->getNdbError();
std::cout << " OPERATION " << i+1 << ": "
<< error.code << " " << error.message << std::endl
@@ -86,15 +86,17 @@ void printTransactionError(NdbConnection *ndbConnection) {
//
// Example insert
-// @param myNdb Ndb object representing NDB Cluster
-// @param myConnection NdbConnection used for transaction
-// @param error NdbError object returned in case of errors
+// @param myNdb Ndb object representing NDB Cluster
+// @param myTransaction NdbTransaction used for transaction
+// @param myTable Table to insert into
+// @param error NdbError object returned in case of errors
// @return -1 in case of failures, 0 otherwise
//
-int insert(int transactionId, NdbConnection* myConnection) {
+int insert(int transactionId, NdbTransaction* myTransaction,
+ const NdbDictionary::Table *myTable) {
NdbOperation *myOperation; // For other operations
- myOperation = myConnection->getNdbOperation("MYTABLENAME");
+ myOperation = myTransaction->getNdbOperation(myTable);
if (myOperation == NULL) return -1;
if (myOperation->insertTuple() ||
@@ -104,7 +106,7 @@ int insert(int transactionId, NdbConnection* myConnection) {
exit(-1);
}
- return myConnection->execute(NoCommit);
+ return myTransaction->execute(NdbTransaction::NoCommit);
}
@@ -113,10 +115,11 @@ int insert(int transactionId, NdbConnection* myConnection) {
// if there are temporary errors (e.g. the NDB Cluster is overloaded).
// @return -1 failure, 1 success
//
-int executeInsertTransaction(int transactionId, Ndb* myNdb) {
+int executeInsertTransaction(int transactionId, Ndb* myNdb,
+ const NdbDictionary::Table *myTable) {
int result = 0; // No result yet
int noOfRetriesLeft = 10;
- NdbConnection *myConnection; // For other transactions
+ NdbTransaction *myTransaction; // For other transactions
NdbError ndberror;
while (noOfRetriesLeft > 0 && !result) {
@@ -124,16 +127,16 @@ int executeInsertTransaction(int transactionId, Ndb* myNdb) {
/*********************************
* Start and execute transaction *
*********************************/
- myConnection = myNdb->startTransaction();
- if (myConnection == NULL) {
+ myTransaction = myNdb->startTransaction();
+ if (myTransaction == NULL) {
APIERROR(myNdb->getNdbError());
ndberror = myNdb->getNdbError();
result = -1; // Failure
- } else if (insert(transactionId, myConnection) ||
- insert(10000+transactionId, myConnection) ||
- myConnection->execute(Commit)) {
- CONERROR(myConnection);
- ndberror = myConnection->getNdbError();
+ } else if (insert(transactionId, myTransaction, myTable) ||
+ insert(10000+transactionId, myTransaction, myTable) ||
+ myTransaction->execute(NdbTransaction::Commit)) {
+ TRANSERROR(myTransaction);
+ ndberror = myTransaction->getNdbError();
result = -1; // Failure
} else {
result = 1; // Success
@@ -164,8 +167,8 @@ int executeInsertTransaction(int transactionId, Ndb* myNdb) {
/*********************
* Close transaction *
*********************/
- if (myConnection != NULL) {
- myNdb->closeTransaction(myConnection);
+ if (myTransaction != NULL) {
+ myNdb->closeTransaction(myTransaction);
}
}
@@ -177,27 +180,57 @@ int executeInsertTransaction(int transactionId, Ndb* myNdb) {
int main()
{
ndb_init();
- Ndb* myNdb = new Ndb( "TEST_DB_1" ); // Object representing the database
+
+ Ndb_cluster_connection *cluster_connection=
+ new Ndb_cluster_connection(); // Object representing the cluster
+
+ int r= cluster_connection->connect(5 /* retries */,
+ 3 /* delay between retries */,
+ 1 /* verbose */);
+ if (r > 0)
+ {
+ std::cout
+ << "Cluster connect failed, possibly resolved with more retries.\n";
+ exit(-1);
+ }
+ else if (r < 0)
+ {
+ std::cout
+ << "Cluster connect failed.\n";
+ exit(-1);
+ }
+
+ if (cluster_connection->wait_until_ready(30,30))
+ {
+ std::cout << "Cluster was not ready within 30 secs." << std::endl;
+ exit(-1);
+ }
+
+ Ndb* myNdb= new Ndb( cluster_connection,
+ "TEST_DB_1" ); // Object representing the database
- /*******************************************
- * Initialize NDB and wait until its ready *
- *******************************************/
- if (myNdb->init() == -1) {
+ if (myNdb->init() == -1) {
APIERROR(myNdb->getNdbError());
exit(-1);
}
- if (myNdb->waitUntilReady(30) != 0) {
- std::cout << "NDB was not ready within 30 secs." << std::endl;
- exit(-1);
+ const NdbDictionary::Dictionary* myDict= myNdb->getDictionary();
+ const NdbDictionary::Table *myTable= myDict->getTable("MYTABLENAME");
+ if (myTable == NULL)
+ {
+ APIERROR(myDict->getNdbError());
+ return -1;
}
-
/************************************
* Execute some insert transactions *
************************************/
for (int i = 10000; i < 20000; i++) {
- executeInsertTransaction(i, myNdb);
+ executeInsertTransaction(i, myNdb, myTable);
}
delete myNdb;
+ delete cluster_connection;
+
+ ndb_end(0);
+ return 0;
}
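The retry policy that executeInsertTransaction() implements boils down to the compact form below. This is a sketch under the same assumptions as the example (the insert() helper and the MYTABLENAME table defined above), with the logging and back-off messages stripped.

// Retry a two-insert transaction as long as NDB reports only temporary
// errors (e.g. the cluster is overloaded); give up on permanent errors.
static int execute_with_retries(Ndb *myNdb, const NdbDictionary::Table *myTable,
                                int transactionId, int retriesLeft)
{
  while (retriesLeft-- > 0) {
    NdbTransaction *trans = myNdb->startTransaction();
    if (trans == NULL) return -1;
    int failed = insert(transactionId, trans, myTable) ||
                 insert(10000 + transactionId, trans, myTable) ||
                 trans->execute(NdbTransaction::Commit);
    NdbError err = trans->getNdbError();
    myNdb->closeTransaction(trans);
    if (!failed) return 1;                                  // success
    if (err.status != NdbError::TemporaryError) return -1;  // permanent error
    // temporary error: fall through and retry
  }
  return -1;                                                // out of retries
}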
diff --git a/ndb/ndbapi-examples/ndbapi_scan_example/Makefile b/ndb/ndbapi-examples/ndbapi_scan_example/Makefile
new file mode 100644
index 00000000000..31886b02bf1
--- /dev/null
+++ b/ndb/ndbapi-examples/ndbapi_scan_example/Makefile
@@ -0,0 +1,23 @@
+TARGET = ndbapi_scan
+SRCS = $(TARGET).cpp
+OBJS = $(TARGET).o
+CXX = g++
+CFLAGS = -g -c -Wall -fno-rtti -fno-exceptions
+CXXFLAGS = -g
+DEBUG =
+LFLAGS = -Wall
+TOP_SRCDIR = ../../..
+INCLUDE_DIR = $(TOP_SRCDIR)
+LIB_DIR = -L$(TOP_SRCDIR)/ndb/src/.libs \
+ -L$(TOP_SRCDIR)/libmysql_r/.libs \
+ -L$(TOP_SRCDIR)/mysys -L$(TOP_SRCDIR)/strings
+SYS_LIB =
+
+$(TARGET): $(OBJS)
+ $(CXX) $(CXXFLAGS) $(LFLAGS) $(LIB_DIR) $(OBJS) -lndbclient -lmysqlclient_r -lmysys -lmystrings -lz $(SYS_LIB) -o $(TARGET)
+
+$(TARGET).o: $(SRCS)
+ $(CXX) $(CFLAGS) -I$(INCLUDE_DIR)/include -I$(INCLUDE_DIR)/extra -I$(INCLUDE_DIR)/ndb/include -I$(INCLUDE_DIR)/ndb/include/ndbapi $(SRCS)
+
+clean:
+ rm -f *.o $(TARGET)
diff --git a/ndb/examples/ndbapi_scan_example/ndbapi_scan.cpp b/ndb/ndbapi-examples/ndbapi_scan_example/ndbapi_scan.cpp
index 22641bc5b57..69ffd99b8ca 100644
--- a/ndb/examples/ndbapi_scan_example/ndbapi_scan.cpp
+++ b/ndb/ndbapi-examples/ndbapi_scan_example/ndbapi_scan.cpp
@@ -24,171 +24,118 @@
*
* Classes and methods used in this example:
*
+ * Ndb_cluster_connection
+ * connect()
+ * wait_until_ready()
+ *
* Ndb
* init()
- * waitUntilRead()
* getDictionary()
* startTransaction()
* closeTransaction()
- * sendPreparedTransactions()
- * pollNdb()
*
- * NdbConnection
- * getNdbOperation()
- * executeAsynchPrepare()
- * getNdbError()
- * executeScan()
- * nextScanResult()
+ * NdbTransaction
+ * getNdbScanOperation()
+ * execute()
+ *
+ * NdbScanOperation
+ * getValue()
+ * readTuples()
+ * nextResult()
+ * deleteCurrentTuple()
+ * updateCurrentTuple()
*
- * NdbDictionary::Dictionary
+ * const NdbDictionary::Dictionary
* getTable()
- * dropTable()
- * createTable()
*
- * NdbDictionary::Column
- * setName()
- * setType()
- * setLength()
- * setPrimaryKey()
- * setNullable()
+ * const NdbDictionary::Table
+ * getColumn()
*
- * NdbDictionary::Table
- * setName()
- * addColumn()
+ * const NdbDictionary::Column
+ * getLength()
*
* NdbOperation
* insertTuple()
* equal()
* setValue()
- * openScanRead()
- * openScanExclusive()
- *
- * NdbRecAttr
- * aRef()
- * u_32_value()
- *
- * NdbResultSet
- * nextResult()
- * deleteTuple()
- * updateTuple()
- *
- * NdbScanOperation
- * getValue()
- * readTuplesExclusive()
*
* NdbScanFilter
* begin()
* eq()
* end()
*
- *
*/
-#include <ndb_global.h>
-
+#include <mysql.h>
+#include <mysqld_error.h>
#include <NdbApi.hpp>
-#include <NdbScanFilter.hpp>
// Used for cout
#include <iostream>
+#include <stdio.h>
/**
* Helper sleep function
*/
-int
+static void
milliSleep(int milliseconds){
- int result = 0;
- struct timespec sleeptime;
+ struct timeval sleeptime;
sleeptime.tv_sec = milliseconds / 1000;
- sleeptime.tv_nsec = (milliseconds - (sleeptime.tv_sec * 1000)) * 1000000;
- result = nanosleep(&sleeptime, NULL);
- return result;
+  sleeptime.tv_usec = (milliseconds - (sleeptime.tv_sec * 1000)) * 1000; /* ms remainder as microseconds */
+ select(0, 0, 0, 0, &sleeptime);
}
/**
* Helper sleep function
*/
-#define APIERROR(error) \
- { std::cout << "Error in " << __FILE__ << ", line:" << __LINE__ << ", code:" \
- << error.code << ", msg: " << error.message << "." << std::endl; \
- exit(-1); }
-
-/*
- * callback : This is called when the transaction is polled
- *
- * (This function must have three arguments:
- * - The result of the transaction,
- * - The NdbConnection object, and
- * - A pointer to an arbitrary object.)
- */
-static void
-callback(int result, NdbConnection* myTrans, void* aObject)
+#define PRINT_ERROR(code,msg) \
+ std::cout << "Error in " << __FILE__ << ", line: " << __LINE__ \
+ << ", code: " << code \
+ << ", msg: " << msg << "." << std::endl
+#define MYSQLERROR(mysql) { \
+ PRINT_ERROR(mysql_errno(&mysql),mysql_error(&mysql)); \
+ exit(-1); }
+#define APIERROR(error) { \
+ PRINT_ERROR(error.code,error.message); \
+ exit(-1); }
+
+struct Car
{
- if (result == -1) {
- std::cout << "In callback: " << std::endl;
- /**
- * Put error checking code here (see ndb_async_example)
- */
- APIERROR(myTrans->getNdbError());
- } else {
- /**
- * Ok!
- */
- return;
- }
-}
+ /**
+ * Note memset, so that entire char-fields are cleared
+ * as all 20 bytes are significant (as type is char)
+ */
+ Car() { memset(this, 0, sizeof(* this)); }
+
+ unsigned int reg_no;
+ char brand[20];
+ char color[20];
+};
/**
* Function to create table
*/
-int create_table(Ndb * myNdb)
+int create_table(MYSQL &mysql)
{
- NdbDictionary::Table myTable;
- NdbDictionary::Column myColumn;
-
- NdbDictionary::Dictionary* myDict = myNdb->getDictionary();
-
- /*********************************************************
- * Create a table named GARAGE if it does not exist *
- *********************************************************/
- if (myDict->getTable("GARAGE") != NULL) {
- std::cout << "NDB already has example table: GARAGE. "
+ while (mysql_query(&mysql,
+ "CREATE TABLE"
+ " GARAGE"
+ " (REG_NO INT UNSIGNED NOT NULL,"
+ " BRAND CHAR(20) NOT NULL,"
+ " COLOR CHAR(20) NOT NULL,"
+ " PRIMARY KEY USING HASH (REG_NO))"
+ " ENGINE=NDB"))
+ {
+ if (mysql_errno(&mysql) != ER_TABLE_EXISTS_ERROR)
+ MYSQLERROR(mysql);
+ std::cout << "MySQL Cluster already has example table: GARAGE. "
<< "Dropping it..." << std::endl;
- if(myDict->dropTable("GARAGE") == -1)
- {
- std::cout << "Failed to drop: GARAGE." << std::endl;
- exit(1);
- }
- }
-
- myTable.setName("GARAGE");
-
- myColumn.setName("REG_NO");
- myColumn.setType(NdbDictionary::Column::Unsigned);
- myColumn.setLength(1);
- myColumn.setPrimaryKey(true);
- myColumn.setNullable(false);
- myTable.addColumn(myColumn);
-
- myColumn.setName("BRAND");
- myColumn.setType(NdbDictionary::Column::Char);
- myColumn.setLength(20);
- myColumn.setPrimaryKey(false);
- myColumn.setNullable(false);
- myTable.addColumn(myColumn);
-
-
- myColumn.setName("COLOR");
- myColumn.setType(NdbDictionary::Column::Char);
- myColumn.setLength(20);
- myColumn.setPrimaryKey(false);
- myColumn.setNullable(false);
- myTable.addColumn(myColumn);
-
- if (myDict->createTable(myTable) == -1) {
- APIERROR(myDict->getNdbError());
- return -1;
+ /**************
+ * Drop table *
+ **************/
+ if (mysql_query(&mysql, "DROP TABLE GARAGE"))
+ MYSQLERROR(mysql);
}
return 1;
}
@@ -196,93 +143,71 @@ int create_table(Ndb * myNdb)
int populate(Ndb * myNdb)
{
- NdbConnection* myNdbConnection[15]; // For transactions
- NdbOperation* myNdbOperation; // For operations
- /******************************************************
- * Insert (we do 15 insert transactions in parallel) *
- ******************************************************/
+ int i;
+ Car cars[15];
+
+ const NdbDictionary::Dictionary* myDict= myNdb->getDictionary();
+ const NdbDictionary::Table *myTable= myDict->getTable("GARAGE");
+
+ if (myTable == NULL)
+ APIERROR(myDict->getNdbError());
+
/**
* Five blue mercedes
*/
- for (int i = 0; i < 5; i++)
+ for (i = 0; i < 5; i++)
{
- myNdbConnection[i] = myNdb->startTransaction();
- if (myNdbConnection[i] == NULL)
- APIERROR(myNdb->getNdbError());
- myNdbOperation = myNdbConnection[i]->getNdbOperation("GARAGE");
- // Error check. If error, then maybe table GARAGE is not in database
- if (myNdbOperation == NULL)
- APIERROR(myNdbConnection[i]->getNdbError());
- myNdbOperation->insertTuple();
- myNdbOperation->equal("REG_NO", i);
- myNdbOperation->setValue("BRAND", "Mercedes");
- myNdbOperation->setValue("COLOR", "Blue");
- // Prepare transaction (the transaction is NOT yet sent to NDB)
- myNdbConnection[i]->executeAsynchPrepare(Commit, &callback, NULL);
+ cars[i].reg_no = i;
+ sprintf(cars[i].brand, "Mercedes");
+ sprintf(cars[i].color, "Blue");
}
-
/**
* Five black bmw
*/
- for (int i = 5; i < 10; i++)
+ for (i = 5; i < 10; i++)
{
- myNdbConnection[i] = myNdb->startTransaction();
- if (myNdbConnection[i] == NULL)
- APIERROR(myNdb->getNdbError());
- myNdbOperation = myNdbConnection[i]->getNdbOperation("GARAGE");
- // Error check. If error, then maybe table MYTABLENAME is not in database
- if (myNdbOperation == NULL)
- APIERROR(myNdbConnection[i]->getNdbError());
- myNdbOperation->insertTuple();
- myNdbOperation->equal("REG_NO", i);
- myNdbOperation->setValue("BRAND", "BMW");
- myNdbOperation->setValue("COLOR", "Black");
- // Prepare transaction (the transaction is NOT yet sent to NDB)
- myNdbConnection[i]->executeAsynchPrepare(Commit, &callback, NULL);
+ cars[i].reg_no = i;
+ sprintf(cars[i].brand, "BMW");
+ sprintf(cars[i].color, "Black");
}
/**
* Five pink toyotas
*/
- for (int i = 10; i < 15; i++) {
- myNdbConnection[i] = myNdb->startTransaction();
- if (myNdbConnection[i] == NULL) APIERROR(myNdb->getNdbError());
- myNdbOperation = myNdbConnection[i]->getNdbOperation("GARAGE");
- // Error check. If error, then maybe table MYTABLENAME is not in database
- if (myNdbOperation == NULL) APIERROR(myNdbConnection[i]->getNdbError());
+ for (i = 10; i < 15; i++)
+ {
+ cars[i].reg_no = i;
+ sprintf(cars[i].brand, "Toyota");
+ sprintf(cars[i].color, "Pink");
+ }
+
+ NdbTransaction* myTrans = myNdb->startTransaction();
+ if (myTrans == NULL)
+ APIERROR(myNdb->getNdbError());
+
+ for (i = 0; i < 15; i++)
+ {
+ NdbOperation* myNdbOperation = myTrans->getNdbOperation(myTable);
+ if (myNdbOperation == NULL)
+ APIERROR(myTrans->getNdbError());
myNdbOperation->insertTuple();
- myNdbOperation->equal("REG_NO", i);
- myNdbOperation->setValue("BRAND", "Toyota");
- myNdbOperation->setValue("COLOR", "Pink");
- // Prepare transaction (the transaction is NOT yet sent to NDB)
- myNdbConnection[i]->executeAsynchPrepare(Commit, &callback, NULL);
+ myNdbOperation->equal("REG_NO", cars[i].reg_no);
+ myNdbOperation->setValue("BRAND", cars[i].brand);
+ myNdbOperation->setValue("COLOR", cars[i].color);
}
- // Send all transactions to NDB
- myNdb->sendPreparedTransactions(0);
- // Poll all transactions
- myNdb->pollNdb(3000, 0);
-
- // it is also possible to use sendPollNdb instead of
- // myNdb->sendPreparedTransactions(0); and myNdb->pollNdb(3000, 15); above.
- // myNdb->sendPollNdb(3000,0);
- // Note! Neither sendPollNdb or pollNdb returs until all 15 callbacks have
- // executed.
-
- // Close all transactions. It is also possible to close transactions
- // in the callback.
- for (int i = 0; i < 15; i++)
- myNdb->closeTransaction(myNdbConnection[i]);
- return 1;
+ int check = myTrans->execute(NdbTransaction::Commit);
+
+ myTrans->close();
+
+ return check != -1;
}
int scan_delete(Ndb* myNdb,
- int parallelism,
int column,
- int column_len,
const char * color)
-
+
{
// Scan all records exclusive and delete
@@ -292,9 +217,15 @@ int scan_delete(Ndb* myNdb,
int deletedRows = 0;
int check;
NdbError err;
- NdbConnection *myTrans;
+ NdbTransaction *myTrans;
NdbScanOperation *myScanOp;
+ const NdbDictionary::Dictionary* myDict= myNdb->getDictionary();
+ const NdbDictionary::Table *myTable= myDict->getTable("GARAGE");
+
+ if (myTable == NULL)
+ APIERROR(myDict->getNdbError());
+
/**
* Loop as long as :
* retryMax not reached
@@ -331,7 +262,7 @@ int scan_delete(Ndb* myNdb,
/**
* Get a scan operation.
*/
- myScanOp = myTrans->getNdbScanOperation("GARAGE");
+ myScanOp = myTrans->getNdbScanOperation(myTable);
if (myScanOp == NULL)
{
std::cout << myTrans->getNdbError().message << std::endl;
@@ -342,20 +273,20 @@ int scan_delete(Ndb* myNdb,
/**
* Define a result set for the scan.
*/
- NdbResultSet * rs = myScanOp->readTuplesExclusive(parallelism);
- if( rs == 0 ) {
+ if(myScanOp->readTuples(NdbOperation::LM_Exclusive) != 0)
+ {
std::cout << myTrans->getNdbError().message << std::endl;
myNdb->closeTransaction(myTrans);
return -1;
}
-
+
/**
* Use NdbScanFilter to define a search critera
*/
NdbScanFilter filter(myScanOp) ;
if(filter.begin(NdbScanFilter::AND) < 0 ||
- filter.eq(column, color, column_len, false) <0||
- filter.end() <0)
+ filter.cmp(NdbScanFilter::COND_EQ, column, color) < 0 ||
+ filter.end() < 0)
{
std::cout << myTrans->getNdbError().message << std::endl;
myNdb->closeTransaction(myTrans);
@@ -365,7 +296,7 @@ int scan_delete(Ndb* myNdb,
/**
* Start scan (NoCommit since we are only reading at this stage);
*/
- if(myTrans->execute(NoCommit) != 0){
+ if(myTrans->execute(NdbTransaction::NoCommit) != 0){
err = myTrans->getNdbError();
if(err.status == NdbError::TemporaryError){
std::cout << myTrans->getNdbError().message << std::endl;
@@ -384,9 +315,11 @@ int scan_delete(Ndb* myNdb,
* start of loop: nextResult(true) means that "parallelism" number of
* rows are fetched from NDB and cached in NDBAPI
*/
- while((check = rs->nextResult(true)) == 0){
- do {
- if (rs->deleteTuple() != 0){
+ while((check = myScanOp->nextResult(true)) == 0){
+ do
+ {
+ if (myScanOp->deleteCurrentTuple() != 0)
+ {
std::cout << myTrans->getNdbError().message << std::endl;
myNdb->closeTransaction(myTrans);
return -1;
@@ -398,21 +331,32 @@ int scan_delete(Ndb* myNdb,
* cached in the NDBAPI are modified before
* fetching more rows from NDB.
*/
- } while((check = rs->nextResult(false)) == 0);
+ } while((check = myScanOp->nextResult(false)) == 0);
/**
* Commit when all cached tuple have been marked for deletion
*/
- if(check != -1){
- check = myTrans->execute(Commit);
- myTrans->releaseCompletedOperations();
+ if(check != -1)
+ {
+ check = myTrans->execute(NdbTransaction::Commit);
+ }
+
+ if(check == -1)
+ {
+ /**
+ * Create a new transaction, while keeping scan open
+ */
+ check = myTrans->restart();
}
+
/**
* Check for errors
*/
err = myTrans->getNdbError();
- if(check == -1){
- if(err.status == NdbError::TemporaryError){
+ if(check == -1)
+ {
+ if(err.status == NdbError::TemporaryError)
+ {
std::cout << myTrans->getNdbError().message << std::endl;
myNdb->closeTransaction(myTrans);
milliSleep(50);
@@ -426,10 +370,10 @@ int scan_delete(Ndb* myNdb,
std::cout << myTrans->getNdbError().message << std::endl;
myNdb->closeTransaction(myTrans);
return 0;
-
-
}
- if(myTrans!=0) {
+
+ if(myTrans!=0)
+ {
std::cout << myTrans->getNdbError().message << std::endl;
myNdb->closeTransaction(myTrans);
}
@@ -438,10 +382,7 @@ int scan_delete(Ndb* myNdb,
int scan_update(Ndb* myNdb,
- int parallelism,
- int column_len,
int update_column,
- const char * column_name,
const char * before_color,
const char * after_color)
@@ -454,9 +395,15 @@ int scan_update(Ndb* myNdb,
int updatedRows = 0;
int check;
NdbError err;
- NdbConnection *myTrans;
+ NdbTransaction *myTrans;
NdbScanOperation *myScanOp;
+ const NdbDictionary::Dictionary* myDict= myNdb->getDictionary();
+ const NdbDictionary::Table *myTable= myDict->getTable("GARAGE");
+
+ if (myTable == NULL)
+ APIERROR(myDict->getNdbError());
+
/**
* Loop as long as :
* retryMax not reached
@@ -494,7 +441,7 @@ int scan_update(Ndb* myNdb,
/**
* Get a scan operation.
*/
- myScanOp = myTrans->getNdbScanOperation("GARAGE");
+ myScanOp = myTrans->getNdbScanOperation(myTable);
if (myScanOp == NULL)
{
std::cout << myTrans->getNdbError().message << std::endl;
@@ -505,8 +452,8 @@ int scan_update(Ndb* myNdb,
/**
* Define a result set for the scan.
*/
- NdbResultSet * rs = myScanOp->readTuplesExclusive(parallelism);
- if( rs == 0 ) {
+ if( myScanOp->readTuples(NdbOperation::LM_Exclusive) )
+ {
std::cout << myTrans->getNdbError().message << std::endl;
myNdb->closeTransaction(myTrans);
return -1;
@@ -517,7 +464,7 @@ int scan_update(Ndb* myNdb,
*/
NdbScanFilter filter(myScanOp) ;
if(filter.begin(NdbScanFilter::AND) < 0 ||
- filter.eq(update_column, before_color, column_len, false) <0||
+ filter.cmp(NdbScanFilter::COND_EQ, update_column, before_color) <0||
filter.end() <0)
{
std::cout << myTrans->getNdbError().message << std::endl;
@@ -528,7 +475,8 @@ int scan_update(Ndb* myNdb,
/**
* Start scan (NoCommit since we are only reading at this stage);
*/
- if(myTrans->execute(NoCommit) != 0){
+ if(myTrans->execute(NdbTransaction::NoCommit) != 0)
+ {
err = myTrans->getNdbError();
if(err.status == NdbError::TemporaryError){
std::cout << myTrans->getNdbError().message << std::endl;
@@ -541,49 +489,49 @@ int scan_update(Ndb* myNdb,
return -1;
}
- /**
- * Define an update operation
- */
- NdbOperation * myUpdateOp;
- /**
- * start of loop: nextResult(true) means that "parallelism" number of
- * rows are fetched from NDB and cached in NDBAPI
- */
- while((check = rs->nextResult(true)) == 0){
+ /**
+ * start of loop: nextResult(true) means that "parallelism" number of
+ * rows are fetched from NDB and cached in NDBAPI
+ */
+ while((check = myScanOp->nextResult(true)) == 0){
do {
/**
* Get update operation
*/
- myUpdateOp = rs->updateTuple();
- if (myUpdateOp == 0){
+ NdbOperation * myUpdateOp = myScanOp->updateCurrentTuple();
+ if (myUpdateOp == 0)
+ {
std::cout << myTrans->getNdbError().message << std::endl;
myNdb->closeTransaction(myTrans);
return -1;
}
updatedRows++;
+
/**
* do the update
*/
- myUpdateOp->setValue(update_column,after_color);
+ myUpdateOp->setValue(update_column, after_color);
/**
* nextResult(false) means that the records
* cached in the NDBAPI are modified before
* fetching more rows from NDB.
*/
- } while((check = rs->nextResult(false)) == 0);
+ } while((check = myScanOp->nextResult(false)) == 0);
/**
- * Commit when all cached tuple have been updated
+     * NoCommit when all cached tuples have been updated
*/
- if(check != -1){
- check = myTrans->execute(Commit);
- myTrans->releaseCompletedOperations();
+ if(check != -1)
+ {
+ check = myTrans->execute(NdbTransaction::NoCommit);
}
+
/**
* Check for errors
*/
err = myTrans->getNdbError();
- if(check == -1){
+ if(check == -1)
+ {
if(err.status == NdbError::TemporaryError){
std::cout << myTrans->getNdbError().message << std::endl;
myNdb->closeTransaction(myTrans);
@@ -595,13 +543,28 @@ int scan_update(Ndb* myNdb,
* End of loop
*/
}
+
+ /**
+ * Commit all prepared operations
+ */
+ if(myTrans->execute(NdbTransaction::Commit) == -1)
+ {
+ if(err.status == NdbError::TemporaryError){
+ std::cout << myTrans->getNdbError().message << std::endl;
+ myNdb->closeTransaction(myTrans);
+ milliSleep(50);
+ continue;
+ }
+ }
+
std::cout << myTrans->getNdbError().message << std::endl;
myNdb->closeTransaction(myTrans);
- return 0;
-
-
+ return 0;
}
- if(myTrans!=0) {
+
+
+ if(myTrans!=0)
+ {
std::cout << myTrans->getNdbError().message << std::endl;
myNdb->closeTransaction(myTrans);
}
@@ -610,9 +573,7 @@ int scan_update(Ndb* myNdb,
-int scan_print(Ndb * myNdb, int parallelism,
- int column_len_brand,
- int column_len_color)
+int scan_print(Ndb * myNdb)
{
// Scan all records exclusive and update
// them one by one
@@ -621,13 +582,19 @@ int scan_print(Ndb * myNdb, int parallelism,
int fetchedRows = 0;
int check;
NdbError err;
- NdbConnection *myTrans;
+ NdbTransaction *myTrans;
NdbScanOperation *myScanOp;
/* Result of reading attribute value, three columns:
REG_NO, BRAND, and COLOR
*/
NdbRecAttr * myRecAttr[3];
+ const NdbDictionary::Dictionary* myDict= myNdb->getDictionary();
+ const NdbDictionary::Table *myTable= myDict->getTable("GARAGE");
+
+ if (myTable == NULL)
+ APIERROR(myDict->getNdbError());
+
/**
* Loop as long as :
* retryMax not reached
@@ -665,7 +632,7 @@ int scan_print(Ndb * myNdb, int parallelism,
* Define a scan operation.
* NDBAPI.
*/
- myScanOp = myTrans->getNdbScanOperation("GARAGE");
+ myScanOp = myTrans->getNdbScanOperation(myTable);
if (myScanOp == NULL)
{
std::cout << myTrans->getNdbError().message << std::endl;
@@ -674,10 +641,10 @@ int scan_print(Ndb * myNdb, int parallelism,
}
/**
- * Define a result set for the scan.
- */
- NdbResultSet * rs = myScanOp->readTuplesExclusive(parallelism);
- if( rs == 0 ) {
+ * Read without locks, without being placed in lock queue
+ */
+ if( myScanOp->readTuples(NdbOperation::LM_CommittedRead) == -1)
+ {
std::cout << myTrans->getNdbError().message << std::endl;
myNdb->closeTransaction(myTrans);
return -1;
@@ -701,7 +668,7 @@ int scan_print(Ndb * myNdb, int parallelism,
/**
* Start scan (NoCommit since we are only reading at this stage);
*/
- if(myTrans->execute(NoCommit) != 0){
+ if(myTrans->execute(NdbTransaction::NoCommit) != 0){
err = myTrans->getNdbError();
if(err.status == NdbError::TemporaryError){
std::cout << myTrans->getNdbError().message << std::endl;
@@ -719,7 +686,7 @@ int scan_print(Ndb * myNdb, int parallelism,
* start of loop: nextResult(true) means that "parallelism" number of
* rows are fetched from NDB and cached in NDBAPI
*/
- while((check = rs->nextResult(true)) == 0){
+ while((check = myScanOp->nextResult(true)) == 0){
do {
fetchedRows++;
@@ -727,28 +694,23 @@ int scan_print(Ndb * myNdb, int parallelism,
* print REG_NO unsigned int
*/
std::cout << myRecAttr[0]->u_32_value() << "\t";
- char * buf_brand = new char[column_len_brand+1];
- char * buf_color = new char[column_len_color+1];
+
/**
* print BRAND character string
*/
- memcpy(buf_brand, myRecAttr[1]->aRef(), column_len_brand);
- buf_brand[column_len_brand] = 0;
- std::cout << buf_brand << "\t";
- delete [] buf_brand;
+ std::cout << myRecAttr[1]->aRef() << "\t";
+
/**
* print COLOR character string
*/
- memcpy(buf_color, myRecAttr[2]->aRef(), column_len_color);
- buf_brand[column_len_color] = 0;
- std::cout << buf_color << std::endl;
- delete [] buf_color;
+ std::cout << myRecAttr[2]->aRef() << std::endl;
+
/**
* nextResult(false) means that the records
* cached in the NDBAPI are modified before
* fetching more rows from NDB.
*/
- } while((check = rs->nextResult(false)) == 0);
+ } while((check = myScanOp->nextResult(false)) == 0);
}
myNdb->closeTransaction(myTrans);
@@ -762,55 +724,102 @@ int scan_print(Ndb * myNdb, int parallelism,
int main()
{
ndb_init();
- Ndb* myNdb = new Ndb( "TEST_DB" ); // Object representing the database
+ MYSQL mysql;
-
-
- /*******************************************
- * Initialize NDB and wait until its ready *
- *******************************************/
- if (myNdb->init(1024) == -1) { // Set max 1024 parallel transactions
- APIERROR(myNdb->getNdbError());
+ /**************************************************************
+ * Connect to mysql server and create table *
+ **************************************************************/
+ {
+ if ( !mysql_init(&mysql) ) {
+ std::cout << "mysql_init failed\n";
+ exit(-1);
+ }
+ if ( !mysql_real_connect(&mysql, "localhost", "root", "", "",
+ 3306, "/tmp/mysql.sock", 0) )
+ MYSQLERROR(mysql);
+
+ mysql_query(&mysql, "CREATE DATABASE TEST_DB");
+ if (mysql_query(&mysql, "USE TEST_DB") != 0) MYSQLERROR(mysql);
+
+ create_table(mysql);
+ }
+
+ /**************************************************************
+ * Connect to ndb cluster *
+ **************************************************************/
+
+ Ndb_cluster_connection cluster_connection;
+ if (cluster_connection.connect(4, 5, 1))
+ {
+ std::cout << "Unable to connect to cluster within 30 secs." << std::endl;
+ exit(-1);
+ }
+ // Optionally connect and wait for the storage nodes (ndbd's)
+ if (cluster_connection.wait_until_ready(30,0) < 0)
+ {
+ std::cout << "Cluster was not ready within 30 secs.\n";
exit(-1);
}
- if (myNdb->waitUntilReady(30) != 0) {
- std::cout << "NDB was not ready within 30 secs." << std::endl;
+ Ndb myNdb(&cluster_connection,"TEST_DB");
+ if (myNdb.init(1024) == -1) { // Set max 1024 parallel transactions
+ APIERROR(myNdb.getNdbError());
exit(-1);
}
- create_table(myNdb);
-
- NdbDictionary::Dictionary* myDict = myNdb->getDictionary();
- int column_color = myDict->getTable("GARAGE")->getColumn("COLOR")->getColumnNo();
- int column_len_color =
- myDict->getTable("GARAGE")->getColumn("COLOR")->getLength();
- int column_len_brand =
- myDict->getTable("GARAGE")->getColumn("BRAND")->getLength();
- int parallelism = 16;
-
- if(populate(myNdb) > 0)
- std::cout << "populate: Success!" << std::endl;
+ /*******************************************
+ * Check table definition *
+ *******************************************/
+ int column_color;
+ {
+ const NdbDictionary::Dictionary* myDict= myNdb.getDictionary();
+ const NdbDictionary::Table *t= myDict->getTable("GARAGE");
+
+ Car car;
+ if (t->getColumn("COLOR")->getLength() != sizeof(car.color) ||
+ t->getColumn("BRAND")->getLength() != sizeof(car.brand))
+ {
+ std::cout << "Wrong table definition" << std::endl;
+ exit(-1);
+ }
+ column_color= t->getColumn("COLOR")->getColumnNo();
+ }
- if(scan_print(myNdb, parallelism, column_len_brand, column_len_color) > 0)
+ if(populate(&myNdb) > 0)
+ std::cout << "populate: Success!" << std::endl;
+
+ if(scan_print(&myNdb) > 0)
std::cout << "scan_print: Success!" << std::endl << std::endl;
std::cout << "Going to delete all pink cars!" << std::endl;
- if(scan_delete(myNdb, parallelism, column_color,
- column_len_color, "Pink") > 0)
- std::cout << "scan_delete: Success!" << std::endl << std::endl;
+
+ {
+ /**
+     * Note! color needs to be exactly the same size as the column definition
+ */
+ Car tmp;
+ sprintf(tmp.color, "Pink");
+ if(scan_delete(&myNdb, column_color, tmp.color) > 0)
+ std::cout << "scan_delete: Success!" << std::endl << std::endl;
+ }
- if(scan_print(myNdb, parallelism, column_len_brand, column_len_color) > 0)
+ if(scan_print(&myNdb) > 0)
std::cout << "scan_print: Success!" << std::endl << std::endl;
- std::cout << "Going to update all blue cars to black cars!" << std::endl;
- if(scan_update(myNdb, parallelism, column_len_color, column_color,
- "COLOR", "Blue", "Black") > 0)
{
- std::cout << "scan_update: Success!" << std::endl << std::endl;
+ /**
+     * Note! color1 and color2 need to be exactly the same size as the column definition
+ */
+ Car tmp1, tmp2;
+ sprintf(tmp1.color, "Blue");
+ sprintf(tmp2.color, "Black");
+ std::cout << "Going to update all " << tmp1.color
+ << " cars to " << tmp2.color << " cars!" << std::endl;
+ if(scan_update(&myNdb, column_color, tmp1.color, tmp2.color) > 0)
+ std::cout << "scan_update: Success!" << std::endl << std::endl;
}
- if(scan_print(myNdb, parallelism, column_len_brand, column_len_color) > 0)
+ if(scan_print(&myNdb) > 0)
std::cout << "scan_print: Success!" << std::endl << std::endl;
- delete myNdb;
+ return 0;
}
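Condensed from scan_update() above, the exclusive-scan-and-update pattern now looks like this. It is a sketch only: the retry handling and error checks of the real example are omitted, and it reuses the GARAGE table and a column number obtained from the dictionary.

static int change_color(Ndb *myNdb, const NdbDictionary::Table *myTable,
                        int color_column, const char *from, const char *to)
{
  NdbTransaction *trans = myNdb->startTransaction();
  NdbScanOperation *scan = trans->getNdbScanOperation(myTable);
  scan->readTuples(NdbOperation::LM_Exclusive);       // rows will be modified

  NdbScanFilter filter(scan);                         // match only `from` rows
  filter.begin(NdbScanFilter::AND);
  filter.cmp(NdbScanFilter::COND_EQ, color_column, from);
  filter.end();

  trans->execute(NdbTransaction::NoCommit);           // start the scan
  int check;
  while ((check = scan->nextResult(true)) == 0) {     // fetch a batch from NDB
    do {
      NdbOperation *upd = scan->updateCurrentTuple(); // update the cached row
      upd->setValue(color_column, to);
    } while ((check = scan->nextResult(false)) == 0); // walk the cached batch
    trans->execute(NdbTransaction::NoCommit);         // send the batch
  }
  int rc = trans->execute(NdbTransaction::Commit);    // commit everything
  myNdb->closeTransaction(trans);
  return (check != -1 && rc != -1) ? 1 : -1;          // -1 from nextResult/execute means error
}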
diff --git a/ndb/examples/ndbapi_scan_example/readme.txt b/ndb/ndbapi-examples/ndbapi_scan_example/readme.txt
index 47cb4bf9ffa..47cb4bf9ffa 100644
--- a/ndb/examples/ndbapi_scan_example/readme.txt
+++ b/ndb/ndbapi-examples/ndbapi_scan_example/readme.txt
diff --git a/ndb/ndbapi-examples/ndbapi_simple_example/Makefile b/ndb/ndbapi-examples/ndbapi_simple_example/Makefile
new file mode 100644
index 00000000000..0a59584fb66
--- /dev/null
+++ b/ndb/ndbapi-examples/ndbapi_simple_example/Makefile
@@ -0,0 +1,23 @@
+TARGET = ndbapi_simple
+SRCS = $(TARGET).cpp
+OBJS = $(TARGET).o
+CXX = g++
+CFLAGS = -c -Wall -fno-rtti -fno-exceptions
+CXXFLAGS =
+DEBUG =
+LFLAGS = -Wall
+TOP_SRCDIR = ../../..
+INCLUDE_DIR = $(TOP_SRCDIR)
+LIB_DIR = -L$(TOP_SRCDIR)/ndb/src/.libs \
+ -L$(TOP_SRCDIR)/libmysql_r/.libs \
+ -L$(TOP_SRCDIR)/mysys -L$(TOP_SRCDIR)/strings
+SYS_LIB =
+
+$(TARGET): $(OBJS)
+ $(CXX) $(CXXFLAGS) $(LFLAGS) $(LIB_DIR) $(OBJS) -lndbclient -lmysqlclient_r -lmysys -lmystrings -lz $(SYS_LIB) -o $(TARGET)
+
+$(TARGET).o: $(SRCS)
+ $(CXX) $(CFLAGS) -I$(INCLUDE_DIR)/include -I$(INCLUDE_DIR)/ndb/include -I$(INCLUDE_DIR)/ndb/include/ndbapi $(SRCS)
+
+clean:
+ rm -f *.o $(TARGET)
diff --git a/ndb/ndbapi-examples/ndbapi_simple_example/ndbapi_simple.cpp b/ndb/ndbapi-examples/ndbapi_simple_example/ndbapi_simple.cpp
new file mode 100644
index 00000000000..152d4fa44af
--- /dev/null
+++ b/ndb/ndbapi-examples/ndbapi_simple_example/ndbapi_simple.cpp
@@ -0,0 +1,278 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+/*
+ * ndbapi_simple.cpp: Using synchronous transactions in NDB API
+ *
+ * Correct output from this program is:
+ *
+ * ATTR1 ATTR2
+ * 0 10
+ * 1 1
+ * 2 12
+ * Detected that deleted tuple doesn't exist!
+ * 4 14
+ * 5 5
+ * 6 16
+ * 7 7
+ * 8 18
+ * 9 9
+ *
+ */
+
+#include <mysql.h>
+#include <NdbApi.hpp>
+// Used for cout
+#include <stdio.h>
+#include <iostream>
+
+static void run_application(MYSQL &, Ndb_cluster_connection &);
+
+#define PRINT_ERROR(code,msg) \
+ std::cout << "Error in " << __FILE__ << ", line: " << __LINE__ \
+ << ", code: " << code \
+ << ", msg: " << msg << "." << std::endl
+#define MYSQLERROR(mysql) { \
+ PRINT_ERROR(mysql_errno(&mysql),mysql_error(&mysql)); \
+ exit(-1); }
+#define APIERROR(error) { \
+ PRINT_ERROR(error.code,error.message); \
+ exit(-1); }
+
+int main()
+{
+ // ndb_init must be called first
+ ndb_init();
+
+ // connect to mysql server and cluster and run application
+ {
+ // Object representing the cluster
+ Ndb_cluster_connection cluster_connection;
+
+ // Connect to cluster management server (ndb_mgmd)
+ if (cluster_connection.connect(4 /* retries */,
+ 5 /* delay between retries */,
+ 1 /* verbose */))
+ {
+ std::cout << "Cluster management server was not ready within 30 secs.\n";
+ exit(-1);
+ }
+
+ // Optionally connect and wait for the storage nodes (ndbd's)
+ if (cluster_connection.wait_until_ready(30,0) < 0)
+ {
+ std::cout << "Cluster was not ready within 30 secs.\n";
+ exit(-1);
+ }
+
+ // connect to mysql server
+ MYSQL mysql;
+ if ( !mysql_init(&mysql) ) {
+ std::cout << "mysql_init failed\n";
+ exit(-1);
+ }
+ if ( !mysql_real_connect(&mysql, "localhost", "root", "", "",
+ 3306, "/tmp/mysql.sock", 0) )
+ MYSQLERROR(mysql);
+
+ // run the application code
+ run_application(mysql, cluster_connection);
+ }
+
+ ndb_end(0);
+
+  std::cout << "\nTo drop the created table, use:\n"
+ << "echo \"drop table MYTABLENAME\" | mysql TEST_DB_1 -u root\n";
+
+ return 0;
+}
+
+static void create_table(MYSQL &);
+static void do_insert(Ndb &);
+static void do_update(Ndb &);
+static void do_delete(Ndb &);
+static void do_read(Ndb &);
+
+static void run_application(MYSQL &mysql,
+ Ndb_cluster_connection &cluster_connection)
+{
+ /********************************************
+ * Connect to database via mysql-c *
+ ********************************************/
+ mysql_query(&mysql, "CREATE DATABASE TEST_DB_1");
+ if (mysql_query(&mysql, "USE TEST_DB_1") != 0) MYSQLERROR(mysql);
+ create_table(mysql);
+
+ /********************************************
+ * Connect to database via NdbApi *
+ ********************************************/
+ // Object representing the database
+ Ndb myNdb( &cluster_connection, "TEST_DB_1" );
+ if (myNdb.init()) APIERROR(myNdb.getNdbError());
+
+ /*
+ * Do different operations on database
+ */
+ do_insert(myNdb);
+ do_update(myNdb);
+ do_delete(myNdb);
+ do_read(myNdb);
+}
+
+/*********************************************************
+ * Create a table named MYTABLENAME if it does not exist *
+ *********************************************************/
+static void create_table(MYSQL &mysql)
+{
+ if (mysql_query(&mysql,
+ "CREATE TABLE"
+ " MYTABLENAME"
+ " (ATTR1 INT UNSIGNED NOT NULL PRIMARY KEY,"
+ " ATTR2 INT UNSIGNED NOT NULL)"
+ " ENGINE=NDB"))
+ MYSQLERROR(mysql);
+}
+
+/**************************************************************************
+ * Using 5 transactions, insert 10 tuples in table: (0,0),(1,1),...,(9,9) *
+ **************************************************************************/
+static void do_insert(Ndb &myNdb)
+{
+ const NdbDictionary::Dictionary* myDict= myNdb.getDictionary();
+ const NdbDictionary::Table *myTable= myDict->getTable("MYTABLENAME");
+
+ if (myTable == NULL)
+ APIERROR(myDict->getNdbError());
+
+ for (int i = 0; i < 5; i++) {
+ NdbTransaction *myTransaction= myNdb.startTransaction();
+ if (myTransaction == NULL) APIERROR(myNdb.getNdbError());
+
+ NdbOperation *myOperation= myTransaction->getNdbOperation(myTable);
+ if (myOperation == NULL) APIERROR(myTransaction->getNdbError());
+
+ myOperation->insertTuple();
+ myOperation->equal("ATTR1", i);
+ myOperation->setValue("ATTR2", i);
+
+ myOperation= myTransaction->getNdbOperation(myTable);
+ if (myOperation == NULL) APIERROR(myTransaction->getNdbError());
+
+ myOperation->insertTuple();
+ myOperation->equal("ATTR1", i+5);
+ myOperation->setValue("ATTR2", i+5);
+
+ if (myTransaction->execute( NdbTransaction::Commit ) == -1)
+ APIERROR(myTransaction->getNdbError());
+
+ myNdb.closeTransaction(myTransaction);
+ }
+}
+
+/*****************************************************************
+ * Update the second attribute in half of the tuples (adding 10) *
+ *****************************************************************/
+static void do_update(Ndb &myNdb)
+{
+ const NdbDictionary::Dictionary* myDict= myNdb.getDictionary();
+ const NdbDictionary::Table *myTable= myDict->getTable("MYTABLENAME");
+
+ if (myTable == NULL)
+ APIERROR(myDict->getNdbError());
+
+ for (int i = 0; i < 10; i+=2) {
+ NdbTransaction *myTransaction= myNdb.startTransaction();
+ if (myTransaction == NULL) APIERROR(myNdb.getNdbError());
+
+ NdbOperation *myOperation= myTransaction->getNdbOperation(myTable);
+ if (myOperation == NULL) APIERROR(myTransaction->getNdbError());
+
+ myOperation->updateTuple();
+ myOperation->equal( "ATTR1", i );
+ myOperation->setValue( "ATTR2", i+10);
+
+ if( myTransaction->execute( NdbTransaction::Commit ) == -1 )
+ APIERROR(myTransaction->getNdbError());
+
+ myNdb.closeTransaction(myTransaction);
+ }
+}
+
+/*************************************************
+ * Delete one tuple (the one with primary key 3) *
+ *************************************************/
+static void do_delete(Ndb &myNdb)
+{
+ const NdbDictionary::Dictionary* myDict= myNdb.getDictionary();
+ const NdbDictionary::Table *myTable= myDict->getTable("MYTABLENAME");
+
+ if (myTable == NULL)
+ APIERROR(myDict->getNdbError());
+
+ NdbTransaction *myTransaction= myNdb.startTransaction();
+ if (myTransaction == NULL) APIERROR(myNdb.getNdbError());
+
+ NdbOperation *myOperation= myTransaction->getNdbOperation(myTable);
+ if (myOperation == NULL) APIERROR(myTransaction->getNdbError());
+
+ myOperation->deleteTuple();
+ myOperation->equal( "ATTR1", 3 );
+
+ if (myTransaction->execute(NdbTransaction::Commit) == -1)
+ APIERROR(myTransaction->getNdbError());
+
+ myNdb.closeTransaction(myTransaction);
+}
+
+/*****************************
+ * Read and print all tuples *
+ *****************************/
+static void do_read(Ndb &myNdb)
+{
+ const NdbDictionary::Dictionary* myDict= myNdb.getDictionary();
+ const NdbDictionary::Table *myTable= myDict->getTable("MYTABLENAME");
+
+ if (myTable == NULL)
+ APIERROR(myDict->getNdbError());
+
+ std::cout << "ATTR1 ATTR2" << std::endl;
+
+ for (int i = 0; i < 10; i++) {
+ NdbTransaction *myTransaction= myNdb.startTransaction();
+ if (myTransaction == NULL) APIERROR(myNdb.getNdbError());
+
+ NdbOperation *myOperation= myTransaction->getNdbOperation(myTable);
+ if (myOperation == NULL) APIERROR(myTransaction->getNdbError());
+
+ myOperation->readTuple(NdbOperation::LM_Read);
+ myOperation->equal("ATTR1", i);
+
+ NdbRecAttr *myRecAttr= myOperation->getValue("ATTR2", NULL);
+ if (myRecAttr == NULL) APIERROR(myTransaction->getNdbError());
+
+ if(myTransaction->execute( NdbTransaction::Commit ) == -1)
+ if (i == 3) {
+ std::cout << "Detected that deleted tuple doesn't exist!" << std::endl;
+ } else {
+ APIERROR(myTransaction->getNdbError());
+ }
+
+ if (i != 3) {
+ printf(" %2d %2d\n", i, myRecAttr->u_32_value());
+ }
+ myNdb.closeTransaction(myTransaction);
+ }
+}
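All of the rewritten examples now share the same start-up and shutdown sequence; stripped of application logic it reduces to roughly the skeleton below (TEST_DB_1 is simply the database this example happens to use).

#include <NdbApi.hpp>

int main()
{
  ndb_init();                                    // must be called first
  {
    Ndb_cluster_connection cluster_connection;   // connection to ndb_mgmd
    if (cluster_connection.connect(4 /* retries */, 5 /* delay */, 1 /* verbose */))
      return -1;
    if (cluster_connection.wait_until_ready(30, 0) < 0)  // wait for the ndbd's
      return -1;

    Ndb myNdb(&cluster_connection, "TEST_DB_1"); // connection to one database
    if (myNdb.init())
      return -1;

    // ... transactions against myNdb go here ...
  }                                              // Ndb objects destroyed here
  ndb_end(0);                                    // called once, after all Ndb use
  return 0;
}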
diff --git a/ndb/ndbapi-examples/ndbapi_simple_index_example/Makefile b/ndb/ndbapi-examples/ndbapi_simple_index_example/Makefile
new file mode 100644
index 00000000000..d4356055935
--- /dev/null
+++ b/ndb/ndbapi-examples/ndbapi_simple_index_example/Makefile
@@ -0,0 +1,23 @@
+TARGET = ndbapi_simple_index
+SRCS = $(TARGET).cpp
+OBJS = $(TARGET).o
+CXX = g++
+CFLAGS = -c -Wall -fno-rtti -fno-exceptions
+CXXFLAGS =
+DEBUG =
+LFLAGS = -Wall
+TOP_SRCDIR = ../../..
+INCLUDE_DIR = $(TOP_SRCDIR)
+LIB_DIR = -L$(TOP_SRCDIR)/ndb/src/.libs \
+ -L$(TOP_SRCDIR)/libmysql_r/.libs \
+ -L$(TOP_SRCDIR)/mysys -L$(TOP_SRCDIR)/strings
+SYS_LIB =
+
+$(TARGET): $(OBJS)
+ $(CXX) $(CXXFLAGS) $(LFLAGS) $(LIB_DIR) $(OBJS) -lndbclient -lmysqlclient_r -lmysys -lmystrings -lz $(SYS_LIB) -o $(TARGET)
+
+$(TARGET).o: $(SRCS)
+ $(CXX) $(CFLAGS) -I$(INCLUDE_DIR)/include -I$(INCLUDE_DIR)/ndb/include -I$(INCLUDE_DIR)/ndb/include/ndbapi $(SRCS)
+
+clean:
+ rm -f *.o $(TARGET)
diff --git a/ndb/ndbapi-examples/ndbapi_simple_index_example/ndbapi_simple_index.cpp b/ndb/ndbapi-examples/ndbapi_simple_index_example/ndbapi_simple_index.cpp
new file mode 100644
index 00000000000..5afaf6078d1
--- /dev/null
+++ b/ndb/ndbapi-examples/ndbapi_simple_index_example/ndbapi_simple_index.cpp
@@ -0,0 +1,254 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+//
+// ndbapi_simple_index.cpp: Using secondary indexes in NDB API
+//
+// Correct output from this program is:
+//
+// ATTR1 ATTR2
+// 0 10
+// 1 1
+// 2 12
+// Detected that deleted tuple doesn't exist!
+// 4 14
+// 5 5
+// 6 16
+// 7 7
+// 8 18
+// 9 9
+
+#include <mysql.h>
+#include <NdbApi.hpp>
+
+// Used for cout
+#include <stdio.h>
+#include <iostream>
+
+#define PRINT_ERROR(code,msg) \
+ std::cout << "Error in " << __FILE__ << ", line: " << __LINE__ \
+ << ", code: " << code \
+ << ", msg: " << msg << "." << std::endl
+#define MYSQLERROR(mysql) { \
+ PRINT_ERROR(mysql_errno(&mysql),mysql_error(&mysql)); \
+ exit(-1); }
+#define APIERROR(error) { \
+ PRINT_ERROR(error.code,error.message); \
+ exit(-1); }
+
+int main()
+{
+ ndb_init();
+ MYSQL mysql;
+
+ /**************************************************************
+ * Connect to mysql server and create table *
+ **************************************************************/
+ {
+ if ( !mysql_init(&mysql) ) {
+ std::cout << "mysql_init failed\n";
+ exit(-1);
+ }
+ if ( !mysql_real_connect(&mysql, "localhost", "root", "", "",
+ 3306, "/tmp/mysql.sock", 0) )
+ MYSQLERROR(mysql);
+
+ mysql_query(&mysql, "CREATE DATABASE TEST_DB_1");
+ if (mysql_query(&mysql, "USE TEST_DB_1") != 0) MYSQLERROR(mysql);
+
+ if (mysql_query(&mysql,
+ "CREATE TABLE"
+ " MYTABLENAME"
+ " (ATTR1 INT UNSIGNED,"
+ " ATTR2 INT UNSIGNED NOT NULL,"
+ " PRIMARY KEY USING HASH (ATTR1),"
+ " UNIQUE MYINDEXNAME USING HASH (ATTR2))"
+ " ENGINE=NDB"))
+ MYSQLERROR(mysql);
+ }
+
+ /**************************************************************
+ * Connect to ndb cluster *
+ **************************************************************/
+
+ Ndb_cluster_connection *cluster_connection=
+ new Ndb_cluster_connection(); // Object representing the cluster
+
+ if (cluster_connection->connect(5,3,1))
+ {
+ std::cout << "Connect to cluster management server failed.\n";
+ exit(-1);
+ }
+
+ if (cluster_connection->wait_until_ready(30,30))
+ {
+ std::cout << "Cluster was not ready within 30 secs.\n";
+ exit(-1);
+ }
+
+ Ndb* myNdb = new Ndb( cluster_connection,
+ "TEST_DB_1" ); // Object representing the database
+ if (myNdb->init() == -1) {
+ APIERROR(myNdb->getNdbError());
+ exit(-1);
+ }
+
+ const NdbDictionary::Dictionary* myDict= myNdb->getDictionary();
+ const NdbDictionary::Table *myTable= myDict->getTable("MYTABLENAME");
+ if (myTable == NULL)
+ APIERROR(myDict->getNdbError());
+ const NdbDictionary::Index *myIndex= myDict->getIndex("MYINDEXNAME","MYTABLENAME");
+ if (myIndex == NULL)
+ APIERROR(myDict->getNdbError());
+
+ /**************************************************************************
+ * Using 5 transactions, insert 10 tuples in table: (0,0),(1,1),...,(9,9) *
+ **************************************************************************/
+ for (int i = 0; i < 5; i++) {
+ NdbTransaction *myTransaction= myNdb->startTransaction();
+ if (myTransaction == NULL) APIERROR(myNdb->getNdbError());
+
+ NdbOperation *myOperation= myTransaction->getNdbOperation(myTable);
+ if (myOperation == NULL) APIERROR(myTransaction->getNdbError());
+
+ myOperation->insertTuple();
+ myOperation->equal("ATTR1", i);
+ myOperation->setValue("ATTR2", i);
+
+ myOperation = myTransaction->getNdbOperation(myTable);
+ if (myOperation == NULL) APIERROR(myTransaction->getNdbError());
+
+ myOperation->insertTuple();
+ myOperation->equal("ATTR1", i+5);
+ myOperation->setValue("ATTR2", i+5);
+
+ if (myTransaction->execute( NdbTransaction::Commit ) == -1)
+ APIERROR(myTransaction->getNdbError());
+
+ myNdb->closeTransaction(myTransaction);
+ }
+
+ /*****************************************
+ * Read and print all tuples using index *
+ *****************************************/
+ std::cout << "ATTR1 ATTR2" << std::endl;
+
+ for (int i = 0; i < 10; i++) {
+ NdbTransaction *myTransaction= myNdb->startTransaction();
+ if (myTransaction == NULL) APIERROR(myNdb->getNdbError());
+
+ NdbIndexOperation *myIndexOperation=
+ myTransaction->getNdbIndexOperation(myIndex);
+ if (myIndexOperation == NULL) APIERROR(myTransaction->getNdbError());
+
+ myIndexOperation->readTuple(NdbOperation::LM_Read);
+ myIndexOperation->equal("ATTR2", i);
+
+ NdbRecAttr *myRecAttr= myIndexOperation->getValue("ATTR1", NULL);
+ if (myRecAttr == NULL) APIERROR(myTransaction->getNdbError());
+
+ if(myTransaction->execute( NdbTransaction::Commit ) != -1)
+ printf(" %2d %2d\n", myRecAttr->u_32_value(), i);
+
+ myNdb->closeTransaction(myTransaction);
+ }
+
+ /*****************************************************************
+ * Update the second attribute in half of the tuples (adding 10) *
+ *****************************************************************/
+ for (int i = 0; i < 10; i+=2) {
+ NdbTransaction *myTransaction= myNdb->startTransaction();
+ if (myTransaction == NULL) APIERROR(myNdb->getNdbError());
+
+ NdbIndexOperation *myIndexOperation=
+ myTransaction->getNdbIndexOperation(myIndex);
+ if (myIndexOperation == NULL) APIERROR(myTransaction->getNdbError());
+
+ myIndexOperation->updateTuple();
+ myIndexOperation->equal( "ATTR2", i );
+ myIndexOperation->setValue( "ATTR2", i+10);
+
+ if( myTransaction->execute( NdbTransaction::Commit ) == -1 )
+ APIERROR(myTransaction->getNdbError());
+
+ myNdb->closeTransaction(myTransaction);
+ }
+
+ /*************************************************
+ * Delete one tuple (the one with primary key 3) *
+ *************************************************/
+ {
+ NdbTransaction *myTransaction= myNdb->startTransaction();
+ if (myTransaction == NULL) APIERROR(myNdb->getNdbError());
+
+ NdbIndexOperation *myIndexOperation=
+ myTransaction->getNdbIndexOperation(myIndex);
+ if (myIndexOperation == NULL) APIERROR(myTransaction->getNdbError());
+
+ myIndexOperation->deleteTuple();
+ myIndexOperation->equal( "ATTR2", 3 );
+
+ if (myTransaction->execute(NdbTransaction::Commit) == -1)
+ APIERROR(myTransaction->getNdbError());
+
+ myNdb->closeTransaction(myTransaction);
+ }
+
+ /*****************************
+ * Read and print all tuples *
+ *****************************/
+ {
+ std::cout << "ATTR1 ATTR2" << std::endl;
+
+ for (int i = 0; i < 10; i++) {
+ NdbTransaction *myTransaction= myNdb->startTransaction();
+ if (myTransaction == NULL) APIERROR(myNdb->getNdbError());
+
+ NdbOperation *myOperation= myTransaction->getNdbOperation(myTable);
+ if (myOperation == NULL) APIERROR(myTransaction->getNdbError());
+
+ myOperation->readTuple(NdbOperation::LM_Read);
+ myOperation->equal("ATTR1", i);
+
+ NdbRecAttr *myRecAttr= myOperation->getValue("ATTR2", NULL);
+ if (myRecAttr == NULL) APIERROR(myTransaction->getNdbError());
+
+ if(myTransaction->execute( NdbTransaction::Commit ) == -1)
+ if (i == 3) {
+ std::cout << "Detected that deleted tuple doesn't exist!\n";
+ } else {
+ APIERROR(myTransaction->getNdbError());
+ }
+
+ if (i != 3) {
+ printf(" %2d %2d\n", i, myRecAttr->u_32_value());
+ }
+ myNdb->closeTransaction(myTransaction);
+ }
+ }
+
+ /**************
+ * Drop table *
+ **************/
+ if (mysql_query(&mysql, "DROP TABLE MYTABLENAME"))
+ MYSQLERROR(mysql);
+
+ delete myNdb;
+ delete cluster_connection;
+
+ ndb_end(0);
+ return 0;
+}
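
The unique-index read above can be condensed into a single helper for reuse; the sketch below uses only NDB API calls already present in ndbapi_simple_index.cpp, and the helper name and boolean return convention are illustrative rather than part of the patch.

    // Hypothetical helper distilled from the example: look up ATTR1 through the
    // unique hash index on ATTR2.  Returns true and fills out_attr1 on success,
    // false if the row does not exist or the transaction fails.
    static bool readByAttr2(Ndb* ndb, const NdbDictionary::Index* index,
                            unsigned attr2, unsigned& out_attr1)
    {
      NdbTransaction* trans = ndb->startTransaction();
      if (trans == NULL) return false;

      NdbIndexOperation* op = trans->getNdbIndexOperation(index);
      if (op == NULL) { ndb->closeTransaction(trans); return false; }

      op->readTuple(NdbOperation::LM_Read);
      op->equal("ATTR2", attr2);
      NdbRecAttr* rec = op->getValue("ATTR1", NULL);

      const bool ok = (rec != NULL &&
                       trans->execute(NdbTransaction::Commit) != -1);
      if (ok)
        out_attr1 = rec->u_32_value();

      ndb->closeTransaction(trans);
      return ok;
    }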
diff --git a/ndb/src/Makefile.am b/ndb/src/Makefile.am
index eb1cf1c6543..d35790a2e43 100644
--- a/ndb/src/Makefile.am
+++ b/ndb/src/Makefile.am
@@ -1,4 +1,4 @@
-SUBDIRS = common mgmapi ndbapi . kernel mgmsrv mgmclient cw
+SUBDIRS = common mgmapi ndbapi . kernel mgmclient mgmsrv cw
include $(top_srcdir)/ndb/config/common.mk.am
diff --git a/ndb/src/common/debugger/EventLogger.cpp b/ndb/src/common/debugger/EventLogger.cpp
index d7e4fbd53aa..9a1dcb8a3e1 100644
--- a/ndb/src/common/debugger/EventLogger.cpp
+++ b/ndb/src/common/debugger/EventLogger.cpp
@@ -33,838 +33,723 @@ EventLoggerBase::~EventLoggerBase()
}
-/**
- * This matrix defines which event should be printed when
- *
- * threshold - is in range [0-15]
- * severity - DEBUG to ALERT (Type of log message)
- */
-const EventLoggerBase::EventRepLogLevelMatrix EventLoggerBase::matrix[] = {
- // CONNECTION
- { EventReport::Connected, LogLevel::llConnection, 8, Logger::LL_INFO },
- { EventReport::Disconnected, LogLevel::llConnection, 8, Logger::LL_ALERT },
- { EventReport::CommunicationClosed, LogLevel::llConnection, 8, Logger::LL_INFO },
- { EventReport::CommunicationOpened, LogLevel::llConnection, 8, Logger::LL_INFO },
- { EventReport::ConnectedApiVersion, LogLevel::llConnection, 8, Logger::LL_INFO },
- // CHECKPOINT
- { EventReport::GlobalCheckpointStarted, LogLevel::llCheckpoint, 9, Logger::LL_INFO },
- { EventReport::GlobalCheckpointCompleted,LogLevel::llCheckpoint,10, Logger::LL_INFO },
- { EventReport::LocalCheckpointStarted, LogLevel::llCheckpoint, 7, Logger::LL_INFO },
- { EventReport::LocalCheckpointCompleted,LogLevel::llCheckpoint, 8, Logger::LL_INFO },
- { EventReport::LCPStoppedInCalcKeepGci, LogLevel::llCheckpoint, 0, Logger::LL_ALERT },
- { EventReport::LCPFragmentCompleted, LogLevel::llCheckpoint, 11, Logger::LL_INFO },
- { EventReport::UndoLogBlocked, LogLevel::llCheckpoint, 7, Logger::LL_INFO },
-
- // STARTUP
- { EventReport::NDBStartStarted, LogLevel::llStartUp, 1, Logger::LL_INFO },
- { EventReport::NDBStartCompleted, LogLevel::llStartUp, 1, Logger::LL_INFO },
- { EventReport::STTORRYRecieved, LogLevel::llStartUp,15, Logger::LL_INFO },
- { EventReport::StartPhaseCompleted, LogLevel::llStartUp, 4, Logger::LL_INFO },
- { EventReport::CM_REGCONF, LogLevel::llStartUp, 3, Logger::LL_INFO },
- { EventReport::CM_REGREF, LogLevel::llStartUp, 8, Logger::LL_INFO },
- { EventReport::FIND_NEIGHBOURS, LogLevel::llStartUp, 8, Logger::LL_INFO },
- { EventReport::NDBStopStarted, LogLevel::llStartUp, 1, Logger::LL_INFO },
- { EventReport::NDBStopAborted, LogLevel::llStartUp, 1, Logger::LL_INFO },
- { EventReport::StartREDOLog, LogLevel::llStartUp, 10, Logger::LL_INFO },
- { EventReport::StartLog, LogLevel::llStartUp, 10, Logger::LL_INFO },
- { EventReport::UNDORecordsExecuted, LogLevel::llStartUp, 15, Logger::LL_INFO },
-
- // NODERESTART
- { EventReport::NR_CopyDict, LogLevel::llNodeRestart, 8, Logger::LL_INFO },
- { EventReport::NR_CopyDistr, LogLevel::llNodeRestart, 8, Logger::LL_INFO },
- { EventReport::NR_CopyFragsStarted, LogLevel::llNodeRestart, 8, Logger::LL_INFO },
- { EventReport::NR_CopyFragDone, LogLevel::llNodeRestart, 10, Logger::LL_INFO },
- { EventReport::NR_CopyFragsCompleted, LogLevel::llNodeRestart, 8, Logger::LL_INFO },
-
- { EventReport::NodeFailCompleted, LogLevel::llNodeRestart, 8, Logger::LL_ALERT},
- { EventReport::NODE_FAILREP, LogLevel::llNodeRestart, 8, Logger::LL_ALERT},
- { EventReport::ArbitState, LogLevel::llNodeRestart, 6, Logger::LL_INFO },
- { EventReport::ArbitResult, LogLevel::llNodeRestart, 2, Logger::LL_ALERT},
- { EventReport::GCP_TakeoverStarted, LogLevel::llNodeRestart, 7, Logger::LL_INFO },
- { EventReport::GCP_TakeoverCompleted, LogLevel::llNodeRestart, 7, Logger::LL_INFO },
- { EventReport::LCP_TakeoverStarted, LogLevel::llNodeRestart, 7, Logger::LL_INFO },
- { EventReport::LCP_TakeoverCompleted, LogLevel::llNodeRestart, 7, Logger::LL_INFO },
-
- // STATISTIC
- { EventReport::TransReportCounters, LogLevel::llStatistic, 8, Logger::LL_INFO },
- { EventReport::OperationReportCounters, LogLevel::llStatistic, 8, Logger::LL_INFO },
- { EventReport::TableCreated, LogLevel::llStatistic, 7, Logger::LL_INFO },
- { EventReport::JobStatistic, LogLevel::llStatistic, 9, Logger::LL_INFO },
- { EventReport::SendBytesStatistic, LogLevel::llStatistic, 9, Logger::LL_INFO },
- { EventReport::ReceiveBytesStatistic, LogLevel::llStatistic, 9, Logger::LL_INFO },
- { EventReport::MemoryUsage, LogLevel::llStatistic, 5, Logger::LL_INFO },
-
- // ERROR
- { EventReport::TransporterError, LogLevel::llError, 2, Logger::LL_ERROR },
- { EventReport::TransporterWarning, LogLevel::llError, 8, Logger::LL_WARNING },
- { EventReport::MissedHeartbeat, LogLevel::llError, 8, Logger::LL_WARNING },
- { EventReport::DeadDueToHeartbeat, LogLevel::llError, 8, Logger::LL_ALERT },
- { EventReport::WarningEvent, LogLevel::llError, 2, Logger::LL_WARNING },
- // INFO
- { EventReport::SentHeartbeat, LogLevel::llInfo, 12, Logger::LL_INFO },
- { EventReport::CreateLogBytes, LogLevel::llInfo, 11, Logger::LL_INFO },
- { EventReport::InfoEvent, LogLevel::llInfo, 2, Logger::LL_INFO },
-
- //Single User
- { EventReport::SingleUser, LogLevel::llInfo, 7, Logger::LL_INFO},
-
- // Backup
- { EventReport::BackupStarted, LogLevel::llBackup, 7, Logger::LL_INFO },
- { EventReport::BackupCompleted, LogLevel::llBackup, 7, Logger::LL_INFO },
- { EventReport::BackupFailedToStart, LogLevel::llBackup, 7, Logger::LL_ALERT},
- { EventReport::BackupAborted, LogLevel::llBackup, 7, Logger::LL_ALERT }
-};
+#define QQQQ char *m_text, size_t m_text_len, const Uint32* theData
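// QQQQ abbreviates the parameter list shared by every getText* helper below:
// a destination text buffer, its length, and the raw event report data words,
// so each helper only has to format its own message.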
-const Uint32 EventLoggerBase::matrixSize = sizeof(EventLoggerBase::matrix)/
- sizeof(EventRepLogLevelMatrix);
-
-const char*
-EventLogger::getText(char * m_text, size_t m_text_len,
- int type,
- const Uint32* theData, NodeId nodeId)
-{
- // TODO: Change the switch implementation...
- char theNodeId[32];
- if (nodeId != 0){
- BaseString::snprintf(theNodeId, 32, "Node %u: ", nodeId);
- } else {
- theNodeId[0] = 0;
+void getTextConnected(QQQQ) {
+ BaseString::snprintf(m_text, m_text_len,
+ "Node %u Connected",
+ theData[1]);
+}
+void getTextConnectedApiVersion(QQQQ) {
+ BaseString::snprintf(m_text, m_text_len,
+ "Node %u: API version %d.%d.%d",
+ theData[1],
+ getMajor(theData[2]),
+ getMinor(theData[2]),
+ getBuild(theData[2]));
+}
+void getTextDisconnected(QQQQ) {
+ BaseString::snprintf(m_text, m_text_len,
+ "Node %u Disconnected",
+ theData[1]);
+}
+void getTextCommunicationClosed(QQQQ) {
+ //-----------------------------------------------------------------------
+ // REPORT communication to node closed.
+ //-----------------------------------------------------------------------
+ BaseString::snprintf(m_text, m_text_len,
+ "Communication to Node %u closed",
+ theData[1]);
+}
+void getTextCommunicationOpened(QQQQ) {
+ //-----------------------------------------------------------------------
+ // REPORT communication to node opened.
+ //-----------------------------------------------------------------------
+ BaseString::snprintf(m_text, m_text_len,
+ "Communication to Node %u opened",
+ theData[1]);
+}
+void getTextNDBStartStarted(QQQQ) {
+ //-----------------------------------------------------------------------
+ // Start of NDB has been initiated.
+ //-----------------------------------------------------------------------
+ BaseString::snprintf(m_text, m_text_len,
+ "Start initiated (version %d.%d.%d)",
+ getMajor(theData[1]),
+ getMinor(theData[1]),
+ getBuild(theData[1]));
+}
+void getTextNDBStopStarted(QQQQ) {
+ BaseString::snprintf(m_text, m_text_len,
+ "%s shutdown initiated",
+ (theData[1] == 1 ? "Cluster" : "Node"));
+}
+void getTextNDBStopAborted(QQQQ) {
+ BaseString::snprintf(m_text, m_text_len,
+ "Node shutdown aborted");
+}
+void getTextNDBStartCompleted(QQQQ) {
+ //-----------------------------------------------------------------------
+ // Start of NDB has been completed.
+ //-----------------------------------------------------------------------
+ BaseString::snprintf(m_text, m_text_len,
+ "Started (version %d.%d.%d)",
+ getMajor(theData[1]),
+ getMinor(theData[1]),
+ getBuild(theData[1]));
+}
+void getTextSTTORRYRecieved(QQQQ) {
+ //-----------------------------------------------------------------------
+  // STTORRY received after restart finished.
+ //-----------------------------------------------------------------------
+ BaseString::snprintf(m_text, m_text_len,
+ "STTORRY received after restart finished");
+}
+void getTextStartPhaseCompleted(QQQQ) {
+ //-----------------------------------------------------------------------
+ // REPORT Start phase completed.
+ //-----------------------------------------------------------------------
+ const char *type = "<Unknown>";
+ switch((NodeState::StartType)theData[2]){
+ case NodeState::ST_INITIAL_START:
+ type = "(initial start)";
+ break;
+ case NodeState::ST_SYSTEM_RESTART:
+ type = "(system restart)";
+ break;
+ case NodeState::ST_NODE_RESTART:
+ type = "(node restart)";
+ break;
+ case NodeState::ST_INITIAL_NODE_RESTART:
+ type = "(initial node restart)";
+ break;
+ case NodeState::ST_ILLEGAL_TYPE:
+ type = "";
+ break;
+ default:
+ BaseString::snprintf(m_text, m_text_len,
+ "Start phase %u completed (unknown = %d)",
+ theData[1],
+ theData[2]);
+ return;
}
+ BaseString::snprintf(m_text, m_text_len,
+ "Start phase %u completed %s",
+ theData[1],
+ type);
+}
+void getTextCM_REGCONF(QQQQ) {
+ BaseString::snprintf(m_text, m_text_len,
+ "CM_REGCONF president = %u, own Node = %u, our dynamic id = %u",
+ theData[2],
+ theData[1],
+ theData[3]);
+}
+void getTextCM_REGREF(QQQQ) {
+ const char* line = "";
+ switch (theData[3]) {
+ case 0:
+ line = "Busy";
+ break;
+ case 1:
+ line = "Election with wait = false";
+ break;
+ case 2:
+ line = "Election with wait = false";
+ break;
+ case 3:
+ line = "Not president";
+ break;
+ case 4:
+ line = "Election without selecting new candidate";
+ break;
+ default:
+ line = "No such cause";
+ break;
+ }//switch
- EventReport::EventType eventType = (EventReport::EventType)type;
- switch (eventType){
- case EventReport::Connected:
- BaseString::snprintf(m_text, m_text_len,
- "%sNode %u Connected",
- theNodeId,
- theData[1]);
- break;
- case EventReport::ConnectedApiVersion:
- BaseString::snprintf(m_text, m_text_len,
- "%sNode %u: API version %d.%d.%d",
- theNodeId,
- theData[1],
- getMajor(theData[2]),
- getMinor(theData[2]),
- getBuild(theData[2]));
- break;
- case EventReport::Disconnected:
- BaseString::snprintf(m_text, m_text_len,
- "%sNode %u Disconnected",
- theNodeId,
- theData[1]);
- break;
- case EventReport::CommunicationClosed:
- //-----------------------------------------------------------------------
- // REPORT communication to node closed.
- //-----------------------------------------------------------------------
- BaseString::snprintf(m_text, m_text_len,
- "%sCommunication to Node %u closed",
- theNodeId,
- theData[1]);
- break;
- case EventReport::CommunicationOpened:
- //-----------------------------------------------------------------------
- // REPORT communication to node opened.
- //-----------------------------------------------------------------------
- BaseString::snprintf(m_text, m_text_len,
- "%sCommunication to Node %u opened",
- theNodeId,
- theData[1]);
- break;
- case EventReport::NDBStartStarted:
- //-----------------------------------------------------------------------
- // Start of NDB has been initiated.
- //-----------------------------------------------------------------------
- BaseString::snprintf(m_text, m_text_len,
- "%sStart initiated (version %d.%d.%d)",
- theNodeId ,
- getMajor(theData[1]),
- getMinor(theData[1]),
- getBuild(theData[1]));
- break;
- case EventReport::NDBStopStarted:
- BaseString::snprintf(m_text, m_text_len,
- "%s%s shutdown initiated",
- theNodeId,
- (theData[1] == 1 ? "Cluster" : "Node"));
- break;
- case EventReport::NDBStopAborted:
- BaseString::snprintf(m_text, m_text_len,
- "%sNode shutdown aborted",
- theNodeId);
- break;
- case EventReport::NDBStartCompleted:
- //-----------------------------------------------------------------------
- // Start of NDB has been completed.
- //-----------------------------------------------------------------------
- BaseString::snprintf(m_text, m_text_len,
- "%sStarted (version %d.%d.%d)",
- theNodeId ,
- getMajor(theData[1]),
- getMinor(theData[1]),
- getBuild(theData[1]));
-
- break;
- case EventReport::STTORRYRecieved:
- //-----------------------------------------------------------------------
- // STTORRY recevied after restart finished.
- //-----------------------------------------------------------------------
+ BaseString::snprintf(m_text, m_text_len,
+ "CM_REGREF from Node %u to our Node %u. Cause = %s",
+ theData[2],
+ theData[1],
+ line);
+}
+void getTextFIND_NEIGHBOURS(QQQQ) {
+ //-----------------------------------------------------------------------
+  // REPORT our dynamic id and our left and right neighbour nodes.
+ //-----------------------------------------------------------------------
+ BaseString::snprintf(m_text, m_text_len,
+ "We are Node %u with dynamic ID %u, our left neighbour "
+ "is Node %u, our right is Node %u",
+ theData[1],
+ theData[4],
+ theData[2],
+ theData[3]);
+}
+void getTextNodeFailCompleted(QQQQ) {
+ //-----------------------------------------------------------------------
+ // REPORT Node failure phase completed.
+ //-----------------------------------------------------------------------
+ if (theData[1] == 0)
+ {
+ if (theData[3] != 0) {
+ BaseString::snprintf(m_text, m_text_len,
+ "Node %u completed failure of Node %u",
+ theData[3],
+ theData[2]);
+ } else {
+ BaseString::snprintf(m_text, m_text_len,
+ "All nodes completed failure of Node %u",
+ theData[2]);
+ }//if
+ } else {
+ const char* line = "";
+ if (theData[1] == DBTC){
+ line = "DBTC";
+ }else if (theData[1] == DBDICT){
+ line = "DBDICT";
+ }else if (theData[1] == DBDIH){
+ line = "DBDIH";
+ }else if (theData[1] == DBLQH){
+ line = "DBLQH";
+ }
BaseString::snprintf(m_text, m_text_len,
- "%sSTTORRY received after restart finished",
- theNodeId);
- break;
- case EventReport::StartPhaseCompleted:{
- //-----------------------------------------------------------------------
- // REPORT Start phase completed.
- //-----------------------------------------------------------------------
- const char * type = "<Unknown>";
- switch((NodeState::StartType)theData[2]){
- case NodeState::ST_INITIAL_START:
- type = "(initial start)";
+ "Node failure of %u %s completed",
+ theData[2],
+ line);
+ }
+}
+void getTextNODE_FAILREP(QQQQ) {
+ BaseString::snprintf(m_text, m_text_len,
+ "Node %u has failed. The Node state at failure "
+ "was %u",
+ theData[1],
+ theData[2]);
+}
+void getTextArbitState(QQQQ) {
+ //-----------------------------------------------------------------------
+ // REPORT arbitrator found or lost.
+ //-----------------------------------------------------------------------
+ {
+ const ArbitSignalData* sd = (ArbitSignalData*)theData;
+ char ticketText[ArbitTicket::TextLength + 1];
+ char errText[ArbitCode::ErrTextLength + 1];
+ const unsigned code = sd->code & 0xFFFF;
+ const unsigned state = sd->code >> 16;
+ switch (code) {
+ case ArbitCode::ThreadStart:
+ BaseString::snprintf(m_text, m_text_len,
+ "President restarts arbitration thread [state=%u]",
+ state);
break;
- case NodeState::ST_SYSTEM_RESTART:
- type = "(system restart)";
+ case ArbitCode::PrepPart2:
+ sd->ticket.getText(ticketText, sizeof(ticketText));
+ BaseString::snprintf(m_text, m_text_len,
+ "Prepare arbitrator node %u [ticket=%s]",
+ sd->node, ticketText);
break;
- case NodeState::ST_NODE_RESTART:
- type = "(node restart)";
+ case ArbitCode::PrepAtrun:
+ sd->ticket.getText(ticketText, sizeof(ticketText));
+ BaseString::snprintf(m_text, m_text_len,
+ "Receive arbitrator node %u [ticket=%s]",
+ sd->node, ticketText);
break;
- case NodeState::ST_INITIAL_NODE_RESTART:
- type = "(initial node restart)";
+ case ArbitCode::ApiStart:
+ sd->ticket.getText(ticketText, sizeof(ticketText));
+ BaseString::snprintf(m_text, m_text_len,
+ "Started arbitrator node %u [ticket=%s]",
+ sd->node, ticketText);
break;
- case NodeState::ST_ILLEGAL_TYPE:
- type = "";
+ case ArbitCode::ApiFail:
+ BaseString::snprintf(m_text, m_text_len,
+ "Lost arbitrator node %u - process failure [state=%u]",
+ sd->node, state);
+ break;
+ case ArbitCode::ApiExit:
+ BaseString::snprintf(m_text, m_text_len,
+ "Lost arbitrator node %u - process exit [state=%u]",
+ sd->node, state);
+ break;
+ default:
+ ArbitCode::getErrText(code, errText, sizeof(errText));
+ BaseString::snprintf(m_text, m_text_len,
+ "Lost arbitrator node %u - %s [state=%u]",
+ sd->node, errText, state);
break;
- default:{
- BaseString::snprintf(m_text, m_text_len,
- "%sStart phase %u completed (unknown = %d)",
- theNodeId,
- theData[1],
- theData[2]);
- return m_text;
- }
}
- BaseString::snprintf(m_text, m_text_len,
- "%sStart phase %u completed %s",
- theNodeId,
- theData[1],
- type);
- return m_text;
- break;
}
- case EventReport::CM_REGCONF:
- BaseString::snprintf(m_text, m_text_len,
- "%sCM_REGCONF president = %u, own Node = %u, our dynamic id = %u"
- ,
- theNodeId,
- theData[2],
- theData[1],
- theData[3]);
- break;
- case EventReport::CM_REGREF:
+}
+
+void getTextArbitResult(QQQQ) {
+ //-----------------------------------------------------------------------
+ // REPORT arbitration result (the failures may not reach us).
+ //-----------------------------------------------------------------------
{
- const char* line = "";
- switch (theData[3]) {
- case 0:
- line = "Busy";
+ const ArbitSignalData* sd = (ArbitSignalData*)theData;
+ char errText[ArbitCode::ErrTextLength + 1];
+ const unsigned code = sd->code & 0xFFFF;
+ const unsigned state = sd->code >> 16;
+ switch (code) {
+ case ArbitCode::LoseNodes:
+ BaseString::snprintf(m_text, m_text_len,
+ "Arbitration check lost - less than 1/2 nodes left");
+ break;
+ case ArbitCode::WinNodes:
+ BaseString::snprintf(m_text, m_text_len,
+ "Arbitration check won - all node groups and more than 1/2 nodes left");
+ break;
+ case ArbitCode::WinGroups:
+ BaseString::snprintf(m_text, m_text_len,
+ "Arbitration check won - node group majority");
+ break;
+ case ArbitCode::LoseGroups:
+ BaseString::snprintf(m_text, m_text_len,
+ "Arbitration check lost - missing node group");
break;
- case 1:
- line = "Election with wait = false";
+ case ArbitCode::Partitioning:
+ BaseString::snprintf(m_text, m_text_len,
+ "Network partitioning - arbitration required");
break;
- case 2:
- line = "Election with wait = false";
+ case ArbitCode::WinChoose:
+ BaseString::snprintf(m_text, m_text_len,
+ "Arbitration won - positive reply from node %u",
+ sd->node);
break;
- case 3:
- line = "Not president";
+ case ArbitCode::LoseChoose:
+ BaseString::snprintf(m_text, m_text_len,
+ "Arbitration lost - negative reply from node %u",
+ sd->node);
break;
- case 4:
- line = "Election without selecting new candidate";
+ case ArbitCode::LoseNorun:
+ BaseString::snprintf(m_text, m_text_len,
+ "Network partitioning - no arbitrator available");
+ break;
+ case ArbitCode::LoseNocfg:
+ BaseString::snprintf(m_text, m_text_len,
+ "Network partitioning - no arbitrator configured");
break;
default:
- line = "No such cause";
+ ArbitCode::getErrText(code, errText, sizeof(errText));
+ BaseString::snprintf(m_text, m_text_len,
+ "Arbitration failure - %s [state=%u]",
+ errText, state);
break;
- }//switch
-
- BaseString::snprintf(m_text, m_text_len,
- "%sCM_REGREF from Node %u to our Node %u. Cause = %s",
- theNodeId,
- theData[2],
- theData[1],
- line);
- }
- break;
- case EventReport::FIND_NEIGHBOURS:
- //-----------------------------------------------------------------------
- // REPORT Node Restart copied a fragment.
- //-----------------------------------------------------------------------
- BaseString::snprintf(m_text,
- m_text_len,
- "%sWe are Node %u with dynamic ID %u, our left neighbour "
- "is Node %u, our right is Node %u",
- theNodeId,
- theData[1],
- theData[4],
- theData[2],
- theData[3]);
- break;
- case EventReport::NodeFailCompleted:
- //-----------------------------------------------------------------------
- // REPORT Node failure phase completed.
- //-----------------------------------------------------------------------
- if (theData[1] == 0)
- {
- if (theData[3] != 0) {
- BaseString::snprintf(m_text, m_text_len,
- "%sNode %u completed failure of Node %u",
- theNodeId,
- theData[3],
- theData[2]);
- } else {
- BaseString::snprintf(m_text, m_text_len,
- "%sAll nodes completed failure of Node %u",
- theNodeId,
- theData[2]);
- }//if
- } else {
- const char* line = "";
- if (theData[1] == DBTC){
- line = "DBTC";
- }else if (theData[1] == DBDICT){
- line = "DBDICT";
- }else if (theData[1] == DBDIH){
- line = "DBDIH";
- }else if (theData[1] == DBLQH){
- line = "DBLQH";
- }
-
- BaseString::snprintf(m_text, m_text_len,
- "%sNode failure of %u %s completed",
- theNodeId,
- theData[2],
- line);
}
- break;
- case EventReport::NODE_FAILREP:
- BaseString::snprintf(m_text,
- m_text_len,
- "%sNode %u has failed. The Node state at failure "
- "was %u",
- theNodeId,
- theData[1],
- theData[2]);
-
- break;
- case EventReport::ArbitState:
- //-----------------------------------------------------------------------
- // REPORT arbitrator found or lost.
- //-----------------------------------------------------------------------
- { const ArbitSignalData* sd = (ArbitSignalData*)theData;
- char ticketText[ArbitTicket::TextLength + 1];
- char errText[ArbitCode::ErrTextLength + 1];
- const unsigned code = sd->code & 0xFFFF;
- const unsigned state = sd->code >> 16;
- switch (code) {
- case ArbitCode::ThreadStart:
- BaseString::snprintf(m_text, m_text_len,
- "%sPresident restarts arbitration thread [state=%u]",
- theNodeId, state);
- break;
- case ArbitCode::PrepPart2:
- sd->ticket.getText(ticketText, sizeof(ticketText));
- BaseString::snprintf(m_text, m_text_len,
- "%sPrepare arbitrator node %u [ticket=%s]",
- theNodeId, sd->node, ticketText);
- break;
- case ArbitCode::PrepAtrun:
- sd->ticket.getText(ticketText, sizeof(ticketText));
- BaseString::snprintf(m_text, m_text_len,
- "%sReceive arbitrator node %u [ticket=%s]",
- theNodeId, sd->node, ticketText);
- break;
- case ArbitCode::ApiStart:
- sd->ticket.getText(ticketText, sizeof(ticketText));
- BaseString::snprintf(m_text, m_text_len,
- "%sStarted arbitrator node %u [ticket=%s]",
- theNodeId, sd->node, ticketText);
- break;
- case ArbitCode::ApiFail:
- BaseString::snprintf(m_text, m_text_len,
- "%sLost arbitrator node %u - process failure [state=%u]",
- theNodeId, sd->node, state);
- break;
- case ArbitCode::ApiExit:
- BaseString::snprintf(m_text, m_text_len,
- "%sLost arbitrator node %u - process exit [state=%u]",
- theNodeId, sd->node, state);
- break;
- default:
- ArbitCode::getErrText(code, errText, sizeof(errText));
- BaseString::snprintf(m_text, m_text_len,
- "%sLost arbitrator node %u - %s [state=%u]",
- theNodeId, sd->node, errText, state);
- break;
- }
- }
- break;
- case EventReport::ArbitResult:
- //-----------------------------------------------------------------------
- // REPORT arbitration result (the failures may not reach us).
- //-----------------------------------------------------------------------
- { const ArbitSignalData* sd = (ArbitSignalData*)theData;
- char errText[ArbitCode::ErrTextLength + 1];
- const unsigned code = sd->code & 0xFFFF;
- const unsigned state = sd->code >> 16;
- switch (code) {
- case ArbitCode::LoseNodes:
- BaseString::snprintf(m_text, m_text_len,
- "%sArbitration check lost - less than 1/2 nodes left",
- theNodeId);
- break;
- case ArbitCode::WinNodes:
- BaseString::snprintf(m_text, m_text_len,
- "%sArbitration check won - all node groups and more than 1/2 nodes left",
- theNodeId);
- break;
- case ArbitCode::WinGroups:
- BaseString::snprintf(m_text, m_text_len,
- "%sArbitration check won - node group majority",
- theNodeId);
- break;
- case ArbitCode::LoseGroups:
- BaseString::snprintf(m_text, m_text_len,
- "%sArbitration check lost - missing node group",
- theNodeId);
- break;
- case ArbitCode::Partitioning:
- BaseString::snprintf(m_text, m_text_len,
- "%sNetwork partitioning - arbitration required",
- theNodeId);
- break;
- case ArbitCode::WinChoose:
- BaseString::snprintf(m_text, m_text_len,
- "%sArbitration won - positive reply from node %u",
- theNodeId, sd->node);
- break;
- case ArbitCode::LoseChoose:
- BaseString::snprintf(m_text, m_text_len,
- "%sArbitration lost - negative reply from node %u",
- theNodeId, sd->node);
- break;
- case ArbitCode::LoseNorun:
- BaseString::snprintf(m_text, m_text_len,
- "%sNetwork partitioning - no arbitrator available",
- theNodeId);
- break;
- case ArbitCode::LoseNocfg:
- BaseString::snprintf(m_text, m_text_len,
- "%sNetwork partitioning - no arbitrator configured",
- theNodeId);
- break;
- default:
- ArbitCode::getErrText(code, errText, sizeof(errText));
- BaseString::snprintf(m_text, m_text_len,
- "%sArbitration failure - %s [state=%u]",
- theNodeId, errText, state);
- break;
- }
- }
- break;
- case EventReport::GlobalCheckpointStarted:
- //-----------------------------------------------------------------------
- // This event reports that a global checkpoint has been started and this
- // node is the master of this global checkpoint.
- //-----------------------------------------------------------------------
- BaseString::snprintf(m_text,
- m_text_len,
- "%sGlobal checkpoint %u started",
- theNodeId,
- theData[1]);
- break;
- case EventReport::GlobalCheckpointCompleted:
- //-----------------------------------------------------------------------
- // This event reports that a global checkpoint has been completed on this
- // node and the node is the master of this global checkpoint.
- //-----------------------------------------------------------------------
- BaseString::snprintf(m_text, m_text_len,
- "%sGlobal checkpoint %u completed",
- theNodeId,
- theData[1]);
- break;
- case EventReport::LocalCheckpointStarted:
- //-----------------------------------------------------------------------
- // This event reports that a local checkpoint has been started and this
- // node is the master of this local checkpoint.
- //-----------------------------------------------------------------------
- BaseString::snprintf(m_text,
- m_text_len,
- "%sLocal checkpoint %u started. "
- "Keep GCI = %u oldest restorable GCI = %u",
- theNodeId,
- theData[1],
- theData[2],
- theData[3]);
- break;
- case EventReport::LocalCheckpointCompleted:
- //-----------------------------------------------------------------------
- // This event reports that a local checkpoint has been completed on this
- // node and the node is the master of this local checkpoint.
- //-----------------------------------------------------------------------
- BaseString::snprintf(m_text,
- m_text_len,
- "%sLocal checkpoint %u completed",
- theNodeId,
- theData[1]);
- break;
- case EventReport::TableCreated:
- //-----------------------------------------------------------------------
- // This event reports that a table has been created.
- //-----------------------------------------------------------------------
+ }
+}
+void getTextGlobalCheckpointStarted(QQQQ) {
+ //-----------------------------------------------------------------------
+ // This event reports that a global checkpoint has been started and this
+ // node is the master of this global checkpoint.
+ //-----------------------------------------------------------------------
+ BaseString::snprintf(m_text, m_text_len,
+ "Global checkpoint %u started",
+ theData[1]);
+}
+void getTextGlobalCheckpointCompleted(QQQQ) {
+ //-----------------------------------------------------------------------
+ // This event reports that a global checkpoint has been completed on this
+ // node and the node is the master of this global checkpoint.
+ //-----------------------------------------------------------------------
+ BaseString::snprintf(m_text, m_text_len,
+ "Global checkpoint %u completed",
+ theData[1]);
+}
+void getTextLocalCheckpointStarted(QQQQ) {
+ //-----------------------------------------------------------------------
+ // This event reports that a local checkpoint has been started and this
+ // node is the master of this local checkpoint.
+ //-----------------------------------------------------------------------
+ BaseString::snprintf(m_text, m_text_len,
+ "Local checkpoint %u started. "
+ "Keep GCI = %u oldest restorable GCI = %u",
+ theData[1],
+ theData[2],
+ theData[3]);
+}
+void getTextLocalCheckpointCompleted(QQQQ) {
+ //-----------------------------------------------------------------------
+ // This event reports that a local checkpoint has been completed on this
+ // node and the node is the master of this local checkpoint.
+ //-----------------------------------------------------------------------
+ BaseString::snprintf(m_text, m_text_len,
+ "Local checkpoint %u completed",
+ theData[1]);
+}
+void getTextTableCreated(QQQQ) {
+ //-----------------------------------------------------------------------
+ // This event reports that a table has been created.
+ //-----------------------------------------------------------------------
+ BaseString::snprintf(m_text, m_text_len,
+ "Table with ID = %u created",
+ theData[1]);
+}
+/* STRANGE */
+void getTextLCPStoppedInCalcKeepGci(QQQQ) {
+ if (theData[1] == 0)
BaseString::snprintf(m_text, m_text_len,
- "%sTable with ID = %u created",
- theNodeId,
- theData[1]);
- break;
- case EventReport::LCPStoppedInCalcKeepGci:
- if (theData[1] == 0)
- BaseString::snprintf(m_text, m_text_len,
- "%sLocal Checkpoint stopped in CALCULATED_KEEP_GCI",
- theNodeId);
- break;
- case EventReport::NR_CopyDict:
- //-----------------------------------------------------------------------
- // REPORT Node Restart completed copy of dictionary information.
- //-----------------------------------------------------------------------
- BaseString::snprintf(m_text,
- m_text_len,
- "%sNode restart completed copy of dictionary information",
- theNodeId);
- break;
- case EventReport::NR_CopyDistr:
- //-----------------------------------------------------------------------
- // REPORT Node Restart completed copy of distribution information.
- //-----------------------------------------------------------------------
- BaseString::snprintf(m_text,
- m_text_len,
- "%sNode restart completed copy of distribution information",
- theNodeId);
- break;
- case EventReport::NR_CopyFragsStarted:
- //-----------------------------------------------------------------------
- // REPORT Node Restart is starting to copy the fragments.
- //-----------------------------------------------------------------------
- BaseString::snprintf(m_text,
- m_text_len,
- "%sNode restart starting to copy the fragments "
- "to Node %u",
- theNodeId,
- theData[1]);
- break;
- case EventReport::NR_CopyFragDone:
- //-----------------------------------------------------------------------
- // REPORT Node Restart copied a fragment.
- //-----------------------------------------------------------------------
- BaseString::snprintf(m_text,
- m_text_len,
- "%sTable ID = %u, fragment ID = %u have been copied "
- "to Node %u",
- theNodeId,
- theData[2],
- theData[3],
- theData[1]);
- break;
- case EventReport::NR_CopyFragsCompleted:
- BaseString::snprintf(m_text,
- m_text_len,
- "%sNode restart completed copying the fragments "
- "to Node %u",
- theNodeId,
- theData[1]);
- break;
- case EventReport::LCPFragmentCompleted:
- BaseString::snprintf(m_text,
- m_text_len,
- "%sTable ID = %u, fragment ID = %u has completed LCP "
- "on Node %u maxGciStarted: %d maxGciCompleted: %d",
- theNodeId,
- theData[2],
- theData[3],
- theData[1],
- theData[4],
- theData[5]);
- break;
- case EventReport::TransReportCounters:
- // -------------------------------------------------------------------
- // Report information about transaction activity once per 10 seconds.
- // -------------------------------------------------------------------
- BaseString::snprintf(m_text,
- m_text_len,
- "%sTrans. Count = %u, Commit Count = %u, "
- "Read Count = %u, Simple Read Count = %u,\n"
- "Write Count = %u, AttrInfo Count = %u, "
- "Concurrent Operations = %u, Abort Count = %u\n"
- " Scans: %u Range scans: %u",
- theNodeId,
- theData[1],
- theData[2],
- theData[3],
- theData[4],
- theData[5],
- theData[6],
- theData[7],
- theData[8],
- theData[9],
- theData[10]);
- break;
- case EventReport::OperationReportCounters:
- BaseString::snprintf(m_text, m_text_len,
- "%sOperations=%u",
- theNodeId,
- theData[1]);
- break;
- case EventReport::UndoLogBlocked:
- //-----------------------------------------------------------------------
- // REPORT Undo Logging blocked due to buffer near to overflow.
- //-----------------------------------------------------------------------
- BaseString::snprintf(m_text,
- m_text_len,
- "%sACC Blocked %u and TUP Blocked %u times last second",
- theNodeId,
- theData[1],
- theData[2]);
- break;
- case EventReport::TransporterError:
- case EventReport::TransporterWarning:
- BaseString::snprintf(m_text,
- m_text_len,
- "%sTransporter to node %d reported error 0x%x",
- theNodeId,
- theData[1],
- theData[2]);
- break;
- case EventReport::MissedHeartbeat:
- //-----------------------------------------------------------------------
- // REPORT Undo Logging blocked due to buffer near to overflow.
- //-----------------------------------------------------------------------
- BaseString::snprintf(m_text,
- m_text_len,
- "%sNode %d missed heartbeat %d",
- theNodeId,
- theData[1],
- theData[2]);
- break;
- case EventReport::DeadDueToHeartbeat:
- //-----------------------------------------------------------------------
- // REPORT Undo Logging blocked due to buffer near to overflow.
- //-----------------------------------------------------------------------
- BaseString::snprintf(m_text,
- m_text_len,
- "%sNode %d declared dead due to missed heartbeat",
- theNodeId,
- theData[1]);
- break;
- case EventReport::JobStatistic:
- BaseString::snprintf(m_text,
- m_text_len,
- "%sMean loop Counter in doJob last 8192 times = %u",
- theNodeId,
- theData[1]);
- break;
- case EventReport::SendBytesStatistic:
- BaseString::snprintf(m_text,
- m_text_len,
- "%sMean send size to Node = %d last 4096 sends = %u bytes",
- theNodeId,
- theData[1],
- theData[2]);
- break;
- case EventReport::ReceiveBytesStatistic:
- BaseString::snprintf(m_text,
- m_text_len,
- "%sMean receive size to Node = %d last 4096 sends = %u bytes",
- theNodeId,
- theData[1],
- theData[2]);
- break;
- case EventReport::SentHeartbeat:
- BaseString::snprintf(m_text,
- m_text_len,
- "%sNode Sent Heartbeat to node = %d",
- theNodeId,
- theData[1]);
- break;
- case EventReport::CreateLogBytes:
- BaseString::snprintf(m_text,
- m_text_len,
- "%sLog part %u, log file %u, MB %u",
- theNodeId,
- theData[1],
- theData[2],
- theData[3]);
- break;
- case EventReport::StartLog:
- BaseString::snprintf(m_text,
- m_text_len,
- "%sLog part %u, start MB %u, stop MB %u, last GCI, log exec %u",
- theNodeId,
- theData[1],
- theData[2],
- theData[3],
- theData[4]);
- break;
- case EventReport::StartREDOLog:
- BaseString::snprintf(m_text,
- m_text_len,
- "%sNode: %d StartLog: [GCI Keep: %d LastCompleted: %d NewestRestorable: %d]",
- theNodeId,
- theData[1],
- theData[2],
- theData[3],
- theData[4]);
- break;
- case EventReport::UNDORecordsExecuted:{
- const char* line = "";
- if (theData[1] == DBTUP){
- line = "DBTUP";
- }else if (theData[1] == DBACC){
- line = "DBACC";
- }
-
- BaseString::snprintf(m_text,
- m_text_len,
- "%s UNDO %s %d [%d %d %d %d %d %d %d %d %d]",
- theNodeId,
- line,
- theData[2],
- theData[3],
- theData[4],
- theData[5],
- theData[6],
- theData[7],
- theData[8],
- theData[9],
- theData[10],
- theData[11]);
+ "Local Checkpoint stopped in CALCULATED_KEEP_GCI");
+}
+void getTextNR_CopyDict(QQQQ) {
+ //-----------------------------------------------------------------------
+ // REPORT Node Restart completed copy of dictionary information.
+ //-----------------------------------------------------------------------
+ BaseString::snprintf(m_text, m_text_len,
+ "Node restart completed copy of dictionary information");
+}
+void getTextNR_CopyDistr(QQQQ) {
+ //-----------------------------------------------------------------------
+ // REPORT Node Restart completed copy of distribution information.
+ //-----------------------------------------------------------------------
+ BaseString::snprintf(m_text, m_text_len,
+ "Node restart completed copy of distribution information");
+}
+void getTextNR_CopyFragsStarted(QQQQ) {
+ //-----------------------------------------------------------------------
+ // REPORT Node Restart is starting to copy the fragments.
+ //-----------------------------------------------------------------------
+ BaseString::snprintf(m_text, m_text_len,
+ "Node restart starting to copy the fragments "
+ "to Node %u",
+ theData[1]);
+}
+void getTextNR_CopyFragDone(QQQQ) {
+ //-----------------------------------------------------------------------
+ // REPORT Node Restart copied a fragment.
+ //-----------------------------------------------------------------------
+ BaseString::snprintf(m_text, m_text_len,
+ "Table ID = %u, fragment ID = %u have been copied "
+ "to Node %u",
+ theData[2],
+ theData[3],
+ theData[1]);
+}
+void getTextNR_CopyFragsCompleted(QQQQ) {
+ BaseString::snprintf(m_text, m_text_len,
+ "Node restart completed copying the fragments "
+ "to Node %u",
+ theData[1]);
+}
+void getTextLCPFragmentCompleted(QQQQ) {
+ BaseString::snprintf(m_text, m_text_len,
+ "Table ID = %u, fragment ID = %u has completed LCP "
+ "on Node %u maxGciStarted: %d maxGciCompleted: %d",
+ theData[2],
+ theData[3],
+ theData[1],
+ theData[4],
+ theData[5]);
+}
+void getTextTransReportCounters(QQQQ) {
+ // -------------------------------------------------------------------
+ // Report information about transaction activity once per 10 seconds.
+ // -------------------------------------------------------------------
+ BaseString::snprintf(m_text, m_text_len,
+ "Trans. Count = %u, Commit Count = %u, "
+ "Read Count = %u, Simple Read Count = %u,\n"
+ "Write Count = %u, AttrInfo Count = %u, "
+ "Concurrent Operations = %u, Abort Count = %u\n"
+ " Scans: %u Range scans: %u",
+ theData[1],
+ theData[2],
+ theData[3],
+ theData[4],
+ theData[5],
+ theData[6],
+ theData[7],
+ theData[8],
+ theData[9],
+ theData[10]);
+}
+void getTextOperationReportCounters(QQQQ) {
+ BaseString::snprintf(m_text, m_text_len,
+ "Operations=%u",
+ theData[1]);
+}
+void getTextUndoLogBlocked(QQQQ) {
+ //-----------------------------------------------------------------------
+ // REPORT Undo Logging blocked due to buffer near to overflow.
+ //-----------------------------------------------------------------------
+ BaseString::snprintf(m_text, m_text_len,
+ "ACC Blocked %u and TUP Blocked %u times last second",
+ theData[1],
+ theData[2]);
+}
+void getTextTransporterError(QQQQ) {
+ BaseString::snprintf(m_text, m_text_len,
+ "Transporter to node %d reported error 0x%x",
+ theData[1],
+ theData[2]);
+}
+void getTextTransporterWarning(QQQQ) {
+ getTextTransporterError(m_text, m_text_len, theData);
+}
+void getTextMissedHeartbeat(QQQQ) {
+ //-----------------------------------------------------------------------
+  // REPORT a heartbeat from the node was missed.
+ //-----------------------------------------------------------------------
+ BaseString::snprintf(m_text, m_text_len,
+ "Node %d missed heartbeat %d",
+ theData[1],
+ theData[2]);
+}
+void getTextDeadDueToHeartbeat(QQQQ) {
+ //-----------------------------------------------------------------------
+  // REPORT node declared dead after missing too many heartbeats.
+ //-----------------------------------------------------------------------
+ BaseString::snprintf(m_text, m_text_len,
+ "Node %d declared dead due to missed heartbeat",
+ theData[1]);
+}
+void getTextJobStatistic(QQQQ) {
+ BaseString::snprintf(m_text, m_text_len,
+ "Mean loop Counter in doJob last 8192 times = %u",
+ theData[1]);
+}
+void getTextSendBytesStatistic(QQQQ) {
+ BaseString::snprintf(m_text, m_text_len,
+ "Mean send size to Node = %d last 4096 sends = %u bytes",
+ theData[1],
+ theData[2]);
+}
+void getTextReceiveBytesStatistic(QQQQ) {
+ BaseString::snprintf(m_text, m_text_len,
+ "Mean receive size to Node = %d last 4096 sends = %u bytes",
+ theData[1],
+ theData[2]);
+}
+void getTextSentHeartbeat(QQQQ) {
+ BaseString::snprintf(m_text, m_text_len,
+ "Node Sent Heartbeat to node = %d",
+ theData[1]);
+}
+void getTextCreateLogBytes(QQQQ) {
+ BaseString::snprintf(m_text, m_text_len,
+ "Log part %u, log file %u, MB %u",
+ theData[1],
+ theData[2],
+ theData[3]);
+}
+void getTextStartLog(QQQQ) {
+ BaseString::snprintf(m_text, m_text_len,
+ "Log part %u, start MB %u, stop MB %u, last GCI, log exec %u",
+ theData[1],
+ theData[2],
+ theData[3],
+ theData[4]);
+}
+void getTextStartREDOLog(QQQQ) {
+ BaseString::snprintf(m_text, m_text_len,
+ "Node: %d StartLog: [GCI Keep: %d LastCompleted: %d NewestRestorable: %d]",
+ theData[1],
+ theData[2],
+ theData[3],
+ theData[4]);
+}
+void getTextUNDORecordsExecuted(QQQQ) {
+ const char* line = "";
+ if (theData[1] == DBTUP){
+ line = "DBTUP";
+ }else if (theData[1] == DBACC){
+ line = "DBACC";
}
- break;
- case EventReport::InfoEvent:
- BaseString::snprintf(m_text,
- m_text_len,
- "%s%s",
- theNodeId,
- (char *)&theData[1]);
- break;
- case EventReport::WarningEvent:
- BaseString::snprintf(m_text,
- m_text_len,
- "%s%s",
- theNodeId,
- (char *)&theData[1]);
- break;
- case EventReport::GCP_TakeoverStarted:
- BaseString::snprintf(m_text,
- m_text_len,
- "%sGCP Take over started", theNodeId);
- break;
- case EventReport::GCP_TakeoverCompleted:
- BaseString::snprintf(m_text,
- m_text_len,
- "%sGCP Take over completed", theNodeId);
- break;
- case EventReport::LCP_TakeoverStarted:
- BaseString::snprintf(m_text,
- m_text_len,
- "%sLCP Take over started", theNodeId);
- break;
- case EventReport::LCP_TakeoverCompleted:
- BaseString::snprintf(m_text,
- m_text_len,
- "%sLCP Take over completed (state = %d)",
- theNodeId, theData[1]);
- break;
- case EventReport::MemoryUsage:{
- const int gth = theData[1];
- const int size = theData[2];
- const int used = theData[3];
- const int total = theData[4];
- const int block = theData[5];
- const int percent = (used*100)/total;
+ BaseString::snprintf(m_text, m_text_len,
+ " UNDO %s %d [%d %d %d %d %d %d %d %d %d]",
+ line,
+ theData[2],
+ theData[3],
+ theData[4],
+ theData[5],
+ theData[6],
+ theData[7],
+ theData[8],
+ theData[9],
+ theData[10],
+ theData[11]);
+}
+void getTextInfoEvent(QQQQ) {
+  BaseString::snprintf(m_text, m_text_len, "%s", (char *)&theData[1]);
+}
+void getTextWarningEvent(QQQQ) {
+  BaseString::snprintf(m_text, m_text_len, "%s", (char *)&theData[1]);
+}
+void getTextGCP_TakeoverStarted(QQQQ) {
+ BaseString::snprintf(m_text, m_text_len, "GCP Take over started");
+}
+void getTextGCP_TakeoverCompleted(QQQQ) {
+ BaseString::snprintf(m_text, m_text_len, "GCP Take over completed");
+}
+void getTextLCP_TakeoverStarted(QQQQ) {
+ BaseString::snprintf(m_text, m_text_len, "LCP Take over started");
+}
+void getTextLCP_TakeoverCompleted(QQQQ) {
+ BaseString::snprintf(m_text, m_text_len,
+ "LCP Take over completed (state = %d)",
+ theData[1]);
+}
+void getTextMemoryUsage(QQQQ) {
+ const int gth = theData[1];
+ const int size = theData[2];
+ const int used = theData[3];
+ const int total = theData[4];
+ const int block = theData[5];
+ const int percent = (used*100)/total;
+
+ BaseString::snprintf(m_text, m_text_len,
+ "%s usage %s %d%s(%d %dK pages of total %d)",
+ (block==DBACC ? "Index" : (block == DBTUP ?"Data":"<unknown>")),
+ (gth == 0 ? "is" : (gth > 0 ? "increased to" : "decreased to")),
+ percent, "%",
+ used, size/1024, total
+ );
+}
+
+void getTextBackupStarted(QQQQ) {
+ BaseString::snprintf(m_text, m_text_len,
+ "Backup %d started from node %d",
+ theData[2], refToNode(theData[1]));
+}
+void getTextBackupFailedToStart(QQQQ) {
+ BaseString::snprintf(m_text, m_text_len,
+ "Backup request from %d failed to start. Error: %d",
+ refToNode(theData[1]), theData[2]);
+}
+void getTextBackupCompleted(QQQQ) {
+ BaseString::snprintf(m_text, m_text_len,
+ "Backup %u started from node %u completed\n"
+ " StartGCP: %u StopGCP: %u\n"
+ " #Records: %u #LogRecords: %u\n"
+ " Data: %u bytes Log: %u bytes",
+ theData[2], refToNode(theData[1]),
+ theData[3], theData[4], theData[6], theData[8],
+ theData[5], theData[7]);
+}
+void getTextBackupAborted(QQQQ) {
+ BaseString::snprintf(m_text, m_text_len,
+ "Backup %d started from %d has been aborted. Error: %d",
+ theData[2],
+ refToNode(theData[1]),
+ theData[3]);
+}
+
+void getTextSingleUser(QQQQ) {
+ switch (theData[1])
+ {
+ case 0:
+ BaseString::snprintf(m_text, m_text_len, "Entering single user mode");
+ break;
+ case 1:
BaseString::snprintf(m_text, m_text_len,
- "%s%s usage %s %d%s(%d %dK pages of total %d)",
- theNodeId,
- (block==DBACC ? "Index" : (block == DBTUP ?"Data":"<unknown>")),
- (gth == 0 ? "is" : (gth > 0 ? "increased to" : "decreased to")),
- percent, "%",
- used, size/1024, total
- );
+ "Entered single user mode "
+ "Node %d has exclusive access", theData[2]);
break;
- }
- case EventReport::SingleUser :
- {
- switch (theData[1])
- {
- case 0:
- BaseString::snprintf(m_text, m_text_len,
- "%sEntering single user mode", theNodeId);
- break;
- case 1:
- BaseString::snprintf(m_text, m_text_len,
- "%sEntered single user mode %d", theNodeId, theData[2]);
- break;
- case 2:
- BaseString::snprintf(m_text, m_text_len,
- "%sExiting single user mode", theNodeId);
- break;
- default:
- BaseString::snprintf(m_text, m_text_len,
- "%sUnknown single user report %d", theNodeId, theData[1]);
- break;
- }
+ case 2:
+ BaseString::snprintf(m_text, m_text_len,"Exiting single user mode");
break;
- }
- case EventReport::BackupStarted:
- BaseString::snprintf(m_text,
- m_text_len,
- "%sBackup %d started from node %d",
- theNodeId, theData[2], refToNode(theData[1]));
- break;
- case EventReport::BackupFailedToStart:
- BaseString::snprintf(m_text,
- m_text_len,
- "%sBackup request from %d failed to start. Error: %d",
- theNodeId, refToNode(theData[1]), theData[2]);
- break;
- case EventReport::BackupCompleted:
- BaseString::snprintf(m_text,
- m_text_len,
- "%sBackup %u started from node %u completed\n"
- " StartGCP: %u StopGCP: %u\n"
- " #Records: %u #LogRecords: %u\n"
- " Data: %u bytes Log: %u bytes",
- theNodeId, theData[2], refToNode(theData[1]),
- theData[3], theData[4], theData[6], theData[8],
- theData[5], theData[7]);
- break;
- case EventReport::BackupAborted:
- BaseString::snprintf(m_text,
- m_text_len,
- "%sBackup %d started from %d has been aborted. Error: %d",
- theNodeId,
- theData[2],
- refToNode(theData[1]),
- theData[3]);
- break;
default:
- BaseString::snprintf(m_text,
- m_text_len,
- "%sUnknown event: %d",
- theNodeId,
- theData[0]);
-
+ BaseString::snprintf(m_text, m_text_len,
+ "Unknown single user report %d", theData[1]);
+ break;
}
- return m_text;
}
+#if 0
+BaseString::snprintf(m_text,
+ m_text_len,
+ "Unknown event: %d",
+ theData[0]);
+#endif
+
+/**
+ * This matrix defines which event should be printed when
+ *
+ * threshold - is in range [0-15]
+ * severity - DEBUG to ALERT (Type of log message)
+ */
+
+#define ROW(a,b,c,d) \
+{ NDB_LE_ ## a, b, c, d, getText ## a}
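// For example, ROW(Connected, LogLevel::llConnection, 8, Logger::LL_INFO)
// expands to
//   { NDB_LE_Connected, LogLevel::llConnection, 8, Logger::LL_INFO,
//     getTextConnected }
// so each matrix entry now carries its text formatter alongside the event
// category, threshold and severity.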
+
+const EventLoggerBase::EventRepLogLevelMatrix EventLoggerBase::matrix[] = {
+ // CONNECTION
+ ROW(Connected, LogLevel::llConnection, 8, Logger::LL_INFO ),
+ ROW(Disconnected, LogLevel::llConnection, 8, Logger::LL_ALERT ),
+ ROW(CommunicationClosed, LogLevel::llConnection, 8, Logger::LL_INFO ),
+ ROW(CommunicationOpened, LogLevel::llConnection, 8, Logger::LL_INFO ),
+ ROW(ConnectedApiVersion, LogLevel::llConnection, 8, Logger::LL_INFO ),
+ // CHECKPOINT
+ ROW(GlobalCheckpointStarted, LogLevel::llCheckpoint, 9, Logger::LL_INFO ),
+ ROW(GlobalCheckpointCompleted,LogLevel::llCheckpoint,10, Logger::LL_INFO ),
+ ROW(LocalCheckpointStarted, LogLevel::llCheckpoint, 7, Logger::LL_INFO ),
+ ROW(LocalCheckpointCompleted,LogLevel::llCheckpoint, 8, Logger::LL_INFO ),
+ ROW(LCPStoppedInCalcKeepGci, LogLevel::llCheckpoint, 0, Logger::LL_ALERT ),
+ ROW(LCPFragmentCompleted, LogLevel::llCheckpoint, 11, Logger::LL_INFO ),
+ ROW(UndoLogBlocked, LogLevel::llCheckpoint, 7, Logger::LL_INFO ),
+
+ // STARTUP
+ ROW(NDBStartStarted, LogLevel::llStartUp, 1, Logger::LL_INFO ),
+ ROW(NDBStartCompleted, LogLevel::llStartUp, 1, Logger::LL_INFO ),
+ ROW(STTORRYRecieved, LogLevel::llStartUp, 15, Logger::LL_INFO ),
+ ROW(StartPhaseCompleted, LogLevel::llStartUp, 4, Logger::LL_INFO ),
+ ROW(CM_REGCONF, LogLevel::llStartUp, 3, Logger::LL_INFO ),
+ ROW(CM_REGREF, LogLevel::llStartUp, 8, Logger::LL_INFO ),
+ ROW(FIND_NEIGHBOURS, LogLevel::llStartUp, 8, Logger::LL_INFO ),
+ ROW(NDBStopStarted, LogLevel::llStartUp, 1, Logger::LL_INFO ),
+ ROW(NDBStopAborted, LogLevel::llStartUp, 1, Logger::LL_INFO ),
+ ROW(StartREDOLog, LogLevel::llStartUp, 10, Logger::LL_INFO ),
+ ROW(StartLog, LogLevel::llStartUp, 10, Logger::LL_INFO ),
+ ROW(UNDORecordsExecuted, LogLevel::llStartUp, 15, Logger::LL_INFO ),
+
+ // NODERESTART
+ ROW(NR_CopyDict, LogLevel::llNodeRestart, 8, Logger::LL_INFO ),
+ ROW(NR_CopyDistr, LogLevel::llNodeRestart, 8, Logger::LL_INFO ),
+ ROW(NR_CopyFragsStarted, LogLevel::llNodeRestart, 8, Logger::LL_INFO ),
+ ROW(NR_CopyFragDone, LogLevel::llNodeRestart,10, Logger::LL_INFO ),
+ ROW(NR_CopyFragsCompleted, LogLevel::llNodeRestart, 8, Logger::LL_INFO ),
+
+ ROW(NodeFailCompleted, LogLevel::llNodeRestart, 8, Logger::LL_ALERT),
+ ROW(NODE_FAILREP, LogLevel::llNodeRestart, 8, Logger::LL_ALERT),
+ ROW(ArbitState, LogLevel::llNodeRestart, 6, Logger::LL_INFO ),
+ ROW(ArbitResult, LogLevel::llNodeRestart, 2, Logger::LL_ALERT),
+ ROW(GCP_TakeoverStarted, LogLevel::llNodeRestart, 7, Logger::LL_INFO ),
+ ROW(GCP_TakeoverCompleted, LogLevel::llNodeRestart, 7, Logger::LL_INFO ),
+ ROW(LCP_TakeoverStarted, LogLevel::llNodeRestart, 7, Logger::LL_INFO ),
+ ROW(LCP_TakeoverCompleted, LogLevel::llNodeRestart, 7, Logger::LL_INFO ),
+
+ // STATISTIC
+ ROW(TransReportCounters, LogLevel::llStatistic, 8, Logger::LL_INFO ),
+ ROW(OperationReportCounters, LogLevel::llStatistic, 8, Logger::LL_INFO ),
+ ROW(TableCreated, LogLevel::llStatistic, 7, Logger::LL_INFO ),
+ ROW(JobStatistic, LogLevel::llStatistic, 9, Logger::LL_INFO ),
+ ROW(SendBytesStatistic, LogLevel::llStatistic, 9, Logger::LL_INFO ),
+ ROW(ReceiveBytesStatistic, LogLevel::llStatistic, 9, Logger::LL_INFO ),
+ ROW(MemoryUsage, LogLevel::llStatistic, 5, Logger::LL_INFO ),
+
+ // ERROR
+ ROW(TransporterError, LogLevel::llError, 2, Logger::LL_ERROR ),
+ ROW(TransporterWarning, LogLevel::llError, 8, Logger::LL_WARNING ),
+ ROW(MissedHeartbeat, LogLevel::llError, 8, Logger::LL_WARNING ),
+ ROW(DeadDueToHeartbeat, LogLevel::llError, 8, Logger::LL_ALERT ),
+ ROW(WarningEvent, LogLevel::llError, 2, Logger::LL_WARNING ),
+ // INFO
+ ROW(SentHeartbeat, LogLevel::llInfo, 12, Logger::LL_INFO ),
+ ROW(CreateLogBytes, LogLevel::llInfo, 11, Logger::LL_INFO ),
+ ROW(InfoEvent, LogLevel::llInfo, 2, Logger::LL_INFO ),
+
+ //Single User
+ ROW(SingleUser, LogLevel::llInfo, 7, Logger::LL_INFO ),
+
+ // Backup
+ ROW(BackupStarted, LogLevel::llBackup, 7, Logger::LL_INFO ),
+ ROW(BackupCompleted, LogLevel::llBackup, 7, Logger::LL_INFO ),
+ ROW(BackupFailedToStart, LogLevel::llBackup, 7, Logger::LL_ALERT),
+ ROW(BackupAborted, LogLevel::llBackup, 7, Logger::LL_ALERT )
+};
+
+const Uint32 EventLoggerBase::matrixSize=
+sizeof(EventLoggerBase::matrix)/sizeof(EventRepLogLevelMatrix);
+
EventLogger::EventLogger() : m_filterLevel(15)
{
setCategory("EventLogger");
@@ -903,19 +788,37 @@ int
EventLoggerBase::event_lookup(int eventType,
LogLevel::EventCategory &cat,
Uint32 &threshold,
- Logger::LoggerLevel &severity)
+ Logger::LoggerLevel &severity,
+ EventTextFunction &textF)
{
for(unsigned i = 0; i<EventLoggerBase::matrixSize; i++){
if(EventLoggerBase::matrix[i].eventType == eventType){
cat = EventLoggerBase::matrix[i].eventCategory;
threshold = EventLoggerBase::matrix[i].threshold;
severity = EventLoggerBase::matrix[i].severity;
+ textF= EventLoggerBase::matrix[i].textF;
return 0;
}
}
return 1;
}
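// event_lookup() returns 0 and fills the out-parameters when the event type
// is found in the matrix above, and 1 for unknown event types, in which case
// log() below drops the event.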
+const char*
+EventLogger::getText(char * dst, size_t dst_len,
+ EventTextFunction textF,
+ const Uint32* theData, NodeId nodeId )
+{
+ int pos= 0;
+ if (nodeId != 0)
+ {
+ BaseString::snprintf(dst, dst_len, "Node %u: ", nodeId);
+ pos= strlen(dst);
+ }
+ if (dst_len-pos > 0)
+ textF(dst+pos,dst_len-pos,theData);
+ return dst;
+}
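// Worked example (illustrative data): for a Connected report from node 3
// with theData[1] == 2, getText() first writes the "Node 3: " prefix, then
// hands the rest of the buffer to the formatter returned by event_lookup()
// -- here getTextConnected() -- which appends "Node 2 Connected", giving
// "Node 3: Node 2 Connected".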
+
void
EventLogger::log(int eventType, const Uint32* theData, NodeId nodeId,
const LogLevel* ll)
@@ -923,52 +826,43 @@ EventLogger::log(int eventType, const Uint32* theData, NodeId nodeId,
Uint32 threshold = 0;
Logger::LoggerLevel severity = Logger::LL_WARNING;
LogLevel::EventCategory cat= LogLevel::llInvalid;
+ EventTextFunction textF;
DBUG_ENTER("EventLogger::log");
DBUG_PRINT("enter",("eventType=%d, nodeid=%d", eventType, nodeId));
- if (EventLoggerBase::event_lookup(eventType,cat,threshold,severity))
+ if (EventLoggerBase::event_lookup(eventType,cat,threshold,severity,textF))
DBUG_VOID_RETURN;
Uint32 set = ll?ll->getLogLevel(cat) : m_logLevel.getLogLevel(cat);
DBUG_PRINT("info",("threshold=%d, set=%d", threshold, set));
if (ll)
DBUG_PRINT("info",("m_logLevel.getLogLevel=%d", m_logLevel.getLogLevel(cat)));
+
if (threshold <= set){
+ getText(m_text,sizeof(m_text),textF,theData,nodeId);
+
switch (severity){
case Logger::LL_ALERT:
- alert(EventLogger::getText(m_text, sizeof(m_text),
- eventType, theData, nodeId));
+ alert(m_text);
break;
-
case Logger::LL_CRITICAL:
- critical(EventLogger::getText(m_text, sizeof(m_text),
- eventType, theData, nodeId));
+ critical(m_text);
break;
-
case Logger::LL_WARNING:
- warning(EventLogger::getText(m_text, sizeof(m_text),
- eventType, theData, nodeId));
+ warning(m_text);
break;
-
case Logger::LL_ERROR:
- error(EventLogger::getText(m_text, sizeof(m_text),
- eventType, theData, nodeId));
+ error(m_text);
break;
-
case Logger::LL_INFO:
- info(EventLogger::getText(m_text, sizeof(m_text),
- eventType, theData, nodeId));
+ info(m_text);
break;
-
case Logger::LL_DEBUG:
- debug(EventLogger::getText(m_text, sizeof(m_text),
- eventType, theData, nodeId));
+ debug(m_text);
break;
-
default:
- info(EventLogger::getText(m_text, sizeof(m_text),
- eventType, theData, nodeId));
+ info(m_text);
break;
}
} // if (..
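
Note: with the two hunks above, formatting an event message is now a two-step affair: event_lookup() resolves the per-event text formatter (textF) together with category, threshold and severity, and getText() only prepends the "Node <id>: " prefix before delegating to that formatter. A minimal sketch of the call sequence, assuming eventType, theData and nodeId are already at hand, that EventTextFunction is a plain function pointer taking a buffer, its length and the signal data, and that getText() can be called qualified as in the removed call sites:

    // Sketch only; mirrors the lookup/format split introduced above.
    char buf[256];
    LogLevel::EventCategory cat;
    Uint32 threshold;
    Logger::LoggerLevel severity;
    EventTextFunction textF;

    if (EventLoggerBase::event_lookup(eventType, cat, threshold, severity, textF) == 0)
    {
      // getText() writes "Node <nodeId>: " when nodeId != 0, then lets textF
      // render the event-specific text into the remaining buffer space.
      EventLogger::getText(buf, sizeof(buf), textF, theData, nodeId);
    }
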
diff --git a/ndb/src/common/debugger/SignalLoggerManager.cpp b/ndb/src/common/debugger/SignalLoggerManager.cpp
index d642ed09a68..d8710d2058f 100644
--- a/ndb/src/common/debugger/SignalLoggerManager.cpp
+++ b/ndb/src/common/debugger/SignalLoggerManager.cpp
@@ -383,7 +383,7 @@ SignalLoggerManager::sendSignalWithDelay(Uint32 delayInMilliSeconds,
* Generic messages in the signal log
*/
void
-SignalLoggerManager::log(BlockNumber bno, const char * msg)
+SignalLoggerManager::log(BlockNumber bno, const char * msg, ...)
{
// Normalise blocknumber for use in logModes array
const BlockNumber bno2 = bno - MIN_BLOCK_NO;
@@ -391,7 +391,12 @@ SignalLoggerManager::log(BlockNumber bno, const char * msg)
if(outputStream != 0 &&
logModes[bno2] != LogOff){
- fprintf(outputStream, "%s: %s\n", getBlockName(bno, "API"), msg);
+ va_list ap;
+ va_start(ap, msg);
+ fprintf(outputStream, "%s: ", getBlockName(bno, "API"));
+ vfprintf(outputStream, msg, ap);
+ fprintf(outputStream, "\n");
+ va_end(ap);
}
}
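
Note: since SignalLoggerManager::log() is now printf-style, call sites can format inline instead of pre-building the message. A hypothetical call (the manager instance, block number and variables are placeholders, not taken from this patch):

    // Hypothetical call site; formatting now happens inside log().
    signalLoggerManager.log(blockNo, "delaying signal %u by %u ms",
                            gsn, delayMilliSeconds);
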
diff --git a/ndb/src/common/debugger/signaldata/DictTabInfo.cpp b/ndb/src/common/debugger/signaldata/DictTabInfo.cpp
index c6165532ddb..43c129347c0 100644
--- a/ndb/src/common/debugger/signaldata/DictTabInfo.cpp
+++ b/ndb/src/common/debugger/signaldata/DictTabInfo.cpp
@@ -23,7 +23,6 @@ SimpleProperties::SP2StructMapping
DictTabInfo::TableMapping[] = {
DTIMAPS(Table, TableName, TableName, 0, MAX_TAB_NAME_SIZE),
DTIMAP(Table, TableId, TableId),
- DTIMAP(Table, SecondTableId, SecondTableId),
DTIMAPS(Table, PrimaryTable, PrimaryTable, 0, MAX_TAB_NAME_SIZE),
DTIMAP(Table, PrimaryTableId, PrimaryTableId),
DTIMAP2(Table, TableLoggedFlag, TableLoggedFlag, 0, 1),
@@ -32,8 +31,6 @@ DictTabInfo::TableMapping[] = {
DTIMAP2(Table, MaxLoadFactor, MaxLoadFactor, 25, 110),
DTIMAP2(Table, FragmentTypeVal, FragmentType, 0, 3),
DTIMAP2(Table, TableStorageVal, TableStorage, 0, 0),
- DTIMAP2(Table, ScanOptimised, ScanOptimised, 0, 0),
- DTIMAP2(Table, FragmentKeyTypeVal, FragmentKeyType, 0, 2),
DTIMAP2(Table, TableTypeVal, TableType, 1, 3),
DTIMAP(Table, NoOfKeyAttr, NoOfKeyAttr),
DTIMAP2(Table, NoOfAttributes, NoOfAttributes, 1, MAX_ATTRIBUTES_IN_TABLE),
@@ -49,6 +46,8 @@ DictTabInfo::TableMapping[] = {
DTIMAP2(Table, FrmLen, FrmLen, 0, MAX_FRM_DATA_SIZE),
DTIMAPB(Table, FrmData, FrmData, 0, MAX_FRM_DATA_SIZE, FrmLen),
DTIMAP(Table, FragmentCount, FragmentCount),
+ DTIMAP2(Table, FragmentDataLen, FragmentDataLen, 0, MAX_FRAGMENT_DATA_BYTES),
+ DTIMAPB(Table, FragmentData, FragmentData, 0, MAX_FRAGMENT_DATA_BYTES, FragmentDataLen),
DTIBREAK(AttributeName)
};
@@ -62,16 +61,12 @@ SimpleProperties::SP2StructMapping
DictTabInfo::AttributeMapping[] = {
DTIMAPS(Attribute, AttributeName, AttributeName, 0, MAX_ATTR_NAME_SIZE),
DTIMAP(Attribute, AttributeId, AttributeId),
- DTIMAP2(Attribute, AttributeType, AttributeType, 0, 3),
+ DTIMAP(Attribute, AttributeType, AttributeType),
DTIMAP2(Attribute, AttributeSize, AttributeSize, 3, 7),
DTIMAP2(Attribute, AttributeArraySize, AttributeArraySize, 0, 65535),
DTIMAP2(Attribute, AttributeKeyFlag, AttributeKeyFlag, 0, 1),
- DTIMAP2(Attribute, AttributeStorage, AttributeStorage, 0, 0),
DTIMAP2(Attribute, AttributeNullableFlag, AttributeNullableFlag, 0, 1),
- DTIMAP2(Attribute, AttributeDGroup, AttributeDGroup, 0, 1),
DTIMAP2(Attribute, AttributeDKey, AttributeDKey, 0, 1),
- DTIMAP2(Attribute, AttributeStoredInd, AttributeStoredInd, 0, 1),
- DTIMAP2(Attribute, AttributeGroup, AttributeGroup, 0, 0),
DTIMAP(Attribute, AttributeExtType, AttributeExtType),
DTIMAP(Attribute, AttributeExtPrecision, AttributeExtPrecision),
DTIMAP(Attribute, AttributeExtScale, AttributeExtScale),
@@ -104,7 +99,6 @@ void
DictTabInfo::Table::init(){
memset(TableName, 0, sizeof(TableName));//TableName[0] = 0;
TableId = ~0;
- SecondTableId = ~0;
memset(PrimaryTable, 0, sizeof(PrimaryTable));//PrimaryTable[0] = 0; // Only used when "index"
PrimaryTableId = RNIL;
TableLoggedFlag = 1;
@@ -118,8 +112,6 @@ DictTabInfo::Table::init(){
KeyLength = 0;
FragmentType = DictTabInfo::AllNodesSmallTable;
TableStorage = 0;
- ScanOptimised = 0;
- FragmentKeyType = DictTabInfo::PrimaryKey;
TableType = DictTabInfo::UndefTableType;
TableVersion = 0;
IndexState = ~0;
@@ -130,23 +122,21 @@ DictTabInfo::Table::init(){
FrmLen = 0;
memset(FrmData, 0, sizeof(FrmData));
FragmentCount = 0;
+ FragmentDataLen = 0;
+ memset(FragmentData, 0, sizeof(FragmentData));
}
void
DictTabInfo::Attribute::init(){
memset(AttributeName, 0, sizeof(AttributeName));//AttributeName[0] = 0;
AttributeId = 0;
- AttributeType = DictTabInfo::UnSignedType;
+ AttributeType = ~0, // deprecated
AttributeSize = DictTabInfo::a32Bit;
AttributeArraySize = 1;
AttributeKeyFlag = 0;
- AttributeStorage = 1;
AttributeNullableFlag = 0;
- AttributeDGroup = 0;
AttributeDKey = 0;
- AttributeStoredInd = 1;
- AttributeGroup = 0;
- AttributeExtType = 0,
+ AttributeExtType = DictTabInfo::ExtUnsigned,
AttributeExtPrecision = 0,
AttributeExtScale = 0,
AttributeExtLength = 0,
diff --git a/ndb/src/common/debugger/signaldata/ScanTab.cpp b/ndb/src/common/debugger/signaldata/ScanTab.cpp
index 0755ee0a856..d78beb4740a 100644
--- a/ndb/src/common/debugger/signaldata/ScanTab.cpp
+++ b/ndb/src/common/debugger/signaldata/ScanTab.cpp
@@ -30,20 +30,26 @@ printSCANTABREQ(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receiv
fprintf(output, " apiConnectPtr: H\'%.8x",
sig->apiConnectPtr);
fprintf(output, " requestInfo: H\'%.8x:\n", requestInfo);
- fprintf(output, " Parallellism: %u, Batch: %u LockMode: %u, Keyinfo: %u Holdlock: %u, RangeScan: %u ReadCommitted: %u\n",
+ fprintf(output, " Parallellism: %u Batch: %u LockMode: %u Keyinfo: %u Holdlock: %u RangeScan: %u Descending: %u TupScan: %u\n ReadCommitted: %u DistributionKeyFlag: %u",
sig->getParallelism(requestInfo),
sig->getScanBatch(requestInfo),
sig->getLockMode(requestInfo),
sig->getKeyinfoFlag(requestInfo),
sig->getHoldLockFlag(requestInfo),
sig->getRangeScanFlag(requestInfo),
- sig->getReadCommittedFlag(requestInfo));
+ sig->getDescendingFlag(requestInfo),
+ sig->getTupScanFlag(requestInfo),
+ sig->getReadCommittedFlag(requestInfo),
+ sig->getDistributionKeyFlag(requestInfo));
+
+ if(sig->getDistributionKeyFlag(requestInfo))
+ fprintf(output, " DKey: %x", sig->distributionKey);
Uint32 keyLen = (sig->attrLenKeyLen >> 16);
Uint32 attrLen = (sig->attrLenKeyLen & 0xFFFF);
fprintf(output, " attrLen: %d, keyLen: %d tableId: %d, tableSchemaVer: %d\n",
attrLen, keyLen, sig->tableId, sig->tableSchemaVersion);
-
+
fprintf(output, " transId(1, 2): (H\'%.8x, H\'%.8x) storedProcId: H\'%.8x\n",
sig->transId1, sig->transId2, sig->storedProcId);
fprintf(output, " batch_byte_size: %d, first_batch_size: %d\n",
diff --git a/ndb/src/common/debugger/signaldata/SignalDataPrint.cpp b/ndb/src/common/debugger/signaldata/SignalDataPrint.cpp
index bd1dff074f9..34cae9f618f 100644
--- a/ndb/src/common/debugger/signaldata/SignalDataPrint.cpp
+++ b/ndb/src/common/debugger/signaldata/SignalDataPrint.cpp
@@ -27,6 +27,7 @@
const NameFunctionPair
SignalDataPrintFunctions[] = {
{ GSN_TCKEYREQ, printTCKEYREQ },
+ { GSN_TCINDXREQ, printTCKEYREQ },
{ GSN_TCKEYCONF, printTCKEYCONF },
{ GSN_TCKEYREF, printTCKEYREF },
{ GSN_LQHKEYREQ, printLQHKEYREQ },
@@ -82,14 +83,10 @@ SignalDataPrintFunctions[] = {
{ GSN_ALTER_INDX_REQ, printALTER_INDX_REQ },
{ GSN_ALTER_INDX_CONF, printALTER_INDX_CONF },
{ GSN_ALTER_INDX_REF, printALTER_INDX_REF },
- { GSN_TCINDXREQ, printTCINDXREQ },
{ GSN_TCINDXCONF, printTCINDXCONF },
{ GSN_TCINDXREF, printTCINDXREF },
{ GSN_INDXKEYINFO, printINDXKEYINFO },
{ GSN_INDXATTRINFO, printINDXATTRINFO },
- //{ GSN_TCINDXNEXTREQ, printTCINDXNEXTREQ },
- //{ GSN_TCINDEXNEXTCONF, printTCINDEXNEXTCONF },
- //{ GSN_TCINDEXNEXREF, printTCINDEXNEXREF },
{ GSN_FSAPPENDREQ, printFSAPPENDREQ },
{ GSN_BACKUP_REQ, printBACKUP_REQ },
{ GSN_BACKUP_DATA, printBACKUP_DATA },
@@ -154,11 +151,17 @@ SignalDataPrintFunctions[] = {
{ GSN_DISCONNECT_REP, printDISCONNECT_REP },
{ GSN_SUB_CREATE_REQ, printSUB_CREATE_REQ },
- //{ GSN_SUB_CREATE_REF, printSUB_CREATE_REF },
+ { GSN_SUB_CREATE_REF, printSUB_CREATE_REF },
{ GSN_SUB_CREATE_CONF, printSUB_CREATE_CONF },
+ { GSN_SUB_REMOVE_REQ, printSUB_REMOVE_REQ },
+ { GSN_SUB_REMOVE_REF, printSUB_REMOVE_REF },
+ { GSN_SUB_REMOVE_CONF, printSUB_REMOVE_CONF },
{ GSN_SUB_START_REQ, printSUB_START_REQ },
{ GSN_SUB_START_REF, printSUB_START_REF },
{ GSN_SUB_START_CONF, printSUB_START_CONF },
+ { GSN_SUB_STOP_REQ, printSUB_STOP_REQ },
+ { GSN_SUB_STOP_REF, printSUB_STOP_REF },
+ { GSN_SUB_STOP_CONF, printSUB_STOP_CONF },
{ GSN_SUB_SYNC_REQ, printSUB_SYNC_REQ },
{ GSN_SUB_SYNC_REF, printSUB_SYNC_REF },
{ GSN_SUB_SYNC_CONF, printSUB_SYNC_CONF },
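
Note: every entry in SignalDataPrintFunctions[] pairs a GSN with a print routine of the shape used throughout this patch, bool f(FILE*, const Uint32*, Uint32, Uint16), and, as the GSN_TCINDXREQ line above shows, one routine may serve several signal numbers. A hypothetical new printer and its registration would look like this (names are invented for illustration):

    // Hypothetical printer, same signature as the printSUB_* helpers
    // later in this patch.
    bool
    printMY_SIGNAL(FILE * output, const Uint32 * theData,
                   Uint32 len, Uint16 receiverBlockNo)
    {
      fprintf(output, " firstWord: H\'%.8x\n", theData[0]);
      return true;
    }

    // Registration in SignalDataPrintFunctions[]:
    //   { GSN_MY_SIGNAL, printMY_SIGNAL },
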
diff --git a/ndb/src/common/debugger/signaldata/SignalNames.cpp b/ndb/src/common/debugger/signaldata/SignalNames.cpp
index 9228e305677..984d28819c0 100644
--- a/ndb/src/common/debugger/signaldata/SignalNames.cpp
+++ b/ndb/src/common/debugger/signaldata/SignalNames.cpp
@@ -578,6 +578,9 @@ const GsnName SignalNames [] = {
,{ GSN_SUB_CREATE_REQ, "SUB_CREATE_REQ" }
,{ GSN_SUB_CREATE_REF, "SUB_CREATE_REF" }
,{ GSN_SUB_CREATE_CONF, "SUB_CREATE_CONF" }
+ ,{ GSN_SUB_REMOVE_REQ, "SUB_REMOVE_REQ" }
+ ,{ GSN_SUB_REMOVE_REF, "SUB_REMOVE_REF" }
+ ,{ GSN_SUB_REMOVE_CONF, "SUB_REMOVE_CONF" }
,{ GSN_SUB_START_REQ, "SUB_START_REQ" }
,{ GSN_SUB_START_REF, "SUB_START_REF" }
,{ GSN_SUB_START_CONF, "SUB_START_CONF" }
diff --git a/ndb/src/common/debugger/signaldata/SumaImpl.cpp b/ndb/src/common/debugger/signaldata/SumaImpl.cpp
index 558842ed2ba..e50a3040fe3 100644
--- a/ndb/src/common/debugger/signaldata/SumaImpl.cpp
+++ b/ndb/src/common/debugger/signaldata/SumaImpl.cpp
@@ -40,12 +40,55 @@ printSUB_CREATE_CONF(FILE * output, const Uint32 * theData,
}
bool
+printSUB_CREATE_REF(FILE * output, const Uint32 * theData,
+ Uint32 len, Uint16 receiverBlockNo) {
+ const SubCreateRef * const sig = (SubCreateRef *)theData;
+ fprintf(output, " subscriptionId: %x\n", sig->subscriptionId);
+ fprintf(output, " subscriptionKey: %x\n", sig->subscriptionKey);
+ fprintf(output, " subscriberData: %x\n", sig->subscriberData);
+ return false;
+}
+
+bool
+printSUB_REMOVE_REQ(FILE * output, const Uint32 * theData,
+ Uint32 len, Uint16 receiverBlockNo)
+{
+ const SubRemoveReq * const sig = (SubRemoveReq *)theData;
+ fprintf(output, " subscriptionId: %x\n", sig->subscriptionId);
+ fprintf(output, " subscriptionKey: %x\n", sig->subscriptionKey);
+ return false;
+}
+
+bool
+printSUB_REMOVE_CONF(FILE * output, const Uint32 * theData,
+ Uint32 len, Uint16 receiverBlockNo)
+{
+ const SubRemoveConf * const sig = (SubRemoveConf *)theData;
+ fprintf(output, " subscriptionId: %x\n", sig->subscriptionId);
+ fprintf(output, " subscriptionKey: %x\n", sig->subscriptionKey);
+ fprintf(output, " subscriberData: %x\n", sig->subscriberData);
+ return false;
+}
+
+bool
+printSUB_REMOVE_REF(FILE * output, const Uint32 * theData,
+ Uint32 len, Uint16 receiverBlockNo)
+{
+ const SubRemoveRef * const sig = (SubRemoveRef *)theData;
+ fprintf(output, " subscriptionId: %x\n", sig->subscriptionId);
+ fprintf(output, " subscriptionKey: %x\n", sig->subscriptionKey);
+ fprintf(output, " subscriberData: %x\n", sig->subscriberData);
+ fprintf(output, " err: %x\n", sig->err);
+ return false;
+}
+
+bool
printSUB_START_REQ(FILE * output, const Uint32 * theData,
Uint32 len, Uint16 receiverBlockNo) {
const SubStartReq * const sig = (SubStartReq *)theData;
fprintf(output, " subscriptionId: %x\n", sig->subscriptionId);
fprintf(output, " subscriptionKey: %x\n", sig->subscriptionKey);
- fprintf(output, " startPart: %x\n", sig->part);
+ fprintf(output, " subscriberData: %x\n", sig->subscriberData);
return false;
}
@@ -73,6 +116,37 @@ printSUB_START_CONF(FILE * output, const Uint32 * theData,
}
bool
+printSUB_STOP_REQ(FILE * output, const Uint32 * theData,
+ Uint32 len, Uint16 receiverBlockNo) {
+ const SubStopReq * const sig = (SubStopReq *)theData;
+ fprintf(output, " subscriptionId: %x\n", sig->subscriptionId);
+ fprintf(output, " subscriptionKey: %x\n", sig->subscriptionKey);
+ fprintf(output, " subscriberData: %x\n", sig->subscriberData);
+ return false;
+}
+
+bool
+printSUB_STOP_REF(FILE * output, const Uint32 * theData,
+ Uint32 len, Uint16 receiverBlockNo) {
+ const SubStopRef * const sig = (SubStopRef *)theData;
+ fprintf(output, " subscriptionId: %x\n", sig->subscriptionId);
+ fprintf(output, " subscriptionKey: %x\n", sig->subscriptionKey);
+ fprintf(output, " subscriberData: %x\n", sig->subscriberData);
+ fprintf(output, " err: %x\n", sig->err);
+ return false;
+}
+
+bool
+printSUB_STOP_CONF(FILE * output, const Uint32 * theData,
+ Uint32 len, Uint16 receiverBlockNo) {
+ const SubStopConf * const sig = (SubStopConf *)theData;
+ fprintf(output, " subscriptionId: %x\n", sig->subscriptionId);
+ fprintf(output, " subscriptionKey: %x\n", sig->subscriptionKey);
+ fprintf(output, " subscriberData: %x\n", sig->subscriberData);
+ return false;
+}
+
+bool
printSUB_SYNC_REQ(FILE * output, const Uint32 * theData,
Uint32 len, Uint16 receiverBlockNo) {
const SubSyncReq * const sig = (SubSyncReq *)theData;
diff --git a/ndb/src/common/debugger/signaldata/TcIndx.cpp b/ndb/src/common/debugger/signaldata/TcIndx.cpp
index 6bfa29eff15..b0578f5b646 100644
--- a/ndb/src/common/debugger/signaldata/TcIndx.cpp
+++ b/ndb/src/common/debugger/signaldata/TcIndx.cpp
@@ -18,91 +18,6 @@
#include <signaldata/TcKeyReq.hpp>
#include <BlockNumbers.h>
-bool
-printTCINDXREQ(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receiverBlockNo){
-
- const TcIndxReq * const sig = (TcIndxReq *) theData;
-
- UintR requestInfo = sig->requestInfo;
- UintR scanInfo = sig->scanInfo;
-
- fprintf(output, " apiConnectPtr: H\'%.8x, senderData: H\'%.8x\n",
- sig->apiConnectPtr, sig->senderData);
-
- fprintf(output, " Operation: %s, Flags: ",
- sig->getOperationType(requestInfo) == ZREAD ? "Read" :
- sig->getOperationType(requestInfo) == ZREAD_EX ? "Read-Ex" :
- sig->getOperationType(requestInfo) == ZUPDATE ? "Update" :
- sig->getOperationType(requestInfo) == ZINSERT ? "Insert" :
- sig->getOperationType(requestInfo) == ZDELETE ? "Delete" :
- sig->getOperationType(requestInfo) == ZWRITE ? "Write" :
- "Unknown");
-
- {
- if(sig->getDirtyFlag(requestInfo)){
- fprintf(output, "Dirty ");
- }
- if(sig->getStartFlag(requestInfo)){
- fprintf(output, "Start ");
- }
- if (TcKeyReq::getExecuteFlag(sig->requestInfo)) {
- fprintf(output, "Execute ");
- }
- if(sig->getCommitFlag(requestInfo)){
- fprintf(output, "Commit, Type = ");
- UintR TcommitType = sig->getCommitType(requestInfo);
- if (TcommitType == TcIndxReq::CommitIfFailFree) {
- fprintf(output, "FailFree ");
- } else if (TcommitType == TcIndxReq::TryCommit) {
- fprintf(output, "TryCommit ");
- } else if (TcommitType == TcIndxReq::CommitAsMuchAsPossible) {
- fprintf(output, "Always ");
- }//if
- }
- if(sig->getSimpleFlag(requestInfo)){
- fprintf(output, "Simple ");
- }
- if(sig->getInterpretedFlag(requestInfo)){
- fprintf(output, "Interpreted ");
- }
- if(sig->getDistributionGroupFlag(requestInfo)){
- fprintf(output, "DGroup = %d ", sig->distrGroupHashValue);
- }
- if(sig->getDistributionKeyFlag(sig->requestInfo)){
- fprintf(output, "DKey = %d ", sig->distributionKeySize);
- }
- fprintf(output, "\n");
- }
-
- const int indexLen = sig->getIndexLength(requestInfo);
- const int attrInThis = sig->getAIInTcIndxReq(requestInfo);
- fprintf(output,
- " indexLen: %d, attrLen: %d, AI in this: %d, indexId: %d, "
- "indexSchemaVer: %d, API Ver: %d\n",
- indexLen, sig->attrLen, attrInThis,
- sig->indexId, sig->indexSchemaVersion, sig->getAPIVersion(scanInfo));
-
- fprintf(output, " transId(1, 2): (H\'%.8x, H\'%.8x)\n -- Variable Data --\n",
- sig->transId1, sig->transId2);
-
- Uint32 restLen = (len - 8);
- const Uint32 * rest = &sig->scanInfo;
- while(restLen >= 7){
- fprintf(output,
- " H\'%.8x H\'%.8x H\'%.8x H\'%.8x H\'%.8x H\'%.8x H\'%.8x\n",
- rest[0], rest[1], rest[2], rest[3],
- rest[4], rest[5], rest[6]);
- restLen -= 7;
- rest += 7;
- }
- if(restLen > 0){
- for(Uint32 i = 0; i<restLen; i++)
- fprintf(output, " H\'%.8x", rest[i]);
- fprintf(output, "\n");
- }
-
- return true;
-}
bool
printTCINDXCONF(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receiverBlockNo){
diff --git a/ndb/src/common/debugger/signaldata/TcKeyReq.cpp b/ndb/src/common/debugger/signaldata/TcKeyReq.cpp
index 7304872ff9c..3918bd5db26 100644
--- a/ndb/src/common/debugger/signaldata/TcKeyReq.cpp
+++ b/ndb/src/common/debugger/signaldata/TcKeyReq.cpp
@@ -68,11 +68,8 @@ printTCKEYREQ(FILE * output, const Uint32 * theData, Uint32 len, Uint16 receiver
if(sig->getInterpretedFlag(requestInfo)){
fprintf(output, "Interpreted ");
}
- if(sig->getDistributionGroupFlag(requestInfo)){
- fprintf(output, "DGroup = %d ", sig->distrGroupHashValue);
- }
if(sig->getDistributionKeyFlag(sig->requestInfo)){
- fprintf(output, "DKey = %d ", sig->distributionKeySize);
+ fprintf(output, " d-key");
}
fprintf(output, "\n");
}
diff --git a/ndb/src/common/mgmcommon/ConfigRetriever.cpp b/ndb/src/common/mgmcommon/ConfigRetriever.cpp
index b870bc67aa3..b3d0221fedb 100644
--- a/ndb/src/common/mgmcommon/ConfigRetriever.cpp
+++ b/ndb/src/common/mgmcommon/ConfigRetriever.cpp
@@ -112,6 +112,12 @@ ConfigRetriever::do_connect(int no_retries,
0 : -1;
}
+int
+ConfigRetriever::disconnect()
+{
+ return ndb_mgm_disconnect(m_handle);
+}
+
//****************************************************************************
//****************************************************************************
//****************************************************************************
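
Note: ConfigRetriever::disconnect() simply forwards to ndb_mgm_disconnect() on the retriever's handle. A sketch of how a caller might use it once the configuration has been fetched, assuming the usual mgmapi convention of returning 0 on success:

    // Sketch: drop the management-server connection after the
    // configuration has been retrieved.
    if (configRetriever.disconnect() != 0)
    {
      // the underlying ndb_mgm_disconnect() reported a failure
    }
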
diff --git a/ndb/src/common/mgmcommon/IPCConfig.cpp b/ndb/src/common/mgmcommon/IPCConfig.cpp
index 8cf5e6e8d45..f935f8ffab4 100644
--- a/ndb/src/common/mgmcommon/IPCConfig.cpp
+++ b/ndb/src/common/mgmcommon/IPCConfig.cpp
@@ -111,178 +111,6 @@ IPCConfig::addRemoteNodeId(NodeId nodeId){
}
/**
- * Returns no of transporters configured
- */
-int
-IPCConfig::configureTransporters(TransporterRegistry * theTransporterRegistry)
-{
- DBUG_ENTER("IPCConfig::configureTransporters");
-
- int noOfTransportersCreated = 0;
-
- Uint32 noOfConnections;
- if(!props->get("NoOfConnections", &noOfConnections)) return -1;
-
- for (Uint32 i = 0; i < noOfConnections; i++){
- const Properties * tmp;
- Uint32 nodeId1, nodeId2;
- const char * host1;
- const char * host2;
-
- if(!props->get("Connection", i, &tmp)) continue;
- if(!tmp->get("NodeId1", &nodeId1)) continue;
- if(!tmp->get("NodeId2", &nodeId2)) continue;
- if(nodeId1 != the_ownId && nodeId2 != the_ownId) continue;
-
- Uint32 sendSignalId;
- Uint32 compression;
- Uint32 checksum;
- if(!tmp->get("SendSignalId", &sendSignalId)) continue;
- if(!tmp->get("Checksum", &checksum)) continue;
-
- const char * type;
- if(!tmp->get("Type", &type)) continue;
-
- if(strcmp("SHM", type) == 0){
- SHM_TransporterConfiguration conf;
- conf.localNodeId = the_ownId;
- conf.remoteNodeId = (nodeId1 != the_ownId ? nodeId1 : nodeId2);
- conf.checksum = checksum;
- conf.signalId = sendSignalId;
-
- if(!tmp->get("ShmKey", &conf.shmKey)) continue;
- if(!tmp->get("ShmSize", &conf.shmSize)) continue;
-
- if(!theTransporterRegistry->createTransporter(&conf)){
- ndbout << "Failed to create SHM Transporter from: "
- << conf.localNodeId << " to: " << conf.remoteNodeId << endl;
- continue;
- } else {
- noOfTransportersCreated++;
- continue;
- }
-
- } else if(strcmp("SCI", type) == 0){
- SCI_TransporterConfiguration conf;
- conf.localNodeId = the_ownId;
- conf.remoteNodeId = (nodeId1 != the_ownId ? nodeId1 : nodeId2);
- conf.checksum = checksum;
- conf.signalId = sendSignalId;
-
- if(!tmp->get("SendLimit", &conf.sendLimit)) continue;
- if(!tmp->get("SharedBufferSize", &conf.bufferSize)) continue;
-
- if(the_ownId == nodeId1){
- if(!tmp->get("Node1_NoOfAdapters", &conf.nLocalAdapters)) continue;
- if(!tmp->get("Node2_Adapter", 0, &conf.remoteSciNodeId0)) continue;
-
- if(conf.nLocalAdapters > 1){
- if(!tmp->get("Node2_Adapter", 1, &conf.remoteSciNodeId1)) continue;
- }
- } else {
- if(!tmp->get("Node2_NoOfAdapters", &conf.nLocalAdapters)) continue;
- if(!tmp->get("Node1_Adapter", 0, &conf.remoteSciNodeId0)) continue;
-
- if(conf.nLocalAdapters > 1){
- if(!tmp->get("Node1_Adapter", 1, &conf.remoteSciNodeId1)) continue;
- }
- }
-
- if(!theTransporterRegistry->createTransporter(&conf)){
- ndbout << "Failed to create SCI Transporter from: "
- << conf.localNodeId << " to: " << conf.remoteNodeId << endl;
- continue;
- } else {
- noOfTransportersCreated++;
- continue;
- }
- }
-
- if(!tmp->get("HostName1", &host1)) continue;
- if(!tmp->get("HostName2", &host2)) continue;
-
- Uint32 ownNodeId;
- Uint32 remoteNodeId;
- const char * ownHostName;
- const char * remoteHostName;
-
- if(nodeId1 == the_ownId){
- ownNodeId = nodeId1;
- ownHostName = host1;
- remoteNodeId = nodeId2;
- remoteHostName = host2;
- } else if(nodeId2 == the_ownId){
- ownNodeId = nodeId2;
- ownHostName = host2;
- remoteNodeId = nodeId1;
- remoteHostName = host1;
- } else {
- continue;
- }
-
- if(strcmp("TCP", type) == 0){
- TCP_TransporterConfiguration conf;
-
- if(!tmp->get("PortNumber", &conf.port)) continue;
- if(!tmp->get("SendBufferSize", &conf.sendBufferSize)) continue;
- if(!tmp->get("MaxReceiveSize", &conf.maxReceiveSize)) continue;
-
- const char * proxy;
- if (tmp->get("Proxy", &proxy)) {
- if (strlen(proxy) > 0 && nodeId2 == the_ownId) {
- // TODO handle host:port
- conf.port = atoi(proxy);
- }
- }
- conf.sendBufferSize *= MAX_MESSAGE_SIZE;
- conf.maxReceiveSize *= MAX_MESSAGE_SIZE;
-
- conf.remoteHostName = remoteHostName;
- conf.localHostName = ownHostName;
- conf.remoteNodeId = remoteNodeId;
- conf.localNodeId = ownNodeId;
- conf.checksum = checksum;
- conf.signalId = sendSignalId;
-
- if(!theTransporterRegistry->createTransporter(&conf)){
- ndbout << "Failed to create TCP Transporter from: "
- << ownNodeId << " to: " << remoteNodeId << endl;
- } else {
- noOfTransportersCreated++;
- }
-
- } else if(strcmp("OSE", type) == 0){
-
- OSE_TransporterConfiguration conf;
-
- if(!tmp->get("PrioASignalSize", &conf.prioASignalSize))
- continue;
- if(!tmp->get("PrioBSignalSize", &conf.prioBSignalSize))
- continue;
- if(!tmp->get("ReceiveArraySize", &conf.receiveBufferSize))
- continue;
-
- conf.remoteHostName = remoteHostName;
- conf.localHostName = ownHostName;
- conf.remoteNodeId = remoteNodeId;
- conf.localNodeId = ownNodeId;
- conf.checksum = checksum;
- conf.signalId = sendSignalId;
-
- if(!theTransporterRegistry->createTransporter(&conf)){
- ndbout << "Failed to create OSE Transporter from: "
- << ownNodeId << " to: " << remoteNodeId << endl;
- } else {
- noOfTransportersCreated++;
- }
- } else {
- continue;
- }
- }
- DBUG_RETURN(noOfTransportersCreated);
-}
-
-/**
* Supply a nodeId,
* and get next higher node id
* Returns false if none found
@@ -338,8 +166,39 @@ Uint32
IPCConfig::configureTransporters(Uint32 nodeId,
const class ndb_mgm_configuration & config,
class TransporterRegistry & tr){
+ TransporterConfiguration conf;
+
DBUG_ENTER("IPCConfig::configureTransporters");
+ /**
+   * Iterate over all MGM nodes and construct a connectstring,
+   * create an mgm_handle and give it to the Transporter Registry
+ */
+ {
+ const char *separator= "";
+ BaseString connect_string;
+ ndb_mgm_configuration_iterator iter(config, CFG_SECTION_NODE);
+ for(iter.first(); iter.valid(); iter.next())
+ {
+ Uint32 type;
+ if(iter.get(CFG_TYPE_OF_SECTION, &type)) continue;
+ if(type != NODE_TYPE_MGM) continue;
+ const char* hostname;
+ Uint32 port;
+ if(iter.get(CFG_NODE_HOST, &hostname)) continue;
+ if( strlen(hostname) == 0 ) continue;
+ if(iter.get(CFG_MGM_PORT, &port)) continue;
+ connect_string.appfmt("%s%s:%u",separator,hostname,port);
+ separator= ",";
+ }
+ NdbMgmHandle h= ndb_mgm_create_handle();
+ if ( h && connect_string.length() > 0 )
+ {
+ ndb_mgm_set_connectstring(h,connect_string.c_str());
+ tr.set_mgm_handle(h);
+ }
+ }
+
Uint32 noOfTransportersCreated= 0;
ndb_mgm_configuration_iterator iter(config, CFG_SECTION_CONNECTION);
@@ -371,32 +230,66 @@ IPCConfig::configureTransporters(Uint32 nodeId,
Uint32 server_port= 0;
if(iter.get(CFG_CONNECTION_SERVER_PORT, &server_port)) break;
- if (nodeId <= nodeId1 && nodeId <= nodeId2) {
- tr.add_transporter_interface(localHostName, server_port);
+
+ /*
+ We check the node type. MGM node becomes server.
+ */
+ Uint32 node1type, node2type;
+ ndb_mgm_configuration_iterator node1iter(config, CFG_SECTION_NODE);
+ ndb_mgm_configuration_iterator node2iter(config, CFG_SECTION_NODE);
+ node1iter.find(CFG_NODE_ID,nodeId1);
+ node2iter.find(CFG_NODE_ID,nodeId2);
+ node1iter.get(CFG_TYPE_OF_SECTION,&node1type);
+ node2iter.get(CFG_TYPE_OF_SECTION,&node2type);
+
+ conf.serverNodeId= (nodeId1 < nodeId2)? nodeId1:nodeId2;
+
+ conf.isMgmConnection= false;
+ if(node2type==NODE_TYPE_MGM)
+ {
+ conf.isMgmConnection= true;
+ conf.serverNodeId= nodeId2;
}
+ else if(node1type==NODE_TYPE_MGM)
+ {
+ conf.isMgmConnection= true;
+ conf.serverNodeId= nodeId1;
+ }
+ else if (nodeId == conf.serverNodeId) {
+ tr.add_transporter_interface(remoteNodeId, localHostName, server_port);
+ }
+
DBUG_PRINT("info", ("Transporter between this node %d and node %d using port %d, signalId %d, checksum %d",
nodeId, remoteNodeId, server_port, sendSignalId, checksum));
+ /*
+ This may be a dynamic port. It depends on when we're getting
+ our configuration. If we've been restarted, we'll be getting
+ a configuration with our old dynamic port in it, hence the number
+ here is negative (and we try the old port number first).
+
+      On a first run, server_port will be zero (with dynamic ports).
+
+ If we're not using dynamic ports, we don't do anything.
+ */
+
+ conf.localNodeId = nodeId;
+ conf.remoteNodeId = remoteNodeId;
+ conf.checksum = checksum;
+ conf.signalId = sendSignalId;
+ conf.s_port = server_port;
+ conf.localHostName = localHostName;
+ conf.remoteHostName = remoteHostName;
+
switch(type){
- case CONNECTION_TYPE_SHM:{
- SHM_TransporterConfiguration conf;
- conf.localNodeId = nodeId;
- conf.remoteNodeId = remoteNodeId;
- conf.checksum = checksum;
- conf.signalId = sendSignalId;
-
- if(iter.get(CFG_SHM_KEY, &conf.shmKey)) break;
- if(iter.get(CFG_SHM_BUFFER_MEM, &conf.shmSize)) break;
- {
- Uint32 tmp;
- if(iter.get(CFG_SHM_SIGNUM, &tmp)) break;
- conf.signum= tmp;
- }
+ case CONNECTION_TYPE_SHM:
+ if(iter.get(CFG_SHM_KEY, &conf.shm.shmKey)) break;
+ if(iter.get(CFG_SHM_BUFFER_MEM, &conf.shm.shmSize)) break;
- conf.port= server_port;
- conf.localHostName = localHostName;
- conf.remoteHostName = remoteHostName;
+ Uint32 tmp;
+ if(iter.get(CFG_SHM_SIGNUM, &tmp)) break;
+ conf.shm.signum= tmp;
- if(!tr.createTransporter(&conf)){
+ if(!tr.createSHMTransporter(&conf)){
DBUG_PRINT("error", ("Failed to create SHM Transporter from %d to %d",
conf.localNodeId, conf.remoteNodeId));
ndbout << "Failed to create SHM Transporter from: "
@@ -404,112 +297,90 @@ IPCConfig::configureTransporters(Uint32 nodeId,
} else {
noOfTransportersCreated++;
}
- DBUG_PRINT("info", ("Created SHM Transporter using shmkey %d, buf size = %d",
- conf.shmKey, conf.shmSize));
+ DBUG_PRINT("info", ("Created SHM Transporter using shmkey %d, "
+ "buf size = %d", conf.shm.shmKey, conf.shm.shmSize));
+
break;
- }
- case CONNECTION_TYPE_SCI:{
- SCI_TransporterConfiguration conf;
- conf.localNodeId = nodeId;
- conf.remoteNodeId = remoteNodeId;
- conf.checksum = checksum;
- conf.signalId = sendSignalId;
- conf.port= server_port;
-
- conf.localHostName = localHostName;
- conf.remoteHostName = remoteHostName;
- if(iter.get(CFG_SCI_SEND_LIMIT, &conf.sendLimit)) break;
- if(iter.get(CFG_SCI_BUFFER_MEM, &conf.bufferSize)) break;
+ case CONNECTION_TYPE_SCI:
+ if(iter.get(CFG_SCI_SEND_LIMIT, &conf.sci.sendLimit)) break;
+ if(iter.get(CFG_SCI_BUFFER_MEM, &conf.sci.bufferSize)) break;
if (nodeId == nodeId1) {
- if(iter.get(CFG_SCI_HOST2_ID_0, &conf.remoteSciNodeId0)) break;
- if(iter.get(CFG_SCI_HOST2_ID_1, &conf.remoteSciNodeId1)) break;
+ if(iter.get(CFG_SCI_HOST2_ID_0, &conf.sci.remoteSciNodeId0)) break;
+ if(iter.get(CFG_SCI_HOST2_ID_1, &conf.sci.remoteSciNodeId1)) break;
} else {
- if(iter.get(CFG_SCI_HOST1_ID_0, &conf.remoteSciNodeId0)) break;
- if(iter.get(CFG_SCI_HOST1_ID_1, &conf.remoteSciNodeId1)) break;
+ if(iter.get(CFG_SCI_HOST1_ID_0, &conf.sci.remoteSciNodeId0)) break;
+ if(iter.get(CFG_SCI_HOST1_ID_1, &conf.sci.remoteSciNodeId1)) break;
}
- if (conf.remoteSciNodeId1 == 0) {
- conf.nLocalAdapters = 1;
+ if (conf.sci.remoteSciNodeId1 == 0) {
+ conf.sci.nLocalAdapters = 1;
} else {
- conf.nLocalAdapters = 2;
+ conf.sci.nLocalAdapters = 2;
}
- if(!tr.createTransporter(&conf)){
+ if(!tr.createSCITransporter(&conf)){
DBUG_PRINT("error", ("Failed to create SCI Transporter from %d to %d",
conf.localNodeId, conf.remoteNodeId));
ndbout << "Failed to create SCI Transporter from: "
<< conf.localNodeId << " to: " << conf.remoteNodeId << endl;
} else {
- DBUG_PRINT("info", ("Created SCI Transporter: Adapters = %d, remote SCI node id %d",
- conf.nLocalAdapters, conf.remoteSciNodeId0));
- DBUG_PRINT("info", ("Host 1 = %s, Host 2 = %s, sendLimit = %d, buf size = %d",
- conf.localHostName, conf.remoteHostName, conf.sendLimit, conf.bufferSize));
- if (conf.nLocalAdapters > 1) {
- DBUG_PRINT("info", ("Fault-tolerant with 2 Remote Adapters, second remote SCI node id = %d",
- conf.remoteSciNodeId1));
+ DBUG_PRINT("info", ("Created SCI Transporter: Adapters = %d, "
+ "remote SCI node id %d",
+ conf.sci.nLocalAdapters, conf.sci.remoteSciNodeId0));
+ DBUG_PRINT("info", ("Host 1 = %s, Host 2 = %s, sendLimit = %d, "
+ "buf size = %d", conf.localHostName,
+ conf.remoteHostName, conf.sci.sendLimit,
+ conf.sci.bufferSize));
+ if (conf.sci.nLocalAdapters > 1) {
+ DBUG_PRINT("info", ("Fault-tolerant with 2 Remote Adapters, "
+ "second remote SCI node id = %d",
+ conf.sci.remoteSciNodeId1));
}
noOfTransportersCreated++;
continue;
}
- }
- case CONNECTION_TYPE_TCP:{
- TCP_TransporterConfiguration conf;
-
- if(iter.get(CFG_TCP_SEND_BUFFER_SIZE, &conf.sendBufferSize)) break;
- if(iter.get(CFG_TCP_RECEIVE_BUFFER_SIZE, &conf.maxReceiveSize)) break;
+ break;
+
+ case CONNECTION_TYPE_TCP:
+ if(iter.get(CFG_TCP_SEND_BUFFER_SIZE, &conf.tcp.sendBufferSize)) break;
+ if(iter.get(CFG_TCP_RECEIVE_BUFFER_SIZE, &conf.tcp.maxReceiveSize)) break;
- conf.port= server_port;
const char * proxy;
if (!iter.get(CFG_TCP_PROXY, &proxy)) {
if (strlen(proxy) > 0 && nodeId2 == nodeId) {
// TODO handle host:port
- conf.port = atoi(proxy);
+ conf.s_port = atoi(proxy);
}
}
- conf.localNodeId = nodeId;
- conf.remoteNodeId = remoteNodeId;
- conf.localHostName = localHostName;
- conf.remoteHostName = remoteHostName;
- conf.checksum = checksum;
- conf.signalId = sendSignalId;
-
- if(!tr.createTransporter(&conf)){
+ if(!tr.createTCPTransporter(&conf)){
ndbout << "Failed to create TCP Transporter from: "
<< nodeId << " to: " << remoteNodeId << endl;
} else {
noOfTransportersCreated++;
}
- DBUG_PRINT("info", ("Created TCP Transporter: sendBufferSize = %d, maxReceiveSize = %d",
- conf.sendBufferSize, conf.maxReceiveSize));
+ DBUG_PRINT("info", ("Created TCP Transporter: sendBufferSize = %d, "
+ "maxReceiveSize = %d", conf.tcp.sendBufferSize,
+ conf.tcp.maxReceiveSize));
break;
- case CONNECTION_TYPE_OSE:{
- OSE_TransporterConfiguration conf;
-
- if(iter.get(CFG_OSE_PRIO_A_SIZE, &conf.prioASignalSize)) break;
- if(iter.get(CFG_OSE_PRIO_B_SIZE, &conf.prioBSignalSize)) break;
- if(iter.get(CFG_OSE_RECEIVE_ARRAY_SIZE, &conf.receiveBufferSize)) break;
-
- conf.localNodeId = nodeId;
- conf.remoteNodeId = remoteNodeId;
- conf.localHostName = localHostName;
- conf.remoteHostName = remoteHostName;
- conf.checksum = checksum;
- conf.signalId = sendSignalId;
+ case CONNECTION_TYPE_OSE:
+ if(iter.get(CFG_OSE_PRIO_A_SIZE, &conf.ose.prioASignalSize)) break;
+ if(iter.get(CFG_OSE_PRIO_B_SIZE, &conf.ose.prioBSignalSize)) break;
- if(!tr.createTransporter(&conf)){
+ if(!tr.createOSETransporter(&conf)){
ndbout << "Failed to create OSE Transporter from: "
<< nodeId << " to: " << remoteNodeId << endl;
} else {
noOfTransportersCreated++;
}
- }
+ break;
+
default:
ndbout << "Unknown transporter type from: " << nodeId <<
" to: " << remoteNodeId << endl;
break;
- }
- }
- }
+ } // switch
+ } // for
+
DBUG_RETURN(noOfTransportersCreated);
}
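
Note: the rewritten configureTransporters() fills one TransporterConfiguration and then hands it to the matching create*Transporter() call. The struct itself is defined outside this hunk; the following is only a reading aid reconstructed from the fields used above, and the real definition may well differ:

    // Reading aid only -- reconstructed from usage, not the real header.
    struct TransporterConfiguration {
      int          s_port;          // signed: negative marks a previously
                                    // allocated dynamic port, 0 means none yet
      Uint32       localNodeId;
      Uint32       remoteNodeId;
      Uint32       serverNodeId;    // which side acts as server (an MGM node
                                    // always takes the server role)
      bool         checksum;
      bool         signalId;
      bool         isMgmConnection; // piggy-back on an existing MGM connection
      const char * localHostName;
      const char * remoteHostName;

      struct { Uint32 sendBufferSize, maxReceiveSize; } tcp;
      struct { Uint32 shmKey, shmSize, signum; } shm;
      struct { Uint32 sendLimit, bufferSize, nLocalAdapters,
                      remoteSciNodeId0, remoteSciNodeId1; } sci;
      struct { Uint32 prioASignalSize, prioBSignalSize; } ose;
    };
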
diff --git a/ndb/src/common/portlib/NdbTCP.cpp b/ndb/src/common/portlib/NdbTCP.cpp
index c7b9d33c5f6..41471548b7e 100644
--- a/ndb/src/common/portlib/NdbTCP.cpp
+++ b/ndb/src/common/portlib/NdbTCP.cpp
@@ -83,3 +83,50 @@ Ndb_getInAddr(struct in_addr * dst, const char *address) {
return -1;
}
#endif
+
+int Ndb_check_socket_hup(NDB_SOCKET_TYPE sock)
+{
+#ifdef HAVE_POLL
+ struct pollfd pfd[1];
+ int r;
+
+ pfd[0].fd= sock;
+ pfd[0].events= POLLHUP | POLLIN | POLLOUT | POLLNVAL;
+ pfd[0].revents= 0;
+ r= poll(pfd,1,0);
+ if(pfd[0].revents & (POLLHUP|POLLERR))
+ return 1;
+
+ return 0;
+#else /* HAVE_POLL */
+ fd_set readfds, writefds, errorfds;
+ struct timeval tv= {0,0};
+ int s_err;
+ int s_err_size= sizeof(s_err);
+
+ FD_ZERO(&readfds);
+ FD_ZERO(&writefds);
+ FD_ZERO(&errorfds);
+
+ FD_SET(sock, &readfds);
+ FD_SET(sock, &writefds);
+ FD_SET(sock, &errorfds);
+
+ if(select(1, &readfds, &writefds, &errorfds, &tv)<0)
+ return 1;
+
+ if(FD_ISSET(sock,&errorfds))
+ return 1;
+
+ s_err=0;
+ if (getsockopt(sock, SOL_SOCKET, SO_ERROR, (char*) &s_err, &s_err_size) != 0)
+ return(1);
+
+ if (s_err)
+ { /* getsockopt could succeed */
+ return(1); /* but return an error... */
+ }
+
+ return 0;
+#endif /* HAVE_POLL */
+}
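
Note: Ndb_check_socket_hup() performs a zero-timeout poll/select and returns 1 when the peer has hung up or the socket is in an error state, 0 otherwise. A sketch of how a caller might use it to avoid reusing a dead cached socket (the surrounding variable is a placeholder):

    // Sketch: discard a cached socket the peer has already closed.
    if (Ndb_check_socket_hup(cachedSocket))
    {
      NDB_CLOSE_SOCKET(cachedSocket);
      cachedSocket = NDB_INVALID_SOCKET;  // force a fresh connect next time
    }
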
diff --git a/ndb/src/common/portlib/gcc.cpp b/ndb/src/common/portlib/gcc.cpp
index 66aa4812dc6..4e49d787d3c 100644
--- a/ndb/src/common/portlib/gcc.cpp
+++ b/ndb/src/common/portlib/gcc.cpp
@@ -2,6 +2,6 @@
/**
* GCC linking problem...
*/
-#ifdef DEFINE_CXA_PURE_VIRTUAL
+#if 0
extern "C" { int __cxa_pure_virtual() { return 0;} }
#endif
diff --git a/ndb/src/common/portlib/win32/NdbTCP.c b/ndb/src/common/portlib/win32/NdbTCP.c
index b936cd2db6c..5d6c0ae5c7d 100644
--- a/ndb/src/common/portlib/win32/NdbTCP.c
+++ b/ndb/src/common/portlib/win32/NdbTCP.c
@@ -37,3 +37,35 @@ Ndb_getInAddr(struct in_addr * dst, const char *address)
return -1;
}
+int Ndb_check_socket_hup(NDB_SOCKET_TYPE sock)
+{
+ fd_set readfds, writefds, errorfds;
+ struct timeval tv= {0,0};
+ int s_err;
+ int s_err_size= sizeof(s_err);
+
+ FD_ZERO(&readfds);
+ FD_ZERO(&writefds);
+ FD_ZERO(&errorfds);
+
+ FD_SET(sock, &readfds);
+ FD_SET(sock, &writefds);
+ FD_SET(sock, &errorfds);
+
+ if(select(1, &readfds, &writefds, &errorfds, &tv)==SOCKET_ERROR)
+ return 1;
+
+ if(FD_ISSET(sock,&errorfds))
+ return 1;
+
+ s_err=0;
+ if (getsockopt(sock, SOL_SOCKET, SO_ERROR, (char*) &s_err, &s_err_size) != 0)
+ return(1);
+
+ if (s_err)
+ { /* getsockopt could succeed */
+ return(1); /* but return an error... */
+ }
+
+ return 0;
+}
diff --git a/ndb/src/common/transporter/Makefile.am b/ndb/src/common/transporter/Makefile.am
index b902012e56d..4c277097a91 100644
--- a/ndb/src/common/transporter/Makefile.am
+++ b/ndb/src/common/transporter/Makefile.am
@@ -13,7 +13,7 @@ EXTRA_libtransporter_la_SOURCES = SHM_Transporter.cpp SHM_Transporter.unix.cpp S
libtransporter_la_LIBADD = @ndb_transporter_opt_objs@
libtransporter_la_DEPENDENCIES = @ndb_transporter_opt_objs@
-INCLUDES_LOC = -I$(top_srcdir)/ndb/include/mgmapi -I$(top_srcdir)/ndb/include/debugger -I$(top_srcdir)/ndb/include/kernel -I$(top_srcdir)/ndb/include/transporter @NDB_SCI_INCLUDES@
+INCLUDES_LOC = -I$(top_srcdir)/ndb/include/mgmapi -I$(top_srcdir)/ndb/src/mgmapi -I$(top_srcdir)/ndb/include/debugger -I$(top_srcdir)/ndb/include/kernel -I$(top_srcdir)/ndb/include/transporter @NDB_SCI_INCLUDES@
include $(top_srcdir)/ndb/config/common.mk.am
include $(top_srcdir)/ndb/config/type_util.mk.am
diff --git a/ndb/src/common/transporter/OSE_Transporter.cpp b/ndb/src/common/transporter/OSE_Transporter.cpp
index a52862a80e5..ad67791fc0c 100644
--- a/ndb/src/common/transporter/OSE_Transporter.cpp
+++ b/ndb/src/common/transporter/OSE_Transporter.cpp
@@ -32,6 +32,7 @@ OSE_Transporter::OSE_Transporter(int _prioASignalSize,
NodeId localNodeId,
const char * lHostName,
NodeId remoteNodeId,
+ NodeId serverNodeId,
const char * rHostName,
int byteorder,
bool compression,
@@ -40,6 +41,7 @@ OSE_Transporter::OSE_Transporter(int _prioASignalSize,
Uint32 reportFreq) :
Transporter(localNodeId,
remoteNodeId,
+ serverNodeId,
byteorder,
compression,
checksum,
diff --git a/ndb/src/common/transporter/OSE_Transporter.hpp b/ndb/src/common/transporter/OSE_Transporter.hpp
index 4fd06130477..898352366ba 100644
--- a/ndb/src/common/transporter/OSE_Transporter.hpp
+++ b/ndb/src/common/transporter/OSE_Transporter.hpp
@@ -48,6 +48,7 @@ public:
NodeId localNodeId,
const char * lHostName,
NodeId remoteNodeId,
+ NodeId serverNodeId,
const char * rHostName,
int byteorder,
bool compression,
diff --git a/ndb/src/common/transporter/Packer.cpp b/ndb/src/common/transporter/Packer.cpp
index 9eba335330d..bcfac8417bb 100644
--- a/ndb/src/common/transporter/Packer.cpp
+++ b/ndb/src/common/transporter/Packer.cpp
@@ -93,6 +93,7 @@ TransporterRegistry::unpack(Uint32 * readPtr,
signalHeader.theSendersSignalId = * signalData;
signalData ++;
}//if
+ signalHeader.theSignalId= ~0;
Uint32 * sectionPtr = signalData + signalHeader.theLength;
Uint32 * sectionData = sectionPtr + signalHeader.m_noOfSections;
diff --git a/ndb/src/common/transporter/SCI_Transporter.cpp b/ndb/src/common/transporter/SCI_Transporter.cpp
index 47722939098..1fe276249e5 100644
--- a/ndb/src/common/transporter/SCI_Transporter.cpp
+++ b/ndb/src/common/transporter/SCI_Transporter.cpp
@@ -34,19 +34,21 @@ SCI_Transporter::SCI_Transporter(TransporterRegistry &t_reg,
const char *lHostName,
const char *rHostName,
int r_port,
+ bool isMgmConnection,
Uint32 packetSize,
Uint32 bufferSize,
Uint32 nAdapters,
Uint16 remoteSciNodeId0,
Uint16 remoteSciNodeId1,
NodeId _localNodeId,
- NodeId _remoteNodeId,
+ NodeId _remoteNodeId,
+ NodeId serverNodeId,
bool chksm,
bool signalId,
Uint32 reportFreq) :
Transporter(t_reg, tt_SCI_TRANSPORTER,
- lHostName, rHostName, r_port, _localNodeId,
- _remoteNodeId, 0, false, chksm, signalId)
+ lHostName, rHostName, r_port, isMgmConnection, _localNodeId,
+ _remoteNodeId, serverNodeId, 0, false, chksm, signalId)
{
DBUG_ENTER("SCI_Transporter::SCI_Transporter");
m_PacketSize = (packetSize + 3)/4 ;
diff --git a/ndb/src/common/transporter/SCI_Transporter.hpp b/ndb/src/common/transporter/SCI_Transporter.hpp
index e62c142e1b9..cb42e437118 100644
--- a/ndb/src/common/transporter/SCI_Transporter.hpp
+++ b/ndb/src/common/transporter/SCI_Transporter.hpp
@@ -140,13 +140,15 @@ private:
const char *local_host,
const char *remote_host,
int port,
+ bool isMgmConnection,
Uint32 packetSize,
Uint32 bufferSize,
Uint32 nAdapters,
Uint16 remoteSciNodeId0,
Uint16 remoteSciNodeId1,
NodeId localNodeID,
- NodeId remoteNodeID,
+ NodeId remoteNodeID,
+ NodeId serverNodeId,
bool checksum,
bool signalId,
Uint32 reportFreq = 4096);
diff --git a/ndb/src/common/transporter/SHM_Transporter.cpp b/ndb/src/common/transporter/SHM_Transporter.cpp
index 4d7f46d7723..a225988d37f 100644
--- a/ndb/src/common/transporter/SHM_Transporter.cpp
+++ b/ndb/src/common/transporter/SHM_Transporter.cpp
@@ -32,14 +32,17 @@ SHM_Transporter::SHM_Transporter(TransporterRegistry &t_reg,
const char *lHostName,
const char *rHostName,
int r_port,
+ bool isMgmConnection,
NodeId lNodeId,
- NodeId rNodeId,
+ NodeId rNodeId,
+ NodeId serverNodeId,
bool checksum,
bool signalId,
key_t _shmKey,
Uint32 _shmSize) :
Transporter(t_reg, tt_SHM_TRANSPORTER,
- lHostName, rHostName, r_port, lNodeId, rNodeId,
+ lHostName, rHostName, r_port, isMgmConnection,
+ lNodeId, rNodeId, serverNodeId,
0, false, checksum, signalId),
shmKey(_shmKey),
shmSize(_shmSize)
diff --git a/ndb/src/common/transporter/SHM_Transporter.hpp b/ndb/src/common/transporter/SHM_Transporter.hpp
index 7079e12a924..e7a76225471 100644
--- a/ndb/src/common/transporter/SHM_Transporter.hpp
+++ b/ndb/src/common/transporter/SHM_Transporter.hpp
@@ -36,8 +36,10 @@ public:
const char *lHostName,
const char *rHostName,
int r_port,
+ bool isMgmConnection,
NodeId lNodeId,
- NodeId rNodeId,
+ NodeId rNodeId,
+ NodeId serverNodeId,
bool checksum,
bool signalId,
key_t shmKey,
diff --git a/ndb/src/common/transporter/TCP_Transporter.cpp b/ndb/src/common/transporter/TCP_Transporter.cpp
index 5003d90a4b4..5db12d3985c 100644
--- a/ndb/src/common/transporter/TCP_Transporter.cpp
+++ b/ndb/src/common/transporter/TCP_Transporter.cpp
@@ -68,12 +68,15 @@ TCP_Transporter::TCP_Transporter(TransporterRegistry &t_reg,
const char *lHostName,
const char *rHostName,
int r_port,
+ bool isMgmConnection,
NodeId lNodeId,
NodeId rNodeId,
+ NodeId serverNodeId,
bool chksm, bool signalId,
Uint32 _reportFreq) :
Transporter(t_reg, tt_TCP_TRANSPORTER,
- lHostName, rHostName, r_port, lNodeId, rNodeId,
+ lHostName, rHostName, r_port, isMgmConnection,
+ lNodeId, rNodeId, serverNodeId,
0, false, chksm, signalId),
m_sendBuffer(sendBufSize)
{
diff --git a/ndb/src/common/transporter/TCP_Transporter.hpp b/ndb/src/common/transporter/TCP_Transporter.hpp
index 151ec261506..df4149531b4 100644
--- a/ndb/src/common/transporter/TCP_Transporter.hpp
+++ b/ndb/src/common/transporter/TCP_Transporter.hpp
@@ -49,9 +49,11 @@ private:
int sendBufferSize, int maxReceiveSize,
const char *lHostName,
const char *rHostName,
- int r_port,
+ int r_port,
+ bool isMgmConnection,
NodeId lHostId,
NodeId rHostId,
+ NodeId serverNodeId,
bool checksum, bool signalId,
Uint32 reportFreq = 4096);
diff --git a/ndb/src/common/transporter/Transporter.cpp b/ndb/src/common/transporter/Transporter.cpp
index 328ce2816de..377fabe27ab 100644
--- a/ndb/src/common/transporter/Transporter.cpp
+++ b/ndb/src/common/transporter/Transporter.cpp
@@ -31,13 +31,15 @@ Transporter::Transporter(TransporterRegistry &t_reg,
TransporterType _type,
const char *lHostName,
const char *rHostName,
- int r_port,
+ int s_port,
+ bool _isMgmConnection,
NodeId lNodeId,
- NodeId rNodeId,
+ NodeId rNodeId,
+ NodeId serverNodeId,
int _byteorder,
bool _compression, bool _checksum, bool _signalId)
- : m_r_port(r_port), remoteNodeId(rNodeId), localNodeId(lNodeId),
- isServer(lNodeId < rNodeId),
+ : m_s_port(s_port), remoteNodeId(rNodeId), localNodeId(lNodeId),
+ isServer(lNodeId==serverNodeId), isMgmConnection(_isMgmConnection),
m_packer(_signalId, _checksum),
m_type(_type),
m_transporter_registry(t_reg)
@@ -61,10 +63,10 @@ Transporter::Transporter(TransporterRegistry &t_reg,
if (strlen(lHostName) > 0)
Ndb_getInAddr(&localHostAddress, lHostName);
- DBUG_PRINT("info",("rId=%d lId=%d isServer=%d rHost=%s lHost=%s r_port=%d",
+ DBUG_PRINT("info",("rId=%d lId=%d isServer=%d rHost=%s lHost=%s s_port=%d",
remoteNodeId, localNodeId, isServer,
remoteHostName, localHostName,
- r_port));
+ s_port));
byteOrder = _byteorder;
compressionUsed = _compression;
@@ -75,10 +77,13 @@ Transporter::Transporter(TransporterRegistry &t_reg,
m_timeOutMillis = 1000;
m_connect_address.s_addr= 0;
+ if(s_port<0)
+ s_port= -s_port; // was dynamic
+
if (isServer)
m_socket_client= 0;
else
- m_socket_client= new SocketClient(remoteHostName, r_port,
+ m_socket_client= new SocketClient(remoteHostName, s_port,
new SocketAuthSimple("ndbd",
"ndbd passwd"));
DBUG_VOID_RETURN;
@@ -117,22 +122,42 @@ Transporter::connect_server(NDB_SOCKET_TYPE sockfd) {
bool
Transporter::connect_client() {
+ NDB_SOCKET_TYPE sockfd;
+
if(m_connected)
return true;
- NDB_SOCKET_TYPE sockfd = m_socket_client->connect();
-
+
+ if(isMgmConnection)
+ sockfd= m_transporter_registry.connect_ndb_mgmd(m_socket_client);
+ else
+ sockfd= m_socket_client->connect();
+
+ return connect_client(sockfd);
+}
+
+bool
+Transporter::connect_client(NDB_SOCKET_TYPE sockfd) {
+
+ if(m_connected)
+ return true;
+
if (sockfd == NDB_INVALID_SOCKET)
return false;
DBUG_ENTER("Transporter::connect_client");
+ DBUG_PRINT("info",("port %d isMgmConnection=%d",m_s_port,isMgmConnection));
+
+ SocketOutputStream s_output(sockfd);
+ SocketInputStream s_input(sockfd);
+
// send info about own id
// send info about own transporter type
- SocketOutputStream s_output(sockfd);
+
s_output.println("%d %d", localNodeId, m_type);
// get remote id
int nodeId, remote_transporter_type= -1;
- SocketInputStream s_input(sockfd);
+
char buf[256];
if (s_input.gets(buf, 256) == 0) {
NDB_CLOSE_SOCKET(sockfd);
diff --git a/ndb/src/common/transporter/Transporter.hpp b/ndb/src/common/transporter/Transporter.hpp
index 5f3f8063723..c9f4e9bda42 100644
--- a/ndb/src/common/transporter/Transporter.hpp
+++ b/ndb/src/common/transporter/Transporter.hpp
@@ -44,6 +44,7 @@ public:
* Use isConnected() to check status
*/
bool connect_client();
+ bool connect_client(NDB_SOCKET_TYPE sockfd);
bool connect_server(NDB_SOCKET_TYPE socket);
/**
@@ -69,6 +70,22 @@ public:
*/
NodeId getLocalNodeId() const;
+ /**
+ * Get port we're connecting to (signed)
+ */
+ int get_s_port() { return m_s_port; };
+
+ /**
+ * Set port to connect to (signed)
+ */
+ void set_s_port(int port) {
+ m_s_port = port;
+ if(port<0)
+ port= -port;
+ if(m_socket_client)
+ m_socket_client->set_port(port);
+ };
+
virtual Uint32 get_free_buffer() const = 0;
protected:
@@ -76,9 +93,11 @@ protected:
TransporterType,
const char *lHostName,
const char *rHostName,
- int r_port,
+ int s_port,
+ bool isMgmConnection,
NodeId lNodeId,
- NodeId rNodeId,
+ NodeId rNodeId,
+ NodeId serverNodeId,
int byteorder,
bool compression,
bool checksum,
@@ -104,7 +123,7 @@ protected:
struct in_addr remoteHostAddress;
struct in_addr localHostAddress;
- const unsigned int m_r_port;
+ int m_s_port;
const NodeId remoteNodeId;
const NodeId localNodeId;
@@ -121,6 +140,12 @@ protected:
private:
+ /**
+ * means that we transform an MGM connection into
+ * a transporter connection
+ */
+ bool isMgmConnection;
+
SocketClient *m_socket_client;
struct in_addr m_connect_address;
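
Note: the signed port stored in a Transporter encodes the dynamic-port scheme used throughout this patch: positive means a fixed (or freshly fetched) port, negative preserves a previously allocated dynamic port, and 0 means no port has been allocated yet. A small sketch of that convention from a caller's point of view (the transporter pointer t is a placeholder):

    // Sketch: interpreting Transporter::get_s_port().
    int p = t->get_s_port();
    if (p > 0) {
      // fixed port, or a dynamic port already obtained from the mgm server
    } else if (p < 0) {
      // previously allocated dynamic port; -p is tried first after a restart
    } else {
      // p == 0: no dynamic port yet -- it has to be fetched from the mgm server
    }
    // set_s_port() stores the signed value but strips the sign before
    // passing the port on to the underlying SocketClient.
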
diff --git a/ndb/src/common/transporter/TransporterRegistry.cpp b/ndb/src/common/transporter/TransporterRegistry.cpp
index 3f190d16264..3937f1fc98b 100644
--- a/ndb/src/common/transporter/TransporterRegistry.cpp
+++ b/ndb/src/common/transporter/TransporterRegistry.cpp
@@ -48,6 +48,10 @@ extern int g_ndb_shm_signum;
#include <InputStream.hpp>
#include <OutputStream.hpp>
+#include <mgmapi/mgmapi.h>
+#include <mgmapi_internal.h>
+#include <mgmapi/mgmapi_debug.h>
+
#include <EventLogger.hpp>
extern EventLogger g_eventLogger;
@@ -83,6 +87,7 @@ TransporterRegistry::TransporterRegistry(void * callback,
nodeIdSpecified = false;
maxTransporters = _maxTransporters;
sendCounter = 1;
+ m_mgm_handle= 0;
callbackObj=callback;
@@ -119,6 +124,27 @@ TransporterRegistry::TransporterRegistry(void * callback,
DBUG_VOID_RETURN;
}
+void TransporterRegistry::set_mgm_handle(NdbMgmHandle h)
+{
+ DBUG_ENTER("TransporterRegistry::set_mgm_handle");
+ if (m_mgm_handle)
+ ndb_mgm_destroy_handle(&m_mgm_handle);
+ m_mgm_handle= h;
+#ifndef DBUG_OFF
+ if (h)
+ {
+ char buf[256];
+ DBUG_PRINT("info",("handle set with connectstring: %s",
+ ndb_mgm_get_connectstring(h,buf, sizeof(buf))));
+ }
+ else
+ {
+ DBUG_PRINT("info",("handle set to NULL"));
+ }
+#endif
+ DBUG_VOID_RETURN;
+}
+
TransporterRegistry::~TransporterRegistry()
{
DBUG_ENTER("TransporterRegistry::~TransporterRegistry");
@@ -141,6 +167,8 @@ TransporterRegistry::~TransporterRegistry()
theOSEReceiver = 0;
}
#endif
+ if (m_mgm_handle)
+ ndb_mgm_destroy_handle(&m_mgm_handle);
DBUG_VOID_RETURN;
}
@@ -258,7 +286,7 @@ TransporterRegistry::connect_server(NDB_SOCKET_TYPE sockfd)
}
bool
-TransporterRegistry::createTransporter(TCP_TransporterConfiguration *config) {
+TransporterRegistry::createTCPTransporter(TransporterConfiguration *config) {
#ifdef NDB_TCP_TRANSPORTER
if(!nodeIdSpecified){
@@ -272,13 +300,15 @@ TransporterRegistry::createTransporter(TCP_TransporterConfiguration *config) {
return false;
TCP_Transporter * t = new TCP_Transporter(*this,
- config->sendBufferSize,
- config->maxReceiveSize,
+ config->tcp.sendBufferSize,
+ config->tcp.maxReceiveSize,
config->localHostName,
config->remoteHostName,
- config->port,
+ config->s_port,
+ config->isMgmConnection,
localNodeId,
config->remoteNodeId,
+ config->serverNodeId,
config->checksum,
config->signalId);
if (t == NULL)
@@ -307,7 +337,7 @@ TransporterRegistry::createTransporter(TCP_TransporterConfiguration *config) {
}
bool
-TransporterRegistry::createTransporter(OSE_TransporterConfiguration *conf) {
+TransporterRegistry::createOSETransporter(TransporterConfiguration *conf) {
#ifdef NDB_OSE_TRANSPORTER
if(!nodeIdSpecified){
@@ -326,11 +356,12 @@ TransporterRegistry::createTransporter(OSE_TransporterConfiguration *conf) {
localNodeId);
}
- OSE_Transporter * t = new OSE_Transporter(conf->prioASignalSize,
- conf->prioBSignalSize,
+ OSE_Transporter * t = new OSE_Transporter(conf->ose.prioASignalSize,
+ conf->ose.prioBSignalSize,
localNodeId,
conf->localHostName,
conf->remoteNodeId,
+ conf->serverNodeId,
conf->remoteHostName,
conf->checksum,
conf->signalId);
@@ -356,7 +387,7 @@ TransporterRegistry::createTransporter(OSE_TransporterConfiguration *conf) {
}
bool
-TransporterRegistry::createTransporter(SCI_TransporterConfiguration *config) {
+TransporterRegistry::createSCITransporter(TransporterConfiguration *config) {
#ifdef NDB_SCI_TRANSPORTER
if(!SCI_Transporter::initSCI())
@@ -375,14 +406,16 @@ TransporterRegistry::createTransporter(SCI_TransporterConfiguration *config) {
SCI_Transporter * t = new SCI_Transporter(*this,
config->localHostName,
config->remoteHostName,
- config->port,
- config->sendLimit,
- config->bufferSize,
- config->nLocalAdapters,
- config->remoteSciNodeId0,
- config->remoteSciNodeId1,
+ config->s_port,
+ config->isMgmConnection,
+ config->sci.sendLimit,
+ config->sci.bufferSize,
+ config->sci.nLocalAdapters,
+ config->sci.remoteSciNodeId0,
+ config->sci.remoteSciNodeId1,
localNodeId,
config->remoteNodeId,
+ config->serverNodeId,
config->checksum,
config->signalId);
@@ -407,7 +440,7 @@ TransporterRegistry::createTransporter(SCI_TransporterConfiguration *config) {
}
bool
-TransporterRegistry::createTransporter(SHM_TransporterConfiguration *config) {
+TransporterRegistry::createSHMTransporter(TransporterConfiguration *config) {
DBUG_ENTER("TransporterRegistry::createTransporter SHM");
#ifdef NDB_SHM_TRANSPORTER
if(!nodeIdSpecified){
@@ -418,7 +451,7 @@ TransporterRegistry::createTransporter(SHM_TransporterConfiguration *config) {
return false;
if (!g_ndb_shm_signum) {
- g_ndb_shm_signum= config->signum;
+ g_ndb_shm_signum= config->shm.signum;
DBUG_PRINT("info",("Block signum %d",g_ndb_shm_signum));
/**
* Make sure to block g_ndb_shm_signum
@@ -430,7 +463,7 @@ TransporterRegistry::createTransporter(SHM_TransporterConfiguration *config) {
pthread_sigmask(SIG_BLOCK, &mask, 0);
}
- if(config->signum != g_ndb_shm_signum)
+ if(config->shm.signum != g_ndb_shm_signum)
return false;
if(theTransporters[config->remoteNodeId] != NULL)
@@ -439,13 +472,15 @@ TransporterRegistry::createTransporter(SHM_TransporterConfiguration *config) {
SHM_Transporter * t = new SHM_Transporter(*this,
config->localHostName,
config->remoteHostName,
- config->port,
+ config->s_port,
+ config->isMgmConnection,
localNodeId,
config->remoteNodeId,
+ config->serverNodeId,
config->checksum,
config->signalId,
- config->shmKey,
- config->shmSize
+ config->shm.shmKey,
+ config->shm.shmSize
);
if (t == NULL)
return false;
@@ -883,6 +918,7 @@ TransporterRegistry::performReceive()
NodeId remoteNodeId;
Uint32 * readPtr;
Uint32 sz = theOSEReceiver->getReceiveData(&remoteNodeId, &readPtr);
+ transporter_recv_from(callbackObj, remoteNodeId);
Uint32 szUsed = unpack(readPtr,
sz,
remoteNodeId,
@@ -918,6 +954,7 @@ TransporterRegistry::performReceive()
{
Uint32 * ptr;
Uint32 sz = t->getReceiveData(&ptr);
+ transporter_recv_from(callbackObj, nodeId);
Uint32 szUsed = unpack(ptr, sz, nodeId, ioStates[nodeId]);
t->updateReceiveDataPtr(szUsed);
}
@@ -941,6 +978,7 @@ TransporterRegistry::performReceive()
{
Uint32 * readPtr, * eodPtr;
t->getReceivePtr(&readPtr, &eodPtr);
+ transporter_recv_from(callbackObj, nodeId);
Uint32 *newPtr = unpack(readPtr, eodPtr, nodeId, ioStates[nodeId]);
t->updateReceivePtr(newPtr);
}
@@ -958,6 +996,7 @@ TransporterRegistry::performReceive()
{
Uint32 * readPtr, * eodPtr;
t->getReceivePtr(&readPtr, &eodPtr);
+ transporter_recv_from(callbackObj, nodeId);
Uint32 *newPtr = unpack(readPtr, eodPtr, nodeId, ioStates[nodeId]);
t->updateReceivePtr(newPtr);
}
@@ -1149,7 +1188,10 @@ TransporterRegistry::do_connect(NodeId node_id)
case DISCONNECTING:
break;
}
+ DBUG_ENTER("TransporterRegistry::do_connect");
+ DBUG_PRINT("info",("performStates[%d]=CONNECTING",node_id));
curr_state= CONNECTING;
+ DBUG_VOID_RETURN;
}
void
TransporterRegistry::do_disconnect(NodeId node_id)
@@ -1165,21 +1207,30 @@ TransporterRegistry::do_disconnect(NodeId node_id)
case DISCONNECTING:
return;
}
+ DBUG_ENTER("TransporterRegistry::do_disconnect");
+ DBUG_PRINT("info",("performStates[%d]=DISCONNECTING",node_id));
curr_state= DISCONNECTING;
+ DBUG_VOID_RETURN;
}
void
TransporterRegistry::report_connect(NodeId node_id)
{
+ DBUG_ENTER("TransporterRegistry::report_connect");
+ DBUG_PRINT("info",("performStates[%d]=CONNECTED",node_id));
performStates[node_id] = CONNECTED;
reportConnect(callbackObj, node_id);
+ DBUG_VOID_RETURN;
}
void
TransporterRegistry::report_disconnect(NodeId node_id, int errnum)
{
+ DBUG_ENTER("TransporterRegistry::report_disconnect");
+ DBUG_PRINT("info",("performStates[%d]=DISCONNECTED",node_id));
performStates[node_id] = DISCONNECTED;
reportDisconnect(callbackObj, node_id, errnum);
+ DBUG_VOID_RETURN;
}
void
@@ -1224,8 +1275,67 @@ TransporterRegistry::start_clients_thread()
const NodeId nodeId = t->getRemoteNodeId();
switch(performStates[nodeId]){
case CONNECTING:
- if(!t->isConnected() && !t->isServer)
- t->connect_client();
+ if(!t->isConnected() && !t->isServer) {
+ bool connected= false;
+ /**
+ * First, we try to connect (if we have a port number).
+ */
+ if (t->get_s_port())
+ connected= t->connect_client();
+
+ /**
+ * If dynamic, get the port for connecting from the management server
+ */
+ if( !connected && t->get_s_port() <= 0) { // Port is dynamic
+ int server_port= 0;
+ struct ndb_mgm_reply mgm_reply;
+
+ if(!ndb_mgm_is_connected(m_mgm_handle))
+ ndb_mgm_connect(m_mgm_handle, 0, 0, 0);
+
+ if(ndb_mgm_is_connected(m_mgm_handle))
+ {
+ int res=
+ ndb_mgm_get_connection_int_parameter(m_mgm_handle,
+ t->getRemoteNodeId(),
+ t->getLocalNodeId(),
+ CFG_CONNECTION_SERVER_PORT,
+ &server_port,
+ &mgm_reply);
+ DBUG_PRINT("info",("Got dynamic port %d for %d -> %d (ret: %d)",
+ server_port,t->getRemoteNodeId(),
+ t->getLocalNodeId(),res));
+ if( res >= 0 )
+ {
+ /**
+	      * Server_port == 0 just means that the mgmt server
+ * has not received a new port yet. Keep the old.
+ */
+ if (server_port)
+ t->set_s_port(server_port);
+ }
+ else if(ndb_mgm_is_connected(m_mgm_handle))
+ {
+ ndbout_c("Failed to get dynamic port to connect to: %d", res);
+ ndb_mgm_disconnect(m_mgm_handle);
+ }
+ else
+ {
+ ndbout_c("Management server closed connection early. "
+ "It is probably being shut down (or has crashed). "
+ "We will retry the connection.");
+ }
+ }
+ /** else
+ * We will not be able to get a new port unless
+ * the m_mgm_handle is connected. Note that not
+       * being connected is an OK state; just continue
+ * until it is able to connect. Continue using the
+ * old port until we can connect again and get a
+ * new port.
+ */
+ }
+ }
break;
case DISCONNECTING:
if(t->isConnected())
@@ -1261,24 +1371,26 @@ TransporterRegistry::stop_clients()
if (m_start_clients_thread) {
m_run_start_clients_thread= false;
void* status;
- int r= NdbThread_WaitFor(m_start_clients_thread, &status);
+ NdbThread_WaitFor(m_start_clients_thread, &status);
NdbThread_Destroy(&m_start_clients_thread);
}
return true;
}
void
-TransporterRegistry::add_transporter_interface(const char *interf, unsigned short port)
+TransporterRegistry::add_transporter_interface(NodeId remoteNodeId,
+ const char *interf,
+ int s_port)
{
DBUG_ENTER("TransporterRegistry::add_transporter_interface");
- DBUG_PRINT("enter",("interface=%s, port= %d", interf, port));
+ DBUG_PRINT("enter",("interface=%s, s_port= %d", interf, s_port));
if (interf && strlen(interf) == 0)
interf= 0;
for (unsigned i= 0; i < m_transporter_interface.size(); i++)
{
Transporter_interface &tmp= m_transporter_interface[i];
- if (port != tmp.m_service_port)
+ if (s_port != tmp.m_s_service_port || tmp.m_s_service_port==0)
continue;
if (interf != 0 && tmp.m_interface != 0 &&
strcmp(interf, tmp.m_interface) == 0)
@@ -1291,7 +1403,8 @@ TransporterRegistry::add_transporter_interface(const char *interf, unsigned shor
}
}
Transporter_interface t;
- t.m_service_port= port;
+ t.m_remote_nodeId= remoteNodeId;
+ t.m_s_service_port= s_port;
t.m_interface= interf;
m_transporter_interface.push_back(t);
DBUG_PRINT("exit",("interface and port added"));
@@ -1301,34 +1414,50 @@ TransporterRegistry::add_transporter_interface(const char *interf, unsigned shor
bool
TransporterRegistry::start_service(SocketServer& socket_server)
{
+ struct ndb_mgm_reply mgm_reply;
+
+ DBUG_ENTER("TransporterRegistry::start_service");
if (m_transporter_interface.size() > 0 && !nodeIdSpecified)
{
ndbout_c("TransporterRegistry::startReceiving: localNodeId not specified");
- return false;
+ DBUG_RETURN(false);
}
for (unsigned i= 0; i < m_transporter_interface.size(); i++)
{
Transporter_interface &t= m_transporter_interface[i];
- if (t.m_service_port == 0)
- {
- continue;
- }
+
+ unsigned short port= (unsigned short)t.m_s_service_port;
+ if(t.m_s_service_port<0)
+ port= -t.m_s_service_port; // is a dynamic port
TransporterService *transporter_service =
new TransporterService(new SocketAuthSimple("ndbd", "ndbd passwd"));
if(!socket_server.setup(transporter_service,
- t.m_service_port, t.m_interface))
+ &port, t.m_interface))
{
- ndbout_c("Unable to setup transporter service port: %s:%d!\n"
- "Please check if the port is already used,\n"
- "(perhaps the node is already running)",
- t.m_interface ? t.m_interface : "*", t.m_service_port);
- delete transporter_service;
- return false;
+ DBUG_PRINT("info", ("Trying new port"));
+ port= 0;
+ if(t.m_s_service_port>0
+ || !socket_server.setup(transporter_service,
+ &port, t.m_interface))
+ {
+ /*
+ * If it wasn't a dynamically allocated port, or our
+ * attempt at getting a new dynamic port failed, give up.
+ */
+ ndbout_c("Unable to setup transporter service port: %s:%d!\n"
+ "Please check if the port is already used,\n"
+ "(perhaps the node is already running)",
+ t.m_interface ? t.m_interface : "*", t.m_s_service_port);
+ delete transporter_service;
+ DBUG_RETURN(false);
+ }
}
+ t.m_s_service_port= (t.m_s_service_port<=0)?-port:port; // negative if dynamic
+ DBUG_PRINT("info", ("t.m_s_service_port = %d",t.m_s_service_port));
transporter_service->setTransporterRegistry(this);
}
- return true;
+ DBUG_RETURN(true);
}
#ifdef NDB_SHM_TRANSPORTER
@@ -1380,12 +1509,7 @@ TransporterRegistry::startReceiving()
DBUG_PRINT("error",("Install failed"));
g_eventLogger.error("Failed to install signal handler for"
" SHM transporter errno: %d (%s)", errno,
-#ifdef HAVE_STRERROR
- strerror(errno)
-#else
- ""
-#endif
- );
+ strerror(errno));
}
}
#endif // NDB_SHM_TRANSPORTER
@@ -1443,4 +1567,107 @@ NdbOut & operator <<(NdbOut & out, SignalHeader & sh){
return out;
}
+Transporter*
+TransporterRegistry::get_transporter(NodeId nodeId) {
+ return theTransporters[nodeId];
+}
+
+bool TransporterRegistry::connect_client(NdbMgmHandle *h)
+{
+ DBUG_ENTER("TransporterRegistry::connect_client(NdbMgmHandle)");
+
+ Uint32 mgm_nodeid= ndb_mgm_get_mgmd_nodeid(*h);
+
+ if(!mgm_nodeid)
+ {
+ ndbout_c("%s: %d", __FILE__, __LINE__);
+ return false;
+ }
+ Transporter * t = theTransporters[mgm_nodeid];
+ if (!t)
+ {
+ ndbout_c("%s: %d", __FILE__, __LINE__);
+ return false;
+ }
+ DBUG_RETURN(t->connect_client(connect_ndb_mgmd(h)));
+}
+
+/**
+ * Given a connected NdbMgmHandle, turns it into a transporter
+ * and returns the socket.
+ */
+NDB_SOCKET_TYPE TransporterRegistry::connect_ndb_mgmd(NdbMgmHandle *h)
+{
+ struct ndb_mgm_reply mgm_reply;
+
+ if ( h==NULL || *h == NULL )
+ {
+ ndbout_c("%s: %d", __FILE__, __LINE__);
+ return NDB_INVALID_SOCKET;
+ }
+
+ for(unsigned int i=0;i < m_transporter_interface.size();i++)
+ if (m_transporter_interface[i].m_s_service_port < 0
+ && ndb_mgm_set_connection_int_parameter(*h,
+ get_localNodeId(),
+ m_transporter_interface[i].m_remote_nodeId,
+ CFG_CONNECTION_SERVER_PORT,
+ m_transporter_interface[i].m_s_service_port,
+ &mgm_reply) < 0)
+ {
+ ndbout_c("Error: %s: %d",
+ ndb_mgm_get_latest_error_desc(*h),
+ ndb_mgm_get_latest_error(*h));
+ ndbout_c("%s: %d", __FILE__, __LINE__);
+ ndb_mgm_destroy_handle(h);
+ return NDB_INVALID_SOCKET;
+ }
+
+ /**
+ * convert_to_transporter also disposes of the handle (i.e. we don't leak
+ * memory here).
+ */
+ NDB_SOCKET_TYPE sockfd= ndb_mgm_convert_to_transporter(h);
+ if ( sockfd == NDB_INVALID_SOCKET)
+ {
+ ndbout_c("Error: %s: %d",
+ ndb_mgm_get_latest_error_desc(*h),
+ ndb_mgm_get_latest_error(*h));
+ ndbout_c("%s: %d", __FILE__, __LINE__);
+ ndb_mgm_destroy_handle(h);
+ }
+ return sockfd;
+}
+
+/**
+ * Given a SocketClient, creates a NdbMgmHandle, turns it into a transporter
+ * and returns the socket.
+ */
+NDB_SOCKET_TYPE TransporterRegistry::connect_ndb_mgmd(SocketClient *sc)
+{
+ NdbMgmHandle h= ndb_mgm_create_handle();
+
+ if ( h == NULL )
+ {
+ return NDB_INVALID_SOCKET;
+ }
+
+ /**
+ * Set connectstring
+ */
+ {
+ BaseString cs;
+ cs.assfmt("%s:%u",sc->get_server_name(),sc->get_port());
+ ndb_mgm_set_connectstring(h, cs.c_str());
+ }
+
+ if(ndb_mgm_connect(h, 0, 0, 0)<0)
+ {
+ ndb_mgm_destroy_handle(&h);
+ return NDB_INVALID_SOCKET;
+ }
+
+ return connect_ndb_mgmd(&h);
+}
+
template class Vector<TransporterRegistry::Transporter_interface>;
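Editor's note: the start_service() change above encodes whether a port was dynamically allocated in the sign of m_s_service_port. A minimal sketch of that convention (not part of the patch; the helper names are illustrative only):

    // Hypothetical helpers illustrating the sign convention used by start_service():
    // a configured value <= 0 marks the transporter's port as dynamic, and the port
    // actually bound is stored negated so later code can tell the two cases apart.
    static bool is_dynamic_port(int s_port) { return s_port <= 0; }
    static unsigned short bound_port(int s_port) {
      return (unsigned short)(s_port <= 0 ? -s_port : s_port);
    }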
diff --git a/ndb/src/common/util/Base64.cpp b/ndb/src/common/util/Base64.cpp
index f7a490d427d..3db911f481f 100644
--- a/ndb/src/common/util/Base64.cpp
+++ b/ndb/src/common/util/Base64.cpp
@@ -22,17 +22,22 @@ static char base64_table[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
"0123456789+/";
int
-base64_encode(const UtilBuffer &src, BaseString &dst) {
- const unsigned char *s = (const unsigned char *)src.get_data();
+base64_encode(const UtilBuffer &src, BaseString &dst)
+{
+ return base64_encode(src.get_data(), src.length(), dst);
+}
+
+int
+base64_encode(const void * _s, size_t src_len, BaseString &dst) {
+ const unsigned char * s = (const unsigned char*)_s;
size_t i = 0;
size_t len = 0;
- size_t src_len = src.length();
while(i < src_len) {
if(len == 76){
len = 0;
dst.append('\n');
}
-
+
unsigned c;
c = s[i++];
c <<= 8;
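Editor's note: a minimal usage sketch of the new base64_encode() overload added above, assuming (as the delegating overload suggests) that it takes a raw pointer plus length and that a return value of 0 indicates success:

    const char raw[] = "ndb";
    BaseString encoded;
    if (base64_encode(raw, sizeof(raw) - 1, encoded) == 0)   // new overload
      ndbout_c("%s", encoded.c_str());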
diff --git a/ndb/src/common/util/Bitmask.cpp b/ndb/src/common/util/Bitmask.cpp
new file mode 100644
index 00000000000..0aa39a37204
--- /dev/null
+++ b/ndb/src/common/util/Bitmask.cpp
@@ -0,0 +1,351 @@
+#include <Bitmask.hpp>
+#include <NdbOut.hpp>
+
+static
+void print(const Uint32 src[], Uint32 len, Uint32 pos = 0)
+{
+ printf("b'");
+ for(unsigned i = 0; i<len; i++)
+ {
+ if(BitmaskImpl::get((pos + len + 31) >> 5, src, i+pos))
+ printf("1");
+ else
+ printf("0");
+ if((i & 31) == 31)
+ printf(" ");
+ }
+}
+
+#ifndef __TEST_BITMASK__
+
+void
+BitmaskImpl::getFieldImpl(const Uint32 src[],
+ unsigned shiftL, unsigned len, Uint32 dst[])
+{
+ assert(shiftL < 32);
+
+ unsigned shiftR = 32 - shiftL;
+ unsigned undefined = shiftL ? ~0 : 0;
+
+ * dst = shiftL ? * dst : 0;
+
+ while(len >= 32)
+ {
+ * dst++ |= (* src) << shiftL;
+ * dst = ((* src++) >> shiftR) & undefined;
+ len -= 32;
+ }
+
+ if(len < shiftR)
+ {
+ * dst |= ((* src) & ((1 << len) - 1)) << shiftL;
+ }
+ else
+ {
+ * dst++ |= ((* src) << shiftL);
+ * dst = ((* src) >> shiftR) & ((1 << (len - shiftR)) - 1) & undefined;
+ }
+}
+
+void
+BitmaskImpl::setFieldImpl(Uint32 dst[],
+ unsigned shiftL, unsigned len, const Uint32 src[])
+{
+ /**
+ *
+ * abcd ef00
+ * 00ab cdef
+ */
+ assert(shiftL < 32);
+ unsigned shiftR = 32 - shiftL;
+ unsigned undefined = shiftL ? ~0 : 0;
+ while(len >= 32)
+ {
+ * dst = (* src++) >> shiftL;
+ * dst++ |= ((* src) << shiftR) & undefined;
+ len -= 32;
+ }
+
+ Uint32 mask = ((1 << len) -1);
+ * dst = (* dst & ~mask);
+ if(len < shiftR)
+ {
+ * dst |= ((* src++) >> shiftL) & mask;
+ }
+ else
+ {
+ * dst |= ((* src++) >> shiftL);
+ * dst |= ((* src) & ((1 << (len - shiftR)) - 1)) << shiftR ;
+ }
+}
+#else
+
+#define DEBUG 0
+#include <Vector.hpp>
+static void do_test(int bitmask_size);
+
+int
+main(int argc, char** argv)
+{
+ int loops = argc > 1 ? atoi(argv[1]) : 1000;
+ int max_size = argc > 2 ? atoi(argv[2]) : 1000;
+
+
+ for(int i = 0; i<loops; i++)
+ do_test(1 + (rand() % max_size));
+}
+
+struct Alloc
+{
+ Uint32 pos;
+ Uint32 size;
+ Vector<Uint32> data;
+};
+
+static void require(bool b)
+{
+ if(!b) abort();
+}
+
+static
+bool cmp(const Uint32 b1[], const Uint32 b2[], Uint32 len)
+{
+ Uint32 sz32 = (len + 31) >> 5;
+ for(int i = 0; i<len; i++)
+ {
+ if(BitmaskImpl::get(sz32, b1, i) ^ BitmaskImpl::get(sz32, b2, i))
+ return false;
+ }
+ return true;
+}
+
+
+static int val_pos = 0;
+static int val[] = { 384, 241, 32,
+ 1,1,1,1, 0,0,0,0, 1,1,1,1, 0,0,0,0,
+ 241 };
+
+static int lrand()
+{
+#if 0
+ return val[val_pos++];
+#else
+ return rand();
+#endif
+}
+
+static
+void rand(Uint32 dst[], Uint32 len)
+{
+ for(int i = 0; i<len; i++)
+ BitmaskImpl::set((len + 31) >> 5, dst, i, (lrand() % 1000) > 500);
+}
+
+static
+void simple(int pos, int size)
+{
+ ndbout_c("simple pos: %d size: %d", pos, size);
+ Vector<Uint32> _mask;
+ Vector<Uint32> _src;
+ Vector<Uint32> _dst;
+ Uint32 sz32 = (size + pos + 32) >> 5;
+ const Uint32 sz = 4 * sz32;
+
+ Uint32 zero = 0;
+ _mask.fill(sz32+1, zero);
+ _src.fill(sz32+1, zero);
+ _dst.fill(sz32+1, zero);
+
+ Uint32 * src = _src.getBase();
+ Uint32 * dst = _dst.getBase();
+ Uint32 * mask = _mask.getBase();
+
+ memset(src, 0x0, sz);
+ memset(dst, 0x0, sz);
+ memset(mask, 0xFF, sz);
+ rand(src, size);
+ BitmaskImpl::setField(sz32, mask, pos, size, src);
+ BitmaskImpl::getField(sz32, mask, pos, size, dst);
+ printf("src: "); print(src, size+31); printf("\n");
+ printf("msk: "); print(mask, (sz32 << 5) + 31); printf("\n");
+ printf("dst: "); print(dst, size+31); printf("\n");
+ require(cmp(src, dst, size+31));
+};
+
+static
+void simple2(int size, int loops)
+{
+ ndbout_c("simple2 %d - ", size);
+ Vector<Uint32> _mask;
+ Vector<Uint32> _src;
+ Vector<Uint32> _dst;
+
+ Uint32 sz32 = (size + 32) >> 5;
+ Uint32 sz = sz32 << 2;
+
+ Uint32 zero = 0;
+ _mask.fill(sz32+1, zero);
+ _src.fill(sz32+1, zero);
+ _dst.fill(sz32+1, zero);
+
+ Uint32 * src = _src.getBase();
+ Uint32 * dst = _dst.getBase();
+ Uint32 * mask = _mask.getBase();
+
+ Vector<Uint32> save;
+ for(int i = 0; i<loops; i++)
+ {
+ memset(mask, 0xFF, sz);
+ memset(dst, 0xFF, sz);
+ int len;
+ int pos = 0;
+ while(pos+1 < size)
+ {
+ memset(src, 0xFF, sz);
+ while(!(len = rand() % (size - pos)));
+ BitmaskImpl::setField(sz32, mask, pos, len, src);
+ if(memcmp(dst, mask, sz))
+ {
+ ndbout_c("pos: %d len: %d", pos, len);
+ print(mask, size);
+ abort();
+ }
+ printf("[ %d %d ]", pos, len);
+ save.push_back(pos);
+ save.push_back(len);
+ pos += len;
+ }
+
+ for(int j = 0; j<save.size(); )
+ {
+ pos = save[j++];
+ len = save[j++];
+ memset(src, 0xFF, sz);
+ BitmaskImpl::getField(sz32, mask, pos, len, src);
+ if(memcmp(dst, src, sz))
+ {
+ ndbout_c("pos: %d len: %d", pos, len);
+ printf("src: "); print(src, size); printf("\n");
+ printf("dst: "); print(dst, size); printf("\n");
+ printf("msk: "); print(mask, size); printf("\n");
+ abort();
+ }
+ }
+ ndbout_c("");
+ }
+}
+
+static void
+do_test(int bitmask_size)
+{
+#if 1
+ simple(rand() % 33, (rand() % 63)+1);
+//#else
+ Vector<Alloc> alloc_list;
+ bitmask_size = (bitmask_size + 31) & ~31;
+ Uint32 sz32 = (bitmask_size >> 5);
+ Vector<Uint32> alloc_mask;
+ Vector<Uint32> test_mask;
+
+ ndbout_c("Testing bitmask of size %d", bitmask_size);
+ Uint32 zero = 0;
+ alloc_mask.fill(sz32, zero);
+ test_mask.fill(sz32, zero);
+
+ for(int i = 0; i<5000; i++)
+ {
+ Vector<Uint32> tmp;
+ tmp.fill(sz32, zero);
+
+ int pos = lrand() % (bitmask_size - 1);
+ int free = 0;
+ if(BitmaskImpl::get(sz32, alloc_mask.getBase(), pos))
+ {
+ // Bit was allocated
+ // 1) Look up allocation
+ // 2) Check data
+ // 3) free it
+ size_t j;
+ int min, max;
+ for(j = 0; j<alloc_list.size(); j++)
+ {
+ min = alloc_list[j].pos;
+ max = min + alloc_list[j].size;
+ if(pos >= min && pos < max)
+ {
+ break;
+ }
+ }
+ require(pos >= min && pos < max);
+ BitmaskImpl::getField(sz32, test_mask.getBase(), min, max-min,
+ tmp.getBase());
+ if(DEBUG)
+ {
+ printf("freeing [ %d %d ]", min, max);
+ printf("- mask: ");
+ print(tmp.getBase(), max - min);
+
+ printf(" save: ");
+ size_t k;
+ Alloc& a = alloc_list[j];
+ for(k = 0; k<a.data.size(); k++)
+ printf("%.8x ", a.data[k]);
+ printf("\n");
+ }
+ int bytes = (max - min + 7) >> 3;
+ if(!cmp(tmp.getBase(), alloc_list[j].data.getBase(), max - min))
+ {
+ abort();
+ }
+ while(min < max)
+ BitmaskImpl::clear(sz32, alloc_mask.getBase(), min++);
+ alloc_list.erase(j);
+ }
+ else
+ {
+ Vector<Uint32> tmp;
+ tmp.fill(sz32, zero);
+
+ // Bit was free
+ // 1) Check how much space is available
+ // 2) Create new allocation of random size
+ // 3) Fill data with random data
+ // 4) Update alloc mask
+ while(pos+free < bitmask_size &&
+ !BitmaskImpl::get(sz32, alloc_mask.getBase(), pos+free))
+ free++;
+
+ Uint32 sz =
+ (free <= 64 && ((lrand() % 100) > 80)) ? free : (lrand() % free);
+ sz = sz ? sz : 1;
+ sz = pos + sz == bitmask_size ? sz - 1 : sz;
+ Alloc a;
+ a.pos = pos;
+ a.size = sz;
+ a.data.fill(((sz+31)>> 5)-1, zero);
+ if(DEBUG)
+ printf("pos %d -> alloc [ %d %d ]", pos, pos, pos+sz);
+ for(size_t j = 0; j<sz; j++)
+ {
+ BitmaskImpl::set(sz32, alloc_mask.getBase(), pos+j);
+ if((lrand() % 1000) > 500)
+ BitmaskImpl::set((sz + 31) >> 5, a.data.getBase(), j);
+ }
+ if(DEBUG)
+ {
+ printf("- mask: ");
+ print(a.data.getBase(), sz);
+ printf("\n");
+ }
+ BitmaskImpl::setField(sz32, test_mask.getBase(), pos, sz,
+ a.data.getBase());
+ alloc_list.push_back(a);
+ }
+ }
+#endif
+}
+
+template class Vector<Alloc>;
+template class Vector<Uint32>;
+
+#endif
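Editor's note: the new getFieldImpl()/setFieldImpl() routines copy an arbitrary, unaligned run of bits between 32-bit word arrays; the self-test above exercises them through BitmaskImpl::getField()/setField(). A minimal sketch (not from the patch), assuming those wrappers take (size in words, array, bit position, bit length, buffer) as in the test code:

    Uint32 src[2] = { 0x12345678, 0x9abcdef0 };
    Uint32 dst[2] = { 0, 0 };
    Uint32 tmp[2] = { 0, 0 };
    BitmaskImpl::getField(2, src, 5, 40, tmp);  // read 40 bits starting at bit 5
    BitmaskImpl::setField(2, dst, 5, 40, tmp);  // write them back at the same offset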
diff --git a/ndb/src/common/util/Makefile.am b/ndb/src/common/util/Makefile.am
index a62c8186174..2719d14ee92 100644
--- a/ndb/src/common/util/Makefile.am
+++ b/ndb/src/common/util/Makefile.am
@@ -9,7 +9,24 @@ libgeneral_la_SOURCES = \
NdbSqlUtil.cpp new.cpp \
uucode.c random.c version.c \
strdup.c \
- ConfigValues.cpp ndb_init.c basestring_vsnprintf.c
+ ConfigValues.cpp ndb_init.c basestring_vsnprintf.c \
+ Bitmask.cpp
+
+EXTRA_PROGRAMS = testBitmask
+testBitmask_SOURCES = testBitmask.cpp
+testBitmask_LDFLAGS = @ndb_bin_am_ldflags@ \
+ $(top_builddir)/ndb/src/libndbclient.la \
+ $(top_builddir)/dbug/libdbug.a \
+ $(top_builddir)/mysys/libmysys.a \
+ $(top_builddir)/strings/libmystrings.a
+
+testBitmask.cpp : Bitmask.cpp
+ rm -f testBitmask.cpp
+ @LN_CP_F@ Bitmask.cpp testBitmask.cpp
+
+testBitmask.o: $(testBitmask_SOURCES)
+ $(CXXCOMPILE) -c $(INCLUDES) -D__TEST_BITMASK__ $<
+
include $(top_srcdir)/ndb/config/common.mk.am
include $(top_srcdir)/ndb/config/type_util.mk.am
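Editor's note: the Makefile change also wires up a standalone test driver. Building the optional testBitmask target compiles a copy of Bitmask.cpp with -D__TEST_BITMASK__, so the self-test main() above is included; it then accepts optional loop-count and max-bitmask-size arguments, both defaulting to 1000 as in main().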
diff --git a/ndb/src/common/util/NdbSqlUtil.cpp b/ndb/src/common/util/NdbSqlUtil.cpp
index c4114ad5ffa..09e150dbacf 100644
--- a/ndb/src/common/util/NdbSqlUtil.cpp
+++ b/ndb/src/common/util/NdbSqlUtil.cpp
@@ -15,182 +15,169 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
#include <NdbSqlUtil.hpp>
+#include <NdbOut.hpp>
+#include <my_sys.h>
-int
-NdbSqlUtil::char_compare(const char* s1, unsigned n1,
- const char* s2, unsigned n2, bool padded)
-{
- int c1 = 0;
- int c2 = 0;
- unsigned i = 0;
- while (i < n1 || i < n2) {
- c1 = i < n1 ? s1[i] : padded ? 0x20 : 0;
- c2 = i < n2 ? s2[i] : padded ? 0x20 : 0;
- if (c1 != c2)
- break;
- i++;
- }
- return c1 - c2;
-}
-
-bool
-NdbSqlUtil::char_like(const char* s1, unsigned n1,
- const char* s2, unsigned n2, bool padded)
-{
- int c1 = 0;
- int c2 = 0;
- unsigned i1 = 0;
- unsigned i2 = 0;
- while (i1 < n1 || i2 < n2) {
- c1 = i1 < n1 ? s1[i1] : padded ? 0x20 : 0;
- c2 = i2 < n2 ? s2[i2] : padded ? 0x20 : 0;
- if (c2 == '%') {
- while (i2 + 1 < n2 && s2[i2 + 1] == '%') {
- i2++;
- }
- unsigned m = 0;
- while (m <= n1 - i1) {
- if (char_like(s1 + i1 + m, n1 -i1 - m,
- s2 + i2 + 1, n2 - i2 - 1, padded))
- return true;
- m++;
- }
- return false;
- }
- if (c2 == '_') {
- if (c1 == 0)
- return false;
- } else {
- if (c1 != c2)
- return false;
- }
- i1++;
- i2++;
- }
- return i1 == n2 && i2 == n2;
-}
-
-/**
- * Data types.
+/*
+ * Data types. The entries must be in numerical order.
*/
const NdbSqlUtil::Type
NdbSqlUtil::m_typeList[] = {
{ // 0
Type::Undefined,
+ NULL,
NULL
},
{ // 1
Type::Tinyint,
- cmpTinyint
+ cmpTinyint,
+ NULL
},
{ // 2
Type::Tinyunsigned,
- cmpTinyunsigned
+ cmpTinyunsigned,
+ NULL
},
{ // 3
Type::Smallint,
- cmpSmallint
+ cmpSmallint,
+ NULL
},
{ // 4
Type::Smallunsigned,
- cmpSmallunsigned
+ cmpSmallunsigned,
+ NULL
},
{ // 5
Type::Mediumint,
- cmpMediumint
+ cmpMediumint,
+ NULL
},
{ // 6
Type::Mediumunsigned,
- cmpMediumunsigned
+ cmpMediumunsigned,
+ NULL
},
{ // 7
Type::Int,
- cmpInt
+ cmpInt,
+ NULL
},
{ // 8
Type::Unsigned,
- cmpUnsigned
+ cmpUnsigned,
+ NULL
},
{ // 9
Type::Bigint,
- cmpBigint
+ cmpBigint,
+ NULL
},
{ // 10
Type::Bigunsigned,
- cmpBigunsigned
+ cmpBigunsigned,
+ NULL
},
{ // 11
Type::Float,
- cmpFloat
+ cmpFloat,
+ NULL
},
{ // 12
Type::Double,
- cmpDouble
+ cmpDouble,
+ NULL
},
{ // 13
Type::Olddecimal,
- cmpOlddecimal
+ cmpOlddecimal,
+ NULL
},
{ // 14
Type::Char,
- cmpChar
+ cmpChar,
+ likeChar
},
{ // 15
Type::Varchar,
- cmpVarchar
+ cmpVarchar,
+ likeVarchar
},
{ // 16
Type::Binary,
- cmpBinary
+ cmpBinary,
+ likeBinary
},
{ // 17
Type::Varbinary,
- cmpVarbinary
+ cmpVarbinary,
+ likeVarbinary
},
{ // 18
Type::Datetime,
- cmpDatetime
+ cmpDatetime,
+ NULL
},
{ // 19
Type::Date,
- cmpDate
+ cmpDate,
+ NULL
},
{ // 20
Type::Blob,
- cmpBlob
+ NULL,
+ NULL
},
{ // 21
Type::Text,
- cmpText
+ NULL,
+ NULL
},
{ // 22
- Type::Undefined, // 5.0 Bit
+ Type::Bit,
+ NULL,
NULL
},
{ // 23
- Type::Undefined, // 5.0 Longvarchar
- NULL
+ Type::Longvarchar,
+ cmpLongvarchar,
+ likeLongvarchar
},
{ // 24
- Type::Undefined, // 5.0 Longvarbinary
- NULL
+ Type::Longvarbinary,
+ cmpLongvarbinary,
+ likeLongvarbinary
},
{ // 25
Type::Time,
- cmpTime
+ cmpTime,
+ NULL
},
{ // 26
Type::Year,
- cmpYear
+ cmpYear,
+ NULL
},
{ // 27
Type::Timestamp,
- cmpTimestamp
+ cmpTimestamp,
+ NULL
},
{ // 28
Type::Olddecimalunsigned,
- cmpOlddecimalunsigned
+ cmpOlddecimalunsigned,
+ NULL
+ },
+ { // 29
+ Type::Decimal,
+ cmpDecimal,
+ NULL
+ },
+ { // 30
+ Type::Decimalunsigned,
+ cmpDecimalunsigned,
+ NULL
}
};
@@ -209,10 +196,12 @@ NdbSqlUtil::getTypeBinary(Uint32 typeId)
{
switch (typeId) {
case Type::Char:
- typeId = Type::Binary;
- break;
case Type::Varchar:
- typeId = Type::Varbinary;
+ case Type::Binary:
+ case Type::Varbinary:
+ case Type::Longvarchar:
+ case Type::Longvarbinary:
+ typeId = Type::Binary;
break;
case Type::Text:
typeId = Type::Blob;
@@ -223,194 +212,211 @@ NdbSqlUtil::getTypeBinary(Uint32 typeId)
return getType(typeId);
}
-// compare
+/*
+ * Comparison functions.
+ */
int
-NdbSqlUtil::cmpTinyint(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
+NdbSqlUtil::cmpTinyint(const void* info, const void* p1, unsigned n1, const void* p2, unsigned n2, bool full)
{
- assert(full >= size && size > 0);
- union { Uint32 p[1]; Int8 v; } u1, u2;
- u1.p[0] = p1[0];
- u2.p[0] = p2[0];
- if (u1.v < u2.v)
- return -1;
- if (u1.v > u2.v)
- return +1;
- return 0;
+ if (n2 >= sizeof(Int8)) {
+ Int8 v1, v2;
+ memcpy(&v1, p1, sizeof(Int8));
+ memcpy(&v2, p2, sizeof(Int8));
+ if (v1 < v2)
+ return -1;
+ if (v1 > v2)
+ return +1;
+ return 0;
+ }
+ assert(! full);
+ return CmpUnknown;
}
int
-NdbSqlUtil::cmpTinyunsigned(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
+NdbSqlUtil::cmpTinyunsigned(const void* info, const void* p1, unsigned n1, const void* p2, unsigned n2, bool full)
{
- assert(full >= size && size > 0);
- union { Uint32 p[1]; Uint8 v; } u1, u2;
- u1.p[0] = p1[0];
- u2.p[0] = p2[0];
- if (u1.v < u2.v)
- return -1;
- if (u1.v > u2.v)
- return +1;
- return 0;
+ if (n2 >= sizeof(Uint8)) {
+ Uint8 v1, v2;
+ memcpy(&v1, p1, sizeof(Uint8));
+ memcpy(&v2, p2, sizeof(Uint8));
+ if (v1 < v2)
+ return -1;
+ if (v1 > v2)
+ return +1;
+ return 0;
+ }
+ assert(! full);
+ return CmpUnknown;
}
int
-NdbSqlUtil::cmpSmallint(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
+NdbSqlUtil::cmpSmallint(const void* info, const void* p1, unsigned n1, const void* p2, unsigned n2, bool full)
{
- assert(full >= size && size > 0);
- union { Uint32 p[1]; Int16 v; } u1, u2;
- u1.p[0] = p1[0];
- u2.p[0] = p2[0];
- if (u1.v < u2.v)
- return -1;
- if (u1.v > u2.v)
- return +1;
- return 0;
+ if (n2 >= sizeof(Int16)) {
+ Int16 v1, v2;
+ memcpy(&v1, p1, sizeof(Int16));
+ memcpy(&v2, p2, sizeof(Int16));
+ if (v1 < v2)
+ return -1;
+ if (v1 > v2)
+ return +1;
+ return 0;
+ }
+ assert(! full);
+ return CmpUnknown;
}
int
-NdbSqlUtil::cmpSmallunsigned(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
+NdbSqlUtil::cmpSmallunsigned(const void* info, const void* p1, unsigned n1, const void* p2, unsigned n2, bool full)
{
- assert(full >= size && size > 0);
- union { Uint32 p[1]; Uint16 v; } u1, u2;
- u1.p[0] = p1[0];
- u2.p[0] = p2[0];
- if (u1.v < u2.v)
- return -1;
- if (u1.v > u2.v)
- return +1;
- return 0;
+ if (n2 >= sizeof(Uint16)) {
+ Uint16 v1, v2;
+ memcpy(&v1, p1, sizeof(Uint16));
+ memcpy(&v2, p2, sizeof(Uint16));
+ if (v1 < v2)
+ return -1;
+ if (v1 > v2)
+ return +1;
+ return 0;
+ }
+ assert(! full);
+ return CmpUnknown;
}
int
-NdbSqlUtil::cmpMediumint(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
+NdbSqlUtil::cmpMediumint(const void* info, const void* p1, unsigned n1, const void* p2, unsigned n2, bool full)
{
- assert(full >= size && size > 0);
- union { const Uint32* p; const unsigned char* v; } u1, u2;
- u1.p = p1;
- u2.p = p2;
- Int32 v1 = sint3korr(u1.v);
- Int32 v2 = sint3korr(u2.v);
- if (v1 < v2)
- return -1;
- if (v1 > v2)
- return +1;
- return 0;
+ if (n2 >= 3) {
+ Int32 v1, v2;
+ v1 = sint3korr((const uchar*)p1);
+ v2 = sint3korr((const uchar*)p2);
+ if (v1 < v2)
+ return -1;
+ if (v1 > v2)
+ return +1;
+ return 0;
+ }
+ assert(! full);
+ return CmpUnknown;
}
int
-NdbSqlUtil::cmpMediumunsigned(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
+NdbSqlUtil::cmpMediumunsigned(const void* info, const void* p1, unsigned n1, const void* p2, unsigned n2, bool full)
{
- assert(full >= size && size > 0);
- union { const Uint32* p; const unsigned char* v; } u1, u2;
- u1.p = p1;
- u2.p = p2;
- Uint32 v1 = uint3korr(u1.v);
- Uint32 v2 = uint3korr(u2.v);
- if (v1 < v2)
- return -1;
- if (v1 > v2)
- return +1;
- return 0;
+ if (n2 >= 3) {
+ Uint32 v1, v2;
+ v1 = uint3korr((const uchar*)p1);
+ v2 = uint3korr((const uchar*)p2);
+ if (v1 < v2)
+ return -1;
+ if (v1 > v2)
+ return +1;
+ return 0;
+ }
+ assert(! full);
+ return CmpUnknown;
}
int
-NdbSqlUtil::cmpInt(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
+NdbSqlUtil::cmpInt(const void* info, const void* p1, unsigned n1, const void* p2, unsigned n2, bool full)
{
- assert(full >= size && size > 0);
- union { Uint32 p[1]; Int32 v; } u1, u2;
- u1.p[0] = p1[0];
- u2.p[0] = p2[0];
- if (u1.v < u2.v)
- return -1;
- if (u1.v > u2.v)
- return +1;
- return 0;
+ if (n2 >= sizeof(Int32)) {
+ Int32 v1, v2;
+ memcpy(&v1, p1, sizeof(Int32));
+ memcpy(&v2, p2, sizeof(Int32));
+ if (v1 < v2)
+ return -1;
+ if (v1 > v2)
+ return +1;
+ return 0;
+ }
+ assert(! full);
+ return CmpUnknown;
}
int
-NdbSqlUtil::cmpUnsigned(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
+NdbSqlUtil::cmpUnsigned(const void* info, const void* p1, unsigned n1, const void* p2, unsigned n2, bool full)
{
- assert(full >= size && size > 0);
- union { Uint32 p[1]; Uint32 v; } u1, u2;
- u1.v = p1[0];
- u2.v = p2[0];
- if (u1.v < u2.v)
- return -1;
- if (u1.v > u2.v)
- return +1;
- return 0;
+ if (n2 >= sizeof(Uint32)) {
+ Uint32 v1, v2;
+ memcpy(&v1, p1, sizeof(Uint32));
+ memcpy(&v2, p2, sizeof(Uint32));
+ if (v1 < v2)
+ return -1;
+ if (v1 > v2)
+ return +1;
+ return 0;
+ }
+ assert(! full);
+ return CmpUnknown;
}
int
-NdbSqlUtil::cmpBigint(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
+NdbSqlUtil::cmpBigint(const void* info, const void* p1, unsigned n1, const void* p2, unsigned n2, bool full)
{
- assert(full >= size && size > 0);
- if (size >= 2) {
- union { Uint32 p[2]; Int64 v; } u1, u2;
- u1.p[0] = p1[0];
- u1.p[1] = p1[1];
- u2.p[0] = p2[0];
- u2.p[1] = p2[1];
- if (u1.v < u2.v)
+ if (n2 >= sizeof(Int64)) {
+ Int64 v1, v2;
+ memcpy(&v1, p1, sizeof(Int64));
+ memcpy(&v2, p2, sizeof(Int64));
+ if (v1 < v2)
return -1;
- if (u1.v > u2.v)
+ if (v1 > v2)
return +1;
return 0;
}
+ assert(! full);
return CmpUnknown;
}
int
-NdbSqlUtil::cmpBigunsigned(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
+NdbSqlUtil::cmpBigunsigned(const void* info, const void* p1, unsigned n1, const void* p2, unsigned n2, bool full)
{
- assert(full >= size && size > 0);
- if (size >= 2) {
- union { Uint32 p[2]; Uint64 v; } u1, u2;
- u1.p[0] = p1[0];
- u1.p[1] = p1[1];
- u2.p[0] = p2[0];
- u2.p[1] = p2[1];
- if (u1.v < u2.v)
+ if (n2 >= sizeof(Uint64)) {
+ Uint64 v1, v2;
+ memcpy(&v1, p1, sizeof(Uint64));
+ memcpy(&v2, p2, sizeof(Uint64));
+ if (v1 < v2)
return -1;
- if (u1.v > u2.v)
+ if (v1 > v2)
return +1;
return 0;
}
+ assert(! full);
return CmpUnknown;
}
int
-NdbSqlUtil::cmpFloat(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
+NdbSqlUtil::cmpFloat(const void* info, const void* p1, unsigned n1, const void* p2, unsigned n2, bool full)
{
- assert(full >= size && size > 0);
- union { Uint32 p[1]; float v; } u1, u2;
- u1.p[0] = p1[0];
- u2.p[0] = p2[0];
- // no format check
- if (u1.v < u2.v)
- return -1;
- if (u1.v > u2.v)
- return +1;
- return 0;
+ if (n2 >= sizeof(float)) {
+ float v1, v2;
+ memcpy(&v1, p1, sizeof(float));
+ memcpy(&v2, p2, sizeof(float));
+ if (v1 < v2)
+ return -1;
+ if (v1 > v2)
+ return +1;
+ return 0;
+ }
+ assert(! full);
+ return CmpUnknown;
}
int
-NdbSqlUtil::cmpDouble(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
+NdbSqlUtil::cmpDouble(const void* info, const void* p1, unsigned n1, const void* p2, unsigned n2, bool full)
{
- assert(full >= size && size > 0);
- if (size >= 2) {
- union { Uint32 p[2]; double v; } u1, u2;
- u1.p[0] = p1[0];
- u1.p[1] = p1[1];
- u2.p[0] = p2[0];
- u2.p[1] = p2[1];
- // no format check
- if (u1.v < u2.v)
+ if (n2 >= sizeof(double)) {
+ double v1, v2;
+ memcpy(&v1, p1, sizeof(double));
+ memcpy(&v2, p2, sizeof(double));
+ if (v1 < v2)
return -1;
- if (u1.v > u2.v)
+ if (v1 > v2)
return +1;
return 0;
}
+ assert(! full);
return CmpUnknown;
}
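Editor's note: the rewritten comparators above all share one convention: they receive untyped byte pointers with explicit lengths, and when full is false and the available prefix of the second operand is too short to decide, they return CmpUnknown instead of a definite result. A minimal sketch (not part of the patch), assuming these are callable as static members of NdbSqlUtil:

    Uint32 a = 1, b = 2;
    int r = NdbSqlUtil::cmpUnsigned(0, &a, sizeof(a), &b, sizeof(b), true);
    // r == -1 here; with n2 < sizeof(Uint32) and full == false it would be CmpUnknown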
@@ -440,266 +446,428 @@ NdbSqlUtil::cmp_olddecimal(const uchar* s1, const uchar* s2, unsigned n)
}
int
-NdbSqlUtil::cmpOlddecimal(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
+NdbSqlUtil::cmpOlddecimal(const void* info, const void* p1, unsigned n1, const void* p2, unsigned n2, bool full)
{
- assert(full >= size && size > 0);
- if (full == size) {
- union { const Uint32* p; const uchar* v; } u1, u2;
- u1.p = p1;
- u2.p = p2;
- return cmp_olddecimal(u1.v, u2.v, full << 2);
+ if (full) {
+ assert(n1 == n2);
+ const uchar* v1 = (const uchar*)p1;
+ const uchar* v2 = (const uchar*)p2;
+ return cmp_olddecimal(v1, v2, n1);
}
return CmpUnknown;
}
int
-NdbSqlUtil::cmpOlddecimalunsigned(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
+NdbSqlUtil::cmpOlddecimalunsigned(const void* info, const void* p1, unsigned n1, const void* p2, unsigned n2, bool full)
{
- assert(full >= size && size > 0);
- if (full == size) {
- union { const Uint32* p; const uchar* v; } u1, u2;
- u1.p = p1;
- u2.p = p2;
- return cmp_olddecimal(u1.v, u2.v, full << 2);
+ if (full) {
+ assert(n1 == n2);
+ const uchar* v1 = (const uchar*)p1;
+ const uchar* v2 = (const uchar*)p2;
+ return cmp_olddecimal(v1, v2, n1);
}
return CmpUnknown;
}
int
-NdbSqlUtil::cmpChar(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
+NdbSqlUtil::cmpDecimal(const void* info, const void* p1, unsigned n1, const void* p2, unsigned n2, bool full)
+{
+ const uchar* v1 = (const uchar*)p1;
+ const uchar* v2 = (const uchar*)p2;
+ // compare as binary strings
+ unsigned n = (n1 <= n2 ? n1 : n2);
+ int k = memcmp(v1, v2, n);
+ if (k == 0) {
+ k = (full ? n1 : n) - n2;
+ }
+ return k < 0 ? -1 : k > 0 ? +1 : full ? 0 : CmpUnknown;
+}
+
+int
+NdbSqlUtil::cmpDecimalunsigned(const void* info, const void* p1, unsigned n1, const void* p2, unsigned n2, bool full)
+{
+ const uchar* v1 = (const uchar*)p1;
+ const uchar* v2 = (const uchar*)p2;
+ // compare as binary strings
+ unsigned n = (n1 <= n2 ? n1 : n2);
+ int k = memcmp(v1, v2, n);
+ if (k == 0) {
+ k = (full ? n1 : n) - n2;
+ }
+ return k < 0 ? -1 : k > 0 ? +1 : full ? 0 : CmpUnknown;
+}
+
+int
+NdbSqlUtil::cmpChar(const void* info, const void* p1, unsigned n1, const void* p2, unsigned n2, bool full)
{
// collation does not work on prefix for some charsets
- assert(full == size && size > 0);
- /*
- * Char is blank-padded to length and null-padded to word size.
- */
- union { const Uint32* p; const uchar* v; } u1, u2;
- u1.p = p1;
- u2.p = p2;
+ assert(full);
+ const uchar* v1 = (const uchar*)p1;
+ const uchar* v2 = (const uchar*)p2;
// not const in MySQL
CHARSET_INFO* cs = (CHARSET_INFO*)(info);
- // length in bytes including null padding to Uint32
- uint l1 = (full << 2);
- int k = (*cs->coll->strnncollsp)(cs, u1.v, l1, u2.v, l1);
+ // compare with space padding
+ int k = (*cs->coll->strnncollsp)(cs, v1, n1, v2, n2, false);
return k < 0 ? -1 : k > 0 ? +1 : 0;
}
int
-NdbSqlUtil::cmpVarchar(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
+NdbSqlUtil::cmpVarchar(const void* info, const void* p1, unsigned n1, const void* p2, unsigned n2, bool full)
{
- assert(full >= size && size > 0);
- /*
- * Varchar is not allowed to contain a null byte and the value is
- * null-padded. Therefore comparison does not need to use the length.
- *
- * Not used before MySQL 5.0. Format is likely to change. Handle
- * only binary collation for now.
- */
- union { const Uint32* p; const char* v; } u1, u2;
- u1.p = p1;
- u2.p = p2;
- // skip length in first 2 bytes
- int k = strncmp(u1.v + 2, u2.v + 2, (size << 2) - 2);
- return k < 0 ? -1 : k > 0 ? +1 : full == size ? 0 : CmpUnknown;
+ const unsigned lb = 1;
+ // collation does not work on prefix for some charsets
+ assert(full && n1 >= lb && n2 >= lb);
+ const uchar* v1 = (const uchar*)p1;
+ const uchar* v2 = (const uchar*)p2;
+ unsigned m1 = *v1;
+ unsigned m2 = *v2;
+ if (m1 <= n1 - lb && m2 <= n2 - lb) {
+ CHARSET_INFO* cs = (CHARSET_INFO*)(info);
+ // compare with space padding
+ int k = (*cs->coll->strnncollsp)(cs, v1 + lb, m1, v2 + lb, m2, false);
+ return k < 0 ? -1 : k > 0 ? +1 : 0;
+ }
+ // treat bad data as NULL
+ if (m1 > n1 - lb && m2 <= n2 - lb)
+ return -1;
+ if (m1 <= n1 - lb && m2 > n2 - lb)
+ return +1;
+ return 0;
}
int
-NdbSqlUtil::cmpBinary(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
-{
- assert(full >= size && size > 0);
- /*
- * Binary data of full length. Compare bytewise.
- */
- union { const Uint32* p; const unsigned char* v; } u1, u2;
- u1.p = p1;
- u2.p = p2;
- int k = memcmp(u1.v, u2.v, size << 2);
- return k < 0 ? -1 : k > 0 ? +1 : full == size ? 0 : CmpUnknown;
+NdbSqlUtil::cmpBinary(const void* info, const void* p1, unsigned n1, const void* p2, unsigned n2, bool full)
+{
+ const uchar* v1 = (const uchar*)p1;
+ const uchar* v2 = (const uchar*)p2;
+ // compare as binary strings
+ unsigned n = (n1 <= n2 ? n1 : n2);
+ int k = memcmp(v1, v2, n);
+ if (k == 0) {
+ k = (full ? n1 : n) - n2;
+ }
+ return k < 0 ? -1 : k > 0 ? +1 : full ? 0 : CmpUnknown;
}
int
-NdbSqlUtil::cmpVarbinary(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
-{
- assert(full >= size && size > 0);
- /*
- * Binary data of variable length padded with nulls. The comparison
- * does not need to use the length.
- *
- * Not used before MySQL 5.0. Format is likely to change.
- */
- union { const Uint32* p; const unsigned char* v; } u1, u2;
- u1.p = p1;
- u2.p = p2;
- // skip length in first 2 bytes
- int k = memcmp(u1.v + 2, u2.v + 2, (size << 2) - 2);
- return k < 0 ? -1 : k > 0 ? +1 : full == size ? 0 : CmpUnknown;
+NdbSqlUtil::cmpVarbinary(const void* info, const void* p1, unsigned n1, const void* p2, unsigned n2, bool full)
+{
+ const unsigned lb = 1;
+ if (n2 >= lb) {
+ assert(n1 >= lb);
+ const uchar* v1 = (const uchar*)p1;
+ const uchar* v2 = (const uchar*)p2;
+ unsigned m1 = *v1;
+ unsigned m2 = *v2;
+ if (m1 <= n1 - lb && m2 <= n2 - lb) {
+ // compare as binary strings
+ unsigned m = (m1 <= m2 ? m1 : m2);
+ int k = memcmp(v1 + lb, v2 + lb, m);
+ if (k == 0) {
+ k = (full ? m1 : m) - m2;
+ }
+ return k < 0 ? -1 : k > 0 ? +1 : full ? 0 : CmpUnknown;
+ }
+ // treat bad data as NULL
+ if (m1 > n1 - lb && m2 <= n2 - lb)
+ return -1;
+ if (m1 <= n1 - lb && m2 > n2 - lb)
+ return +1;
+ return 0;
+ }
+ assert(! full);
+ return CmpUnknown;
}
int
-NdbSqlUtil::cmpDatetime(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
+NdbSqlUtil::cmpDatetime(const void* info, const void* p1, unsigned n1, const void* p2, unsigned n2, bool full)
{
- assert(full >= size && size > 0);
- if (size >= 2) {
- union { Uint32 p[2]; Int64 v; } u1, u2;
- u1.p[0] = p1[0];
- u1.p[1] = p1[1];
- u2.p[0] = p2[0];
- u2.p[1] = p2[1];
- if (u1.v < u2.v)
+ if (n2 >= sizeof(Int64)) {
+ Int64 v1, v2;
+ memcpy(&v1, p1, sizeof(Int64));
+ memcpy(&v2, p2, sizeof(Int64));
+ if (v1 < v2)
return -1;
- if (u1.v > u2.v)
+ if (v1 > v2)
return +1;
return 0;
}
+ assert(! full);
return CmpUnknown;
}
int
-NdbSqlUtil::cmpDate(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
+NdbSqlUtil::cmpDate(const void* info, const void* p1, unsigned n1, const void* p2, unsigned n2, bool full)
{
#ifdef ndb_date_is_4_byte_native_int
- assert(full >= size && size > 0);
- union { Uint32 p[2]; Int32 v; } u1, u2;
- u1.p[0] = p1[0];
- u2.p[0] = p2[0];
- if (u1.v < u2.v)
- return -1;
- if (u1.v > u2.v)
- return +1;
- return 0;
+ if (n2 >= sizeof(Int32)) {
+ Int32 v1, v2;
+ memcpy(&v1, p1, sizeof(Int32));
+ memcpy(&v2, p2, sizeof(Int32));
+ if (v1 < v2)
+ return -1;
+ if (v1 > v2)
+ return +1;
+ return 0;
+ }
#else
- assert(full >= size && size > 0);
- union { const Uint32* p; const unsigned char* v; } u1, u2;
- u1.p = p1;
- u2.p = p2;
#ifdef ndb_date_sol9x86_cc_xO3_madness
- // from Field_newdate::val_int
- Uint64 j1 = uint3korr(u1.v);
- Uint64 j2 = uint3korr(u2.v);
- j1 = (j1 % 32L)+(j1 / 32L % 16L)*100L + (j1/(16L*32L))*10000L;
- j2 = (j2 % 32L)+(j2 / 32L % 16L)*100L + (j2/(16L*32L))*10000L;
- if (j1 < j2)
- return -1;
- if (j1 > j2)
- return +1;
- return 0;
+ if (n2 >= 3) {
+ const uchar* v1 = (const uchar*)p1;
+ const uchar* v2 = (const uchar*)p2;
+ // from Field_newdate::val_int
+ Uint64 j1 = uint3korr(v1);
+ Uint64 j2 = uint3korr(v2);
+ j1 = (j1 % 32L)+(j1 / 32L % 16L)*100L + (j1/(16L*32L))*10000L;
+ j2 = (j2 % 32L)+(j2 / 32L % 16L)*100L + (j2/(16L*32L))*10000L;
+ if (j1 < j2)
+ return -1;
+ if (j1 > j2)
+ return +1;
+ return 0;
+ }
#else
- uint j1 = uint3korr(u1.v);
- uint j2 = uint3korr(u2.v);
- uint d1 = (j1 & 31);
- uint d2 = (j2 & 31);
- j1 = (j1 >> 5);
- j2 = (j2 >> 5);
- uint m1 = (j1 & 15);
- uint m2 = (j2 & 15);
- j1 = (j1 >> 4);
- j2 = (j2 >> 4);
- uint y1 = j1;
- uint y2 = j2;
- if (y1 < y2)
- return -1;
- if (y1 > y2)
- return +1;
- if (m1 < m2)
- return -1;
- if (m1 > m2)
- return +1;
- if (d1 < d2)
- return -1;
- if (d1 > d2)
- return +1;
- return 0;
+ if (n2 >= 3) {
+ const uchar* v1 = (const uchar*)p1;
+ const uchar* v2 = (const uchar*)p2;
+ uint j1 = uint3korr(v1);
+ uint j2 = uint3korr(v2);
+ uint d1 = (j1 & 31);
+ uint d2 = (j2 & 31);
+ j1 = (j1 >> 5);
+ j2 = (j2 >> 5);
+ uint m1 = (j1 & 15);
+ uint m2 = (j2 & 15);
+ j1 = (j1 >> 4);
+ j2 = (j2 >> 4);
+ uint y1 = j1;
+ uint y2 = j2;
+ if (y1 < y2)
+ return -1;
+ if (y1 > y2)
+ return +1;
+ if (m1 < m2)
+ return -1;
+ if (m1 > m2)
+ return +1;
+ if (d1 < d2)
+ return -1;
+ if (d1 > d2)
+ return +1;
+ return 0;
+ }
#endif
#endif
+ assert(! full);
+ return CmpUnknown;
}
+// not supported
int
-NdbSqlUtil::cmpBlob(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
+NdbSqlUtil::cmpBlob(const void* info, const void* p1, unsigned n1, const void* p2, unsigned n2, bool full)
{
- assert(full >= size && size > 0);
- /*
- * Blob comparison is on the inline bytes (null padded).
- */
- const unsigned head = NDB_BLOB_HEAD_SIZE;
- // skip blob head
- if (size >= head + 1) {
- union { const Uint32* p; const unsigned char* v; } u1, u2;
- u1.p = p1 + head;
- u2.p = p2 + head;
- int k = memcmp(u1.v, u2.v, (size - head) << 2);
- return k < 0 ? -1 : k > 0 ? +1 : full == size ? 0 : CmpUnknown;
+ assert(false);
+ return 0;
+}
+
+// not supported
+int
+NdbSqlUtil::cmpText(const void* info, const void* p1, unsigned n1, const void* p2, unsigned n2, bool full)
+{
+ assert(false);
+ return 0;
+}
+
+int
+NdbSqlUtil::cmpTime(const void* info, const void* p1, unsigned n1, const void* p2, unsigned n2, bool full)
+{
+ if (n2 >= 3) {
+ const uchar* v1 = (const uchar*)p1;
+ const uchar* v2 = (const uchar*)p2;
+ // from Field_time::val_int
+ Int32 j1 = sint3korr(v1);
+ Int32 j2 = sint3korr(v2);
+ if (j1 < j2)
+ return -1;
+ if (j1 > j2)
+ return +1;
+ return 0;
}
+ assert(! full);
return CmpUnknown;
}
+// not yet
+int
+NdbSqlUtil::cmpBit(const void* info, const void* p1, unsigned n1, const void* p2, unsigned n2, bool full)
+{
+ assert(false);
+ return 0;
+}
+
int
-NdbSqlUtil::cmpText(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
+NdbSqlUtil::cmpLongvarchar(const void* info, const void* p1, unsigned n1, const void* p2, unsigned n2, bool full)
{
+ const unsigned lb = 2;
// collation does not work on prefix for some charsets
- assert(full == size && size > 0);
- /*
- * Text comparison is on the inline bytes (blank padded). Currently
- * not supported for multi-byte charsets.
- */
- const unsigned head = NDB_BLOB_HEAD_SIZE;
- // skip blob head
- if (size >= head + 1) {
- union { const Uint32* p; const uchar* v; } u1, u2;
- u1.p = p1 + head;
- u2.p = p2 + head;
- // not const in MySQL
+ assert(full && n1 >= lb && n2 >= lb);
+ const uchar* v1 = (const uchar*)p1;
+ const uchar* v2 = (const uchar*)p2;
+ unsigned m1 = uint2korr(v1);
+ unsigned m2 = uint2korr(v2);
+ if (m1 <= n1 - lb && m2 <= n2 - lb) {
CHARSET_INFO* cs = (CHARSET_INFO*)(info);
- // length in bytes including null padding to Uint32
- uint l1 = (full << 2);
- int k = (*cs->coll->strnncollsp)(cs, u1.v, l1, u2.v, l1);
+ // compare with space padding
+ int k = (*cs->coll->strnncollsp)(cs, v1 + lb, m1, v2 + lb, m2, false);
return k < 0 ? -1 : k > 0 ? +1 : 0;
}
+ // treat bad data as NULL
+ if (m1 > n1 - lb && m2 <= n2 - lb)
+ return -1;
+ if (m1 <= n1 - lb && m2 > n2 - lb)
+ return +1;
+ return 0;
+}
+
+int
+NdbSqlUtil::cmpLongvarbinary(const void* info, const void* p1, unsigned n1, const void* p2, unsigned n2, bool full)
+{
+ const unsigned lb = 2;
+ if (n2 >= lb) {
+ assert(n1 >= lb);
+ const uchar* v1 = (const uchar*)p1;
+ const uchar* v2 = (const uchar*)p2;
+ unsigned m1 = uint2korr(v1);
+ unsigned m2 = uint2korr(v2);
+ if (m1 <= n1 - lb && m2 <= n2 - lb) {
+ // compare as binary strings
+ unsigned m = (m1 <= m2 ? m1 : m2);
+ int k = memcmp(v1 + lb, v2 + lb, m);
+ if (k == 0) {
+ k = (full ? m1 : m) - m2;
+ }
+ return k < 0 ? -1 : k > 0 ? +1 : full ? 0 : CmpUnknown;
+ }
+ // treat bad data as NULL
+ if (m1 > n1 - lb && m2 <= n2 - lb)
+ return -1;
+ if (m1 <= n1 - lb && m2 > n2 - lb)
+ return +1;
+ return 0;
+ }
+ assert(! full);
return CmpUnknown;
}
int
-NdbSqlUtil::cmpTime(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
+NdbSqlUtil::cmpYear(const void* info, const void* p1, unsigned n1, const void* p2, unsigned n2, bool full)
{
- assert(full >= size && size > 0);
- union { const Uint32* p; const unsigned char* v; } u1, u2;
- u1.p = p1;
- u2.p = p2;
- // from Field_time::val_int
- Int32 j1 = sint3korr(u1.v);
- Int32 j2 = sint3korr(u2.v);
- if (j1 < j2)
- return -1;
- if (j1 > j2)
- return +1;
- return 0;
+ if (n2 >= sizeof(Uint8)) {
+ Uint8 v1, v2;
+ memcpy(&v1, p1, sizeof(Uint8));
+ memcpy(&v2, p2, sizeof(Uint8));
+ if (v1 < v2)
+ return -1;
+ if (v1 > v2)
+ return +1;
+ return 0;
+ }
+ assert(! full);
+ return CmpUnknown;
}
int
-NdbSqlUtil::cmpYear(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
+NdbSqlUtil::cmpTimestamp(const void* info, const void* p1, unsigned n1, const void* p2, unsigned n2, bool full)
{
- assert(full >= size && size > 0);
- union { const Uint32* p; const unsigned char* v; } u1, u2;
- u1.p = p1;
- u2.p = p2;
- if (u1.v[0] < u2.v[0])
- return -1;
- if (u1.v[0] > u2.v[0])
- return +1;
- return 0;
+ if (n2 >= sizeof(Uint32)) {
+ Uint32 v1, v2;
+ memcpy(&v1, p1, sizeof(Uint32));
+ memcpy(&v2, p2, sizeof(Uint32));
+ if (v1 < v2)
+ return -1;
+ if (v1 > v2)
+ return +1;
+ return 0;
+ }
+ assert(! full);
+ return CmpUnknown;
}
+// like
+
+static const int ndb_wild_prefix = '\\';
+static const int ndb_wild_one = '_';
+static const int ndb_wild_many = '%';
+
int
-NdbSqlUtil::cmpTimestamp(const void* info, const Uint32* p1, const Uint32* p2, Uint32 full, Uint32 size)
+NdbSqlUtil::likeChar(const void* info, const void* p1, unsigned n1, const void* p2, unsigned n2)
{
- assert(full >= size && size > 0);
- union { Uint32 p[1]; Uint32 v; } u1, u2;
- u1.v = p1[0];
- u2.v = p2[0];
- if (u1.v < u2.v)
- return -1;
- if (u1.v > u2.v)
- return +1;
- return 0;
+ const char* v1 = (const char*)p1;
+ const char* v2 = (const char*)p2;
+ CHARSET_INFO* cs = (CHARSET_INFO*)(info);
+ int k = (cs->coll->wildcmp)(cs, v1, v1 + n1, v2, v2 + n2, ndb_wild_prefix, ndb_wild_one, ndb_wild_many);
+ return k == 0 ? 0 : +1;
+}
+
+int
+NdbSqlUtil::likeBinary(const void* info, const void* p1, unsigned n1, const void* p2, unsigned n2)
+{
+ assert(info == 0);
+ return likeChar(&my_charset_bin, p1, n1, p2, n2);
+}
+
+int
+NdbSqlUtil::likeVarchar(const void* info, const void* p1, unsigned n1, const void* p2, unsigned n2)
+{
+ const unsigned lb = 1;
+ if (n1 >= lb && n2 >= lb) {
+ const uchar* v1 = (const uchar*)p1;
+ const uchar* v2 = (const uchar*)p2;
+ unsigned m1 = *v1;
+ unsigned m2 = *v2;
+ if (lb + m1 <= n1 && lb + m2 <= n2) {
+ const char* w1 = (const char*)v1 + lb;
+ const char* w2 = (const char*)v2 + lb;
+ CHARSET_INFO* cs = (CHARSET_INFO*)(info);
+ int k = (cs->coll->wildcmp)(cs, w1, w1 + m1, w2, w2 + m2, ndb_wild_prefix, ndb_wild_one, ndb_wild_many);
+ return k == 0 ? 0 : +1;
+ }
+ }
+ return -1;
+}
+
+int
+NdbSqlUtil::likeVarbinary(const void* info, const void* p1, unsigned n1, const void* p2, unsigned n2)
+{
+ assert(info == 0);
+ return likeVarchar(&my_charset_bin, p1, n1, p2, n2);
+}
+
+int
+NdbSqlUtil::likeLongvarchar(const void* info, const void* p1, unsigned n1, const void* p2, unsigned n2)
+{
+ const unsigned lb = 2;
+ if (n1 >= lb && n2 >= lb) {
+ const uchar* v1 = (const uchar*)p1;
+ const uchar* v2 = (const uchar*)p2;
+ unsigned m1 = uint2korr(v1);
+ unsigned m2 = uint2korr(v2);
+ if (lb + m1 <= n1 && lb + m2 <= n2) {
+ const char* w1 = (const char*)v1 + lb;
+ const char* w2 = (const char*)v2 + lb;
+ CHARSET_INFO* cs = (CHARSET_INFO*)(info);
+ int k = (cs->coll->wildcmp)(cs, w1, w1 + m1, w2, w2 + m2, ndb_wild_prefix, ndb_wild_one, ndb_wild_many);
+ return k == 0 ? 0 : +1;
+ }
+ }
+ return -1;
+}
+
+int
+NdbSqlUtil::likeLongvarbinary(const void* info, const void* p1, unsigned n1, const void* p2, unsigned n2)
+{
+ assert(info == 0);
+ return likeLongvarchar(&my_charset_bin, p1, n1, p2, n2);
}
// check charset
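Editor's note: the new like* functions assume the same variable-length layout as their cmp* counterparts: a 1-byte length prefix for Varchar/Varbinary and a 2-byte little-endian prefix for the Longvar types, followed by the data bytes. A small sketch (not from the patch) of matching a Varbinary value against a pattern with no wildcards, where a return of 0 means "matches":

    unsigned char val[1 + 3] = { 3, 'a', 'b', 'c' };   // length prefix + data
    unsigned char pat[1 + 3] = { 3, 'a', 'b', 'c' };
    int match = NdbSqlUtil::likeVarbinary(0, val, sizeof(val), pat, sizeof(pat));
    // match == 0: likeVarbinary asserts info == 0 and delegates to
    // likeVarchar with my_charset_bin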
@@ -709,9 +877,9 @@ NdbSqlUtil::usable_in_pk(Uint32 typeId, const void* info)
{
const Type& type = getType(typeId);
switch (type.m_typeId) {
- case Type::Undefined:
- break;
case Type::Char:
+ case Type::Varchar:
+ case Type::Longvarchar:
{
const CHARSET_INFO *cs = (const CHARSET_INFO*)info;
return
@@ -719,13 +887,13 @@ NdbSqlUtil::usable_in_pk(Uint32 typeId, const void* info)
cs->cset != 0 &&
cs->coll != 0 &&
cs->coll->strnxfrm != 0 &&
- cs->strxfrm_multiply <= 1; // current limitation
+ cs->strxfrm_multiply <= MAX_XFRM_MULTIPLY;
}
break;
- case Type::Varchar:
- return true; // Varchar not used via MySQL
+ case Type::Undefined:
case Type::Blob:
case Type::Text:
+ case Type::Bit:
break;
default:
return true;
@@ -743,10 +911,12 @@ bool
NdbSqlUtil::usable_in_ordered_index(Uint32 typeId, const void* info)
{
const Type& type = getType(typeId);
+ if (type.m_cmp == NULL)
+ return false;
switch (type.m_typeId) {
- case Type::Undefined:
- break;
case Type::Char:
+ case Type::Varchar:
+ case Type::Longvarchar:
{
const CHARSET_INFO *cs = (const CHARSET_INFO*)info;
return
@@ -755,23 +925,13 @@ NdbSqlUtil::usable_in_ordered_index(Uint32 typeId, const void* info)
cs->coll != 0 &&
cs->coll->strnxfrm != 0 &&
cs->coll->strnncollsp != 0 &&
- cs->strxfrm_multiply <= 1; // current limitation
+ cs->strxfrm_multiply <= MAX_XFRM_MULTIPLY;
}
break;
- case Type::Varchar:
- return true; // Varchar not used via MySQL
+ case Type::Undefined:
+ case Type::Blob:
case Type::Text:
- {
- const CHARSET_INFO *cs = (const CHARSET_INFO*)info;
- return
- cs != 0 &&
- cs->mbmaxlen == 1 && // extra limitation
- cs->cset != 0 &&
- cs->coll != 0 &&
- cs->coll->strnxfrm != 0 &&
- cs->coll->strnncollsp != 0 &&
- cs->strxfrm_multiply <= 1; // current limitation
- }
+ case Type::Bit: // can be fixed
break;
default:
return true;
@@ -779,68 +939,69 @@ NdbSqlUtil::usable_in_ordered_index(Uint32 typeId, const void* info)
return false;
}
-#ifdef NDB_SQL_UTIL_TEST
-
-#include <NdbTick.h>
-#include <NdbOut.hpp>
+// utilities
-struct Testcase {
- int op; // 1=compare 2=like
- int res;
- const char* s1;
- const char* s2;
- int pad;
-};
-const Testcase testcase[] = {
- { 2, 1, "abc", "abc", 0 },
- { 2, 1, "abc", "abc%", 0 },
- { 2, 1, "abcdef", "abc%", 0 },
- { 2, 1, "abcdefabcdefabcdef", "abc%", 0 },
- { 2, 1, "abcdefabcdefabcdef", "abc%f", 0 },
- { 2, 0, "abcdefabcdefabcdef", "abc%z", 0 },
- { 2, 1, "abcdefabcdefabcdef", "%f", 0 },
- { 2, 1, "abcdef", "a%b%c%d%e%f", 0 },
- { 0, 0, 0, 0 }
-};
-
-int
-main(int argc, char** argv)
-{
- ndb_init(); // for charsets
- unsigned count = argc > 1 ? atoi(argv[1]) : 1000000;
- ndbout_c("count = %u", count);
- assert(count != 0);
- for (const Testcase* t = testcase; t->s1 != 0; t++) {
- ndbout_c("%d = '%s' %s '%s' pad=%d",
- t->res, t->s1, t->op == 1 ? "comp" : "like", t->s2);
- NDB_TICKS x1 = NdbTick_CurrentMillisecond();
- unsigned n1 = strlen(t->s1);
- unsigned n2 = strlen(t->s2);
- for (unsigned i = 0; i < count; i++) {
- if (t->op == 1) {
- int res = NdbSqlUtil::char_compare(t->s1, n1, t->s2, n2, t->pad);
- assert(res == t->res);
- continue;
- }
- if (t->op == 2) {
- int res = NdbSqlUtil::char_like(t->s1, n1, t->s2, n2, t->pad);
- assert(res == t->res);
- continue;
- }
- assert(false);
+bool
+NdbSqlUtil::get_var_length(Uint32 typeId, const void* p, unsigned attrlen, Uint32& lb, Uint32& len)
+{
+ const unsigned char* const src = (const unsigned char*)p;
+ switch (typeId) {
+ case NdbSqlUtil::Type::Varchar:
+ case NdbSqlUtil::Type::Varbinary:
+ lb = 1;
+ if (attrlen >= lb) {
+ len = src[0];
+ if (attrlen >= lb + len)
+ return true;
}
- NDB_TICKS x2 = NdbTick_CurrentMillisecond();
- if (x2 < x1)
- x2 = x1;
- double usec = 1000000.0 * double(x2 - x1) / double(count);
- ndbout_c("time %.0f usec per call", usec);
- }
- // quick check
- for (unsigned i = 0; i < sizeof(m_typeList) / sizeof(m_typeList[0]); i++) {
- const NdbSqlUtil::Type& t = m_typeList[i];
- assert(t.m_typeId == i);
+ break;
+ case NdbSqlUtil::Type::Longvarchar:
+ case NdbSqlUtil::Type::Longvarbinary:
+ lb = 2;
+ if (attrlen >= lb) {
+ len = src[0] + (src[1] << 8);
+ if (attrlen >= lb + len)
+ return true;
+ }
+ break;
+ default:
+ lb = 0;
+ len = attrlen;
+ return true;
+ break;
}
- return 0;
+ return false;
}
+// workaround
+
+int
+NdbSqlUtil::strnxfrm_bug7284(CHARSET_INFO* cs, unsigned char* dst, unsigned dstLen, const unsigned char*src, unsigned srcLen)
+{
+ unsigned char nsp[20]; // native space char
+ unsigned char xsp[20]; // strxfrm-ed space char
+#ifdef VM_TRACE
+ memset(nsp, 0x1f, sizeof(nsp));
+ memset(xsp, 0x1f, sizeof(xsp));
#endif
+ // convert from unicode codepoint for space
+ int n1 = (*cs->cset->wc_mb)(cs, (my_wc_t)0x20, nsp, nsp + sizeof(nsp));
+ if (n1 <= 0)
+ return -1;
+ // strxfrm to binary
+ int n2 = (*cs->coll->strnxfrm)(cs, xsp, sizeof(xsp), nsp, n1);
+ if (n2 <= 0)
+ return -1;
+ // XXX bug workaround - strnxfrm may not write full string
+ memset(dst, 0x0, dstLen);
+ // strxfrm argument string - returns no error indication
+ int n3 = (*cs->coll->strnxfrm)(cs, dst, dstLen, src, srcLen);
+ // pad with strxfrm-ed space chars
+ int n4 = n3;
+ while (n4 < (int)dstLen) {
+ dst[n4] = xsp[(n4 - n3) % n2];
+ n4++;
+ }
+ // no check for partial last
+ return dstLen;
+}
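Editor's note: get_var_length() added above decodes the same length prefixes and reports both the prefix size and the payload length. A minimal sketch, assuming the signature shown in the diff:

    unsigned char buf[1 + 3] = { 3, 'a', 'b', 'c' };
    Uint32 lb = 0, len = 0;
    bool ok = NdbSqlUtil::get_var_length(NdbSqlUtil::Type::Varchar,
                                         buf, sizeof(buf), lb, len);
    // ok == true, lb == 1 (prefix bytes), len == 3 (data bytes)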
diff --git a/ndb/src/common/util/SimpleProperties.cpp b/ndb/src/common/util/SimpleProperties.cpp
index 00c440fcb4e..c25aaea491a 100644
--- a/ndb/src/common/util/SimpleProperties.cpp
+++ b/ndb/src/common/util/SimpleProperties.cpp
@@ -37,6 +37,28 @@ SimpleProperties::Writer::add(Uint16 key, Uint32 value){
}
bool
+SimpleProperties::Writer::add(const char * value, int len){
+ const Uint32 valLen = (len + 3) / 4;
+
+ if ((len % 4) == 0)
+ return putWords((Uint32*)value, valLen);
+
+ const Uint32 putLen= valLen - 1;
+ if (!putWords((Uint32*)value, putLen))
+ return false;
+
+ // Special handling of last bytes
+ union {
+ Uint32 lastWord;
+ char lastBytes[4];
+ };
+ memcpy(lastBytes,
+ value + putLen*4,
+ len - putLen*4);
+ return putWord(lastWord);
+}
+
+bool
SimpleProperties::Writer::add(Uint16 key, const char * value){
Uint32 head = StringValue;
head <<= 16;
@@ -46,9 +68,9 @@ SimpleProperties::Writer::add(Uint16 key, const char * value){
Uint32 strLen = strlen(value) + 1; // Including NULL-byte
if(!putWord(htonl(strLen)))
return false;
-
- const Uint32 valLen = (strLen + 3) / 4;
- return putWords((Uint32*)value, valLen);
+
+ return add(value, (int)strLen);
+
}
bool
@@ -60,9 +82,8 @@ SimpleProperties::Writer::add(Uint16 key, const void* value, int len){
return false;
if(!putWord(htonl(len)))
return false;
-
- const Uint32 valLen = (len + 3) / 4;
- return putWords((Uint32*)value, valLen);
+
+ return add((const char*)value, len);
}
SimpleProperties::Reader::Reader(){
@@ -392,6 +413,7 @@ UtilBufferWriter::putWords(const Uint32 * src, Uint32 len){
return (m_buf.append(src, 4 * len) == 0);
}
+
Uint32
UtilBufferWriter::getWordsUsed() const { return m_buf.length() / 4;}
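Editor's note: the new Writer::add(const char*, int) rounds the byte length up to whole 32-bit words and copies the trailing 1-3 bytes through a stack word so that putWords() never reads past the caller's buffer. A tiny sketch of the rounding it relies on (illustration only):

    int len = 10;                          // bytes supplied by the caller
    Uint32 words = (len + 3) / 4;          // 3 words end up in the property stream
    Uint32 full_words = words - 1;         // 2 words are copied directly
    int tail = len - (int)full_words * 4;  // 2 bytes go through the stack word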
diff --git a/ndb/src/common/util/SocketServer.cpp b/ndb/src/common/util/SocketServer.cpp
index db5c03f925a..15dca2d96b1 100644
--- a/ndb/src/common/util/SocketServer.cpp
+++ b/ndb/src/common/util/SocketServer.cpp
@@ -84,15 +84,15 @@ SocketServer::tryBind(unsigned short port, const char * intface) {
bool
SocketServer::setup(SocketServer::Service * service,
- unsigned short port,
+ unsigned short * port,
const char * intface){
DBUG_ENTER("SocketServer::setup");
- DBUG_PRINT("enter",("interface=%s, port=%d", intface, port));
+ DBUG_PRINT("enter",("interface=%s, port=%u", intface, *port));
struct sockaddr_in servaddr;
memset(&servaddr, 0, sizeof(servaddr));
servaddr.sin_family = AF_INET;
servaddr.sin_addr.s_addr = htonl(INADDR_ANY);
- servaddr.sin_port = htons(port);
+ servaddr.sin_port = htons(*port);
if(intface != 0){
if(Ndb_getInAddr(&servaddr.sin_addr, intface))
@@ -123,7 +123,17 @@ SocketServer::setup(SocketServer::Service * service,
NDB_CLOSE_SOCKET(sock);
DBUG_RETURN(false);
}
-
+
+ /* Get the port we bound to */
+ SOCKET_SIZE_TYPE sock_len = sizeof(servaddr);
+ if(getsockname(sock,(struct sockaddr*)&servaddr,&sock_len)<0) {
+ ndbout_c("An error occurred while trying to find out what"
+ " port we bound to. Error: %s",strerror(errno));
+ NDB_CLOSE_SOCKET(sock);
+ DBUG_RETURN(false);
+ }
+
+ DBUG_PRINT("info",("bound to %u",ntohs(servaddr.sin_port)));
if (listen(sock, m_maxSessions) == -1){
DBUG_PRINT("error",("listen() - %d - %s",
errno, strerror(errno)));
@@ -135,6 +145,9 @@ SocketServer::setup(SocketServer::Service * service,
i.m_socket = sock;
i.m_service = service;
m_services.push_back(i);
+
+ *port = ntohs(servaddr.sin_port);
+
DBUG_RETURN(true);
}
@@ -314,11 +327,18 @@ sessionThread_C(void* _sc){
return 0;
}
- if(!si->m_stop){
- si->m_stopped = false;
- si->runSession();
- } else {
- NDB_CLOSE_SOCKET(si->m_socket);
+ /**
+ * The session may have m_stopped set if we're transforming an mgm
+ * connection into a transporter connection.
+ */
+ if(!si->m_stopped)
+ {
+ if(!si->m_stop){
+ si->m_stopped = false;
+ si->runSession();
+ } else {
+ NDB_CLOSE_SOCKET(si->m_socket);
+ }
}
si->m_stopped = true;
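Editor's note: SocketServer::setup() now takes the port by pointer: passing 0 lets the OS pick an ephemeral port, and on success the variable is updated (via getsockname()) with the port actually bound, which is how cpcd/main.cpp below and the dynamic-port transporter code use it. A minimal calling sketch, assuming a SocketServer and Service are already in scope:

    unsigned short port = 0;                     // 0 = let the OS choose
    if (socket_server.setup(service, &port, 0))  // 0 interface = bind to any
      ndbout_c("listening on port %u", port);    // port now holds the bound port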
diff --git a/ndb/src/common/util/md5_hash.cpp b/ndb/src/common/util/md5_hash.cpp
index 068843183ac..d4eedbc40fb 100644
--- a/ndb/src/common/util/md5_hash.cpp
+++ b/ndb/src/common/util/md5_hash.cpp
@@ -75,7 +75,7 @@ void byteReverse(unsigned char *buf, unsigned longs)
* reflect the addition of 16 longwords of new data. MD5Update blocks
* the data and converts bytes into longwords for this routine.
*/
-void MD5Transform(Uint32 buf[4], Uint32 const in[16])
+static void MD5Transform(Uint32 buf[4], Uint32 const in[16])
{
register Uint32 a, b, c, d;
@@ -162,13 +162,13 @@ void MD5Transform(Uint32 buf[4], Uint32 const in[16])
* Start MD5 accumulation. Set bit count to 0 and buffer to mysterious
* initialization constants.
*/
-Uint32 md5_hash(const Uint64* keybuf, Uint32 no_of_32_words)
+void md5_hash(Uint32 result[4], const Uint64* keybuf, Uint32 no_of_32_words)
{
-/*
- * This is the external interface of the module
- * It is assumed that keybuf is placed on 8 byte
- * alignment.
- */
+ /**
+ * This is the external interface of the module
+ * It is assumed that keybuf is placed on 8 byte
+ * alignment.
+ */
Uint32 i;
Uint32 buf[4];
Uint64 transform64_buf[8];
@@ -230,6 +230,10 @@ Uint32 md5_hash(const Uint64* keybuf, Uint32 no_of_32_words)
byteReverse((unsigned char *)transform32_buf, 16);
MD5Transform(buf, transform32_buf);
}
- return buf[0];
+
+ result[0] = buf[0];
+ result[1] = buf[1];
+ result[2] = buf[2];
+ result[3] = buf[3];
}
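Editor's note: md5_hash() now returns the whole 128-bit digest through the result array instead of only the first word. A minimal sketch of the new calling convention, assuming an 8-byte-aligned key buffer as the comment above requires:

    Uint64 key[2] = { 1, 2 };     // 16 bytes, 8-byte aligned
    Uint32 digest[4];
    md5_hash(digest, key, 4);     // 4 = key length in 32-bit words
    Uint32 bucket = digest[0];    // callers of the old API used only this word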
diff --git a/ndb/src/common/util/new.cpp b/ndb/src/common/util/new.cpp
index 901f74bf979..643800f1582 100644
--- a/ndb/src/common/util/new.cpp
+++ b/ndb/src/common/util/new.cpp
@@ -6,7 +6,7 @@ extern "C" {
void (* ndb_new_handler)() = 0;
}
-#ifdef USE_MYSYS_NEW
+#if 0
void *operator new (size_t sz)
{
diff --git a/ndb/src/common/util/version.c b/ndb/src/common/util/version.c
index 51cf8082a62..8076db576c2 100644
--- a/ndb/src/common/util/version.c
+++ b/ndb/src/common/util/version.c
@@ -92,6 +92,8 @@ void ndbSetOwnVersion() {}
#ifndef TEST_VERSION
struct NdbUpGradeCompatible ndbCompatibleTable_full[] = {
+ { MAKE_VERSION(5,0,NDB_VERSION_BUILD), MAKE_VERSION(5,0,12), UG_Range},
+ { MAKE_VERSION(5,0,11), MAKE_VERSION(5,0,2), UG_Range},
{ MAKE_VERSION(4,1,NDB_VERSION_BUILD), MAKE_VERSION(4,1,15), UG_Range },
{ MAKE_VERSION(4,1,14), MAKE_VERSION(4,1,10), UG_Range },
{ MAKE_VERSION(4,1,10), MAKE_VERSION(4,1,9), UG_Exact },
@@ -101,6 +103,8 @@ struct NdbUpGradeCompatible ndbCompatibleTable_full[] = {
};
struct NdbUpGradeCompatible ndbCompatibleTable_upgrade[] = {
+ { MAKE_VERSION(5,0,12), MAKE_VERSION(5,0,11), UG_Exact },
+ { MAKE_VERSION(5,0,2), MAKE_VERSION(4,1,8), UG_Exact },
{ MAKE_VERSION(4,1,15), MAKE_VERSION(4,1,14), UG_Exact },
{ MAKE_VERSION(3,5,4), MAKE_VERSION(3,5,3), UG_Exact },
{ 0, 0, UG_Null }
diff --git a/ndb/src/cw/cpcd/APIService.cpp b/ndb/src/cw/cpcd/APIService.cpp
index b009f0c0fc4..e7a2092c15d 100644
--- a/ndb/src/cw/cpcd/APIService.cpp
+++ b/ndb/src/cw/cpcd/APIService.cpp
@@ -136,6 +136,8 @@ ParserRow<CPCDAPISession> commands[] =
CPCD_ARG("id", Int, Mandatory, "Id of process"),
CPCD_CMD("list processes", &CPCDAPISession::listProcesses, ""),
+
+ CPCD_CMD("show version", &CPCDAPISession::showVersion, ""),
CPCD_END()
};
@@ -359,6 +361,7 @@ CPCDAPISession::listProcesses(Parser_t::Context & /* unused */,
m_output->println("stdout: %s", p->m_stdout.c_str());
m_output->println("stderr: %s", p->m_stderr.c_str());
m_output->println("ulimit: %s", p->m_ulimit.c_str());
+ m_output->println("shutdown: %s", p->m_shutdown_options.c_str());
switch(p->m_status){
case STOPPED:
m_output->println("status: stopped");
@@ -384,4 +387,16 @@ CPCDAPISession::listProcesses(Parser_t::Context & /* unused */,
m_cpcd.m_processes.unlock();
}
+void
+CPCDAPISession::showVersion(Parser_t::Context & /* unused */,
+ const class Properties & /* args */){
+ Uint32 id;
+ CPCD::RequestStatus rs;
+
+ m_output->println("show version");
+ m_output->println("compile time: %s %s", __DATE__, __TIME__);
+
+ m_output->println("");
+}
+
template class Vector<ParserRow<CPCDAPISession> const*>;
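
The new entry above registers `show version` with ndb_cpcd's table-driven command parser: each CPCD_CMD row binds a command string to a CPCDAPISession member function, and the handler replies line by line, ending with an empty line. A hypothetical miniature of that dispatch pattern; Session, Command and dispatch below are illustrations, not the Parser framework itself:

    #include <cstdio>
    #include <cstring>

    // Miniature of a command-to-member-function table, in the spirit of ParserRow.
    class Session {
    public:
      void showVersion() {
        std::printf("show version\n");
        std::printf("compile time: %s %s\n", __DATE__, __TIME__);
        std::printf("\n");                    // an empty line ends the reply
      }
    };

    struct Command {
      const char* name;
      void (Session::*handler)();
    };

    static const Command commands[] = {
      { "show version", &Session::showVersion },
      { 0, 0 }                                // end marker, like CPCD_END()
    };

    void dispatch(Session& s, const char* line)
    {
      for (unsigned i = 0; commands[i].name; i++)
        if (std::strcmp(commands[i].name, line) == 0)
          (s.*commands[i].handler)();
    }
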
diff --git a/ndb/src/cw/cpcd/APIService.hpp b/ndb/src/cw/cpcd/APIService.hpp
index ef988785f89..3586d64187e 100644
--- a/ndb/src/cw/cpcd/APIService.hpp
+++ b/ndb/src/cw/cpcd/APIService.hpp
@@ -49,6 +49,7 @@ public:
void stopProcess(Parser_t::Context & ctx, const class Properties & args);
void showProcess(Parser_t::Context & ctx, const class Properties & args);
void listProcesses(Parser_t::Context & ctx, const class Properties & args);
+ void showVersion(Parser_t::Context & ctx, const class Properties & args);
};
class CPCDAPIService : public SocketServer::Service {
diff --git a/ndb/src/cw/cpcd/main.cpp b/ndb/src/cw/cpcd/main.cpp
index ba877095a04..c320f07ef04 100644
--- a/ndb/src/cw/cpcd/main.cpp
+++ b/ndb/src/cw/cpcd/main.cpp
@@ -138,7 +138,8 @@ int main(int argc, char** argv){
SocketServer * ss = new SocketServer();
CPCDAPIService * serv = new CPCDAPIService(cpcd);
- if(!ss->setup(serv, port)){
+ unsigned short real_port= port; // correct type
+ if(!ss->setup(serv, &real_port)){
logger.critical("Cannot setup server: %s", strerror(errno));
sleep(1);
delete ss;
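
Passing the port as a correctly typed unsigned short* presumably lets SocketServer::setup write back the port it actually bound (for example when 0 is passed so the OS picks one); that behaviour is an assumption here, illustrated with a plain BSD-socket stand-in rather than the NDB SocketServer:

    #include <netinet/in.h>
    #include <sys/socket.h>
    #include <unistd.h>
    #include <cstring>

    // Hypothetical stand-in for SocketServer::setup(): binds a listener and
    // writes the actually bound port back through 'port'.
    bool setup_listener(unsigned short* port)
    {
      int fd = socket(AF_INET, SOCK_STREAM, 0);
      if (fd < 0) return false;

      sockaddr_in addr;
      std::memset(&addr, 0, sizeof(addr));
      addr.sin_family = AF_INET;
      addr.sin_addr.s_addr = htonl(INADDR_ANY);
      addr.sin_port = htons(*port);        // *port == 0 lets the OS choose

      socklen_t len = sizeof(addr);
      if (bind(fd, (sockaddr*)&addr, sizeof(addr)) < 0 ||
          listen(fd, 5) < 0 ||
          getsockname(fd, (sockaddr*)&addr, &len) < 0) {
        close(fd);
        return false;
      }
      *port = ntohs(addr.sin_port);        // report the port that was really bound
      return true;
    }
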
diff --git a/ndb/src/kernel/blocks/ERROR_codes.txt b/ndb/src/kernel/blocks/ERROR_codes.txt
index 791df915d66..1a72537a77e 100644
--- a/ndb/src/kernel/blocks/ERROR_codes.txt
+++ b/ndb/src/kernel/blocks/ERROR_codes.txt
@@ -4,13 +4,13 @@ Next NDBFS 2000
Next DBACC 3002
Next DBTUP 4013
Next DBLQH 5042
-Next DBDICT 6006
+Next DBDICT 6007
Next DBDIH 7174
Next DBTC 8037
Next CMVMI 9000
Next BACKUP 10022
Next DBUTIL 11002
-Next DBTUX 12007
+Next DBTUX 12008
Next SUMA 13001
TESTING NODE FAILURE, ARBITRATION
@@ -196,6 +196,8 @@ Delay execution of ABORTREQ signal 2 seconds to generate time-out.
8048: Make TC not choose own node for simple/dirty read
5041: Crash when receiving simple read from other TC on different node
+8050: Send TCKEYREF if operation is non local
+
ERROR CODES FOR TESTING TIME-OUT HANDLING IN DBTC
-------------------------------------------------
8040:
@@ -411,6 +413,7 @@ Drop Table/Index:
8034: Fail next index create in TC
8035: Fail next trigger drop in TC
8036: Fail next index drop in TC
+6006: Crash participant in create index
System Restart:
---------------
@@ -442,6 +445,7 @@ Test routing of signals:
Ordered index:
--------------
+12007: Make next alloc node fail with no memory error
Dbdict:
-------
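
ERROR_codes.txt catalogues the error-insert numbers that test programs can arm in a running data node so that a specific code path misbehaves on purpose. A hedged miniature of the mechanism; the real blocks use their own ERROR_INSERT macros, and every name below is illustrative:

    #include <cstdio>
    typedef unsigned int Uint32;

    // A test arms a code in the node; the block checks it at the instrumented
    // decision point and deliberately takes the error path.
    static Uint32 g_errorInsertValue = 0;

    void armErrorInsert(Uint32 code)       { g_errorInsertValue = code; }
    static bool errorInserted(Uint32 code) { return g_errorInsertValue == code; }

    static void sendTcKeyRef()   { std::puts("TCKEYREF sent (forced by error insert)"); }
    static void executeLocally() { std::puts("operation executed"); }

    void handleOperation(bool operationIsLocal)
    {
      // Corresponds to "8050: Send TCKEYREF if operation is non local".
      if (!operationIsLocal && errorInserted(8050)) {
        sendTcKeyRef();               // drive the failure path the test exercises
        return;
      }
      executeLocally();
    }
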
diff --git a/ndb/src/kernel/blocks/backup/Backup.cpp b/ndb/src/kernel/blocks/backup/Backup.cpp
index 56af24c5cf0..cbffd6bcb6b 100644
--- a/ndb/src/kernel/blocks/backup/Backup.cpp
+++ b/ndb/src/kernel/blocks/backup/Backup.cpp
@@ -955,7 +955,7 @@ Backup::sendBackupRef(BlockReference senderRef, Uint32 flags, Signal *signal,
}
if(errorCode != BackupRef::IAmNotMaster){
- signal->theData[0] = EventReport::BackupFailedToStart;
+ signal->theData[0] = NDB_LE_BackupFailedToStart;
signal->theData[1] = senderRef;
signal->theData[2] = errorCode;
sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 3, JBB);
@@ -1214,7 +1214,7 @@ Backup::defineBackupReply(Signal* signal, BackupRecordPtr ptr, Uint32 nodeId)
BackupConf::SignalLength, JBB);
}
- signal->theData[0] = EventReport::BackupStarted;
+ signal->theData[0] = NDB_LE_BackupStarted;
signal->theData[1] = ptr.p->clientRef;
signal->theData[2] = ptr.p->backupId;
ptr.p->nodes.copyto(NdbNodeBitmask::Size, signal->theData+3);
@@ -2110,7 +2110,7 @@ Backup::stopBackupReply(Signal* signal, BackupRecordPtr ptr, Uint32 nodeId)
BackupCompleteRep::SignalLength, JBB);
}
- signal->theData[0] = EventReport::BackupCompleted;
+ signal->theData[0] = NDB_LE_BackupCompleted;
signal->theData[1] = ptr.p->clientRef;
signal->theData[2] = ptr.p->backupId;
signal->theData[3] = ptr.p->startGCP;
@@ -2155,7 +2155,7 @@ Backup::masterAbort(Signal* signal, BackupRecordPtr ptr)
sendSignal(ptr.p->clientRef, GSN_BACKUP_ABORT_REP, signal,
BackupAbortRep::SignalLength, JBB);
}
- signal->theData[0] = EventReport::BackupAborted;
+ signal->theData[0] = NDB_LE_BackupAborted;
signal->theData[1] = ptr.p->clientRef;
signal->theData[2] = ptr.p->backupId;
signal->theData[3] = ptr.p->errorCode;
@@ -2884,8 +2884,6 @@ Backup::parseTableDescription(Signal* signal, BackupRecordPtr ptr, Uint32 len)
/**
* Initialize table object
*/
- tabPtr.p->frag_mask = RNIL;
-
tabPtr.p->schemaVersion = tmpTab.TableVersion;
tabPtr.p->noOfAttributes = tmpTab.NoOfAttributes;
tabPtr.p->noOfNull = 0;
@@ -2978,7 +2976,6 @@ Backup::execDI_FCOUNTCONF(Signal* signal)
ndbrequire(findTable(ptr, tabPtr, tableId));
ndbrequire(tabPtr.p->fragments.seize(fragCount) != false);
- tabPtr.p->frag_mask = calculate_frag_mask(fragCount);
for(Uint32 i = 0; i<fragCount; i++) {
jam();
FragmentPtr fragPtr;
@@ -3796,15 +3793,6 @@ Backup::checkFile(Signal* signal, BackupFilePtr filePtr)
* Slave functionality: Perform logging
*
****************************************************************************/
-Uint32
-Backup::calculate_frag_mask(Uint32 count)
-{
- Uint32 mask = 1;
- while (mask < count) mask <<= 1;
- mask -= 1;
- return mask;
-}
-
void
Backup::execBACKUP_TRIG_REQ(Signal* signal)
{
@@ -3821,14 +3809,6 @@ Backup::execBACKUP_TRIG_REQ(Signal* signal)
jamEntry();
c_triggerPool.getPtr(trigPtr, trigger_id);
c_tablePool.getPtr(tabPtr, trigPtr.p->tab_ptr_i);
- frag_id = frag_id & tabPtr.p->frag_mask;
- /*
- At the moment the fragment identity known by TUP is the
- actual fragment id but with possibly an extra bit set.
- This is due to that ACC splits the fragment. Thus fragment id 5 can
- here be either 5 or 13. Thus masking with 2 ** n - 1 where number of
- fragments <= 2 ** n will always provide a correct fragment id.
- */
tabPtr.p->fragments.getPtr(fragPtr, frag_id);
if (fragPtr.p->node != getOwnNodeId()) {
jam();
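
Throughout this change the old EventReport::* enumerators are replaced by the NDB_LE_* log-event types; the reporting convention itself is unchanged: theData[0] carries the event type and the following words carry its arguments, and the signal goes to CMVMI as GSN_EVENT_REP. A hedged miniature of that convention; Signal and the constant below are stand-ins, and the numeric value is made up:

    typedef unsigned int Uint32;

    // Stand-ins for illustration; the real Signal and NDB_LE_* values live in
    // the kernel headers and ndb_logevent.h.
    struct Signal { Uint32 theData[25]; };
    enum { NDB_LE_BackupStarted_demo = 54 };   // hypothetical numeric value

    Uint32 buildBackupStartedRep(Signal* signal, Uint32 clientRef, Uint32 backupId)
    {
      signal->theData[0] = NDB_LE_BackupStarted_demo;  // word 0: which event
      signal->theData[1] = clientRef;                  // words 1..n: its arguments
      signal->theData[2] = backupId;
      return 3;  // length passed to sendSignal(CMVMI_REF, GSN_EVENT_REP, ...)
    }
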
diff --git a/ndb/src/kernel/blocks/backup/Backup.hpp b/ndb/src/kernel/blocks/backup/Backup.hpp
index f3d180b9467..67b53d3eccd 100644
--- a/ndb/src/kernel/blocks/backup/Backup.hpp
+++ b/ndb/src/kernel/blocks/backup/Backup.hpp
@@ -195,7 +195,6 @@ public:
Uint32 tableId;
Uint32 schemaVersion;
- Uint32 frag_mask;
Uint32 tableType;
Uint32 noOfNull;
Uint32 noOfAttributes;
@@ -527,8 +526,6 @@ public:
ArrayPool<Node> c_nodePool;
ArrayPool<TriggerRecord> c_triggerPool;
- Uint32 calculate_frag_mask(Uint32);
-
void checkFile(Signal*, BackupFilePtr);
void checkScan(Signal*, BackupFilePtr);
void fragmentCompleted(Signal*, BackupFilePtr);
diff --git a/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp b/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp
index 7659ee1145d..6f6aee6a7f7 100644
--- a/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp
+++ b/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp
@@ -188,7 +188,7 @@ void Cmvmi::execEVENT_REP(Signal* signal)
// to the graphical management interface.
//-----------------------------------------------------------------------
EventReport * const eventReport = (EventReport *)&signal->theData[0];
- EventReport::EventType eventType = eventReport->getEventType();
+ Ndb_logevent_type eventType = eventReport->getEventType();
jamEntry();
@@ -198,7 +198,8 @@ void Cmvmi::execEVENT_REP(Signal* signal)
Uint32 threshold;
LogLevel::EventCategory eventCategory;
Logger::LoggerLevel severity;
- if (EventLoggerBase::event_lookup(eventType,eventCategory,threshold,severity))
+ EventLoggerBase::EventTextFunction textF;
+ if (EventLoggerBase::event_lookup(eventType,eventCategory,threshold,severity,textF))
return;
SubscriberPtr ptr;
@@ -362,7 +363,7 @@ void Cmvmi::execCLOSE_COMREQ(Signal* signal)
//-----------------------------------------------------
// Report that the connection to the node is closed
//-----------------------------------------------------
- signal->theData[0] = EventReport::CommunicationClosed;
+ signal->theData[0] = NDB_LE_CommunicationClosed;
signal->theData[1] = i;
sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 2, JBB);
@@ -396,7 +397,7 @@ void Cmvmi::execOPEN_COMREQ(Signal* signal)
//-----------------------------------------------------
// Report that the connection to the node is opened
//-----------------------------------------------------
- signal->theData[0] = EventReport::CommunicationOpened;
+ signal->theData[0] = NDB_LE_CommunicationOpened;
signal->theData[1] = tStartingNode;
sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 2, JBB);
//-----------------------------------------------------
@@ -408,7 +409,7 @@ void Cmvmi::execOPEN_COMREQ(Signal* signal)
globalTransporterRegistry.do_connect(i);
globalTransporterRegistry.setIOState(i, HaltIO);
- signal->theData[0] = EventReport::CommunicationOpened;
+ signal->theData[0] = NDB_LE_CommunicationOpened;
signal->theData[1] = i;
sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 2, JBB);
}
@@ -433,7 +434,7 @@ void Cmvmi::execENABLE_COMORD(Signal* signal)
//-----------------------------------------------------
// Report that the version of the node
//-----------------------------------------------------
- signal->theData[0] = EventReport::ConnectedApiVersion;
+ signal->theData[0] = NDB_LE_ConnectedApiVersion;
signal->theData[1] = tStartingNode;
signal->theData[2] = getNodeInfo(tStartingNode).m_version;
@@ -475,7 +476,7 @@ void Cmvmi::execDISCONNECT_REP(Signal *signal)
cancelSubscription(hostId);
- signal->theData[0] = EventReport::Disconnected;
+ signal->theData[0] = NDB_LE_Disconnected;
signal->theData[1] = hostId;
sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 2, JBB);
}
@@ -525,7 +526,7 @@ void Cmvmi::execCONNECT_REP(Signal *signal){
//------------------------------------------
// Also report this event to the Event handler
//------------------------------------------
- signal->theData[0] = EventReport::Connected;
+ signal->theData[0] = NDB_LE_Connected;
signal->theData[1] = hostId;
signal->header.theLength = 2;
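
Cmvmi now resolves each incoming event through EventLoggerBase::event_lookup, which per this patch also yields a text-formatting function alongside category, threshold and severity, and the event is dropped when the lookup fails. A hypothetical sketch of such a table-driven lookup; the type names and return convention are assumptions shaped to match the call site above:

    #include <cstddef>
    typedef unsigned int Uint32;

    // Hypothetical shapes mirroring the call site in Cmvmi::execEVENT_REP.
    enum Ndb_logevent_type_demo { LE_Connected, LE_Disconnected };
    enum EventCategory_demo     { EC_Connection, EC_Backup };
    enum LoggerLevel_demo       { LL_Info, LL_Alert };
    typedef void (*EventTextFunction)(char* buf, size_t sz, const Uint32* data);

    struct EventEntry {
      Ndb_logevent_type_demo type;
      EventCategory_demo     category;
      Uint32                 threshold;
      LoggerLevel_demo       severity;
      EventTextFunction      textF;
    };

    static void textConnected(char*, size_t, const Uint32*) {}
    static const EventEntry table[] = {
      { LE_Connected,    EC_Connection, 8, LL_Info,  textConnected },
      { LE_Disconnected, EC_Connection, 8, LL_Alert, textConnected },
    };

    // Returns non-zero when the event is unknown, matching the caller's
    // "if (event_lookup(...)) return;" early-out.
    int event_lookup_demo(Ndb_logevent_type_demo type,
                          EventCategory_demo& cat, Uint32& threshold,
                          LoggerLevel_demo& severity, EventTextFunction& textF)
    {
      for (size_t i = 0; i < sizeof(table)/sizeof(table[0]); i++) {
        if (table[i].type == type) {
          cat = table[i].category;
          threshold = table[i].threshold;
          severity = table[i].severity;
          textF = table[i].textF;
          return 0;
        }
      }
      return 1;
    }
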
diff --git a/ndb/src/kernel/blocks/dbacc/Dbacc.hpp b/ndb/src/kernel/blocks/dbacc/Dbacc.hpp
index ea866aafff9..1da6c56b0e3 100644
--- a/ndb/src/kernel/blocks/dbacc/Dbacc.hpp
+++ b/ndb/src/kernel/blocks/dbacc/Dbacc.hpp
@@ -22,6 +22,9 @@
#include <pc.hpp>
#include <SimulatedBlock.hpp>
+// primary key is stored in TUP
+#include <Dbtup.hpp>
+
#ifdef DBACC_C
// Debug Macros
#define dbgWord32(ptr, ind, val)
@@ -98,7 +101,6 @@ ndbout << "Ptr: " << ptr.p->word32 << " \tIndex: " << tmp_string << " \tValue: "
#define ZPOS_PREV_PAGE 11
#define ZNORMAL_PAGE_TYPE 0
#define ZOVERFLOW_PAGE_TYPE 1
-#define ZLONG_PAGE_TYPE 2
#define ZDEFAULT_LIST 3
#define ZWORDS_IN_PAGE 2048
/* --------------------------------------------------------------------------------- */
@@ -132,16 +134,6 @@ ndbout << "Ptr: " << ptr.p->word32 << " \tIndex: " << tmp_string << " \tValue: "
#define ZPAGEZERO_NODETYPE 33
#define ZPAGEZERO_SLACK_CHECK 34
/* --------------------------------------------------------------------------------- */
-/* CONSTANTS FOR THE LONG KEY PAGES */
-/* --------------------------------------------------------------------------------- */
-/* --------------------------------------------------------------------------------- */
-// Maximum number of elements in long key page = (ZWORDS_IN_PAGE - ZHEAD_SIZE) /
-// (MinKeySize + IndexSize) = (2048 - 32) / (8 + 1) = 224. MinKeySize is actually 9
-// because 8 is the largest normal key size.
-#define ZMAX_NO_OF_LONGKEYS_IN_PAGE 225
-#define ZMAX_LONG_KEY_ARRAY_INDEX 3
-#define ZACTIVE_LONG_KEY_LEN 1
-/* --------------------------------------------------------------------------------- */
/* CONSTANTS IN ALPHABETICAL ORDER */
/* --------------------------------------------------------------------------------- */
#define ZADDFRAG 0
@@ -392,49 +384,6 @@ enum State {
// Records
-
-//----------------------------------------------------------------------------------
-// LONGKEY PAGE RECORD
-//
-// A long key page consist of a header part, a key data part and an index part. The
-// page starts with a header of size HEAD_SIZE. As you can see below, not every word
-// in the header is used. After the header comes the data part, where the actual
-// keys are stored. A key is always inserted after the existing keys in the data
-// part. If we have a fragmented data part and a new key doesn't fit after the
-// existing keys we reorganize the keys. The index part starts at the end of the
-// page and grows towards the end of the data part. This means that the limit
-// between the data part and the index part is floating. Each inserted key have a
-// word in the index part that describes size and position of the key in the data
-// part. The free indexes in the index part are single linked.
-//----------------------------------------------------------------------------------
- union LongKeyPage {
- struct {
- Uint32 pageId; // ZPOS_PAGE_ID 0
- Uint32 b;
- // The number of keys in page.
- Uint32 noOfElements; // ZPOS_NO_ELEM_IN_PAGE 2
- Uint32 d;
- Uint32 e;
- // The free area in the data part of page.
- Uint32 freeArea; // ZPOS_FREE_AREA_IN_PAGE 5
- // The index position, which defines the limit between the data and the index part.
- Uint32 highestIndex; // ZPOS_LAST_INDEX 6
- // The position where to insert the actual key in the data part.
- Uint32 insertPos; // ZPOS_INSERT_INDEX 7
- // Position in a page array where the pages are stored in a double linked list.
- // Based on the free area in the page. Values 0 to 3.
- Uint32 pageArrayPos; // ZPOS_ARRAY_POS 8
- // Next free position in the index part.
- Uint32 nextFreeIndex; // ZPOS_NEXT_FREE_INDEX 9
- // Next page in the double linked list.
- Uint32 nextPage; // ZPOS_NEXT_PAGE 10
- // Previous page in the double linked list.
- Uint32 prevPage; // ZPOS_PREV_PAGE 11
- } header;
- // This is kept to keep the logic and to make changes to a minimum.
- Uint32 word32[2048];
- };
-
/* --------------------------------------------------------------------------------- */
/* UNDO HEADER RECORD */
/* --------------------------------------------------------------------------------- */
@@ -444,9 +393,7 @@ enum State {
ZPAGE_INFO = 0,
ZOVER_PAGE_INFO = 1,
ZOP_INFO = 2,
- ZUNDO_INSERT_LONG_KEY = 3,
- ZUNDO_DELETE_LONG_KEY = 4,
- ZNO_UNDORECORD_TYPES = 5
+ ZNO_UNDORECORD_TYPES = 3
};
UintR tableId;
UintR rootFragId;
@@ -660,10 +607,10 @@ struct Fragmentrec {
//-----------------------------------------------------------------------------
// elementLength: Length of element in bucket and overflow pages
-// keyLength: Length of key (== 0 if long key or variable key length)
+// keyLength: Length of key
//-----------------------------------------------------------------------------
Uint8 elementLength;
- Uint8 keyLength;
+ Uint16 keyLength;
//-----------------------------------------------------------------------------
// This flag is used to avoid sending a big number of expand or shrink signals
@@ -689,6 +636,11 @@ struct Fragmentrec {
//-----------------------------------------------------------------------------
Uint8 nodetype;
Uint8 stopQueOp;
+
+//-----------------------------------------------------------------------------
+// flag to avoid accessing table record if no char attributes
+//-----------------------------------------------------------------------------
+ Uint8 hasCharAttr;
};
typedef Ptr<Fragmentrec> FragmentrecPtr;
@@ -771,6 +723,7 @@ struct Operationrec {
State transactionstate;
Uint16 elementContainer;
Uint16 tupkeylen;
+ Uint32 xfrmtupkeylen;
Uint32 userblockref;
Uint32 scanBits;
Uint8 elementIsDisappeared;
@@ -783,7 +736,7 @@ struct Operationrec {
Uint8 dirtyRead;
Uint8 commitDeleteCheckFlag;
Uint8 isAccLockReq;
- Uint32 nextOpList;
+ Uint8 isUndoLogReq;
}; /* p2c: size = 168 bytes */
typedef Ptr<Operationrec> OperationrecPtr;
@@ -914,6 +867,9 @@ public:
Dbacc(const class Configuration &);
virtual ~Dbacc();
+ // pointer to TUP instance in this thread
+ Dbtup* c_tup;
+
private:
BLOCK_DEFINES(Dbacc);
@@ -972,10 +928,8 @@ private:
void initFragGeneral(FragmentrecPtr);
void verifyFragCorrect(FragmentrecPtr regFragPtr);
void sendFSREMOVEREQ(Signal* signal, Uint32 tableId);
- void sendDROP_TABFILECONF(Signal* signal, TabrecPtr tabPtr);
void releaseFragResources(Signal* signal, Uint32 fragIndex);
void releaseRootFragRecord(Signal* signal, RootfragmentrecPtr rootPtr);
- void sendREL_TABMEMCONF(Signal* signal, TabrecPtr tabPtr);
void releaseRootFragResources(Signal* signal, Uint32 tableId);
void releaseDirResources(Signal* signal,
Uint32 fragIndex,
@@ -1051,7 +1005,6 @@ private:
void releaseScanRec(Signal* signal);
bool searchScanContainer(Signal* signal);
void sendNextScanConf(Signal* signal);
- void sendScaninfo(Signal* signal);
void setlock(Signal* signal);
void takeOutActiveScanOp(Signal* signal);
void takeOutScanLockQueue(Uint32 scanRecIndex);
@@ -1063,15 +1016,8 @@ private:
void increaselistcont(Signal* signal);
void seizeLeftlist(Signal* signal);
void seizeRightlist(Signal* signal);
- void allocLongOverflowPage(Signal* signal);
- void allocSpecificLongOverflowPage(Signal* signal);
- void getLongKeyPage(Signal* signal);
- void initLongOverpage(Signal* signal);
- void storeLongKeys(Signal* signal);
- void storeLongKeysAtPos(Signal* signal);
- void reorgLongPage(Signal* signal);
+ Uint32 readTablePk(Uint32 localkey1);
void getElement(Signal* signal);
- void searchLongKey(Signal* signal, bool verify);
void getdirindex(Signal* signal);
void commitdelete(Signal* signal, bool systemRestart);
void deleteElement(Signal* signal);
@@ -1079,15 +1025,6 @@ private:
void releaseLeftlist(Signal* signal);
void releaseRightlist(Signal* signal);
void checkoverfreelist(Signal* signal);
- void deleteLongKey(Signal* signal);
- void removeFromPageArrayList(Signal* signal);
- void insertPageArrayList(Signal* signal);
- void checkPageArrayList(Signal* signal, const char *);
- void checkPageB4Insert(Uint32, const char *);
- void checkPageB4Remove(Uint32, const char *);
- void checkIndexInLongKeyPage(Uint32, const char *);
- void printoutInfoAndShutdown(LongKeyPage *);
- void releaseLongPage(Signal* signal);
void abortOperation(Signal* signal);
void accAbortReqLab(Signal* signal, bool sendConf);
void commitOperation(Signal* signal);
@@ -1105,7 +1042,6 @@ private:
void initLcpConnRec(Signal* signal);
void initOverpage(Signal* signal);
void initPage(Signal* signal);
- void initPageZero(Signal* signal);
void initRootfragrec(Signal* signal);
void putOpInFragWaitQue(Signal* signal);
void putOverflowRecInFrag(Signal* signal);
@@ -1159,8 +1095,6 @@ private:
void refaccConnectLab(Signal* signal);
void srReadOverPagesLab(Signal* signal);
void releaseScanLab(Signal* signal);
- void exeoperationLab(Signal* signal);
- void saveKeyDataLab(Signal* signal);
void lcpOpenUndofileConfLab(Signal* signal);
void srFsOpenConfLab(Signal* signal);
void checkSyncUndoPagesLab(Signal* signal);
@@ -1172,13 +1106,12 @@ private:
void srReadPagesLab(Signal* signal);
void srDoUndoLab(Signal* signal);
void ndbrestart1Lab(Signal* signal);
- void initialiseRecordsLab(Signal* signal, Uint32 returnRef, Uint32 retData);
+ void initialiseRecordsLab(Signal* signal, Uint32 ref, Uint32 data);
void srReadPagesAllocLab(Signal* signal);
void checkNextBucketLab(Signal* signal);
void endsavepageLab(Signal* signal);
void saveZeroPageLab(Signal* signal);
void srAllocPage0011Lab(Signal* signal);
- void allocscanrecLab(Signal* signal);
void sendLcpFragidconfLab(Signal* signal);
void savepagesLab(Signal* signal);
void saveOverPagesLab(Signal* signal);
@@ -1192,6 +1125,8 @@ private:
void lcp_write_op_to_undolog(Signal* signal);
void reenable_expand_after_redo_log_exection_complete(Signal*);
+ // charsets
+ void xfrmKeyData(Signal* signal);
// Initialisation
void initData();
@@ -1281,8 +1216,6 @@ private:
/* --------------------------------------------------------------------------------- */
Page8 *page8;
/* 8 KB PAGE */
- Page8Ptr aslpPageptr;
- Page8Ptr alpPageptr;
Page8Ptr ancPageptr;
Page8Ptr colPageptr;
Page8Ptr ccoPageptr;
@@ -1293,29 +1226,17 @@ private:
Page8Ptr gdiPageptr;
Page8Ptr gePageptr;
Page8Ptr gflPageptr;
- Page8Ptr glkPageptr;
Page8Ptr idrPageptr;
Page8Ptr ilcPageptr;
- Page8Ptr iloPageptr;
Page8Ptr inpPageptr;
Page8Ptr iopPageptr;
- Page8Ptr ipzPageptr;
Page8Ptr lastPageptr;
Page8Ptr lastPrevpageptr;
Page8Ptr lcnPageptr;
Page8Ptr lcnCopyPageptr;
Page8Ptr lupPageptr;
- Page8Ptr dlkPageptr;
- Page8Ptr ipaPagePtr;
Page8Ptr priPageptr;
Page8Ptr pwiPageptr;
- Page8Ptr rfpPageptr;
- Page8Ptr relpPageptr;
- Page8Ptr rlopPageptr;
- Page8Ptr slkPageptr;
- Page8Ptr slkCopyPageptr;
- Page8Ptr slkapPageptr;
- Page8Ptr slkapCopyPageptr;
Page8Ptr ciPageidptr;
Page8Ptr gsePageidptr;
Page8Ptr isoPageptr;
@@ -1329,7 +1250,6 @@ private:
Page8Ptr ropPageptr;
Page8Ptr rpPageptr;
Page8Ptr slPageptr;
- Page8Ptr slpPageptr;
Page8Ptr spPageptr;
Uint32 cfirstfreepage;
Uint32 cfreepage;
@@ -1347,7 +1267,6 @@ private:
/* --------------------------------------------------------------------------------- */
Rootfragmentrec *rootfragmentrec;
RootfragmentrecPtr rootfragrecptr;
- RootfragmentrecPtr tmprootfrgptr;
Uint32 crootfragmentsize;
Uint32 cfirstfreerootfrag;
/* --------------------------------------------------------------------------------- */
@@ -1380,7 +1299,6 @@ private:
Uint32 tpriElementptr;
Uint32 tgseElementptr;
Uint32 tgseContainerptr;
- Uint32 tiloIndex;
Uint32 trlHead;
Uint32 trlRelCon;
Uint32 trlNextused;
@@ -1389,20 +1307,12 @@ private:
Uint32 tlupElemIndex;
Uint32 tlupIndex;
Uint32 tlupForward;
- Uint32 tslkPageIndex;
- Uint32 tslkKeyLen;
- Uint32 tslkapKeyLen;
- Uint32 tslkapPageIndex;
- Uint32 tipaArrayPos;
- Uint32 trfpArrayPos;
- Uint32 tdlkLogicalPageIndex;
Uint32 tancNext;
Uint32 tancBufType;
Uint32 tancContainerptr;
Uint32 tancPageindex;
Uint32 tancPageid;
Uint32 tidrResult;
- Uint32 tidrKeyLen;
Uint32 tidrElemhead;
Uint32 tidrForward;
Uint32 tidrPageindex;
@@ -1420,15 +1330,11 @@ private:
Uint32 tdelForward;
Uint32 tiopPageId;
Uint32 tipPageId;
- Uint32 ttupKeyLength;
Uint32 tgeLocked;
Uint32 tgeResult;
Uint32 tgeContainerptr;
Uint32 tgeElementptr;
Uint32 tgeForward;
- Uint32 tslcResult;
- Uint32 tslcPagedir;
- Uint32 tslcPageIndex;
Uint32 tundoElemIndex;
Uint32 texpReceivedBucket;
Uint32 texpDirInd;
@@ -1453,7 +1359,6 @@ private:
Uint32 tscanFlag;
Uint32 theadundoindex;
Uint32 tgflBufType;
- Uint32 thashvalue;
Uint32 tgseIsforward;
Uint32 tsscIsforward;
Uint32 trscIsforward;
@@ -1462,21 +1367,10 @@ private:
Uint32 tisoIsforward;
Uint32 tgseIsLocked;
Uint32 tsscIsLocked;
- Uint32 tkey1;
- Uint32 tkey2;
- Uint32 tkey3;
- Uint32 tkey4;
Uint32 tkeylen;
- Uint32 tkSize;
- Uint32 tlhfragbits;
- Uint32 tlhdirbits;
- Uint32 tlocalkeylen;
- Uint32 tmaxloadfactor;
- Uint32 tminloadfactor;
Uint32 tmp;
Uint32 tmpP;
Uint32 tmpP2;
- Uint32 taslpDirIndex;
Uint32 tmp1;
Uint32 tmp2;
Uint32 tgflPageindex;
@@ -1490,9 +1384,6 @@ private:
Uint32 trsbPageindex;
Uint32 tnciPageindex;
Uint32 tlastPrevconptr;
- Uint32 treqinfo;
- Uint32 transactionid1;
- Uint32 transactionid2;
Uint32 tresult;
Uint32 tslUpdateHeader;
Uint32 tuserptr;
@@ -1505,15 +1396,12 @@ private:
Uint32 tgdiPageindex;
Uint32 tiopIndex;
Uint32 tnciTmp;
- Uint32 tlenKeyinfo;
Uint32 tullIndex;
Uint32 turlIndex;
Uint32 tlfrTmp1;
Uint32 tlfrTmp2;
- Uint32 tudqeIndex;
Uint32 tscanTrid1;
Uint32 tscanTrid2;
- Uint32 taccscanTmp;
Uint16 clastUndoPageIdWritten;
Uint32 cactiveCheckpId;
@@ -1557,10 +1445,13 @@ private:
Uint32 cexcPrevpageindex;
Uint32 cexcPrevforward;
Uint32 clocalkey[32];
+ union {
Uint32 ckeys[2048];
+ Uint64 ckeys_align;
+ };
Uint32 c_errorInsert3000_TableId;
- Uint32 cSrUndoRecords[5];
+ Uint32 cSrUndoRecords[UndoHeader::ZNO_UNDORECORD_TYPES];
};
#endif
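
The anonymous union wrapping ckeys above is an alignment trick: adding a Uint64 member to the same union raises the alignment of the 2048-word Uint32 buffer to 8 bytes, matching the 8-byte alignment that md5_hash's interface comment earlier in this patch assumes for its key buffer. A standalone illustration (standard integer types stand in for the NDB typedefs):

    #include <cstdint>
    #include <cstdio>

    struct KeyBuffer {
      // The union does not change the size; it only raises the alignment of
      // 'words' from alignof(uint32_t) to alignof(uint64_t).
      union {
        uint32_t words[2048];
        uint64_t align_dummy;
      };
    };

    int main()
    {
      std::printf("sizeof  = %zu\n", sizeof(KeyBuffer));    // 8192
      std::printf("alignof = %zu\n", alignof(KeyBuffer));   // typically 8
      KeyBuffer kb;
      // Safe to hand the buffer to code that reads it as 64-bit words.
      const uint64_t* as64 = reinterpret_cast<const uint64_t*>(kb.words);
      (void)as64;
      return 0;
    }
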
diff --git a/ndb/src/kernel/blocks/dbacc/DbaccInit.cpp b/ndb/src/kernel/blocks/dbacc/DbaccInit.cpp
index c98c072cc89..d03f3b55d6a 100644
--- a/ndb/src/kernel/blocks/dbacc/DbaccInit.cpp
+++ b/ndb/src/kernel/blocks/dbacc/DbaccInit.cpp
@@ -133,7 +133,8 @@ void Dbacc::initRecords()
}//Dbacc::initRecords()
Dbacc::Dbacc(const class Configuration & conf):
- SimulatedBlock(DBACC, conf)
+ SimulatedBlock(DBACC, conf),
+ c_tup(0)
{
Uint32 log_page_size= 0;
BLOCK_CONSTRUCTOR(Dbacc);
@@ -197,6 +198,80 @@ Dbacc::Dbacc(const class Configuration & conf):
addRecSignal(GSN_SET_VAR_REQ, &Dbacc::execSET_VAR_REQ);
initData();
+
+#ifdef VM_TRACE
+ {
+ void* tmp[] = { &expDirRangePtr,
+ &gnsDirRangePtr,
+ &newDirRangePtr,
+ &rdDirRangePtr,
+ &nciOverflowrangeptr,
+ &expDirptr,
+ &rdDirptr,
+ &sdDirptr,
+ &nciOverflowDirptr,
+ &fragrecptr,
+ &fsConnectptr,
+ &fsOpptr,
+ &lcpConnectptr,
+ &operationRecPtr,
+ &idrOperationRecPtr,
+ &copyInOperPtr,
+ &copyOperPtr,
+ &mlpqOperPtr,
+ &queOperPtr,
+ &readWriteOpPtr,
+ &iopOverflowRecPtr,
+ &tfoOverflowRecPtr,
+ &porOverflowRecPtr,
+ &priOverflowRecPtr,
+ &rorOverflowRecPtr,
+ &sorOverflowRecPtr,
+ &troOverflowRecPtr,
+ &ancPageptr,
+ &colPageptr,
+ &ccoPageptr,
+ &datapageptr,
+ &delPageptr,
+ &excPageptr,
+ &expPageptr,
+ &gdiPageptr,
+ &gePageptr,
+ &gflPageptr,
+ &idrPageptr,
+ &ilcPageptr,
+ &inpPageptr,
+ &iopPageptr,
+ &lastPageptr,
+ &lastPrevpageptr,
+ &lcnPageptr,
+ &lcnCopyPageptr,
+ &lupPageptr,
+ &priPageptr,
+ &pwiPageptr,
+ &ciPageidptr,
+ &gsePageidptr,
+ &isoPageptr,
+ &nciPageidptr,
+ &rsbPageidptr,
+ &rscPageidptr,
+ &slPageidptr,
+ &sscPageidptr,
+ &rlPageptr,
+ &rlpPageptr,
+ &ropPageptr,
+ &rpPageptr,
+ &slPageptr,
+ &spPageptr,
+ &rootfragrecptr,
+ &scanPtr,
+ &srVersionPtr,
+ &tabptr,
+ &undopageptr
+ };
+ init_globals_list(tmp, sizeof(tmp)/sizeof(tmp[0]));
+ }
+#endif
}//Dbacc::Dbacc()
Dbacc::~Dbacc()
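
In VM_TRACE builds the constructor now hands every block-global Ptr variable to init_globals_list. A plausible purpose, stated here as an assumption rather than taken from this patch, is to let debug builds reset or poison these globals between executed signals so that stale use from a previous signal fails fast. A tiny hedged sketch of that idea:

    #include <cstddef>
    #include <cstring>
    #include <vector>

    // Hypothetical miniature: remember the addresses of per-signal "global"
    // pointer slots so a debug build can poison them between signals.
    class BlockDebugGlobals {
    public:
      void registerGlobals(void** list, std::size_t count) {
        for (std::size_t i = 0; i < count; i++) m_globals.push_back(list[i]);
      }
      void poisonAll() {                     // e.g. after each executed signal
        for (std::size_t i = 0; i < m_globals.size(); i++)
          std::memset(m_globals[i], 0xff, sizeof(void*)); // assumes pointer-sized slots
      }
    private:
      std::vector<void*> m_globals;
    };
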
diff --git a/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp b/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp
index a3880e2df1d..f4b084c42fb 100644
--- a/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp
+++ b/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp
@@ -16,6 +16,7 @@
#define DBACC_C
#include "Dbacc.hpp"
+#include <my_sys.h>
#include <AttributeHeader.hpp>
#include <signaldata/AccFrag.hpp>
@@ -27,6 +28,8 @@
#include <signaldata/FsRemoveReq.hpp>
#include <signaldata/DropTab.hpp>
#include <signaldata/DumpStateOrd.hpp>
+#include <KeyDescriptor.hpp>
+
// TO_DO_RONM is a label for comments on what needs to be improved in future versions
// when more time is given.
@@ -532,7 +535,14 @@ void Dbacc::execNDB_STTOR(Signal* signal)
void Dbacc::execSTTOR(Signal* signal)
{
jamEntry();
- // tstartphase = signal->theData[1];
+ Uint32 tstartphase = signal->theData[1];
+ switch (tstartphase) {
+ case 1:
+ jam();
+ c_tup = (Dbtup*)globalData.getBlock(DBTUP);
+ ndbrequire(c_tup != 0);
+ break;
+ }
tuserblockref = signal->theData[3];
csignalkey = signal->theData[6];
sttorrysignalLab(signal);
@@ -1037,7 +1047,7 @@ void Dbacc::execACCFRAGREQ(Signal* signal)
// config mismatch - do not crash if release compiled
if (tabptr.i >= ctablesize) {
jam();
- addFragRefuse(signal, 800);
+ addFragRefuse(signal, 640);
return;
}
#endif
@@ -1119,8 +1129,8 @@ void Dbacc::execACCFRAGREQ(Signal* signal)
Uint32 userPtr = req->userPtr;
BlockReference retRef = req->userRef;
rootfragrecptr.p->rootState = ACTIVEROOT;
- AccFragConf * const conf = (AccFragConf*)&signal->theData[0];
+ AccFragConf * const conf = (AccFragConf*)&signal->theData[0];
conf->userPtr = userPtr;
conf->rootFragPtr = rootfragrecptr.i;
conf->fragId[0] = rootfragrecptr.p->fragmentid[0];
@@ -1144,6 +1154,7 @@ void Dbacc::addFragRefuse(Signal* signal, Uint32 errorCode)
return;
}//Dbacc::addFragRefuseEarly()
+
void
Dbacc::execDROP_TAB_REQ(Signal* signal){
jamEntry();
@@ -1503,6 +1514,7 @@ void Dbacc::initOpRec(Signal* signal)
operationRecPtr.p->hashValue = signal->theData[3];
operationRecPtr.p->tupkeylen = signal->theData[4];
+ operationRecPtr.p->xfrmtupkeylen = signal->theData[4];
operationRecPtr.p->transId1 = signal->theData[5];
operationRecPtr.p->transId2 = signal->theData[6];
operationRecPtr.p->transactionstate = ACTIVE;
@@ -1541,6 +1553,9 @@ void Dbacc::initOpRec(Signal* signal)
// bit to mark lock operation
operationRecPtr.p->isAccLockReq = (Treqinfo >> 31) & 0x1;
+
+ // undo log is not run via ACCKEYREQ
+ operationRecPtr.p->isUndoLogReq = 0;
}//Dbacc::initOpRec()
/* --------------------------------------------------------------------------------- */
@@ -1614,6 +1629,10 @@ void Dbacc::execACCKEYREQ(Signal* signal)
ndbrequire(operationRecPtr.p->transactionstate == IDLE);
initOpRec(signal);
+ // normalize key if any char attr
+ if (! operationRecPtr.p->isAccLockReq && fragrecptr.p->hasCharAttr)
+ xfrmKeyData(signal);
+
/*---------------------------------------------------------------*/
/* */
/* WE WILL USE THE HASH VALUE TO LOOK UP THE PROPER MEMORY */
@@ -1713,6 +1732,19 @@ void Dbacc::execACCKEYREQ(Signal* signal)
return;
}//Dbacc::execACCKEYREQ()
+void
+Dbacc::xfrmKeyData(Signal* signal)
+{
+ Uint32 table = fragrecptr.p->myTableId;
+ Uint32 dst[MAX_KEY_SIZE_IN_WORDS * MAX_XFRM_MULTIPLY];
+ Uint32 keyPartLen[MAX_ATTRIBUTES_IN_INDEX];
+ Uint32* src = &signal->theData[7];
+ Uint32 len = xfrm_key(table, src, dst, sizeof(dst) >> 2, keyPartLen);
+ ndbrequire(len); // 0 means error
+ memcpy(src, dst, len << 2);
+ operationRecPtr.p->xfrmtupkeylen = len;
+}
+
void Dbacc::accIsLockedLab(Signal* signal)
{
ndbrequire(csystemRestart == ZFALSE);
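
The new hasCharAttr/xfrmKeyData path above normalizes ("xfrm"s) character key attributes before they are hashed and compared, so keys that differ only by collation-equivalent characters map to the same bucket and compare equal. A self-contained toy version of the idea, with a simple lower-casing transform standing in for the real collation-aware xfrm_key:

    #include <cctype>
    #include <string>

    // Toy stand-in for xfrm_key(): produce a normalized form of the key so that
    // hashing and comparison are insensitive to collation-equivalent spellings.
    std::string xfrmKeyDemo(const std::string& key)
    {
      std::string out(key);
      for (std::size_t i = 0; i < out.size(); i++)
        out[i] = static_cast<char>(std::tolower(static_cast<unsigned char>(out[i])));
      return out;
    }

    bool keysEqualDemo(const std::string& a, const std::string& b)
    {
      // Compare the normalized forms, as ACC now does for char attributes.
      return xfrmKeyDemo(a) == xfrmKeyDemo(b);
    }

    // keysEqualDemo("ABC", "abc") is true under a case-insensitive collation.
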
@@ -1786,8 +1818,6 @@ void Dbacc::insertExistElemLab(Signal* signal)
/* --------------------------------------------------------------------------------- */
void Dbacc::insertelementLab(Signal* signal)
{
- Uint32 tinsKeyLen;
-
if (fragrecptr.p->createLcp == ZTRUE) {
if (remainingUndoPages() < ZMIN_UNDO_PAGES_AT_OPERATION) {
jam();
@@ -1805,46 +1835,9 @@ void Dbacc::insertelementLab(Signal* signal)
}//if
}//if
if (fragrecptr.p->keyLength != operationRecPtr.p->tupkeylen) {
+ // historical
ndbrequire(fragrecptr.p->keyLength == 0);
}//if
- if (fragrecptr.p->keyLength != 0) {
- ndbrequire(operationRecPtr.p->tupkeylen <= 8);
- for (Uint32 i = 0; i < operationRecPtr.p->tupkeylen; i++) {
- jam();
- ckeys[i] = signal->theData[i + 7];
- }//for
- tinsKeyLen = operationRecPtr.p->tupkeylen;
- } else {
- jam();
- seizePage(signal);
- if (tresult > ZLIMIT_OF_ERROR) {
- jam();
- acckeyref1Lab(signal, tresult);
- return;
- }//if
- operationRecPtr.p->keyinfoPage = spPageptr.i;
- for (Uint32 i = 0; i < signal->theData[4]; i++) {
- spPageptr.p->word32[i] = signal->theData[i + 7];
- }//for
-
- getLongKeyPage(signal);
- if (tresult > ZLIMIT_OF_ERROR) {
- jam();
- acckeyref1Lab(signal, tresult);
- return;
- }//if
- slkPageptr = glkPageptr;
- slkCopyPageptr.i = operationRecPtr.p->keyinfoPage;
- ptrCheckGuard(slkCopyPageptr, cpagesize, page8);
- tslkKeyLen = operationRecPtr.p->tupkeylen;
- storeLongKeys(signal);
- ckeys[0] = (slkPageptr.p->word32[ZPOS_PAGE_ID] << 10) + tslkPageIndex;
- tinsKeyLen = ZACTIVE_LONG_KEY_LEN;
- rpPageptr.i = operationRecPtr.p->keyinfoPage;
- ptrCheckGuard(rpPageptr, cpagesize, page8);
- releasePage(signal);
- operationRecPtr.p->keyinfoPage = RNIL;
- }//if
signal->theData[0] = operationRecPtr.p->userptr;
Uint32 blockNo = refToBlock(operationRecPtr.p->userblockref);
@@ -1868,7 +1861,6 @@ void Dbacc::insertelementLab(Signal* signal)
idrPageptr = gdiPageptr;
tidrPageindex = tgdiPageindex;
tidrForward = ZTRUE;
- tidrKeyLen = tinsKeyLen;
idrOperationRecPtr = operationRecPtr;
clocalkey[0] = localKey;
operationRecPtr.p->localdata[0] = localKey;
@@ -2314,14 +2306,14 @@ void Dbacc::execACC_COMMITREQ(Signal* signal)
operationRecPtr.p->transactionstate = IDLE;
operationRecPtr.p->operation = ZUNDEFINED_OP;
if(Toperation != ZREAD){
+ rootfragrecptr.i = fragrecptr.p->myroot;
+ ptrCheckGuard(rootfragrecptr, crootfragmentsize, rootfragmentrec);
rootfragrecptr.p->m_commit_count++;
if (Toperation != ZINSERT) {
if (Toperation != ZDELETE) {
return;
} else {
jam();
- rootfragrecptr.i = fragrecptr.p->myroot;
- ptrCheckGuard(rootfragrecptr, crootfragmentsize, rootfragmentrec);
rootfragrecptr.p->noOfElements--;
fragrecptr.p->slack += operationRecPtr.p->insertDeleteLen;
if (fragrecptr.p->slack > fragrecptr.p->slackCheck) {
@@ -2341,8 +2333,6 @@ void Dbacc::execACC_COMMITREQ(Signal* signal)
}//if
} else {
jam(); /* EXPAND PROCESS HANDLING */
- rootfragrecptr.i = fragrecptr.p->myroot;
- ptrCheckGuard(rootfragrecptr, crootfragmentsize, rootfragmentrec);
rootfragrecptr.p->noOfElements++;
fragrecptr.p->slack -= operationRecPtr.p->insertDeleteLen;
if (fragrecptr.p->slack >= (1u << 31)) {
@@ -2460,6 +2450,7 @@ void Dbacc::execACC_LOCKREQ(Signal* signal)
signal->theData[4] = 1; // fake primKeyLen
signal->theData[5] = req->transId1;
signal->theData[6] = req->transId2;
+ // enter local key in place of PK
signal->theData[7] = req->tupAddr;
EXECUTE_DIRECT(DBACC, GSN_ACCKEYREQ, signal, 8);
// translate the result
@@ -2484,26 +2475,6 @@ void Dbacc::execACC_LOCKREQ(Signal* signal)
*sig = *req;
return;
}
- operationRecPtr.i = req->accOpPtr;
- ptrCheckGuard(operationRecPtr, coprecsize, operationrec);
- fragrecptr.i = operationRecPtr.p->fragptr;
- ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec);
- if (fragrecptr.p->keyLength == 0 &&
- // should test some state variable
- operationRecPtr.p->elementPage != RNIL) {
- jam();
- // re-compute long key vars
- Page8Ptr tPageptr;
- tPageptr.i = operationRecPtr.p->elementPage;
- ptrCheckGuard(tPageptr, cpagesize, page8);
- Uint32 tKeyptr =
- operationRecPtr.p->elementPointer +
- operationRecPtr.p->elementIsforward *
- (ZELEM_HEAD_SIZE + fragrecptr.p->localkeylen);
- tslcPageIndex = tPageptr.p->word32[tKeyptr] & 0x3ff;
- tslcPagedir = tPageptr.p->word32[tKeyptr] >> 10;
- searchLongKey(signal, false);
- }
if (lockOp == AccLockReq::Unlock) {
jam();
// do unlock via ACC_COMMITREQ (immediate)
@@ -2857,14 +2828,6 @@ void Dbacc::insertContainer(Signal* signal)
idrPageptr.p->word32[tidrIndex] = clocalkey[tidrInputIndex]; /* INSERTS LOCALKEY */
tidrIndex += tidrForward;
}//for
- guard26 = tidrKeyLen - 1;
- arrGuard(guard26, 8);
- for (tidrInputIndex = 0; tidrInputIndex <= guard26; tidrInputIndex++) {
- dbgWord32(idrPageptr, tidrIndex, ckeys[tidrInputIndex]);
- arrGuard(tidrIndex, 2048);
- idrPageptr.p->word32[tidrIndex] = ckeys[tidrInputIndex]; /* INSERTS TUPLE KEY */
- tidrIndex += tidrForward;
- }//for
tidrContLen = idrPageptr.p->word32[tidrContainerptr] << 6;
tidrContLen = tidrContLen >> 6;
dbgWord32(idrPageptr, tidrContainerptr, (tidrContainerlen << 26) | tidrContLen);
@@ -3198,1215 +3161,6 @@ void Dbacc::seizeRightlist(Signal* signal)
increaselistcont(signal);
}//Dbacc::seizeRightlist()
-
-//---------------------------------------------------------------------------------
-// ALLOC_SPECIFIC_LONG_OVERFLOW_PAGE
-//
-// DESCRIPTION: ALLOCATES A LONG OVER FLOW PAGE AND PUTS IT IN A SPECIFIED
-// DIRINDEX. THIS IS TO SUPPORT AN UNDO_DELETE AFTER AN
-// UNDO_INSERT ON THE SAME LONG KEY IN A LCP.
-// UNDO_INSERT ONLY HAVE A REFERENCE TO THE KEY AND TO MAKE
-// IT POSSIBLE TO DELETE THE KEY, THE REFERENCE MUST BE
-// ACCURATE, WHICH MEANS THE KEY MUST BE SAVED ON THE SAME
-// PLACE IT WAS DELETED FROM.
-//---------------------------------------------------------------------------------
-void Dbacc::allocSpecificLongOverflowPage(Signal* signal)
-{
- DirRangePtr aloDirRangePtr;
- DirectoryarrayPtr aloOverflowDirptr;
-
- if ((cfirstfreepage == RNIL) &&
- (cfreepage >= cpagesize)) {
- jam();
- zpagesize_error("Dbacc::allocSpecificLongOverflowPage");
- tresult = ZPAGESIZE_ERROR;
- return;
- }
-
- if ((cfirstfreedir == RNIL) &&
- (cdirarraysize <= cdirmemory)) {
- jam();
- tresult = ZDIRSIZE_ERROR;
- return;
- }
-
- tmpP = taslpDirIndex;
- aloDirRangePtr.i = fragrecptr.p->overflowdir;
- tmpP2 = tmpP >> 8;
- tmpP = tmpP & 0xff;
- ptrCheckGuard(aloDirRangePtr, cdirrangesize, dirRange);
- arrGuard(tmpP2, 256);
-
- if (aloDirRangePtr.p->dirArray[tmpP2] == RNIL) {
- jam();
- seizeDirectory(signal);
- if (tresult > ZLIMIT_OF_ERROR) {
- jam();
- sendSystemerror(signal);
- return;
- }
- aloDirRangePtr.p->dirArray[tmpP2] = sdDirptr.i;
- } else {
- jam();
- sdDirptr.i = RNIL;
- ptrNull(sdDirptr);
- }
-
- aloOverflowDirptr.i = aloDirRangePtr.p->dirArray[tmpP2];
- ptrCheckGuard(aloOverflowDirptr, cdirarraysize, directoryarray);
- seizePage(signal);
- if (tresult > ZLIMIT_OF_ERROR) {
- jam();
- sendSystemerror(signal);
- return;
- }//if
-
- if (aloOverflowDirptr.p->pagep[tmpP] != RNIL) {
- jam();
- sendSystemerror(signal);
- return;
- }
-
- aloOverflowDirptr.p->pagep[tmpP] = spPageptr.i;
- iloPageptr.p = spPageptr.p;
- iloPageptr.i = spPageptr.i;
- tiloIndex = taslpDirIndex;
- initLongOverpage(signal);
- aslpPageptr.i = spPageptr.i;
- aslpPageptr.p = spPageptr.p;
-}//Dbacc::allocSpecificLongOverflowPage
-
-/* --------------------------------------------------------------------------------- */
-/* ALLOC_LONG_OVERFLOW_PAGE */
-/* DESCRIPTION: */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::allocLongOverflowPage(Signal* signal)
-{
- DirRangePtr aloDirRangePtr;
- DirectoryarrayPtr aloOverflowDirptr;
- OverflowRecordPtr aloOverflowRecPtr;
- Uint32 taloIndex;
-
- if ((cfirstfreepage == RNIL) &&
- (cfreepage >= cpagesize)) {
- jam();
- zpagesize_error("Dbacc::allocLongOverflowPage");
- tresult = ZPAGESIZE_ERROR;
- return;
- }//if
- if ((cfirstfreedir == RNIL) &&
- (cdirarraysize <= cdirmemory)) {
- jam();
- tresult = ZDIRSIZE_ERROR;
- return;
- }//if
- if (fragrecptr.p->firstFreeDirindexRec != RNIL) {
- jam();
- aloOverflowRecPtr.i = fragrecptr.p->firstFreeDirindexRec;
- ptrCheckGuard(aloOverflowRecPtr, coverflowrecsize, overflowRecord);
- troOverflowRecPtr.p = aloOverflowRecPtr.p;
- takeRecOutOfFreeOverdir(signal);
- taloIndex = aloOverflowRecPtr.p->dirindex;
- rorOverflowRecPtr = aloOverflowRecPtr;
- releaseOverflowRec(signal);
- } else {
- jam();
- taloIndex = fragrecptr.p->lastOverIndex;
- fragrecptr.p->lastOverIndex++;
- }//if
- tmpP = taloIndex;
- aloDirRangePtr.i = fragrecptr.p->overflowdir;
- tmpP2 = tmpP >> 8;
- tmpP = tmpP & 0xff;
- ptrCheckGuard(aloDirRangePtr, cdirrangesize, dirRange);
- arrGuard(tmpP2, 256);
- if (aloDirRangePtr.p->dirArray[tmpP2] == RNIL) {
- jam();
- seizeDirectory(signal);
- ndbrequire(tresult <= ZLIMIT_OF_ERROR);
- aloDirRangePtr.p->dirArray[tmpP2] = sdDirptr.i;
- } else {
- jam();
- sdDirptr.i = RNIL;
- ptrNull(sdDirptr);
- }//if
- aloOverflowDirptr.i = aloDirRangePtr.p->dirArray[tmpP2];
- ptrCheckGuard(aloOverflowDirptr, cdirarraysize, directoryarray);
- seizePage(signal);
- ndbrequire(tresult <= ZLIMIT_OF_ERROR);
- aloOverflowDirptr.p->pagep[tmpP] = spPageptr.i;
- iloPageptr = spPageptr;
- tiloIndex = taloIndex;
- initLongOverpage(signal);
- alpPageptr = spPageptr;
- ipaPagePtr = spPageptr;
- tipaArrayPos = 3;
- insertPageArrayList(signal);
-}//Dbacc::allocLongOverflowPage()
-
-/* --------------------------------------------------------------------------------- */
-/* GET_LONG_KEY_PAGE */
-/* DESCRIPTION: SEARCH FOR A FREE OVERFLOW PAGE TO STORE A LONG KEY. */
-/* LONG_KEY_PAGE_PTR IS RETURNED. */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::getLongKeyPage(Signal* signal)
-{
- LongKeyPage *glkPage;
-
- jam();
-
- Uint32 tglkLongIndex = 0;
-
- ndbrequire(operationRecPtr.p->tupkeylen <= ZWORDS_IN_PAGE - ZHEAD_SIZE);
-
- // Do not look in longKeyPageArray[tglkLongIndex] where the pages are to small.
- if(operationRecPtr.p->tupkeylen < 128) {
- jam();
- tglkLongIndex = 0;
- } else {
- jam();
- tglkLongIndex = (operationRecPtr.p->tupkeylen - 128) / 512;
- }//if
-
- // Go through the longKeyPageArray and search for a page.
- for (; tglkLongIndex <= ZMAX_LONG_KEY_ARRAY_INDEX; tglkLongIndex++) {
- jam();
- glkPageptr.i = fragrecptr.p->longKeyPageArray[tglkLongIndex];
-
- if (glkPageptr.i != RNIL) {
- // A page is found.
- jam();
- do {
- ptrCheckGuard(glkPageptr, cpagesize, page8);
- glkPage = (LongKeyPage *) &glkPageptr.p->word32[0];
-
- // Check page if there is enough memory available. Accept only page
- // with free_area > tupkeylen, this leaves at least one word for eventually
- // an increase in the index area.
- if (glkPage->header.freeArea > operationRecPtr.p->tupkeylen){
- // The page found is OK
- jam();
- return;
- } else {
- // Not enough space in page, look in the next page if not RNIL,
- // otherwise continue with for-loop.
- jam();
- glkPageptr.i = glkPage->header.nextPage;
- }
- }//do
- while (glkPageptr.i != RNIL);
- }//if
- }//for
-
- // No page with enough space was available, allocate a new page!
- jam();
- allocLongOverflowPage(signal);
- glkPageptr = alpPageptr;
-}//Dbacc::getLongKeyPage()
-
-/* --------------------------------------------------------------------------------- */
-/* INIT_LONG_OVERPAGE */
-/* INPUT. ILO_PAGEPTR, POINTER TO AN OVERFLOW PAGE RECORD */
-/* DESCRIPTION: CONTAINERS AND FREE LISTS OF THE PAGE, GET INITIALE VALUE */
-/* ACCORDING TO LH3 AND PAGE STRUCTOR DISACRIPTION OF NDBACC BLOCK */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::initLongOverpage(Signal* signal)
-{
- iloPageptr.p->word32[ZPOS_PAGE_ID] = tiloIndex;
- iloPageptr.p->word32[ZPOS_PAGE_TYPE] = ZLONG_PAGE_TYPE << ZPOS_PAGE_TYPE_BIT;
- iloPageptr.p->word32[ZPOS_NO_ELEM_IN_PAGE] = 0;
- iloPageptr.p->word32[ZPOS_OVERFLOWREC] = RNIL;
- iloPageptr.p->word32[ZPOS_FREE_AREA_IN_PAGE] = ZWORDS_IN_PAGE - ZHEAD_SIZE;
- iloPageptr.p->word32[ZPOS_LAST_INDEX] = 0;
- iloPageptr.p->word32[ZPOS_INSERT_INDEX] = ZHEAD_SIZE;
- iloPageptr.p->word32[ZPOS_ARRAY_POS] = ZDEFAULT_LIST;
- iloPageptr.p->word32[ZPOS_NEXT_FREE_INDEX] = 0;
- iloPageptr.p->word32[ZPOS_NEXT_PAGE] = RNIL;
- iloPageptr.p->word32[ZPOS_PREV_PAGE] = RNIL;
- iloPageptr.p->word32[12] = 0;
- iloPageptr.p->word32[13] = 0;
- iloPageptr.p->word32[14] = 0;
- iloPageptr.p->word32[15] = 0;
- // Initialize free indexes
- for (int i = 1; i < (ZWORDS_IN_PAGE - ZHEAD_SIZE); i++)
- iloPageptr.p->word32[ZWORDS_IN_PAGE - i] = i + 1;
-}//Dbacc::initLongOverpage()
-
-//---------------------------------------------------------------------------------
-// STORE_LONG_KEYS_AT_POS
-//
-// INPUT: SLKAP_PAGEPTR
-// SLKAP_COPY_PAGEPTR
-// TSLKAP_KEY_LEN
-// TSLKAP_PAGE_INDEX
-//
-// DESCRIPTION: A LONG ELEMENT IS STORED ON A LONG_KEY_PAGE AT A
-// SPECIFIC POSITION. THIS FUNCTION IS USED BY UNDO_DELETE.
-//---------------------------------------------------------------------------------
-void Dbacc::storeLongKeysAtPos(Signal* signal)
-{
- Uint32 tslkapHighestIndex;
- Uint32 tslkapLastSize;
- Uint32 tslkapInsertIndex;
- Uint32 tslkapIndexIncreaseSize;
- Uint32 tslkapTmp;
-
- LongKeyPage *slkapPage;
-
- jam();
- slkapPage = (LongKeyPage *) &slkapPageptr.p->word32[0];
-
-#ifdef VM_TRACE
- checkIndexInLongKeyPage(slkapPageptr.i, "storeLongKeysAtPos");
-#endif
-
- // if (csystemRestart != ZTRUE) {
- if (cundoLogActive != ZTRUE) {
- //-------------------------------------------------------------
- // This function is only allowed to be called during
- // undolog execution.
- //-------------------------------------------------------------
- jam();
- sendSystemerror(signal);
- return;
- }
-
- if (slkapPage->word32[ZWORDS_IN_PAGE - tslkapPageIndex] >> 16 != 0 ) {
- //-------------------------------------------------------------
- // The index should be empty, we have a serious problem.
- //-------------------------------------------------------------
- jam();
- sendSystemerror(signal);
- return;
- }
-
- //-------------------------------------------------------------
- // Calculate some variables to use later.
- //-------------------------------------------------------------
- tslkapHighestIndex = slkapPage->header.highestIndex;
- tslkapPageIndex > tslkapHighestIndex ?
- tslkapIndexIncreaseSize = tslkapPageIndex - tslkapHighestIndex :
- tslkapIndexIncreaseSize = 0;
-
- slkapPage->header.highestIndex += tslkapIndexIncreaseSize;
-
- if ((slkapPage->header.freeArea - tslkapIndexIncreaseSize)
- < tslkapKeyLen) {
- //-------------------------------------------------------------
- // Not enough area in the page, a serious problem.
- //-------------------------------------------------------------
- jam();
- sendSystemerror(signal);
- return;
- }
-
- //-------------------------------------------------------------
- // Fix the free index list. We might put in a key in the
- // middle of the list, so we must fix the free list and the
- // free index pointers.
- //-------------------------------------------------------------
- slkapPage->header.nextFreeIndex = 0;
-
- for (Uint32 i = tslkapHighestIndex + tslkapIndexIncreaseSize; i > 0; i--) {
- if (i == tslkapPageIndex) {
- // The key index shall not be in the free list.
- continue;
- }
-
- if (slkapPage->word32[ZWORDS_IN_PAGE - i] >> 16 == 0 ) {
- // Go through all empty indexes.
- slkapPage->word32[ZWORDS_IN_PAGE - i] = slkapPage->header.nextFreeIndex;
- arrGuard(i, 2048);
- slkapPage->header.nextFreeIndex = i;
- }
- }
-
- //-------------------------------------------------------------
- // Decrement the free area in page according to the above
- // increase in index size.
- //-------------------------------------------------------------
- slkapPage->header.freeArea -= tslkapIndexIncreaseSize;
-
- tslkapLastSize = ZWORDS_IN_PAGE - slkapPage->header.highestIndex
- - slkapPage->header.insertPos;
-
- //-------------------------------------------------------------
- // Check if we have to reorganize the page.
- //-------------------------------------------------------------
- if (tslkapLastSize >= tslkapKeyLen) {
- jam();
- } else {
- jam();
- relpPageptr.p = slkapPageptr.p;
- reorgLongPage(signal);
- }
-
- //-------------------------------------------------------------
- // Insert the key and update page attributes.
- //-------------------------------------------------------------
- jam();
- // Increase the number of element in the page.
- slkapPage->header.noOfElements++;
- jam();
- // Put in the key reference into the index. The reference
- // consists of key length and insert position.
- arrGuard(ZWORDS_IN_PAGE - tslkapPageIndex, 2048);
- slkapPage->word32[ZWORDS_IN_PAGE - tslkapPageIndex] =
- slkapPage->header.insertPos | (tslkapKeyLen << 16);
- jam();
- // Increase the key insert position.
- tslkapInsertIndex = slkapPage->header.insertPos;
- slkapPage->header.insertPos += tslkapKeyLen;
- jam();
- // Decrease the free area.
- slkapPage->header.freeArea -= tslkapKeyLen;
- jam();
-
- // Update pageArrayPos. insertPageArrayList() called from execACC_OVER_REC
- // needs this value.
- if (slkapPage->header.freeArea < 128) {
- jam();
- slkapPage->header.pageArrayPos = 4;
- } else {
- jam();
- slkapPage->header.pageArrayPos = (slkapPage->header.freeArea - 128) / 512;
- }//if
-
- // Store the actual key at the insert position.
- Uint32 guard27 = tslkapKeyLen - 1;
- arrGuard(guard27 + tslkapInsertIndex, 2048);
- for (tslkapTmp = 0; tslkapTmp <= guard27; tslkapTmp++) {
- jam();
- slkapPage->word32[tslkapTmp + tslkapInsertIndex] = slkapCopyPageptr.p->word32[tslkapTmp];
- }//for
-}//Dbacc::storeLongKeysAtPos
-
-/* --------------------------------------------------------------------------------- */
-/* STORE_LONG_KEYS */
-/* INPUT: SLK_PAGEPTR */
-/* SLK_COPY_PAGEPTR */
-/* TSLK_KEY_LEN */
-/* OUTPUT: TSLK_PAGE_INDEX */
-/* */
-/* DESCRIPTION: A LONG ELEMENT IS STORED ON A LONG_KEY_PAGE. */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::storeLongKeys(Signal* signal)
-{
- Uint32 tslkLastSize;
- Uint32 tslkInsertIndex;
- Uint32 tslkArrayPos;
- Uint32 tslkTmp;
- Uint32 guard27;
- LongKeyPage *slkPage;
-
- jam();
- slkPage = (LongKeyPage *) &slkPageptr.p->word32[0];
-
-#ifdef VM_TRACE
- checkIndexInLongKeyPage(slkPageptr.i, "storeLongKeys1");
-#endif
-
- // Accept only page with free_area > tupkeylen, this leaves at least
- // one word for eventually an increase in the index area.
- ndbrequire(slkPage->header.freeArea > tslkKeyLen);
-
- dbgWord32(slkPageptr, ZPOS_LAST_INDEX, slkPage->header.highestIndex);
- dbgWord32(slkPageptr, ZPOS_INSERT_INDEX, slkPage->header.insertPos);
-
- tslkLastSize = ZWORDS_IN_PAGE - slkPage->header.highestIndex - slkPage->header.insertPos;
-
- if (tslkLastSize > operationRecPtr.p->tupkeylen) {
- // WE DO NOT NEED TO REORGANIZE THE PAGE TO INSERT THE NEW KEY. IT FITS INTO THE
- // SIZE REMAINING AT THE END.
- jam();
- } else {
- // THE KEY FITS INTO THE PAGE BUT ONLY AFTER REORGANISING THE PAGE.
- jam();
- relpPageptr.p = slkPageptr.p;
- reorgLongPage(signal);
- }//if
-
- if (slkPage->header.nextFreeIndex == 0) {
- jam();
- /* --------------------------------------------------------------------------------- */
- /* THE PAGE INDEX HAS NO EMPTY SLOTS. WE MUST EXTEND THE PAGE INDEX BY ONE NEW SLOT.*/
- /* --------------------------------------------------------------------------------- */
- tslkPageIndex = slkPage->header.highestIndex + 1;
- } else {
- jam();
- tslkPageIndex = slkPage->header.nextFreeIndex;
- }//if
-
- if (fragrecptr.p->createLcp == ZTRUE) {
- jam();
- /* --------------------------------------------------------------------------------- */
- /* ON LONG PAGES WE USE A PHYSIOLOGICAL LOGGING SCHEME. THIS MEANS THAT WE ONLY NEED*/
- /* TO SPECIFY WHICH INDEX TO DELETE IN ORDER TO UNDO THE CHANGES WE DO. THE */
- /* POSSIBLE REORGANISATION DO NOT CHANGE THE LOGICAL LAYOUT OF THE PAGE. */
- /* --------------------------------------------------------------------------------- */
- datapageptr.p = slkPageptr.p;
- cundoElemIndex = tslkPageIndex;
- cundoinfolength = 0;
- undoWritingProcess(signal);
- }//if
-
- if (slkPage->header.nextFreeIndex == 0) {
- jam();
- /* --------------------------------------------------------------------------------- */
- /* THE PAGE INDEX HAS NO EMPTY SLOTS. WE MUST EXTEND THE PAGE INDEX BY ONE NEW SLOT.*/
- /* --------------------------------------------------------------------------------- */
- dbgWord32(slkPageptr, ZPOS_LAST_INDEX, slkPage->header.highestIndex + 1);
- slkPage->header.highestIndex++;
- ndbrequire(slkPage->header.insertPos < (ZWORDS_IN_PAGE - slkPage->header.highestIndex));
- // Reset index. We have already checked that we can increase "highestIndex" value
- // without overwriting the data part.
- slkPage->word32[ZWORDS_IN_PAGE - slkPage->header.highestIndex] = 0;
- dbgWord32(slkPageptr, ZPOS_FREE_AREA_IN_PAGE, slkPage->header.freeArea - 1);
- slkPage->header.freeArea--;
- } else {
- jam();
- dbgWord32(slkPageptr, ZPOS_NEXT_FREE_INDEX, slkPage->word32[ZWORDS_IN_PAGE - tslkPageIndex]);
- arrGuard(ZWORDS_IN_PAGE - tslkPageIndex, 2048);
- arrGuard(slkPage->word32[ZWORDS_IN_PAGE - tslkPageIndex], 2048);
-
- slkPage->header.nextFreeIndex = slkPage->word32[ZWORDS_IN_PAGE - tslkPageIndex];
- if(slkPage->header.nextFreeIndex > slkPage->header.highestIndex){
- slkPage->header.nextFreeIndex = 0;
- dbgWord32(slkPageptr, ZPOS_NEXT_FREE_INDEX, slkPage->header.nextFreeIndex);
- }
- }//if
-
- dbgWord32(slkPageptr, ZWORDS_IN_PAGE - tslkPageIndex, tslkKeyLen);
- dbgWord32(slkPageptr, ZWORDS_IN_PAGE - tslkPageIndex, slkPage->header.insertPos);
- arrGuard(ZWORDS_IN_PAGE - tslkPageIndex, 2048);
- slkPage->word32[ZWORDS_IN_PAGE - tslkPageIndex] =
- slkPage->header.insertPos | (tslkKeyLen << 16);
-
- dbgWord32(slkPageptr, ZPOS_INSERT_INDEX, slkPage->header.insertPos);
- tslkInsertIndex = slkPage->header.insertPos;
- slkPage->header.insertPos += tslkKeyLen;
-
- dbgWord32(slkPageptr, ZPOS_FREE_AREA_IN_PAGE, slkPage->header.freeArea - tslkKeyLen);
- slkPage->header.freeArea = slkPage->header.freeArea - tslkKeyLen;
- if (slkPage->header.freeArea < 128) {
- jam();
- tslkArrayPos = 4;
- } else {
- jam();
- tslkArrayPos = (slkPage->header.freeArea - 128) / 512;
- }//if
-
- if (tslkArrayPos != slkPage->header.pageArrayPos) {
- jam();
- if (cundoLogActive != ZTRUE) {
- jam();
- /* --------------------------------------------------------------------------------- */
- /* WE ONLY HANDLE THE LISTS WHEN WE ARE NOT IN A SYSTEM RESTART. */
- /* --------------------------------------------------------------------------------- */
- rfpPageptr = slkPageptr;
- trfpArrayPos = slkPage->header.pageArrayPos;
- removeFromPageArrayList(signal);
- ipaPagePtr = slkPageptr;
- tipaArrayPos = tslkArrayPos;
- slkPage->header.pageArrayPos = tipaArrayPos;
- if (tslkArrayPos != 4) {
- jam();
- /* --------------------------------------------------------------------------------- */
- /* THE PAGE WILL STILL BE ON ONE OF THE FREE LISTS SINCE AT LEAST 128 * 4 */
- /* BYTES OF FREE SPACE REMAINS ON THE PAGE. */
- /* --------------------------------------------------------------------------------- */
- insertPageArrayList(signal);
- }//if
- } else {
- // This should never happen. Should use storeLongKeysAtPos() instead when executing
- // undolog.
- ndbrequire(false);
- }
- }//if
- /* --------------------------------------------------------------------------------- */
- /* INCREASE THE NUMBER OF ELEMENTS IN THE PAGE. */
- /* --------------------------------------------------------------------------------- */
- dbgWord32(slkPageptr, ZPOS_NO_ELEM_IN_PAGE, slkPage->header.noOfElements + 1);
- slkPage->header.noOfElements++;
-
- guard27 = tslkKeyLen - 1;
- arrGuard(guard27 + tslkInsertIndex, 2048);
- for (tslkTmp = 0; tslkTmp <= guard27; tslkTmp++) {
- dbgWord32(slkPageptr, tslkTmp + tslkInsertIndex, slkCopyPageptr.p->word32[tslkTmp]);
- slkPage->word32[tslkTmp + tslkInsertIndex] = slkCopyPageptr.p->word32[tslkTmp];
- }//for
-
- // Used by abortoperation() in case of an abort.
- operationRecPtr.p->longPagePtr = slkPageptr.i;
-
- // This is for an eventual LCP start in the middle of this locked operation.
- operationRecPtr.p->longKeyPageIndex = tslkPageIndex;
-
-#ifdef VM_TRACE
- if (cundoLogActive != ZTRUE) checkPageArrayList(signal, "storeLongKeys");
- checkIndexInLongKeyPage(slkPageptr.i, "storeLongKeys2");
-#endif
-
-}//Dbacc::storeLongKeys()
-
-/* --------------------------------------------------------------------------------- */
-/* REORGANIZE THE PAGE BY COPYING IT TEMPORARILY TO A NEW AREA AND THEN SIMPLY */
-/* PUTTING THE OBJECTS BACK ON THE PAGE IN THE SAME ORDER AS THEY ARE PLACED IN THE */
-/* INDEX. */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::reorgLongPage(Signal* signal)
-{
- Uint32 indexStartPos;
- Uint32 pagePos;
- Uint32 pagePos2;
- Uint32 indexNo;
- Uint32 insertPos;
- Uint32 indexValue;
- Uint32 keyLength;
- Uint32 keyPos;
- Uint32 keyEndPos;
- LongKeyPage *reOrgPage;
-
- ptrGuard(relpPageptr);
- reOrgPage = (LongKeyPage *) &relpPageptr.p->word32[0];
-
- dbgWord32(relpPageptr, ZPOS_LAST_INDEX, reOrgPage->header.highestIndex);
- indexStartPos = ZWORDS_IN_PAGE - reOrgPage->header.highestIndex;
-
- // Copy key data part of page to a temporary page.
- for (pagePos = ZHEAD_SIZE; pagePos < indexStartPos; pagePos++) {
- jam();
- arrGuard(pagePos, 2048);
- ckeys[pagePos] = reOrgPage->word32[pagePos];
- }//for
-
- insertPos = ZHEAD_SIZE;
-
- // Walk through all the indexes.
- for (indexNo = 1; indexNo <= reOrgPage->header.highestIndex; indexNo++) {
- jam();
- arrGuard(ZWORDS_IN_PAGE - indexNo, 2048);
- dbgWord32(relpPageptr, ZWORDS_IN_PAGE - indexNo, reOrgPage->word32[ZWORDS_IN_PAGE - indexNo]);
- indexValue = reOrgPage->word32[ZWORDS_IN_PAGE - indexNo];
-
- if ((indexValue >> 16) != 0) {
- // The index contains a reference to a key.
- jam();
- keyPos = indexValue & 0xffff;
- keyLength = indexValue >> 16;
- dbgWord32(relpPageptr, ZWORDS_IN_PAGE - indexNo, insertPos + (keyLength << 16));
- arrGuard(ZWORDS_IN_PAGE - indexNo, 2048);
-
- // Refresh the index data with the new key start position in the data part.
- reOrgPage->word32[ZWORDS_IN_PAGE - indexNo] = insertPos + (keyLength << 16);
- keyEndPos = keyPos + keyLength;
- arrGuard(keyEndPos, 2048);
-
- // Copy the key from the temporary page
- // to the insert position at original page.
- for (pagePos2 = keyPos; pagePos2 < keyEndPos; pagePos2++, insertPos++) {
- jam();
- dbgWord32(relpPageptr, insertPos, ckeys[pagePos2]);
- arrGuard(insertPos, 2048);
- arrGuard(pagePos2, 2048);
- reOrgPage->word32[insertPos] = ckeys[pagePos2];
- }//for
- }//if
- }//for
- dbgWord32(relpPageptr, ZPOS_INSERT_INDEX, insertPos);
- reOrgPage->header.insertPos = insertPos;
-}//Dbacc::reorgLongPage()
-
-
-/* --------------------------------------------------------------------------------- */
-/* DELETE_LONG_KEY */
-/* INPUT: DLK_PAGEPTR PAGE POINTER OF DELETED KEY OBJECT */
-/* TDLK_LOGICAL_PAGE_INDEX LOGICAL PAGE INDEX OF DELETED KEY OBJECT */
-/* */
-/* DESCRIPTION: DELETE AN ELEMENT OF A LONG_KEY_PAGE. */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::deleteLongKey(Signal* signal)
-{
- Uint32 tdlkLastIndex;
- Uint32 tdlkNextPosition;
- Uint32 tdlkFreeArea;
- Uint32 tdlkArrayPos;
- Uint32 tdlkOldArrayPos;
- LongKeyPage *dlkPage;
-
- jam();
- dlkPage = (LongKeyPage *) &dlkPageptr.p->word32[0];
-
-#ifdef VM_TRACE
- checkIndexInLongKeyPage(dlkPageptr.i, "deleteLongKey1");
-#endif
-
- dbgWord32(dlkPageptr, ZWORDS_IN_PAGE - tdlkLogicalPageIndex, dlkPage->word32[ZWORDS_IN_PAGE - tdlkLogicalPageIndex] >> 16);
- dbgWord32(dlkPageptr, ZWORDS_IN_PAGE - tdlkLogicalPageIndex, dlkPage->word32[ZWORDS_IN_PAGE - tdlkLogicalPageIndex] & 0xffff);
- arrGuard(ZWORDS_IN_PAGE - tdlkLogicalPageIndex, 2048);
-
- const Uint32 tdlkIndexValue = dlkPage->word32[ZWORDS_IN_PAGE - tdlkLogicalPageIndex];
- const Uint32 tdlkKeyLen = tdlkIndexValue >> 16;
- const Uint32 tdlkPhysPageIndex = tdlkIndexValue & 0xffff;
-
- if (fragrecptr.p->createLcp == ZTRUE) {
- jam();
- /* --------------------------------------------------------------------------------- */
- /* WE LOG THE DELETE LONG KEY BY LOGGING THE DELETED KEY AND ITS LOGICAL INDEX.*/
- /* --------------------------------------------------------------------------------- */
- datapageptr.p = dlkPageptr.p;
- cundoElemIndex = tdlkLogicalPageIndex;
- cundoinfolength = tdlkKeyLen;
- undoWritingProcess(signal);
- }//if
- /* --------------------------------------------------------------------------------- */
- /* DECREASE THE NUMBER OF ELEMENTS IN THE PAGE. */
- /* --------------------------------------------------------------------------------- */
- dbgWord32(dlkPageptr, ZPOS_NO_ELEM_IN_PAGE, dlkPage->header.noOfElements - 1);
- dlkPage->header.noOfElements--;
-
- arrGuard(dlkPage->header.noOfElements, ZMAX_NO_OF_LONGKEYS_IN_PAGE);
-
- /* --------------------------------------------------------------------------------- */
- /* INCREASE THE FREE AREA IN THE PAGE. */
- /* --------------------------------------------------------------------------------- */
- dbgWord32(dlkPageptr, ZPOS_FREE_AREA_IN_PAGE, dlkPage->header.freeArea + tdlkKeyLen);
- dbgWord32(dlkPageptr, ZPOS_LAST_INDEX, dlkPage->header.highestIndex);
-
- dlkPage->header.freeArea += tdlkKeyLen;
-
- if (dlkPage->header.noOfElements == 0) {
- jam();
- /* --------------------------------------------------------------------------------- */
- /* THE PAGE IS NOW EMPTY, WE CAN RELEASE IT. */
- /* --------------------------------------------------------------------------------- */
- if (dlkPage->header.freeArea !=
- (ZWORDS_IN_PAGE - ZHEAD_SIZE - dlkPage->header.highestIndex )) {
- jam();
- /* --------------------------------------------------------------------------------- */
- /* SOME AREA IN THE PAGE IS STILL LEFT BUT NO ELEMENTS, INCONSISTENT */
- /* --------------------------------------------------------------------------------- */
- sendSystemerror(signal);
- }//if
- /* --------------------------------------------------------------------------------- */
-/* WE REMOVE THE PAGE FROM THE LIST OF FREE LONG PAGES. THERE IS NO RISK THAT IT */
-/* DID NOT BELONG TO ANY LIST, SINCE KEYS THAT LARGE ARE NOT ALLOWED. */
- /* --------------------------------------------------------------------------------- */
-
- if (cundoLogActive != ZTRUE) {
- jam();
- /* --------------------------------------------------------------------------------- */
- /* WHEN DELETING KEYS DURING SYSTEM RESTART WE NEED NOT UPDATE THE LISTS. */
- /* --------------------------------------------------------------------------------- */
- // REMOVEFROMLIST is done by releaseLongPage(). EDTJAMO.
- // rfpPageptr = dlkPageptr;
- // trfpArrayPos = dlkPage->header.pageArrayPos;
- // removeFromPageArrayList(signal, "deleteLongKey");
- rlopPageptr = dlkPageptr;
- releaseLongPage(signal);
- return;
- } else {
- // Must remove the reference to the removed key, otherwise it is left in the index. EDTJAMO.
- arrGuard(ZWORDS_IN_PAGE - tdlkLogicalPageIndex, 2048);
- arrGuard(tdlkLogicalPageIndex, 2048);
-
- tdlkNextPosition = dlkPage->header.nextFreeIndex;
- dlkPage->header.nextFreeIndex = tdlkLogicalPageIndex;
- dbgWord32(dlkPageptr, ZWORDS_IN_PAGE - tdlkLogicalPageIndex, tdlkNextPosition);
- dlkPage->word32[ZWORDS_IN_PAGE - tdlkLogicalPageIndex] = tdlkNextPosition;
- }
- } else {
- /* --------------------------------------------------------------------------------- */
- /* THE PAGE IS NOT EMPTY SO WE WILL REMOVE THE KEY OBJECT AND UPDATE THE */
- /* HEADER INFORMATION AND PLACE THE PAGE IN THE PROPER PAGE LIST. */
- /* --------------------------------------------------------------------------------- */
- tdlkLastIndex = dlkPage->header.highestIndex;
- arrGuard(ZWORDS_IN_PAGE - tdlkLastIndex, 2048);
- if (tdlkLastIndex == tdlkLogicalPageIndex) {
- jam();
- /* --------------------------------------------------------------------------------- */
- /* WE DELETE THE LAST PAGE INDEX SO WE NEED TO UPDATE THE VALUE. WE MOVE */
- /* BACKWARDS UNTIL WE EITHER FIND A USED INDEX OR THAT WE COME TO INDEX ZERO. */
- /* --------------------------------------------------------------------------------- */
- tdlkLastIndex--;
- while( (tdlkLastIndex > 1) &&
- (dlkPage->word32[ZWORDS_IN_PAGE - tdlkLastIndex] >> 16) == 0 ) {
- jam();
- tdlkLastIndex--;
- }
- //-----------------------------------------------------
- // Reorganize the rest of the index. Set up the free
- // list and the free index.
- //-----------------------------------------------------
- UintR dlkTmp = tdlkLastIndex;
- dlkPage->header.nextFreeIndex = 0;
- while( dlkTmp > 0) {
- if ( (dlkPage->word32[ZWORDS_IN_PAGE - dlkTmp] >> 16) == 0 ) {
- jam();
- dlkPage->word32[ZWORDS_IN_PAGE - dlkTmp] = dlkPage->header.nextFreeIndex;
- arrGuard(dlkTmp, 2048);
- dlkPage->header.nextFreeIndex = dlkTmp;
- }
- dlkTmp--;
- }
- //-----------------------------------------------------
- // Update free area in page and last index.
- //-----------------------------------------------------
- dbgWord32(dlkPageptr, ZPOS_LAST_INDEX, tdlkLastIndex);
- dlkPage->header.highestIndex = tdlkLastIndex;
- dlkPage->header.freeArea = tdlkLogicalPageIndex +
- dlkPage->header.freeArea - tdlkLastIndex;
- tdlkNextPosition = 0;
- } else {
- if (dlkPage->header.highestIndex > tdlkLogicalPageIndex) {
- jam();
- tdlkNextPosition = dlkPage->header.nextFreeIndex;
- dbgWord32(dlkPageptr, ZPOS_NEXT_FREE_INDEX, tdlkLogicalPageIndex);
- arrGuard(tdlkLogicalPageIndex, 2048);
- dlkPage->header.nextFreeIndex = tdlkLogicalPageIndex;
- } else {
- jam();
- /* --------------------------------------------------------------------------------- */
- /* LOGICAL PAGE INDEX LARGER THAN LARGEST INDEX, INCONSISTENT. */
- /* --------------------------------------------------------------------------------- */
- sendSystemerror(signal);
- return; // Just to keep compiler happy
- }//if
- }//if
- /* --------------------------------------------------------------------------------- */
- /* WE INSERT ZERO INTO THE LENGTH PART TO INDICATE A FREE INDEX POSITION. */
-/* WE INSERT A POINTER TO THE NEXT FREE INDEX SO AS TO PUT IT INTO A FREE */
- /* LIST OF INDEX POSITIONS. WE ONLY DO SO IF IT WAS NOT THE LAST INDEX. */
- /* --------------------------------------------------------------------------------- */
- dbgWord32(dlkPageptr, ZWORDS_IN_PAGE - tdlkLogicalPageIndex, tdlkNextPosition);
- arrGuard(ZWORDS_IN_PAGE - tdlkLogicalPageIndex, 2048);
- dlkPage->word32[ZWORDS_IN_PAGE - tdlkLogicalPageIndex] = tdlkNextPosition;
- if (dlkPage->header.insertPos == (tdlkPhysPageIndex + tdlkKeyLen)) {
- jam();
- /* --------------------------------------------------------------------------------- */
- /* THIS ENTRY IS THE LAST ON THE PAGE SO WE WILL UPDATE THE INSERT INDEX */
- /* --------------------------------------------------------------------------------- */
- dbgWord32(dlkPageptr, ZPOS_INSERT_INDEX, tdlkPhysPageIndex);
- dlkPage->header.insertPos = tdlkPhysPageIndex;
- }//if
- }//if
- dbgWord32(dlkPageptr, ZPOS_FREE_AREA_IN_PAGE, dlkPage->header.freeArea);
- tdlkFreeArea = dlkPage->header.freeArea;
- ndbrequire(tdlkFreeArea <= (ZWORDS_IN_PAGE - ZHEAD_SIZE));
- if (tdlkFreeArea < 128) {
- jam();
- /* --------------------------------------------------------------------------------- */
- /* FREE AREA IS STILL LESS THAN 128 WORDS SO IT SHOULD NOT BE PLACED IN ANY OF THE */
- /* FREE LISTS. */
- /* --------------------------------------------------------------------------------- */
- dbgWord32(dlkPageptr, ZPOS_ARRAY_POS, dlkPage->header.pageArrayPos);
- ndbrequire(dlkPage->header.pageArrayPos == 4);
- } else {
- jam();
- // Calculate a possibly new array position.
- dbgWord32(dlkPageptr, 0, (tdlkFreeArea - 128) / 512);
- tdlkArrayPos = (tdlkFreeArea - 128) / 512;
-
- if (cundoLogActive != ZTRUE) {
- jam();
- /* --------------------------------------------------------------------------------- */
- /* WHEN DELETING KEYS DURING SYSTEM RESTART WE NEED NOT UPDATE THE LISTS. */
- /* --------------------------------------------------------------------------------- */
- dbgWord32(dlkPageptr, ZPOS_ARRAY_POS, dlkPage->header.pageArrayPos);
- tdlkOldArrayPos = dlkPage->header.pageArrayPos;
- if (tdlkArrayPos != tdlkOldArrayPos) {
- jam();
- /* --------------------------------------------------------------------------------- */
- /* THE NEW MEMORY AREA HAS ENABLED THE PAGE TO MOVE TO A NEW FREE PAGE LIST */
- /* --------------------------------------------------------------------------------- */
- rfpPageptr = dlkPageptr;
- trfpArrayPos = tdlkOldArrayPos;
- if (tdlkOldArrayPos != 4) {
- jam();
- /* --------------------------------------------------------------------------------- */
- /* THERE WAS A FREE PAGE LIST TO REMOVE THE PAGE FROM. IF FREE SPACE IS LESS THAN */
- /* 128 BYTES THEN IT IS NOT ON ANY FREE LIST. */
- /* --------------------------------------------------------------------------------- */
- removeFromPageArrayList(signal);
- }//if
- dlkPage->header.pageArrayPos = tdlkArrayPos;
- ipaPagePtr = dlkPageptr;
- tipaArrayPos = tdlkArrayPos;
- insertPageArrayList(signal);
- }//if
- } else {
- // Update pageArrayPos. We are in an SR executing the undo log; insertPageArrayList(),
- // called from execACC_OVER_REC, needs this value later.
- dlkPage->header.pageArrayPos = tdlkArrayPos;
- }
- }//if
-#ifdef VM_TRACE
- if (cundoLogActive != ZTRUE) checkPageArrayList(signal, "deleteLongKey");
- checkIndexInLongKeyPage(dlkPageptr.i, "deleteLongKey2");
-#endif
-}//Dbacc::deleteLongKey()
-
-
-void Dbacc::checkIndexInLongKeyPage(Uint32 pageId, const char *calledFrom) {
- Page8Ptr pagePtr;
- LongKeyPage *page;
- Uint32 indexNo;
- Uint32 indexValue;
- Uint32 keyLength;
- Uint32 keyPos;
-
- pagePtr.i = pageId;
- ptrCheckGuard(pagePtr, cpagesize, page8);
- page = (LongKeyPage *) &pagePtr.p->word32[0];
-
- // Check the header variables.
- if (page->header.nextFreeIndex > 2048 ||
- page->header.highestIndex > 2048 ||
- page->header.insertPos > 2048 ||
- page->header.freeArea > 2048 ||
- page->header.noOfElements > 225) {
- ndbout << " ERROR in checkIndexInLongKeyPage, called from " << calledFrom << endl
- << " pagePtr.i = " << pageId << endl;
- printoutInfoAndShutdown(page);
- }
-
- // Walk through all the indexes.
- for (indexNo = 1; indexNo <= page->header.highestIndex; indexNo++) {
- jam();
- indexValue = page->word32[ZWORDS_IN_PAGE - indexNo];
-
- if ((indexValue >> 16) == 0) {
- ; // A key length of 0 means there is no key reference at this position in the index.
- } else {
- // The index contains a reference to a key.
- jam();
- keyPos = indexValue & 0xffff;
- keyLength = indexValue >> 16;
- if (keyPos >= ZWORDS_IN_PAGE || keyLength >= ZWORDS_IN_PAGE) {
- jam();
- ndbout << " ERROR in checkIndexInLongKeyPage, called from " << calledFrom << endl
- << " keyPos = " << keyPos << endl
- << " keyLength = " << keyLength << endl
- << " page->header.noOfElements = " << page->header.noOfElements << endl
- << " page->header.freeArea = " << page->header.freeArea << endl
- << " indexNo = " << indexNo << endl
- << " page->header.highestIndex = " << page->header.highestIndex << endl;
- ndbrequire(false);
- }
- }
- }
-}//Dbacc::checkIndexInLongKeyPage
-
-
-/* --------------------------------------------------------------------------------- */
-/* REMOVE A PAGE FROM THE PAGE ARRAY LIST. */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::removeFromPageArrayList(Signal* signal)
-{
- Page8Ptr rfpPrevPageptr;
- Page8Ptr rfpNextPageptr;
- LongKeyPage *page;
- LongKeyPage *prevPage;
- LongKeyPage *nextPage;
-
- jam();
-
-#ifdef VM_TRACE
- checkPageB4Remove(rfpPageptr.i, "removeFromPageArrayList");
-#endif
-
- page = (LongKeyPage *) &rfpPageptr.p->word32[0];
-
- if (page->header.prevPage == RNIL) {
- jam();
- arrGuard(trfpArrayPos, 4);
- // This page was first in list, remove reference
- // to this page from the start of the list.
- ndbrequire(fragrecptr.p->longKeyPageArray[trfpArrayPos] == rfpPageptr.i);
- fragrecptr.p->longKeyPageArray[trfpArrayPos] = page->header.nextPage;
- } else {
- jam();
- rfpPrevPageptr.i = page->header.prevPage;
- ptrCheckGuard(rfpPrevPageptr, cpagesize, page8);
- prevPage = (LongKeyPage *) &rfpPrevPageptr.p->word32[0];
- // This page wasn't first in list, remove reference
- // to this page from the previous page.
- ndbrequire(prevPage->header.nextPage == rfpPageptr.i);
- prevPage->header.nextPage = page->header.nextPage;
- }//if
-
- if (page->header.nextPage != RNIL) {
- jam();
- rfpNextPageptr.i = page->header.nextPage;
- ptrCheckGuard(rfpNextPageptr, cpagesize, page8);
- nextPage = (LongKeyPage *) &rfpNextPageptr.p->word32[0];
- // This page wasn't last in list, remove reference
- // to this page from the next page.
- ndbrequire(nextPage->header.prevPage == rfpPageptr.i);
- nextPage->header.prevPage = page->header.prevPage;
- // Remove reference to next page in list.
- page->header.nextPage = RNIL;
- }//if
-
- // The previous-page link was still needed above, so it could not be cleared until now.
- // Remove reference to previous page in list.
- page->header.prevPage = RNIL;
-
-#ifdef VM_TRACE
- checkPageArrayList(signal, "removeFromPageArrayList");
-#endif
-}//Dbacc::removeFromPageArrayList()
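The unlink above is the standard intrusive doubly-linked-list removal: fix the list head or the predecessor, then the successor, then clear the removed node's own links. A generic sketch under hypothetical types (the real structures are the LongKeyPage header links and fragrecptr.p->longKeyPageArray):

#include <cassert>
#include <cstdint>

static const uint32_t NIL = 0xffffffff;   // stands in for RNIL

struct PageNode { uint32_t prev, next; }; // hypothetical page-header links

// Unlink pool[i] from the bucket list whose head is heads[bucket].
void unlinkPage(PageNode* pool, uint32_t* heads, uint32_t bucket, uint32_t i) {
  PageNode& n = pool[i];
  if (n.prev == NIL) {            // first in list: move the head pointer
    assert(heads[bucket] == i);
    heads[bucket] = n.next;
  } else {
    pool[n.prev].next = n.next;   // bypass from the left
  }
  if (n.next != NIL) {
    pool[n.next].prev = n.prev;   // bypass from the right
  }
  n.prev = n.next = NIL;          // fully detach the node
}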
-
-/* --------------------------------------------------------------------------------- */
-/* INSERT A PAGE INTO THE PAGE ARRAY LIST. */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::insertPageArrayList(Signal* signal)
-{
- Page8Ptr ipaNextPagePtr;
- LongKeyPage *page;
- LongKeyPage *nextPage;
-
- jam();
-
-#ifdef VM_TRACE
- checkPageArrayList(signal, "insertPageArrayList1");
- checkPageB4Insert(ipaPagePtr.i, "insertPageArrayList1");
-#endif
-
- page = (LongKeyPage *) &ipaPagePtr.p->word32[0];
-
- arrGuard(tipaArrayPos, 4);
-
- if (fragrecptr.p->longKeyPageArray[tipaArrayPos] != RNIL) {
- jam();
- ipaNextPagePtr.i = fragrecptr.p->longKeyPageArray[tipaArrayPos];
- ptrCheckGuard(ipaNextPagePtr, cpagesize, page8);
- nextPage = (LongKeyPage *) &ipaNextPagePtr.p->word32[0];
-
- // A page already existed in the list, add reference
- // to this page in the next page.
- nextPage->header.prevPage = ipaPagePtr.i;
- }//if
-
- page->header.prevPage = RNIL;
- page->header.nextPage = fragrecptr.p->longKeyPageArray[tipaArrayPos];
- page->header.pageArrayPos = tipaArrayPos;
-
- fragrecptr.p->longKeyPageArray[tipaArrayPos] = ipaPagePtr.i;
-
-#ifdef VM_TRACE
- checkPageArrayList(signal, "insertPageArrayList2");
-#endif
-}//Dbacc::insertPageArrayList()
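The list membership rules used by insertPageArrayList() and deleteLongKey() above reduce to one calculation: a page with fewer than 128 free words is kept off every list (array position 4), and otherwise it belongs to one of four 512-word-wide buckets. A one-function restatement of that rule, taken directly from the checks above:

#include <cstdint>

// Bucket selection for long-key pages by free area, in 32-bit words.
// 0..3 index the per-fragment longKeyPageArray; 4 means the page is kept
// off all free-page lists because it has too little free space.
static inline uint32_t pageArrayPosFor(uint32_t freeAreaWords) {
  if (freeAreaWords < 128)
    return 4;
  return (freeAreaWords - 128) / 512;   // 512-word-wide buckets, 0..3
}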
-
-// --------------------------------------------------------------------------------- */
-// Check the page array list.
-// --------------------------------------------------------------------------------- */
-void Dbacc::checkPageArrayList(Signal* signal, const char *calledFrom)
-{
- Page8Ptr pagePtr;
- Uint32 pageArrayIndex;
- LongKeyPage *page;
- Uint32 prevPage;
-
- // Go through the longKeyPageArray and search for a page.
- for (pageArrayIndex = 0; pageArrayIndex <= ZMAX_LONG_KEY_ARRAY_INDEX; pageArrayIndex++) {
- jam();
- pagePtr.i = fragrecptr.p->longKeyPageArray[pageArrayIndex];
- prevPage = RNIL;
-
- if (pagePtr.i != RNIL) {
- // A page is found.
- jam();
- do {
- ptrCheckGuard(pagePtr, cpagesize, page8);
- page = (LongKeyPage *) &pagePtr.p->word32[0];
-
- if ((page->header.freeArea >= 128) &&
- (((page->header.freeArea - 128) / 512) == page->header.pageArrayPos) &&
- (pageArrayIndex == page->header.pageArrayPos) &&
- (page->header.prevPage == prevPage)) {
- // The page found is OK, test next page.
- prevPage = pagePtr.i;
- pagePtr.i = page->header.nextPage;
- jam();
- } else {
- jam();
- ndbout << " ERROR in checkPageArrayList, called from " << calledFrom << endl
- << " pagePtr.i = " << pagePtr.i << endl
- << " prevPage = " << prevPage << endl
- << " pageArrayIndex = " << pageArrayIndex << endl;
- printoutInfoAndShutdown(page);
- }
- }//do
- while (pagePtr.i != RNIL);
- }//if
- }//for
-}//Dbacc::checkPageArrayList()
-
-// --------------------------------------------------------------------------------- */
-// Check the page to put into the pageArrayList.
-// --------------------------------------------------------------------------------- */
-void Dbacc::checkPageB4Insert(Uint32 pageId, const char *calledFrom) {
- Page8Ptr pagePtr;
- Uint32 pageArrayIndex;
- LongKeyPage *page;
-
- pagePtr.i = pageId;
- ptrCheckGuard(pagePtr, cpagesize, page8);
- page = (LongKeyPage *) &pagePtr.p->word32[0];
-
- if ((page->header.nextPage != RNIL) ||
- (page->header.prevPage != RNIL)) {
- jam();
- ndbout << " ERROR in checkPageB4Insert, called from " << calledFrom << endl
- << " pagePtr.i = " << pagePtr.i << endl
- << " page->header.nextPage = " << page->header.nextPage << endl
- << " page->header.prevPage = " << page->header.prevPage << endl;
- ndbrequire(false);
- }
-
- // Page should not be inserted in the list if its free area is less than 128 words (512 bytes).
- if (page->header.freeArea < 128) {
- jam();
- ndbout << " ERROR in checkPageB4Insert, called from " << calledFrom << endl
- << " Page has to little free area to be in list." << endl
- << " pagePtr.i = " << pagePtr.i << endl
- << " tipaArrayPos = " << tipaArrayPos << endl;
- printoutInfoAndShutdown(page);
- }
-
- // Check if position in list is correct
- if ((((page->header.freeArea - 128) / 512) != page->header.pageArrayPos) ||
- (page->header.pageArrayPos != tipaArrayPos)) {
- ndbout << " ERROR in checkPageB4Insert, called from " << calledFrom << endl
- << " Incorrect position in list." << endl
- << " pagePtr.i = " << pagePtr.i << endl
- << " tipaArrayPos = " << tipaArrayPos << endl;
- printoutInfoAndShutdown(page);
- }
-
- // Check if page is already in list.
- for (pageArrayIndex = 0; pageArrayIndex <= ZMAX_LONG_KEY_ARRAY_INDEX; pageArrayIndex++) {
- jam();
- pagePtr.i = fragrecptr.p->longKeyPageArray[pageArrayIndex];
-
- if (pagePtr.i != RNIL) {
- // A page is found.
- jam();
- do {
- ptrCheckGuard(pagePtr, cpagesize, page8);
- page = (LongKeyPage *) &pagePtr.p->word32[0];
- if (pagePtr.i == pageId) {
- jam();
- ndbout << "ERROR in checkPageB4Insert, called from " << calledFrom << endl
- << "Page exists already in list." << endl
- << " pagePtr.i = " << pagePtr.i << endl;
- printoutInfoAndShutdown(page);
- }
- pagePtr.i = page->header.nextPage;
- }//do
- while (pagePtr.i != RNIL);
- }//if
- }//for
-}//Dbacc::checkPageB4Insert()
-
-// --------------------------------------------------------------------------------- */
-// Check the page to remove from the pageArrayList.
-// --------------------------------------------------------------------------------- */
-void Dbacc::checkPageB4Remove(Uint32 pageId, const char *calledFrom) {
- Page8Ptr pagePtr;
- Uint32 pageArrayIndex;
- Uint32 noOfOccurrence = 0;
- Uint32 noOfPagesInList = 0;
- LongKeyPage *page;
-
- LongKeyPage *prevPage;
- LongKeyPage *nextPage;
- Page8Ptr rfpPrevPageptr;
- Page8Ptr rfpNextPageptr;
-
-
- pagePtr.i = pageId;
- ptrCheckGuard(pagePtr, cpagesize, page8);
- page = (LongKeyPage *) &pagePtr.p->word32[0];
-
- // Check that page is in list.
- for (pageArrayIndex = 0; pageArrayIndex <= ZMAX_LONG_KEY_ARRAY_INDEX; pageArrayIndex++) {
- jam();
- pagePtr.i = fragrecptr.p->longKeyPageArray[pageArrayIndex];
-
- if (pagePtr.i != RNIL) {
- // A page is found.
- jam();
- do {
- noOfPagesInList++;
- ptrCheckGuard(pagePtr, cpagesize, page8);
- page = (LongKeyPage *) &pagePtr.p->word32[0];
- if (pagePtr.i == pageId) {
- // Check consistency with the previous page in the list.
- if (page->header.prevPage != RNIL) {
- rfpPrevPageptr.i = page->header.prevPage;
- ptrCheckGuard(rfpPrevPageptr, cpagesize, page8);
- prevPage = (LongKeyPage *) &rfpPrevPageptr.p->word32[0];
- if (prevPage->header.nextPage != pageId) {
- ndbout << "ERROR: inconsistent in checkPageB4Remove, called from " << calledFrom << endl
- << "prevPage->header.nextPage = " << prevPage->header.nextPage << endl
- << "pageId = " << pageId << endl;
- printoutInfoAndShutdown(page);
- }
- }
- // Check consistency with the next page in the list.
- if (page->header.nextPage != RNIL) {
- rfpNextPageptr.i = page->header.nextPage;
- ptrCheckGuard(rfpNextPageptr, cpagesize, page8);
- nextPage = (LongKeyPage *) &rfpNextPageptr.p->word32[0];
- if (nextPage->header.prevPage != pageId) {
- ndbout << "ERROR: inconsistent in checkPageB4Remove, called from " << calledFrom << endl
- << "nextPage->header.prevPage = " << nextPage->header.prevPage << endl
- << "pageId = " << pageId << endl;
- printoutInfoAndShutdown(page);
- }
- }
- jam();
- noOfOccurrence++;
- }
- pagePtr.i = page->header.nextPage;
- }//do
- while (pagePtr.i != RNIL);
- }//if
- }//for
-
- if (noOfOccurrence != 1) {
- pagePtr.i = pageId;
- ptrCheckGuard(pagePtr, cpagesize, page8);
- page = (LongKeyPage *) &pagePtr.p->word32[0];
- ndbout << "ERROR in checkPageB4Remove, called from " << calledFrom << endl
- << "Page occur " << noOfOccurrence << " times in list" << endl
- << "pageId = " << pageId << endl;
- printoutInfoAndShutdown(page);
- }
-}//Dbacc::checkPageB4Remove()
-
-
-// --------------------------------------------------------------------------------- */
-// Printout an error message and shutdown node.
-// --------------------------------------------------------------------------------- */
-void Dbacc::printoutInfoAndShutdown(LongKeyPage *page) {
- ndbout << " page->header.pageArrayPos = " << page->header.pageArrayPos << endl
- << " ((page->header.freeArea - 128) / 512) = "
- << ((page->header.freeArea - 128) / 512) << endl
- << " page->header.freeArea = " << page->header.freeArea << endl
- << " page->header.noOfElements = " << page->header.noOfElements << endl
- << " page->header.nextPage = " << page->header.nextPage << endl
- << " page->header.prevPage = " << page->header.prevPage << endl
- << " page->header.nextFreeIndex = " << page->header.nextFreeIndex << endl
- << " page->header.insertPos = " << page->header.insertPos << endl
- << " page->header.highestIndex = " << page->header.highestIndex << endl
- << " page->header.pageId = " << page->header.pageId << endl;
- ndbrequire(false);
-}//Dbacc::printoutInfoAndShutdown()
-
/* --------------------------------------------------------------------------------- */
/* --------------------------------------------------------------------------------- */
/* --------------------------------------------------------------------------------- */
@@ -4419,7 +3173,7 @@ void Dbacc::printoutInfoAndShutdown(LongKeyPage *page) {
/* --------------------------------------------------------------------------------- */
/* --------------------------------------------------------------------------------- */
/* */
-/* MODULE: READ */
+/* MODULE: GET_ELEMENT */
/* THE FOLLOWING SUBROUTINES ARE ONLY USED BY GET_ELEMENT AND */
/* GETDIRINDEX. THIS ROUTINE IS THE SOLE INTERFACE TO GET ELEMENTS */
/* FROM THE INDEX. CURRENT USERS ARE ALL REQUESTS AND EXECUTE UNDO LOG */
@@ -4480,6 +3234,21 @@ void Dbacc::getdirindex(Signal* signal)
ptrCheckGuard(gdiPageptr, cpagesize, page8);
}//Dbacc::getdirindex()
+Uint32
+Dbacc::readTablePk(Uint32 localkey1)
+{
+ Uint32 tableId = fragrecptr.p->myTableId;
+ Uint32 fragId = fragrecptr.p->myfid;
+ Uint32 fragPageId = localkey1 >> MAX_TUPLES_BITS;
+ Uint32 pageIndex = localkey1 & ((1 << MAX_TUPLES_BITS) - 1);
+#ifdef VM_TRACE
+ memset(ckeys, 0x1f, (fragrecptr.p->keyLength * MAX_XFRM_MULTIPLY) << 2);
+#endif
+ int ret = c_tup->accReadPk(tableId, fragId, fragPageId, pageIndex, ckeys, true);
+ ndbrequire(ret > 0);
+ return ret;
+}
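readTablePk() above splits the one-word local key into a fragment page id and a page index at bit position MAX_TUPLES_BITS. A standalone illustration of that split; the bit width below is only a placeholder, the real constant lives in ndb_limits.h:

#include <cstdint>

static const uint32_t kMaxTuplesBits = 13;   // placeholder, not the real value

struct LocalKey { uint32_t fragPageId; uint32_t pageIndex; };

static inline LocalKey splitLocalKey(uint32_t localkey1) {
  LocalKey k;
  k.fragPageId = localkey1 >> kMaxTuplesBits;               // upper bits
  k.pageIndex  = localkey1 & ((1u << kMaxTuplesBits) - 1);  // lower bits
  return k;
}
static inline uint32_t makeLocalKey(uint32_t fragPageId, uint32_t pageIndex) {
  return (fragPageId << kMaxTuplesBits) | pageIndex;
}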
+
/* --------------------------------------------------------------------------------- */
/* GET_ELEMENT */
/* INPUT: */
@@ -4521,7 +3290,6 @@ void Dbacc::getElement(Signal* signal)
Uint32 tgeNextptrtype;
register Uint32 tgeKeyptr;
register Uint32 tgeRemLen;
- register Uint32 tgeCompareLen;
register Uint32 TelemLen = fragrecptr.p->elementLength;
register Uint32* Tkeydata = (Uint32*)&signal->theData[7];
@@ -4529,20 +3297,15 @@ void Dbacc::getElement(Signal* signal)
tgePageindex = tgdiPageindex;
gePageptr = gdiPageptr;
tgeResult = ZFALSE;
- tgeCompareLen = fragrecptr.p->keyLength;
- const Uint32 isAccLockReq = operationRecPtr.p->isAccLockReq;
- if (isAccLockReq) {
- jam();
- tgeCompareLen = 0;
- }
+ /*
+ * The value searched is
+ * - table key for ACCKEYREQ, stored in TUP
+ * - local key (1 word) for ACC_LOCKREQ and UNDO, stored in ACC
+ */
+ const bool searchLocalKey =
+ operationRecPtr.p->isAccLockReq || operationRecPtr.p->isUndoLogReq;
- // We can handle keylength up to 8, but not more (0 means dynamic)
- if (tgeCompareLen >= 9) {
- ACCKEY_error(2); return;
- }//if
- if (TelemLen < 3) {
- ACCKEY_error(3); return;
- }//if
+ ndbrequire(TelemLen == ZELEM_HEAD_SIZE + fragrecptr.p->localkeylen);
tgeNextptrtype = ZLEFT;
tgeLocked = 0;
@@ -4573,7 +3336,7 @@ void Dbacc::getElement(Signal* signal)
} else {
ACCKEY_error(6); return;
}//if
- if (tgeRemLen >= TelemLen) {
+ if (tgeRemLen >= ZCON_HEAD_SIZE + TelemLen) {
if (tgeRemLen > ZBUF_SIZE) {
ACCKEY_error(7); return;
}//if
@@ -4581,151 +3344,46 @@ void Dbacc::getElement(Signal* signal)
// There is at least one element in this container. Check if it is the element
// searched for.
/* --------------------------------------------------------------------------------- */
- if (tgeCompareLen != 0) {
- /* --------------------------------------------------------------------------------- */
- /* THIS PART IS USED TO SEARCH FOR KEYS WITH FIXED SIZE. THE LOOP TAKES CARE */
- /* OF SEARCHING THROUGH ALL ELEMENTS IN ONE CONTAINER. */
- /* --------------------------------------------------------------------------------- */
- do {
- register Uint32 TdataIndex = 0;
- register Uint32 TgeIndex = 0;
+ do {
+ tgeElementHeader = gePageptr.p->word32[tgeElementptr];
+ tgeRemLen = tgeRemLen - TelemLen;
+ Uint32 hashValuePart;
+ if (ElementHeader::getLocked(tgeElementHeader)) {
jam();
- tgeRemLen = tgeRemLen - TelemLen;
- do {
- if (gePageptr.p->word32[tgeKeyptr + TgeIndex] != Tkeydata[TdataIndex]) {
- goto compare_next;
- }//if
- TdataIndex++;
- TgeIndex += tgeForward;
- } while (TdataIndex < tgeCompareLen);
- /* --------------------------------------------------------------------------------- */
- /* WE HAVE FOUND THE ELEMENT. GET THE LOCK INDICATOR AND RETURN FOUND. */
- /* --------------------------------------------------------------------------------- */
+ geTmpOperationRecPtr.i = ElementHeader::getOpPtrI(tgeElementHeader);
+ ptrCheckGuard(geTmpOperationRecPtr, coprecsize, operationrec);
+ hashValuePart = geTmpOperationRecPtr.p->hashvaluePart;
+ } else {
jam();
-#if __ia64 == 1
-#if __INTEL_COMPILER == 810
- // prevents SIGSEGV under icc -O1
- ndb_acc_ia64_icc810_dummy_func();
-#endif
-#endif
- tgeLocked = ElementHeader::getLocked(gePageptr.p->word32[tgeElementptr]);
- tgeResult = ZTRUE;
- TdataIndex = tgeElementptr + tgeForward;
- TgeIndex = TdataIndex + tgeForward;
- operationRecPtr.p->localdata[0] = gePageptr.p->word32[TdataIndex];
- operationRecPtr.p->localdata[1] = gePageptr.p->word32[TgeIndex];
- return;
- /* --------------------------------------------------------------------------------- */
- /* COMPARE NEXT ELEMENT */
- /* --------------------------------------------------------------------------------- */
- compare_next:
- if (tgeRemLen <= ZCON_HEAD_SIZE) {
- break;
- }//if
- tgeKeyptr = tgeKeyptr + tgeElemStep;
- tgeElementptr = tgeElementptr + tgeElemStep;
- } while (1);
- } else if (! isAccLockReq) {
- jam();
- /* --------------------------------------------------------------------------------- */
- /* THIS PART IS USED TO SEARCH FOR KEYS WITH VARIABLE LENGTH OR FIXED LENGTH */
- /* GREATER THAN 32 BYTES. IN THIS CASE THE KEY PART IS STORED IN A SPECIAL */
-/* LONG PAGE PART AND THE HASH INDEX CONTAINS A REFERENCE TO THAT PAGE PLUS A */
- /* PART OF THE HASH VALUE. */
- /* --------------------------------------------------------------------------------- */
- do {
- tgeElementHeader = gePageptr.p->word32[tgeElementptr];
- tgeRemLen = tgeRemLen - TelemLen;
- Uint32 hashValuePart;
- if (ElementHeader::getLocked(tgeElementHeader)) {
- jam();
- /* --------------------------------------------------------------------------------- */
- /* IN THIS CASE THE HASH VALUE PART OF THE ELEMENT HEADER IS STORED IN THE */
- /* OPERATION THAT OWNS THE LOCK. IN THIS CASE WE MIGHT AS WELL GO AHEAD AND */
- /* CHECK THE KEY IN THE LONG PAGE. */
- /* --------------------------------------------------------------------------------- */
- geTmpOperationRecPtr.i =
- ElementHeader::getOpPtrI(tgeElementHeader);
- ptrCheckGuard(geTmpOperationRecPtr, coprecsize, operationrec);
- hashValuePart = geTmpOperationRecPtr.p->hashvaluePart;
+ hashValuePart = ElementHeader::getHashValuePart(tgeElementHeader);
+ }
+ if (hashValuePart == opHashValuePart) {
+ jam();
+ Uint32 localkey1 = gePageptr.p->word32[tgeElementptr + tgeForward];
+ Uint32 localkey2 = 0;
+ bool found;
+ if (! searchLocalKey) {
+ Uint32 len = readTablePk(localkey1);
+ found = (len == operationRecPtr.p->xfrmtupkeylen) &&
+ (memcmp(Tkeydata, ckeys, len << 2) == 0);
} else {
jam();
- /* --------------------------------------------------------------------------------- */
- /* IN THIS CASE THE HASH VALUE PART CAN BE CHECKED TO SEE IF THE HASH VALUE */
- /* GIVES US A REASON TO CONTINUE CHECKING THE FULL KEY. */
- /* --------------------------------------------------------------------------------- */
- hashValuePart = ElementHeader::getHashValuePart(tgeElementHeader);
- }//if
-
- if (hashValuePart == opHashValuePart) {
- jam();
- /* --------------------------------------------------------------------------------- */
- /* IF THE HASH VALUES ARE EQUAL THEN XOR-ING THEM WILL GIVE THE RESULT 0. */
- /* --------------------------------------------------------------------------------- */
- /* WE HAVE FOUND A KEY WITH IDENTICAL HASH VALUE. MOST LIKELY WE HAVE FOUND THE*/
- /* ELEMENT BUT FIRST WE NEED TO PERFORM A KEY COMPARISON. */
- /* --------------------------------------------------------------------------------- */
- tslcPageIndex = gePageptr.p->word32[tgeKeyptr] & 0x3ff;
- tslcPagedir = gePageptr.p->word32[tgeKeyptr] >> 10;
- searchLongKey(signal, true);
- if (tslcResult == ZTRUE) {
- register Uint32 TlocData1, TlocData2;
- jam();
- /* --------------------------------------------------------------------------------- */
- /* WE HAVE FOUND THE ELEMENT. GET THE LOCK INDICATOR AND RETURN FOUND. */
- /* --------------------------------------------------------------------------------- */
- tgeLocked = ElementHeader::getLocked(tgeElementHeader);
- tgeResult = ZTRUE;
- TlocData1 = tgeElementptr + tgeForward;
- TlocData2 = TlocData1 + tgeForward;
- operationRecPtr.p->localdata[0] = gePageptr.p->word32[TlocData1];
- operationRecPtr.p->localdata[1] = gePageptr.p->word32[TlocData2];
- return;
- }//if
+ found = (localkey1 == Tkeydata[0]);
}
- /* --------------------------------------------------------------------------------- */
- /* COMPARE NEXT ELEMENT */
- /* --------------------------------------------------------------------------------- */
- if (tgeRemLen <= ZCON_HEAD_SIZE) {
- break;
- }//if
- tgeKeyptr = tgeKeyptr + tgeElemStep;
- tgeElementptr = tgeElementptr + tgeElemStep;
- } while (1);
- } else {
- jam();
- /* --------------------------------------------------------------------------------- */
- /* Search for local key in a lock request */
- /* --------------------------------------------------------------------------------- */
- do {
- tgeRemLen = tgeRemLen - TelemLen;
- // position of local key word 1
- Uint32 TdataIndex = tgeElementptr + tgeForward;
- // XXX assume localkeylen is 1
- if (gePageptr.p->word32[TdataIndex] == Tkeydata[0]) {
+ if (found) {
jam();
- tgeLocked = ElementHeader::getLocked(gePageptr.p->word32[tgeElementptr]);
+ tgeLocked = ElementHeader::getLocked(tgeElementHeader);
tgeResult = ZTRUE;
- // position of local key word 2
- Uint32 TgeIndex = TdataIndex + tgeForward;
- operationRecPtr.p->localdata[0] = gePageptr.p->word32[TdataIndex];
- operationRecPtr.p->localdata[1] = gePageptr.p->word32[TgeIndex];
-
- if (fragrecptr.p->keyLength == 0) {
- // set up long key variables in operation record
- tslcPageIndex = gePageptr.p->word32[tgeKeyptr] & 0x3ff;
- tslcPagedir = gePageptr.p->word32[tgeKeyptr] >> 10;
- // no verification since we have no key data
- searchLongKey(signal, false);
- }
+ operationRecPtr.p->localdata[0] = localkey1;
+ operationRecPtr.p->localdata[1] = localkey2;
return;
- }//if
- if (tgeRemLen <= ZCON_HEAD_SIZE) {
- break;
- }//if
- tgeElementptr = tgeElementptr + tgeElemStep;
- } while (1);
- }//if
+ }
+ }
+ if (tgeRemLen <= ZCON_HEAD_SIZE) {
+ break;
+ }
+ tgeElementptr = tgeElementptr + tgeElemStep;
+ } while (true);
}//if
if (tgeRemLen != ZCON_HEAD_SIZE) {
ACCKEY_error(8); return;
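The rewritten search loop above collapses the three old paths (fixed-size keys, long keys, lock requests) into one decision: compare the stored hash-value part first, then either read the table key back from TUP and memcmp it, or, for lock and undo requests, compare the one-word local key. A condensed, free-standing restatement of that decision; the read callback and parameter names are mine, not the block's interfaces:

#include <cstdint>
#include <cstring>

// Stands in for readTablePk()/accReadPk: fetches the stored primary key into
// 'buf' and returns its length in 32-bit words.
typedef uint32_t (*ReadPkFn)(uint32_t localkey1, uint32_t* buf);

bool elementMatches(bool searchLocalKey,
                    uint32_t storedHashPart, uint32_t searchHashPart,
                    uint32_t storedLocalKey1,
                    const uint32_t* searchKey, uint32_t searchKeyLenWords,
                    ReadPkFn readPk, uint32_t* scratch) {
  if (storedHashPart != searchHashPart)
    return false;                              // cheap filter, checked first
  if (searchLocalKey)                          // ACC_LOCKREQ / undo: 1 word
    return storedLocalKey1 == searchKey[0];
  uint32_t lenWords = readPk(storedLocalKey1, scratch);  // key is stored in TUP
  return lenWords == searchKeyLenWords &&
         std::memcmp(searchKey, scratch, lenWords * 4) == 0;
}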
@@ -4756,71 +3414,6 @@ void Dbacc::getElement(Signal* signal)
}//Dbacc::getElement()
/* --------------------------------------------------------------------------------- */
-/* SEARCH_LONG_KEY */
-/* INPUT: */
-/* TSLC_PAGEDIR PAGE DIRECTORY OF LONG PAGE */
-/* TSLC_PAGE_INDEX PAGE INDEX IN LONG PAGE */
-/* GE_OPERATION_REC_PTR */
-/* OUTPUT: */
-/* TSLC_RESULT */
-/* DESCRIPTION: SEARCH FOR AN ELEMENT IN A LONG_KEY_PAGE. */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::searchLongKey(Signal* signal, bool verify)
-{
- DirRangePtr slcOverflowrangeptr;
- DirectoryarrayPtr slcOverflowDirptr;
- Page8Ptr slcPageptr;
- Uint32 tslcIndexValue;
- Uint32 tslcStartIndex;
- Uint32 tslcIndex;
- Uint32 guard30;
- Uint32* Tkeydata = (Uint32*)&signal->theData[7];
-
-
- slcOverflowrangeptr.i = fragrecptr.p->overflowdir;
- ptrCheckGuard(slcOverflowrangeptr, cdirrangesize, dirRange);
- arrGuard((tslcPagedir >> 8), 256);
- slcOverflowDirptr.i = slcOverflowrangeptr.p->dirArray[tslcPagedir >> 8];
- ptrCheckGuard(slcOverflowDirptr, cdirarraysize, directoryarray);
-
- // dbgWord32(slcOverflowDirptr, (int) (tslcPagedir & 0xff), slcOverflowDirptr.p->pagep[tslcPagedir & 0xff]);
-
- slcPageptr.i = slcOverflowDirptr.p->pagep[tslcPagedir & 0xff];
- ptrCheckGuard(slcPageptr, cpagesize, page8);
- arrGuard(ZWORDS_IN_PAGE - tslcPageIndex, 2048);
- dbgWord32(slcPageptr, ZWORDS_IN_PAGE - tslcPageIndex, (int)slcPageptr.p->word32[ZWORDS_IN_PAGE - tslcPageIndex] & 0xffff);
- dbgWord32(slcPageptr, ZWORDS_IN_PAGE - tslcPageIndex, slcPageptr.p->word32[ZWORDS_IN_PAGE - tslcPageIndex] >> 16);
- tslcIndexValue = slcPageptr.p->word32[ZWORDS_IN_PAGE - tslcPageIndex];
- if (verify) {
- if ((tslcIndexValue >> 16) != operationRecPtr.p->tupkeylen) {
- jam();
- tslcResult = ZFALSE;
- return;
- }//if
- }
- tslcStartIndex = tslcIndexValue & 0xffff;
- guard30 = operationRecPtr.p->tupkeylen - 1;
- arrGuard(guard30, 2048);
- arrGuard(guard30 + tslcStartIndex, 2048);
- if (verify) {
- for (tslcIndex = 0; tslcIndex <= guard30; tslcIndex++) {
- dbgWord32(slcPageptr, tslcIndex + tslcStartIndex, slcPageptr.p->word32[tslcIndex + tslcStartIndex]);
- if (slcPageptr.p->word32[tslcIndex + tslcStartIndex] != Tkeydata[tslcIndex]) {
- jam();
- tslcResult = ZFALSE;
- return;
- }//if
- }//for
- }
- jam();
- tslcResult = ZTRUE;
- operationRecPtr.p->longPagePtr = slcPageptr.i;
- operationRecPtr.p->longKeyPageIndex = tslcPageIndex;
- arrGuard(tslcPageIndex, ZMAX_NO_OF_LONGKEYS_IN_PAGE);
- arrGuard(slcPageptr.i, cpagesize);
-}//Dbacc::searchLongKey()
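For reference, the removed searchLongKey() resolved a long-page reference through a two-level overflow directory: the upper bits of the page-directory value select a directory, the low 8 bits select a page slot within it. A loose sketch of that lookup with hypothetical structures in place of DirRange/Directoryarray:

#include <cstdint>

static const uint32_t NIL_PAGE = 0xffffffff;

struct Directory { uint32_t pagep[256]; };      // second level: page ids
struct DirRange  { Directory* dirArray[256]; }; // first level: directories

static inline uint32_t lookupOverflowPage(const DirRange& range, uint32_t pagedir) {
  const Directory* dir = range.dirArray[pagedir >> 8];    // pick a directory
  return dir ? dir->pagep[pagedir & 0xff] : NIL_PAGE;     // pick a page slot
}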
-
-/* --------------------------------------------------------------------------------- */
/* --------------------------------------------------------------------------------- */
/* --------------------------------------------------------------------------------- */
/* */
@@ -4863,13 +3456,6 @@ void Dbacc::commitdelete(Signal* signal, bool systemRestart)
EXECUTE_DIRECT(DBTUP, GSN_TUP_DEALLOCREQ, signal, 4);
jamEntry();
}//if
- if (fragrecptr.p->keyLength == 0) {
- jam();
- tdlkLogicalPageIndex = operationRecPtr.p->longKeyPageIndex;
- dlkPageptr.i = operationRecPtr.p->longPagePtr;
- ptrCheckGuard(dlkPageptr, cpagesize, page8);
- deleteLongKey(signal);
- }//if
getdirindex(signal);
tlastPageindex = tgdiPageindex;
lastPageptr.i = gdiPageptr.i;
@@ -5428,50 +4014,6 @@ void Dbacc::checkoverfreelist(Signal* signal)
}//if
}//Dbacc::checkoverfreelist()
-/* --------------------------------------------------------------------------------- */
-/* RELEASE_LONG_PAGE */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::releaseLongPage(Signal* signal)
-{
- DirRangePtr rlpOverflowrangeptr;
- DirectoryarrayPtr rlpOverflowDirptr;
- Uint32 trlpTmp1;
- Uint32 trlpTmp2;
- Uint32 trlpTmp3;
-
- jam();
- seizeOverRec(signal);
- sorOverflowRecPtr.p->dirindex = rlopPageptr.p->word32[ZPOS_PAGE_ID];
- sorOverflowRecPtr.p->overpage = RNIL;
- priOverflowRecPtr = sorOverflowRecPtr;
- putRecInFreeOverdir(signal);
- trlpTmp1 = sorOverflowRecPtr.p->dirindex;
- rlpOverflowrangeptr.i = fragrecptr.p->overflowdir;
- trlpTmp2 = trlpTmp1 >> 8;
- trlpTmp3 = trlpTmp1 & 0xff;
- ptrCheckGuard(rlpOverflowrangeptr, cdirrangesize, dirRange);
- arrGuard(trlpTmp2, 256);
- rlpOverflowDirptr.i = rlpOverflowrangeptr.p->dirArray[trlpTmp2];
- ptrCheckGuard(rlpOverflowDirptr, cdirarraysize, directoryarray);
- rlpOverflowDirptr.p->pagep[trlpTmp3] = RNIL;
-
- if (cundoLogActive != ZTRUE) {
- // Remove from page array.
- trfpArrayPos = rlopPageptr.p->word32[ZPOS_ARRAY_POS];
- rfpPageptr = rlopPageptr;
- removeFromPageArrayList(signal);
- }
-
- // Reset page header
- iloPageptr = rlopPageptr;
- tiloIndex = rlopPageptr.p->word32[ZPOS_PAGE_ID];
- initLongOverpage(signal);
-
- rpPageptr = rlopPageptr;
- releasePage(signal);
-}//Dbacc::releaseLongPage()
-
-
/* ------------------------------------------------------------------------- */
/* ------------------------------------------------------------------------- */
/* ------------------------------------------------------------------------- */
@@ -6162,16 +4704,16 @@ Uint32 Dbacc::executeNextOperation(Signal* signal)
else if(operationRecPtr.p->operation == ZWRITE)
{
jam();
- operationRecPtr.p->operation = ZINSERT;
+ operationRecPtr.p->operation = ZUPDATE;
if (operationRecPtr.p->prevParallelQue != RNIL) {
OperationrecPtr prevOpPtr;
jam();
prevOpPtr.i = operationRecPtr.p->prevParallelQue;
ptrCheckGuard(prevOpPtr, coprecsize, operationrec);
- if (prevOpPtr.p->operation != ZDELETE)
+ if (prevOpPtr.p->operation == ZDELETE)
{
jam();
- operationRecPtr.p->operation = ZUPDATE;
+ operationRecPtr.p->operation = ZINSERT;
}
}
}
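The hunk above changes how a queued WRITE is resolved: it now defaults to an UPDATE and is turned into an INSERT only when the operation ahead of it in the parallel queue is a DELETE (the old code had the two outcomes reversed). A compact restatement of the new rule, with names of my own choosing:

enum ResolvedOp { OP_INSERT, OP_UPDATE, OP_DELETE };

// New behaviour of executeNextOperation() for a ZWRITE, as per the hunk above.
static inline ResolvedOp resolveWrite(bool hasPrevParallelOp, ResolvedOp prevOp) {
  if (hasPrevParallelOp && prevOp == OP_DELETE)
    return OP_INSERT;   // the row was just deleted, so the write must insert
  return OP_UPDATE;     // otherwise the write behaves as an update
}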
@@ -6779,14 +5321,8 @@ void Dbacc::expandcontainer(Signal* signal)
Uint32 texcHashvalue;
Uint32 texcTmp;
Uint32 texcIndex;
- Uint32 texpKeyLen;
Uint32 guard20;
- texpKeyLen = fragrecptr.p->keyLength;
- if (texpKeyLen == 0) {
- jam();
- texpKeyLen = ZACTIVE_LONG_KEY_LEN;
- }//if
cexcPrevpageptr = RNIL;
cexcPrevconptr = 0;
cexcForward = ZTRUE;
@@ -6873,18 +5409,10 @@ void Dbacc::expandcontainer(Signal* signal)
clocalkey[texcIndex] = excPageptr.p->word32[texcTmp];
texcTmp = texcTmp + cexcForward;
}//for
- guard20 = texpKeyLen - 1;
- for (texcIndex = 0; texcIndex <= guard20; texcIndex++) {
- arrGuard(texcIndex, 2048);
- arrGuard(texcTmp, 2048);
- ckeys[texcIndex] = excPageptr.p->word32[texcTmp];
- texcTmp = texcTmp + cexcForward;
- }//for
tidrPageindex = fragrecptr.p->expReceiveIndex;
idrPageptr.i = fragrecptr.p->expReceivePageptr;
ptrCheckGuard(idrPageptr, cpagesize, page8);
tidrForward = fragrecptr.p->expReceiveForward;
- tidrKeyLen = texpKeyLen;
insertElement(signal);
fragrecptr.p->expReceiveIndex = tidrPageindex;
fragrecptr.p->expReceivePageptr = idrPageptr.i;
@@ -6966,17 +5494,10 @@ void Dbacc::expandcontainer(Signal* signal)
clocalkey[texcIndex] = lastPageptr.p->word32[texcTmp];
texcTmp = texcTmp + tlastForward;
}//for
- for (texcIndex = 0; texcIndex < texpKeyLen; texcIndex++) {
- arrGuard(texcIndex, 2048);
- arrGuard(texcTmp, 2048);
- ckeys[texcIndex] = lastPageptr.p->word32[texcTmp];
- texcTmp = texcTmp + tlastForward;
- }//for
tidrPageindex = fragrecptr.p->expReceiveIndex;
idrPageptr.i = fragrecptr.p->expReceivePageptr;
ptrCheckGuard(idrPageptr, cpagesize, page8);
tidrForward = fragrecptr.p->expReceiveForward;
- tidrKeyLen = texpKeyLen;
insertElement(signal);
fragrecptr.p->expReceiveIndex = tidrPageindex;
fragrecptr.p->expReceivePageptr = idrPageptr.i;
@@ -7457,18 +5978,12 @@ void Dbacc::shrinkcontainer(Signal* signal)
Uint32 tshrElementptr;
Uint32 tshrRemLen;
Uint32 tshrInc;
- Uint32 tshrKeyLen;
Uint32 tshrTmp;
Uint32 tshrIndex;
Uint32 guard21;
tshrRemLen = cexcContainerlen - ZCON_HEAD_SIZE;
- tshrKeyLen = fragrecptr.p->keyLength;
- if (tshrKeyLen == 0) {
- jam();
- tshrKeyLen = ZACTIVE_LONG_KEY_LEN;
- }//if
- tshrInc = (ZELEM_HEAD_SIZE + tshrKeyLen) + fragrecptr.p->localkeylen;
+ tshrInc = fragrecptr.p->elementLength;
if (cexcForward == ZTRUE) {
jam();
tshrElementptr = cexcContainerptr + ZCON_HEAD_SIZE;
@@ -7517,18 +6032,10 @@ void Dbacc::shrinkcontainer(Signal* signal)
clocalkey[tshrIndex] = excPageptr.p->word32[tshrTmp];
tshrTmp = tshrTmp + cexcForward;
}//for
- guard21 = tshrKeyLen - 1;
- for (tshrIndex = 0; tshrIndex <= guard21; tshrIndex++) {
- arrGuard(tshrIndex, 2048);
- arrGuard(tshrTmp, 2048);
- ckeys[tshrIndex] = excPageptr.p->word32[tshrTmp];
- tshrTmp = tshrTmp + cexcForward;
- }//for
tidrPageindex = fragrecptr.p->expReceiveIndex;
idrPageptr.i = fragrecptr.p->expReceivePageptr;
ptrCheckGuard(idrPageptr, cpagesize, page8);
tidrForward = fragrecptr.p->expReceiveForward;
- tidrKeyLen = tshrKeyLen;
insertElement(signal);
/* --------------------------------------------------------------------------------- */
/* TAKE CARE OF RESULT FROM INSERT_ELEMENT. */
@@ -8256,14 +6763,6 @@ void Dbacc::saveOverPagesLab(Signal* signal)
jam();
ropPageptr = sopPageptr;
releaseOverpage(signal);
- } else if (((sopPageptr.p->word32[ZPOS_PAGE_TYPE] >> ZPOS_PAGE_TYPE_BIT) & 3) ==
- ZLONG_PAGE_TYPE) {
- //----------------------------------------------------------------------
- // The long key page is empty, release it.
- //----------------------------------------------------------------------
- jam();
- rlopPageptr = sopPageptr;
- releaseLongPage(signal);
} else {
jam();
sendSystemerror(signal);
@@ -8655,66 +7154,63 @@ void Dbacc::lcpCopyPage(Signal* signal)
}//for
}//for
tlcnChecksum = Tchs;
- if (((lcnCopyPageptr.p->word32[ZPOS_PAGE_TYPE] >> ZPOS_PAGE_TYPE_BIT) & 3) != ZLONG_PAGE_TYPE) {
- jam();
- if (((lcnCopyPageptr.p->word32[ZPOS_PAGE_TYPE] >> ZPOS_PAGE_TYPE_BIT) & 3) == ZNORMAL_PAGE_TYPE) {
- jam();
- /*-----------------------------------------------------------------*/
- /* TAKE CARE OF ALL 64 BUFFERS ADDRESSED BY ALGORITHM IN */
- /* FIRST PAGE. IF THEY ARE EMPTY THEY STILL HAVE A CONTAINER */
- /* HEADER OF 2 WORDS. */
- /*-----------------------------------------------------------------*/
- tlcnConIndex = ZHEAD_SIZE;
- tlupForward = 1;
- for (tlcnIndex = 0; tlcnIndex <= ZNO_CONTAINERS - 1; tlcnIndex++) {
- tlupIndex = tlcnConIndex;
- tlupElemIndex = tlcnConIndex + ZCON_HEAD_SIZE;
- lcpUpdatePage(signal);
- tlcnConIndex = tlcnConIndex + ZBUF_SIZE;
- }//for
- }//if
+ if (((lcnCopyPageptr.p->word32[ZPOS_PAGE_TYPE] >> ZPOS_PAGE_TYPE_BIT) & 3) == ZNORMAL_PAGE_TYPE) {
+ jam();
/*-----------------------------------------------------------------*/
- /* TAKE CARE OF ALL USED BUFFERS ON THE LEFT SIDE. */
+ /* TAKE CARE OF ALL 64 BUFFERS ADDRESSED BY ALGORITHM IN */
+ /* FIRST PAGE. IF THEY ARE EMPTY THEY STILL HAVE A CONTAINER */
+ /* HEADER OF 2 WORDS. */
/*-----------------------------------------------------------------*/
- tlcnNextContainer = (lcnCopyPageptr.p->word32[ZPOS_EMPTY_LIST] >> 23) & 0x7f;
- while (tlcnNextContainer < ZEMPTYLIST) {
- tlcnConIndex = (tlcnNextContainer << ZSHIFT_PLUS) - (tlcnNextContainer << ZSHIFT_MINUS);
- tlcnConIndex = tlcnConIndex + ZHEAD_SIZE;
+ tlcnConIndex = ZHEAD_SIZE;
+ tlupForward = 1;
+ for (tlcnIndex = 0; tlcnIndex <= ZNO_CONTAINERS - 1; tlcnIndex++) {
tlupIndex = tlcnConIndex;
tlupElemIndex = tlcnConIndex + ZCON_HEAD_SIZE;
- tlupForward = 1;
lcpUpdatePage(signal);
- tlcnNextContainer = (lcnCopyPageptr.p->word32[tlcnConIndex] >> 11) & 0x7f;
- }//while
- if (tlcnNextContainer == ZEMPTYLIST) {
- jam();
- /*empty*/;
- } else {
- jam();
- sendSystemerror(signal);
- return;
- }//if
- /*-----------------------------------------------------------------*/
- /* TAKE CARE OF ALL USED BUFFERS ON THE RIGHT SIDE. */
- /*-----------------------------------------------------------------*/
- tlupForward = cminusOne;
- tlcnNextContainer = (lcnCopyPageptr.p->word32[ZPOS_EMPTY_LIST] >> 16) & 0x7f;
- while (tlcnNextContainer < ZEMPTYLIST) {
- tlcnConIndex = (tlcnNextContainer << ZSHIFT_PLUS) - (tlcnNextContainer << ZSHIFT_MINUS);
- tlcnConIndex = tlcnConIndex + ((ZHEAD_SIZE + ZBUF_SIZE) - ZCON_HEAD_SIZE);
- tlupIndex = tlcnConIndex;
- tlupElemIndex = tlcnConIndex - 1;
- lcpUpdatePage(signal);
- tlcnNextContainer = (lcnCopyPageptr.p->word32[tlcnConIndex] >> 11) & 0x7f;
- }//while
- if (tlcnNextContainer == ZEMPTYLIST) {
- jam();
- /*empty*/;
- } else {
- jam();
- sendSystemerror(signal);
- return;
- }//if
+ tlcnConIndex = tlcnConIndex + ZBUF_SIZE;
+ }//for
+ }//if
+ /*-----------------------------------------------------------------*/
+ /* TAKE CARE OF ALL USED BUFFERS ON THE LEFT SIDE. */
+ /*-----------------------------------------------------------------*/
+ tlcnNextContainer = (lcnCopyPageptr.p->word32[ZPOS_EMPTY_LIST] >> 23) & 0x7f;
+ while (tlcnNextContainer < ZEMPTYLIST) {
+ tlcnConIndex = (tlcnNextContainer << ZSHIFT_PLUS) - (tlcnNextContainer << ZSHIFT_MINUS);
+ tlcnConIndex = tlcnConIndex + ZHEAD_SIZE;
+ tlupIndex = tlcnConIndex;
+ tlupElemIndex = tlcnConIndex + ZCON_HEAD_SIZE;
+ tlupForward = 1;
+ lcpUpdatePage(signal);
+ tlcnNextContainer = (lcnCopyPageptr.p->word32[tlcnConIndex] >> 11) & 0x7f;
+ }//while
+ if (tlcnNextContainer == ZEMPTYLIST) {
+ jam();
+ /*empty*/;
+ } else {
+ jam();
+ sendSystemerror(signal);
+ return;
+ }//if
+ /*-----------------------------------------------------------------*/
+ /* TAKE CARE OF ALL USED BUFFERS ON THE RIGHT SIDE. */
+ /*-----------------------------------------------------------------*/
+ tlupForward = cminusOne;
+ tlcnNextContainer = (lcnCopyPageptr.p->word32[ZPOS_EMPTY_LIST] >> 16) & 0x7f;
+ while (tlcnNextContainer < ZEMPTYLIST) {
+ tlcnConIndex = (tlcnNextContainer << ZSHIFT_PLUS) - (tlcnNextContainer << ZSHIFT_MINUS);
+ tlcnConIndex = tlcnConIndex + ((ZHEAD_SIZE + ZBUF_SIZE) - ZCON_HEAD_SIZE);
+ tlupIndex = tlcnConIndex;
+ tlupElemIndex = tlcnConIndex - 1;
+ lcpUpdatePage(signal);
+ tlcnNextContainer = (lcnCopyPageptr.p->word32[tlcnConIndex] >> 11) & 0x7f;
+ }//while
+ if (tlcnNextContainer == ZEMPTYLIST) {
+ jam();
+ /*empty*/;
+ } else {
+ jam();
+ sendSystemerror(signal);
+ return;
}//if
lcnCopyPageptr.p->word32[ZPOS_CHECKSUM] = tlcnChecksum;
}//Dbacc::lcpCopyPage()
@@ -8775,78 +7271,75 @@ void Dbacc::srCheckPage(Signal* signal)
Uint32 tlcnIndex;
lupPageptr.p = lcnCopyPageptr.p;
- if (((lcnCopyPageptr.p->word32[ZPOS_PAGE_TYPE] >> ZPOS_PAGE_TYPE_BIT) & 3) != ZLONG_PAGE_TYPE) {
- jam();
- if (((lcnCopyPageptr.p->word32[ZPOS_PAGE_TYPE] >> ZPOS_PAGE_TYPE_BIT) & 3) == ZNORMAL_PAGE_TYPE) {
- jam();
- /*-----------------------------------------------------------------*/
- /* TAKE CARE OF ALL 64 BUFFERS ADDRESSED BY ALGORITHM IN */
- /* FIRST PAGE. IF THEY ARE EMPTY THEY STILL HAVE A CONTAINER */
- /* HEADER OF 2 WORDS. */
- /*-----------------------------------------------------------------*/
- tlcnConIndex = ZHEAD_SIZE;
- tlupForward = 1;
- for (tlcnIndex = 0; tlcnIndex <= ZNO_CONTAINERS - 1; tlcnIndex++) {
- tlupIndex = tlcnConIndex;
- tlupElemIndex = tlcnConIndex + ZCON_HEAD_SIZE;
- srCheckContainer(signal);
- if (tresult != 0) {
- jam();
- return;
- }//if
- tlcnConIndex = tlcnConIndex + ZBUF_SIZE;
- }//for
- }//if
+ if (((lcnCopyPageptr.p->word32[ZPOS_PAGE_TYPE] >> ZPOS_PAGE_TYPE_BIT) & 3) == ZNORMAL_PAGE_TYPE) {
+ jam();
/*-----------------------------------------------------------------*/
- /* TAKE CARE OF ALL USED BUFFERS ON THE LEFT SIDE. */
+ /* TAKE CARE OF ALL 64 BUFFERS ADDRESSED BY ALGORITHM IN */
+ /* FIRST PAGE. IF THEY ARE EMPTY THEY STILL HAVE A CONTAINER */
+ /* HEADER OF 2 WORDS. */
/*-----------------------------------------------------------------*/
- tlcnNextContainer = (lcnCopyPageptr.p->word32[ZPOS_EMPTY_LIST] >> 23) & 0x7f;
- while (tlcnNextContainer < ZEMPTYLIST) {
- tlcnConIndex = (tlcnNextContainer << ZSHIFT_PLUS) - (tlcnNextContainer << ZSHIFT_MINUS);
- tlcnConIndex = tlcnConIndex + ZHEAD_SIZE;
+ tlcnConIndex = ZHEAD_SIZE;
+ tlupForward = 1;
+ for (tlcnIndex = 0; tlcnIndex <= ZNO_CONTAINERS - 1; tlcnIndex++) {
tlupIndex = tlcnConIndex;
tlupElemIndex = tlcnConIndex + ZCON_HEAD_SIZE;
- tlupForward = 1;
srCheckContainer(signal);
if (tresult != 0) {
jam();
return;
}//if
- tlcnNextContainer = (lcnCopyPageptr.p->word32[tlcnConIndex] >> 11) & 0x7f;
- }//while
- if (tlcnNextContainer == ZEMPTYLIST) {
- jam();
- /*empty*/;
- } else {
+ tlcnConIndex = tlcnConIndex + ZBUF_SIZE;
+ }//for
+ }//if
+ /*-----------------------------------------------------------------*/
+ /* TAKE CARE OF ALL USED BUFFERS ON THE LEFT SIDE. */
+ /*-----------------------------------------------------------------*/
+ tlcnNextContainer = (lcnCopyPageptr.p->word32[ZPOS_EMPTY_LIST] >> 23) & 0x7f;
+ while (tlcnNextContainer < ZEMPTYLIST) {
+ tlcnConIndex = (tlcnNextContainer << ZSHIFT_PLUS) - (tlcnNextContainer << ZSHIFT_MINUS);
+ tlcnConIndex = tlcnConIndex + ZHEAD_SIZE;
+ tlupIndex = tlcnConIndex;
+ tlupElemIndex = tlcnConIndex + ZCON_HEAD_SIZE;
+ tlupForward = 1;
+ srCheckContainer(signal);
+ if (tresult != 0) {
jam();
- tresult = 4;
return;
}//if
- /*-----------------------------------------------------------------*/
- /* TAKE CARE OF ALL USED BUFFERS ON THE RIGHT SIDE. */
- /*-----------------------------------------------------------------*/
- tlupForward = cminusOne;
- tlcnNextContainer = (lcnCopyPageptr.p->word32[ZPOS_EMPTY_LIST] >> 16) & 0x7f;
- while (tlcnNextContainer < ZEMPTYLIST) {
- tlcnConIndex = (tlcnNextContainer << ZSHIFT_PLUS) - (tlcnNextContainer << ZSHIFT_MINUS);
- tlcnConIndex = tlcnConIndex + ((ZHEAD_SIZE + ZBUF_SIZE) - ZCON_HEAD_SIZE);
- tlupIndex = tlcnConIndex;
- tlupElemIndex = tlcnConIndex - 1;
- srCheckContainer(signal);
- if (tresult != 0) {
- jam();
- return;
- }//if
- tlcnNextContainer = (lcnCopyPageptr.p->word32[tlcnConIndex] >> 11) & 0x7f;
- }//while
- if (tlcnNextContainer == ZEMPTYLIST) {
- jam();
- /*empty*/;
- } else {
+ tlcnNextContainer = (lcnCopyPageptr.p->word32[tlcnConIndex] >> 11) & 0x7f;
+ }//while
+ if (tlcnNextContainer == ZEMPTYLIST) {
+ jam();
+ /*empty*/;
+ } else {
+ jam();
+ tresult = 4;
+ return;
+ }//if
+ /*-----------------------------------------------------------------*/
+ /* TAKE CARE OF ALL USED BUFFERS ON THE RIGHT SIDE. */
+ /*-----------------------------------------------------------------*/
+ tlupForward = cminusOne;
+ tlcnNextContainer = (lcnCopyPageptr.p->word32[ZPOS_EMPTY_LIST] >> 16) & 0x7f;
+ while (tlcnNextContainer < ZEMPTYLIST) {
+ tlcnConIndex = (tlcnNextContainer << ZSHIFT_PLUS) - (tlcnNextContainer << ZSHIFT_MINUS);
+ tlcnConIndex = tlcnConIndex + ((ZHEAD_SIZE + ZBUF_SIZE) - ZCON_HEAD_SIZE);
+ tlupIndex = tlcnConIndex;
+ tlupElemIndex = tlcnConIndex - 1;
+ srCheckContainer(signal);
+ if (tresult != 0) {
jam();
- tresult = 4;
return;
}//if
+ tlcnNextContainer = (lcnCopyPageptr.p->word32[tlcnConIndex] >> 11) & 0x7f;
+ }//while
+ if (tlcnNextContainer == ZEMPTYLIST) {
+ jam();
+ /*empty*/;
+ } else {
+ jam();
+ tresult = 4;
+ return;
}//if
}//Dbacc::srCheckPage()
@@ -9030,50 +7523,14 @@ void Dbacc::undoWritingProcess(Signal* signal)
writeUndoDataInfo(signal);
checkUndoPages(signal);
}//if
- } else if (tpageType != ZLONG_PAGE_TYPE) {
+ } else {
jam();
/* --------------------------------------------------------------------------- */
/* ONLY PAGE INFO AND OVERFLOW PAGE INFO CAN BE LOGGED BY THIS ROUTINE. A */
/* SERIOUS ERROR. */
/* --------------------------------------------------------------------------- */
sendSystemerror(signal);
- } else {
- /* --------------------------------------------------------------------------------- */
- /* THIS LOG RECORD IS GENERATED ON A LONG KEY PAGE. THESE PAGES USE LOGICAL */
- /* LOGGING. */
- /* --------------------------------------------------------------------------------- */
- if (tactivePageDir >= fragrecptr.p->lcpMaxOverDirIndex) {
- jam();
- /* --------------------------------------------------------------------------------- */
- /* OBVIOUSLY THE FRAGMENT HAS EXPANDED THE NUMBER OF OVERFLOW PAGES SINCE THE */
- /* START OF THE LOCAL CHECKPOINT. WE NEED NOT LOG ANY UPDATES OF PAGES THAT DID*/
- /* NOT EXIST AT START OF LCP. */
- /* --------------------------------------------------------------------------------- */
- /*empty*/;
- } else {
- jam();
- /* --------------------------------------------------------------------------------- */
- /* LOGICAL LOGGING OF LONG KEY PAGES CAN EITHER BE UNDO OF AN INSERT OR UNDO */
- /* OF A DELETE KEY. UNDO OF DELETE NEEDS TO LOG THE KEY TO BE REINSERTED WHILE */
- /* UNDO OF INSERT ONLY NEEDS TO LOG THE INDEX TO BE DELETED. */
- /* --------------------------------------------------------------------------------- */
- undopageptr.i = (cundoposition >> ZUNDOPAGEINDEXBITS) & (cundopagesize - 1);
- ptrAss(undopageptr, undopage);
- theadundoindex = cundoposition & ZUNDOPAGEINDEX_MASK;
- tundoindex = theadundoindex + ZUNDOHEADSIZE;
- if (cundoinfolength == 0) {
- jam();
- writeUndoHeader(signal, tactivePageDir, UndoHeader::ZUNDO_INSERT_LONG_KEY);
- } else {
- jam();
- writeUndoHeader(signal, tactivePageDir, UndoHeader::ZUNDO_DELETE_LONG_KEY);
- arrGuard(ZWORDS_IN_PAGE - cundoElemIndex, 2048);
- tundoElemIndex = datapageptr.p->word32[ZWORDS_IN_PAGE - cundoElemIndex] & 0xffff;
- writeUndoDataInfo(signal);
- }//if
- checkUndoPages(signal);
- }//if
- }//if
+ }
} else {
if (fragrecptr.p->fragState == LCP_SEND_OVER_PAGES) {
jam();
@@ -9108,46 +7565,7 @@ void Dbacc::undoWritingProcess(Signal* signal)
checkUndoPages(signal);
}//if
}//if
- } else if (tpageType == ZLONG_PAGE_TYPE) {
- if (tactivePageDir < fragrecptr.p->lcpDirIndex) {
- jam();
- // -------------------------------------------------------------
- // THIS PAGE HAS ALREADY BEEN WRITTEN IN THE LOCAL CHECKPOINT.
- // -------------------------------------------------------------
- } else {
- if (tactivePageDir >= fragrecptr.p->lcpMaxOverDirIndex) {
- jam();
- // -------------------------------------------------------------
- // OBVIOUSLY THE FRAGMENT HAS EXPANDED THE NUMBER OF OVERFLOW
- // PAGES SINCE THE START OF THE LOCAL CHECKPOINT. WE NEED NOT
- // LOG ANY UPDATES OF PAGES THAT DID NOT EXIST AT START OF LCP.
- // -------------------------------------------------------------
- } else {
- jam();
- // -------------------------------------------------------------
- // LOGICAL LOGGING OF LONG KEY PAGES CAN EITHER BE UNDO OF AN
- // INSERT OR UNDO OF A DELETE KEY. UNDO OF DELETE NEEDS TO LOG
- // THE KEY TO BE REINSERTED WHILE UNDO OF INSERT ONLY NEEDS TO
- // LOG THE INDEX TO BE DELETED.
- // -------------------------------------------------------------
- undopageptr.i = (cundoposition >> ZUNDOPAGEINDEXBITS) & (cundopagesize - 1);
- ptrAss(undopageptr, undopage);
- theadundoindex = cundoposition & ZUNDOPAGEINDEX_MASK;
- tundoindex = theadundoindex + ZUNDOHEADSIZE;
- if (cundoinfolength == 0) {
- jam();
- writeUndoHeader(signal, tactivePageDir, UndoHeader::ZUNDO_INSERT_LONG_KEY);
- } else {
- jam();
- writeUndoHeader(signal, tactivePageDir, UndoHeader::ZUNDO_DELETE_LONG_KEY);
- arrGuard(ZWORDS_IN_PAGE - cundoElemIndex, 2048);
- tundoElemIndex = datapageptr.p->word32[ZWORDS_IN_PAGE - cundoElemIndex] & 0xffff;
- writeUndoDataInfo(signal);
- }//if
- checkUndoPages(signal);
- }//if
- }//if
- }//if
+ }
}//if
}//if
}//Dbacc::undoWritingProcess()
@@ -9200,8 +7618,9 @@ void Dbacc::writeUndoHeader(Signal* signal,
(UndoHeader *) &undopageptr.p->undoword[theadundoindex];
undoHeaderPtr->tableId = rootfragrecptr.p->mytabptr;
- undoHeaderPtr->rootFragId = rootfragrecptr.p->fragmentid[0];
+ undoHeaderPtr->rootFragId = rootfragrecptr.p->fragmentid[0] >> 1;
undoHeaderPtr->localFragId = fragrecptr.p->myfid;
+ ndbrequire((undoHeaderPtr->localFragId >> 1) == undoHeaderPtr->rootFragId);
Uint32 Ttmp = cundoinfolength;
Ttmp = (Ttmp << 4) + pageType;
Ttmp = Ttmp << 14;
@@ -9231,52 +7650,16 @@ void Dbacc::writeUndoOpInfo(Signal* signal)
undopageptr.p->undoword[tundoindex + 1] = operationRecPtr.p->hashValue;
undopageptr.p->undoword[tundoindex + 2] = operationRecPtr.p->tupkeylen;
tundoindex = tundoindex + 3;
- if (fragrecptr.p->keyLength != 0) {
- // Fixed size keys
- jam();
- locPageptr.i = operationRecPtr.p->elementPage;
- ptrCheckGuard(locPageptr, cpagesize, page8);
- Uint32 Tforward = operationRecPtr.p->elementIsforward;
- Uint32 TelemPtr = operationRecPtr.p->elementPointer;
- TelemPtr += Tforward;
- TelemPtr += Tforward;
- //---------------------------------------------------------------------------------
- // Now the pointer is at the start of the key part of the element. Now copy from there
- // to the UNDO log.
- //---------------------------------------------------------------------------------
- Uint32 keyLen = operationRecPtr.p->tupkeylen;
- ndbrequire(keyLen <= 8);
- arrGuard(tundoindex+keyLen, 8192);
- for (Uint32 twuoiIndex = 0; twuoiIndex < keyLen; twuoiIndex++) {
- jam();
- arrGuard(TelemPtr, 2048);
- undopageptr.p->undoword[tundoindex] = locPageptr.p->word32[TelemPtr];
- tundoindex++;
- TelemPtr += Tforward;
- }//for
- cundoinfolength = ZOP_HEAD_INFO_LN + operationRecPtr.p->tupkeylen;
- } else {
- // Long keys
- jam();
-
- arrGuard(operationRecPtr.p->longKeyPageIndex, ZMAX_NO_OF_LONGKEYS_IN_PAGE);
- locPageptr.i = operationRecPtr.p->longPagePtr;
- ptrCheckGuard(locPageptr, cpagesize, page8);
-
- Uint32 indexValue =
- locPageptr.p->word32[ZWORDS_IN_PAGE - operationRecPtr.p->longKeyPageIndex];
- Uint32 keyLen = indexValue >> 16;
- Uint32 physPageIndex = indexValue & 0xffff;
- ndbrequire(keyLen == operationRecPtr.p->tupkeylen);
-
- arrGuard(tundoindex+keyLen, 8192);
- arrGuard(physPageIndex+keyLen, 2048);
- for (Uint32 i = 0; i < keyLen; i++){
- undopageptr.p->undoword[tundoindex + i] = locPageptr.p->word32[physPageIndex+i];
- }
- tundoindex = tundoindex + keyLen;
- cundoinfolength = ZOP_HEAD_INFO_LN + keyLen;
- }//if
+ // log localkey1
+ locPageptr.i = operationRecPtr.p->elementPage;
+ ptrCheckGuard(locPageptr, cpagesize, page8);
+ Uint32 Tforward = operationRecPtr.p->elementIsforward;
+ Uint32 TelemPtr = operationRecPtr.p->elementPointer;
+ TelemPtr += Tforward; // ZELEM_HEAD_SIZE
+ arrGuard(tundoindex+1, 8192);
+ undopageptr.p->undoword[tundoindex] = locPageptr.p->word32[TelemPtr];
+ tundoindex++;
+ cundoinfolength = ZOP_HEAD_INFO_LN + 1;
}//Dbacc::writeUndoOpInfo()
/* --------------------------------------------------------------------------------- */
@@ -9460,7 +7843,7 @@ void Dbacc::initFragAdd(Signal* signal,
}//if
regFragPtr.p->fragState = ACTIVEFRAG;
// NOTE: next line must match calculation in Dblqh::execLQHFRAGREQ
- regFragPtr.p->myfid = (rootFragIndex << (lhFragBits - 1)) | req->fragId;
+ regFragPtr.p->myfid = (req->fragId << 1) | rootFragIndex;
regFragPtr.p->myroot = rootIndex;
regFragPtr.p->myTableId = req->tableId;
ndbrequire(req->kValue == 6);
@@ -9488,17 +7871,16 @@ void Dbacc::initFragAdd(Signal* signal,
regFragPtr.p->dirsize = 1;
regFragPtr.p->loadingFlag = ZFALSE;
regFragPtr.p->keyLength = req->keyLength;
- if (req->keyLength == 0) {
- jam();
- regFragPtr.p->elementLength = (1 + ZELEM_HEAD_SIZE) + regFragPtr.p->localkeylen;
- } else {
- jam();
- regFragPtr.p->elementLength = (ZELEM_HEAD_SIZE + regFragPtr.p->localkeylen) + regFragPtr.p->keyLength;
- }//if
+ ndbrequire(req->keyLength != 0);
+ regFragPtr.p->elementLength = ZELEM_HEAD_SIZE + regFragPtr.p->localkeylen;
Uint32 Tmp1 = (regFragPtr.p->maxp + 1) + regFragPtr.p->p;
Uint32 Tmp2 = regFragPtr.p->maxloadfactor - regFragPtr.p->minloadfactor;
Tmp2 = Tmp1 * Tmp2;
regFragPtr.p->slackCheck = Tmp2;
+
+ Uint32 hasCharAttr = g_key_descriptor_pool.getPtr(req->tableId)->hasCharAttr;
+ regFragPtr.p->hasCharAttr = hasCharAttr;
+
}//Dbacc::initFragAdd()
void Dbacc::initFragGeneral(FragmentrecPtr regFragPtr)
@@ -9518,6 +7900,7 @@ void Dbacc::initFragGeneral(FragmentrecPtr regFragPtr)
regFragPtr.p->activeDataPage = 0;
regFragPtr.p->createLcp = ZFALSE;
regFragPtr.p->stopQueOp = ZFALSE;
+ regFragPtr.p->hasCharAttr = ZFALSE;
regFragPtr.p->nextAllocPage = 0;
regFragPtr.p->nrWaitWriteUndoExit = 0;
regFragPtr.p->lastUndoIsStored = ZFALSE;
@@ -10103,7 +8486,7 @@ void Dbacc::startUndoLab(Signal* signal)
}//for
// Send report of how many undo log records where executed
- signal->theData[0] = EventReport::UNDORecordsExecuted;
+ signal->theData[0] = NDB_LE_UNDORecordsExecuted;
signal->theData[1] = DBACC; // From block
signal->theData[2] = 0; // Total records executed
for (int i = 0; i < 10; i++){
@@ -10296,6 +8679,7 @@ void Dbacc::srDoUndoLab(Signal* signal)
// ROOT FRAGMENT ID
tfid = undoHeaderPtr->rootFragId;
+ ndbrequire((undoHeaderPtr->localFragId >> 1) == undoHeaderPtr->rootFragId);
if (!getrootfragmentrec(signal, rootfragrecptr, tfid)) {
jam();
/*---------------------------------------------------------------------*/
@@ -10305,7 +8689,10 @@ void Dbacc::srDoUndoLab(Signal* signal)
creadyUndoaddress = cprevUndoaddress;
// PREVIOUS UNDO LOG RECORD FOR ALL FRAGMENTS
cprevUndoaddress = undoHeaderPtr->prevUndoAddress;
- undoNext2Lab(signal);
+ undoNext2Lab(signal);
+#ifdef VM_TRACE
+ ndbout_c("ignoring root fid %d", (int)tfid);
+#endif
return;
}//if
/*-----------------------------------------------------------------------*/
@@ -10415,103 +8802,6 @@ void Dbacc::srDoUndoLab(Signal* signal)
break;
}
- case UndoHeader::ZUNDO_INSERT_LONG_KEY:{
- jam();
- /*---------------------------------------------------------------------*/
- /* WE WILL UNDO AN INSERT OF A LONG KEY. THIS IS PERFORMED BY DELETING */
- /* THE LONG KEY. */
- /*---------------------------------------------------------------------*/
- souDirRangePtr.i = fragrecptr.p->overflowdir;
- tmpP2 = tmpP >> 8;
- tmpP = tmpP & 0xff;
- arrGuard(tmpP2, 256);
- ptrCheckGuard(souDirRangePtr, cdirrangesize, dirRange);
- souDirptr.i = souDirRangePtr.p->dirArray[tmpP2];
- ptrCheckGuard(souDirptr, cdirarraysize, directoryarray);
- dlkPageptr.i = souDirptr.p->pagep[tmpP];
- ptrCheckGuard(dlkPageptr, cpagesize, page8);
- tdlkLogicalPageIndex = tundoPageindex;
- deleteLongKey(signal);
- break;
- }
-
- case UndoHeader::ZUNDO_DELETE_LONG_KEY: {
- jam();
- /*----------------------------------------------------------------------*/
- /* WE WILL UNDO DELETE OF A LONG KEY. THIS IS PERFORMED BY INSERTING */
- /* IT AGAIN. */
- /*----------------------------------------------------------------------*/
- souDirRangePtr.i = fragrecptr.p->overflowdir;
- taslpDirIndex = tmpP;
- tmpP2 = tmpP >> 8;
- tmpP = tmpP & 0xff;
- ptrCheckGuard(souDirRangePtr, cdirrangesize, dirRange);
- arrGuard(tmpP2, 256);
- souDirptr.i = souDirRangePtr.p->dirArray[tmpP2];
-
- if(souDirptr.i == RNIL) {
- //----------------------------------------------------------------
- // Allocate a directory.
- //----------------------------------------------------------------
- jam();
- seizeDirectory(signal);
- if (tresult > ZLIMIT_OF_ERROR) {
- jam();
- sendSystemerror(signal);
- return;
- }
- souDirRangePtr.p->dirArray[tmpP2] = sdDirptr.i;
- souDirptr.i = souDirRangePtr.p->dirArray[tmpP2];
- }
-
- ptrCheckGuard(souDirptr, cdirarraysize, directoryarray);
- slkapPageptr.i = souDirptr.p->pagep[tmpP];
-
- if(slkapPageptr.i == RNIL) {
- //----------------------------------------------------------------
- // The delete operation was probably the last on the page and the
- // page was released and not written down to disk. We need to
- // allocate a page and put it in the same dirindex as it was in
- // before it was released.
- // This is because an eventual UNDO_INSERT on the same key in the
- // same LCP must be able to find the key and it has only the
- // dirindex to go on, the key itself is not saved on disk in a
- // UNDO_INSERT.
- //----------------------------------------------------------------
- jam();
- allocSpecificLongOverflowPage(signal);
- slkapPageptr.i = aslpPageptr.i;
- }
-
- ptrCheckGuard(slkapPageptr, cpagesize, page8);
- seizePage(signal);
- ndbrequire(tresult <= ZLIMIT_OF_ERROR);
-
- slkapCopyPageptr = spPageptr;
- ndbrequire(cundoinfolength <= 2048);
-
- for (Uint32 tmp = 0; tmp < cundoinfolength; tmp++) {
- dbgWord32(slkapCopyPageptr, tmp, undopageptr.p->undoword[tmpindex]);
- slkapCopyPageptr.p->word32[tmp] = undopageptr.p->undoword[tmpindex];
- tmpindex = tmpindex + 1;
- }//for
- jam();
- //----------------------------------------------------------------
- // We must store the key at the same place it was deleted from.
- // This is because an eventual UNDO_INSERT on the same key in the
- // same LCP must be able to find the key and it has only the index
- // information stored on disk to go on, the key itself is not
- // saved on disk in an UNDO_INSERT.
- //----------------------------------------------------------------
- tslkapKeyLen = cundoinfolength;
- tslkapPageIndex = tundoPageindex;
- storeLongKeysAtPos(signal);
-
- rpPageptr = slkapCopyPageptr;
- releasePage(signal);
- break;
- }
-
case UndoHeader::ZOP_INFO: {
jam();
/*---------------------------------------------------------------------*/
@@ -10550,6 +8840,7 @@ void Dbacc::srDoUndoLab(Signal* signal)
operationRecPtr.p->longKeyPageIndex = RNIL;
operationRecPtr.p->scanRecPtr = RNIL;
operationRecPtr.p->isAccLockReq = ZFALSE;
+ operationRecPtr.p->isUndoLogReq = ZTRUE;
// Read operation values from undo page
operationRecPtr.p->operation = undopageptr.p->undoword[tmpindex];
@@ -10559,17 +8850,15 @@ void Dbacc::srDoUndoLab(Signal* signal)
const Uint32 tkeylen = undopageptr.p->undoword[tmpindex];
tmpindex++;
operationRecPtr.p->tupkeylen = tkeylen;
+ operationRecPtr.p->xfrmtupkeylen = 0; // not used
operationRecPtr.p->fragptr = fragrecptr.i;
- ndbrequire((fragrecptr.p->keyLength == 0) ||
- ((fragrecptr.p->keyLength != 0) &&
- (fragrecptr.p->keyLength == tkeylen)));
+ ndbrequire(fragrecptr.p->keyLength != 0 &&
+ fragrecptr.p->keyLength == tkeylen);
- // Read keydata from undo page
- for (Uint32 tmp = 0; tmp < tkeylen; tmp++) {
- signal->theData[7+tmp] = undopageptr.p->undoword[tmpindex];
- tmpindex = tmpindex + 1;
- }//for
+ // Read localkey1 from undo page
+ signal->theData[7 + 0] = undopageptr.p->undoword[tmpindex];
+ tmpindex = tmpindex + 1;
arrGuard((tmpindex - 1), 8192);
getElement(signal);
if (tgeResult != ZTRUE) {
@@ -10752,43 +9041,25 @@ void Dbacc::execACC_OVER_REC(Signal* signal)
ptrCheckGuard(pnoPageidptr, cpagesize, page8);
tpnoPageType = pnoPageidptr.p->word32[ZPOS_PAGE_TYPE];
tpnoPageType = (tpnoPageType >> ZPOS_PAGE_TYPE_BIT) & 3;
- if (tpnoPageType == ZLONG_PAGE_TYPE) {
+ if (pnoPageidptr.p->word32[ZPOS_ALLOC_CONTAINERS] > ZFREE_LIMIT) {
jam();
- // This is to clean the list parameters.
- pnoPageidptr.p->word32[ZPOS_PREV_PAGE] = RNIL;
- pnoPageidptr.p->word32[ZPOS_NEXT_PAGE] = RNIL;
- if (pnoPageidptr.p->word32[ZPOS_ARRAY_POS] != 4) {
- jam();
- /*---------------------------------------------------------------------------*/
- /* THE PAGE WAS A LONG PAGE AND IT BELONGED TO A FREE LIST. PUT IT INTO ONE */
- /* OF THE FREE LIST THEN. */
- /*---------------------------------------------------------------------------*/
- // Insert page!
- ipaPagePtr = pnoPageidptr;
- tipaArrayPos = pnoPageidptr.p->word32[ZPOS_ARRAY_POS];
- insertPageArrayList(signal);
- }//if
+ dbgWord32(pnoPageidptr, ZPOS_OVERFLOWREC, RNIL);
+ pnoPageidptr.p->word32[ZPOS_OVERFLOWREC] = RNIL;
+ ndbrequire(pnoPageidptr.p->word32[ZPOS_PAGE_ID] == fragrecptr.p->nextAllocPage);
} else {
- if (pnoPageidptr.p->word32[ZPOS_ALLOC_CONTAINERS] > ZFREE_LIMIT) {
- jam();
- dbgWord32(pnoPageidptr, ZPOS_OVERFLOWREC, RNIL);
- pnoPageidptr.p->word32[ZPOS_OVERFLOWREC] = RNIL;
- ndbrequire(pnoPageidptr.p->word32[ZPOS_PAGE_ID] == fragrecptr.p->nextAllocPage);
- } else {
+ jam();
+ seizeOverRec(signal);
+ sorOverflowRecPtr.p->dirindex = pnoPageidptr.p->word32[ZPOS_PAGE_ID];
+ ndbrequire(sorOverflowRecPtr.p->dirindex == fragrecptr.p->nextAllocPage);
+ dbgWord32(pnoPageidptr, ZPOS_OVERFLOWREC, sorOverflowRecPtr.i);
+ pnoPageidptr.p->word32[ZPOS_OVERFLOWREC] = sorOverflowRecPtr.i;
+ sorOverflowRecPtr.p->overpage = pnoPageidptr.i;
+ porOverflowRecPtr = sorOverflowRecPtr;
+ putOverflowRecInFrag(signal);
+ if (pnoPageidptr.p->word32[ZPOS_ALLOC_CONTAINERS] == 0) {
jam();
- seizeOverRec(signal);
- sorOverflowRecPtr.p->dirindex = pnoPageidptr.p->word32[ZPOS_PAGE_ID];
- ndbrequire(sorOverflowRecPtr.p->dirindex == fragrecptr.p->nextAllocPage);
- dbgWord32(pnoPageidptr, ZPOS_OVERFLOWREC, sorOverflowRecPtr.i);
- pnoPageidptr.p->word32[ZPOS_OVERFLOWREC] = sorOverflowRecPtr.i;
- sorOverflowRecPtr.p->overpage = pnoPageidptr.i;
- porOverflowRecPtr = sorOverflowRecPtr;
- putOverflowRecInFrag(signal);
- if (pnoPageidptr.p->word32[ZPOS_ALLOC_CONTAINERS] == 0) {
- jam();
- ropPageptr = pnoPageidptr;
- releaseOverpage(signal);
- }//if
+ ropPageptr = pnoPageidptr;
+ releaseOverpage(signal);
}//if
}//if
}//if
@@ -10863,7 +9134,6 @@ void Dbacc::execACC_SCANREQ(Signal* signal)
rootfragrecptr.p->scan[i] = scanPtr.i;
scanPtr.p->scanBucketState = ScanRec::FIRST_LAP;
scanPtr.p->scanLockMode = AccScanReq::getLockMode(tscanFlag);
- scanPtr.p->scanKeyinfoFlag = AccScanReq::getKeyinfoFlag(tscanFlag);
scanPtr.p->scanReadCommittedFlag = AccScanReq::getReadCommittedFlag(tscanFlag);
/* TWELVE BITS OF THE ELEMENT HEAD ARE SCAN */
@@ -11611,11 +9881,6 @@ void Dbacc::initScanOpRec(Signal* signal)
Uint32 tisoTmp;
Uint32 tisoLocalPtr;
Uint32 guard24;
- Uint32 tisoPageIndex;
- Uint32 tisoPagedir;
- DirRangePtr tisoOverflowrangeptr;
- DirectoryarrayPtr tisoOverflowDirptr;
- Page8Ptr tisoPageptr;
scanPtr.p->scanOpsAllocated++;
@@ -11644,6 +9909,7 @@ void Dbacc::initScanOpRec(Signal* signal)
operationRecPtr.p->elementPointer = tisoElementptr;
operationRecPtr.p->elementPage = isoPageptr.i;
operationRecPtr.p->isAccLockReq = ZFALSE;
+ operationRecPtr.p->isUndoLogReq = ZFALSE;
tisoLocalPtr = tisoElementptr + tisoIsforward;
guard24 = fragrecptr.p->localkeylen - 1;
for (tisoTmp = 0; tisoTmp <= guard24; tisoTmp++) {
@@ -11654,40 +9920,8 @@ void Dbacc::initScanOpRec(Signal* signal)
}//for
arrGuard(tisoLocalPtr, 2048);
operationRecPtr.p->keydata[0] = isoPageptr.p->word32[tisoLocalPtr];
- if (fragrecptr.p->keyLength != 0) {
- jam();
- operationRecPtr.p->tupkeylen = fragrecptr.p->keyLength;
- guard24 = fragrecptr.p->keyLength - 1;
- for (tisoTmp = 0; tisoTmp <= guard24; tisoTmp++) {
- arrGuard(tisoTmp, 8);
- arrGuard(tisoLocalPtr, 2048);
- operationRecPtr.p->keydata[tisoTmp] = isoPageptr.p->word32[tisoLocalPtr];
- tisoLocalPtr = tisoLocalPtr + tisoIsforward;
- }//for
- } else {
- // Long key handling. Put the long key reference in the operation records.
- tisoPageIndex = operationRecPtr.p->keydata[0] & 0x3ff;
- arrGuard(ZWORDS_IN_PAGE - tisoPageIndex, 2048);
-
- tisoPagedir = operationRecPtr.p->keydata[0] >> 10;
- arrGuard((tisoPagedir >> 8), 256);
-
- tisoOverflowrangeptr.i = fragrecptr.p->overflowdir;
- ptrCheckGuard(tisoOverflowrangeptr, cdirrangesize, dirRange);
-
- tisoOverflowDirptr.i = tisoOverflowrangeptr.p->dirArray[tisoPagedir >> 8];
- ptrCheckGuard(tisoOverflowDirptr, cdirarraysize, directoryarray);
-
- tisoPageptr.i = tisoOverflowDirptr.p->pagep[tisoPagedir & 0xff];
- ptrCheckGuard(tisoPageptr, cpagesize, page8);
-
- operationRecPtr.p->longPagePtr = tisoPageptr.i;
- operationRecPtr.p->longKeyPageIndex = tisoPageIndex;
-
- // Read length of key from page
- Uint32 tmp = tisoPageptr.p->word32[ZWORDS_IN_PAGE - tisoPageIndex];
- operationRecPtr.p->tupkeylen = tmp >> 16;
- }
+ operationRecPtr.p->tupkeylen = fragrecptr.p->keyLength;
+ operationRecPtr.p->xfrmtupkeylen = 0; // not used
}//Dbacc::initScanOpRec()
/* --------------------------------------------------------------------------------- */
@@ -11882,21 +10116,15 @@ void Dbacc::releaseScanContainer(Signal* signal)
Uint32 trscElemlens;
Uint32 trscElemlen;
- if (trscContainerlen < 5) {
+ if (trscContainerlen < 4) {
if (trscContainerlen != ZCON_HEAD_SIZE) {
jam();
sendSystemerror(signal);
}//if
- return; /* 3 IS THE MINIMUM SIZE OF THE ELEMENT */
- }//if
- trscElemlens = trscContainerlen - 2;
- if (fragrecptr.p->keyLength != 0) {
- jam();
- trscElemlen = (1 + fragrecptr.p->keyLength) + fragrecptr.p->localkeylen; /* LENGTH OF THE ELEMENT */
- } else {
- jam();
- trscElemlen = (1 + ZACTIVE_LONG_KEY_LEN) + fragrecptr.p->localkeylen; /* LENGTH OF THE ELEMENT */
+ return; /* 2 IS THE MINIMUM SIZE OF THE ELEMENT */
}//if
+ trscElemlens = trscContainerlen - ZCON_HEAD_SIZE;
+ trscElemlen = fragrecptr.p->elementLength;
if (trscIsforward == 1) {
jam();
trscElementptr = trscContainerptr + ZCON_HEAD_SIZE;
@@ -11923,7 +10151,7 @@ void Dbacc::releaseScanContainer(Signal* signal)
}//if
trscElemlens = trscElemlens - trscElemlen;
trscElementptr = trscElementptr + trscElemStep;
- } while (trscElemlens > 2);
+ } while (trscElemlens > 1);
if (trscElemlens != 0) {
jam();
sendSystemerror(signal);
@@ -11982,19 +10210,12 @@ bool Dbacc::searchScanContainer(Signal* signal)
Uint32 tsscElemlen;
Uint32 tsscElemStep;
- if (tsscContainerlen < 5) {
+ if (tsscContainerlen < 4) {
jam();
- return false; /* 3 IS THE MINIMUM SIZE OF THE ELEMENT */
+ return false; /* 2 IS THE MINIMUM SIZE OF THE ELEMENT */
}//if
tsscElemlens = tsscContainerlen - ZCON_HEAD_SIZE;
- if (fragrecptr.p->keyLength == 0) {
- jam();
- tsscElemlen = (ZELEM_HEAD_SIZE + ZACTIVE_LONG_KEY_LEN) + fragrecptr.p->localkeylen;
- } else {
- jam();
- /* LENGTH OF THE ELEMENT */
- tsscElemlen = (ZELEM_HEAD_SIZE + fragrecptr.p->keyLength) + fragrecptr.p->localkeylen;
- }//if
+ tsscElemlen = fragrecptr.p->elementLength;
/* LENGTH OF THE ELEMENT */
if (tsscIsforward == 1) {
jam();
@@ -12032,7 +10253,7 @@ bool Dbacc::searchScanContainer(Signal* signal)
/* THE ELEMENT IS ALREADY SENT. */
/* SEARCH FOR NEXT ONE */
tsscElemlens = tsscElemlens - tsscElemlen;
- if (tsscElemlens > 2) {
+ if (tsscElemlens > 1) {
jam();
tsscElementptr = tsscElementptr + tsscElemStep;
goto SCANELEMENTLOOP001;
@@ -12047,172 +10268,20 @@ void Dbacc::sendNextScanConf(Signal* signal)
{
scanPtr.p->scanTimer = scanPtr.p->scanContinuebCounter;
Uint32 blockNo = refToBlock(scanPtr.p->scanUserblockref);
- if (!scanPtr.p->scanKeyinfoFlag){
- jam();
- /** ---------------------------------------------------------------------
- * LQH WILL NOT HAVE ANY USE OF THE TUPLE KEY LENGTH IN THIS CASE AND
- * SO WE DO NOT PROVIDE IT. IN THIS CASE THESE VALUES ARE UNDEFINED.
- * ---------------------------------------------------------------------- */
- signal->theData[0] = scanPtr.p->scanUserptr;
- signal->theData[1] = operationRecPtr.i;
- signal->theData[2] = operationRecPtr.p->fid;
- signal->theData[3] = operationRecPtr.p->localdata[0];
- signal->theData[4] = operationRecPtr.p->localdata[1];
- signal->theData[5] = fragrecptr.p->localkeylen;
- EXECUTE_DIRECT(blockNo, GSN_NEXT_SCANCONF, signal, 6);
- return;
- }//if
-
- if (fragrecptr.p->keyLength != 0) {
- jam();
- signal->theData[0] = scanPtr.p->scanUserptr;
- signal->theData[1] = operationRecPtr.i;
- signal->theData[2] = operationRecPtr.p->fid;
- signal->theData[3] = operationRecPtr.p->localdata[0];
- signal->theData[4] = operationRecPtr.p->localdata[1];
- signal->theData[5] = fragrecptr.p->localkeylen;
- signal->theData[6] = fragrecptr.p->keyLength;
- signal->theData[7] = operationRecPtr.p->keydata[0];
- signal->theData[8] = operationRecPtr.p->keydata[1];
- signal->theData[9] = operationRecPtr.p->keydata[2];
- signal->theData[10] = operationRecPtr.p->keydata[3];
- EXECUTE_DIRECT(blockNo, GSN_NEXT_SCANCONF, signal, 11);
- if (fragrecptr.p->keyLength > ZKEYINKEYREQ) {
- jam();
- /* = 4 */
- signal->theData[0] = scanPtr.p->scanUserptr;
- signal->theData[1] = operationRecPtr.i;
- signal->theData[2] = operationRecPtr.p->fid;
- signal->theData[3] = fragrecptr.p->keyLength - ZKEYINKEYREQ;
- signal->theData[4] = operationRecPtr.p->keydata[4];
- signal->theData[5] = operationRecPtr.p->keydata[5];
- signal->theData[6] = operationRecPtr.p->keydata[6];
- signal->theData[7] = operationRecPtr.p->keydata[7];
- EXECUTE_DIRECT(blockNo, GSN_ACC_SCAN_INFO, signal, 8);
- return;
- }//if
- } else {
- jam();
- sendScaninfo(signal);
- return;
- }//if
-}//Dbacc::sendNextScanConf()
-
-/* --------------------------------------------------------------------------------- */
-/* SEND_SCANINFO */
-/* DESCRIPTION: SCAN AN ELEMENT OF A LONG_KEY_PAGE. */
-/* --------------------------------------------------------------------------------- */
-void Dbacc::sendScaninfo(Signal* signal)
-{
- DirRangePtr ssiOverflowrangeptr;
- DirectoryarrayPtr ssiOverflowDirptr;
- Page8Ptr ssiPageptr;
- Uint32 tssiPageIndex;
- Uint32 tssiPagedir;
- Uint32 tssiKeyLen;
- Uint32 tssiStartIndex;
- Uint32 tssiIndexValue;
- Uint32 tssiTmp;
-
- Uint32 blockNo = refToBlock(scanPtr.p->scanUserblockref);
-
- tssiPageIndex = operationRecPtr.p->keydata[0] & 0x3ff;
- tssiPagedir = operationRecPtr.p->keydata[0] >> 10;
- ssiOverflowrangeptr.i = fragrecptr.p->overflowdir;
- ptrCheckGuard(ssiOverflowrangeptr, cdirrangesize, dirRange);
- arrGuard((tssiPagedir >> 8), 256);
- ssiOverflowDirptr.i = ssiOverflowrangeptr.p->dirArray[tssiPagedir >> 8];
- ptrCheckGuard(ssiOverflowDirptr, cdirarraysize, directoryarray);
- ssiPageptr.i = ssiOverflowDirptr.p->pagep[tssiPagedir & 0xff];
- ptrCheckGuard(ssiPageptr, cpagesize, page8);
- arrGuard(ZWORDS_IN_PAGE - tssiPageIndex, 2048);
- tssiIndexValue = ssiPageptr.p->word32[ZWORDS_IN_PAGE - tssiPageIndex];
- tssiStartIndex = tssiIndexValue & 0xffff;
- tssiKeyLen = tssiIndexValue >> 16;
+ jam();
+ /** ---------------------------------------------------------------------
+ * LQH WILL NOT HAVE ANY USE OF THE TUPLE KEY LENGTH IN THIS CASE AND
+ * SO WE DO NOT PROVIDE IT. IN THIS CASE THESE VALUES ARE UNDEFINED.
+ * ---------------------------------------------------------------------- */
signal->theData[0] = scanPtr.p->scanUserptr;
signal->theData[1] = operationRecPtr.i;
signal->theData[2] = operationRecPtr.p->fid;
signal->theData[3] = operationRecPtr.p->localdata[0];
signal->theData[4] = operationRecPtr.p->localdata[1];
signal->theData[5] = fragrecptr.p->localkeylen;
- signal->theData[6] = tssiKeyLen;
- arrGuard(tssiStartIndex + 3, 2048);
- signal->theData[7] = ssiPageptr.p->word32[tssiStartIndex];
- signal->theData[8] = ssiPageptr.p->word32[tssiStartIndex + 1];
- signal->theData[9] = ssiPageptr.p->word32[tssiStartIndex + 2];
- signal->theData[10] = ssiPageptr.p->word32[tssiStartIndex + 3];
- EXECUTE_DIRECT(blockNo, GSN_NEXT_SCANCONF, signal, 11);
- if (tssiKeyLen > 4) {
- tssiKeyLen = tssiKeyLen - 4;
- tssiStartIndex = tssiStartIndex + 4;
- SSI_LOOP_10:
- jamEntry();
- if (tssiKeyLen > ZMAXSCANSIGNALLEN) {
- jam();
- signal->theData[0] = scanPtr.p->scanUserptr;
- signal->theData[1] = operationRecPtr.i;
- signal->theData[2] = operationRecPtr.p->fid;
- signal->theData[3] = ZMAXSCANSIGNALLEN;
- arrGuard(tssiStartIndex + 19, 2048);
- signal->theData[4] = ssiPageptr.p->word32[tssiStartIndex];
- signal->theData[5] = ssiPageptr.p->word32[tssiStartIndex + 1];
- signal->theData[6] = ssiPageptr.p->word32[tssiStartIndex + 2];
- signal->theData[7] = ssiPageptr.p->word32[tssiStartIndex + 3];
- signal->theData[8] = ssiPageptr.p->word32[tssiStartIndex + 4];
- signal->theData[9] = ssiPageptr.p->word32[tssiStartIndex + 5];
- signal->theData[10] = ssiPageptr.p->word32[tssiStartIndex + 6];
- signal->theData[11] = ssiPageptr.p->word32[tssiStartIndex + 7];
- signal->theData[12] = ssiPageptr.p->word32[tssiStartIndex + 8];
- signal->theData[13] = ssiPageptr.p->word32[tssiStartIndex + 9];
- signal->theData[14] = ssiPageptr.p->word32[tssiStartIndex + 10];
- signal->theData[15] = ssiPageptr.p->word32[tssiStartIndex + 11];
- signal->theData[16] = ssiPageptr.p->word32[tssiStartIndex + 12];
- signal->theData[17] = ssiPageptr.p->word32[tssiStartIndex + 13];
- signal->theData[18] = ssiPageptr.p->word32[tssiStartIndex + 14];
- signal->theData[19] = ssiPageptr.p->word32[tssiStartIndex + 15];
- signal->theData[20] = ssiPageptr.p->word32[tssiStartIndex + 16];
- signal->theData[21] = ssiPageptr.p->word32[tssiStartIndex + 17];
- signal->theData[22] = ssiPageptr.p->word32[tssiStartIndex + 18];
- signal->theData[23] = ssiPageptr.p->word32[tssiStartIndex + 19];
- EXECUTE_DIRECT(blockNo, GSN_ACC_SCAN_INFO24, signal, 24);
- tssiStartIndex = tssiStartIndex + ZMAXSCANSIGNALLEN;
- tssiKeyLen = tssiKeyLen - ZMAXSCANSIGNALLEN;
- goto SSI_LOOP_10;
- } else {
- jam();
- ndbrequire((tssiStartIndex + tssiKeyLen) <= 2048);
- for (tssiTmp = 0; tssiTmp < tssiKeyLen; tssiTmp++) {
- ckeys[tssiTmp] = ssiPageptr.p->word32[tssiStartIndex + tssiTmp];
- }//for
- signal->theData[0] = scanPtr.p->scanUserptr;
- signal->theData[1] = operationRecPtr.i;
- signal->theData[2] = operationRecPtr.p->fid;
- /* LOCAL FRAGMENT IDENTITY */
- signal->theData[3] = tssiKeyLen;
- signal->theData[4] = ckeys[0];
- signal->theData[5] = ckeys[1];
- signal->theData[6] = ckeys[2];
- signal->theData[7] = ckeys[3];
- signal->theData[8] = ckeys[4];
- signal->theData[9] = ckeys[5];
- signal->theData[10] = ckeys[6];
- signal->theData[11] = ckeys[7];
- signal->theData[12] = ckeys[8];
- signal->theData[13] = ckeys[9];
- signal->theData[14] = ckeys[10];
- signal->theData[15] = ckeys[11];
- signal->theData[16] = ckeys[12];
- signal->theData[17] = ckeys[13];
- signal->theData[18] = ckeys[14];
- signal->theData[19] = ckeys[15];
- signal->theData[20] = ckeys[16];
- signal->theData[21] = ckeys[17];
- signal->theData[22] = ckeys[18];
- signal->theData[23] = ckeys[19];
- EXECUTE_DIRECT(blockNo, GSN_ACC_SCAN_INFO24, signal, 24);
- }//if
- }//if
-}//Dbacc::sendScaninfo()
+ EXECUTE_DIRECT(blockNo, GSN_NEXT_SCANCONF, signal, 6);
+ return;
+}//Dbacc::sendNextScanConf()
/*---------------------------------------------------------------------------
* sendScanHbRep
@@ -13265,7 +11334,7 @@ void Dbacc::takeRecOutOfFreeOverpage(Signal* signal)
void
Dbacc::reportMemoryUsage(Signal* signal, int gth){
- signal->theData[0] = EventReport::MemoryUsage;
+ signal->theData[0] = NDB_LE_MemoryUsage;
signal->theData[1] = gth;
signal->theData[2] = sizeof(* rpPageptr.p);
signal->theData[3] = cnoOfAllocatedPages;
@@ -13315,13 +11384,12 @@ Dbacc::execDUMP_STATE_ORD(Signal* signal)
scanPtr.p->minBucketIndexToRescan,
scanPtr.p->maxBucketIndexToRescan);
infoEvent(" scanBucketState=%d, scanLockHeld=%d, userBlockRef=%d, "
- "scanMask=%d scanLockMode=%d, keyInfoFlag=%d",
+ "scanMask=%d scanLockMode=%d",
scanPtr.p->scanBucketState,
scanPtr.p->scanLockHeld,
scanPtr.p->scanUserblockref,
scanPtr.p->scanMask,
- scanPtr.p->scanLockMode,
- scanPtr.p->scanKeyinfoFlag);
+ scanPtr.p->scanLockMode);
return;
}
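
The Dbacc changes above all follow one fragment-id convention: a local fragment id now carries its root-fragment index in the least significant bit, so initFragAdd sets myfid = (fragId << 1) | rootFragIndex, writeUndoHeader stores rootfragrecptr.p->fragmentid[0] >> 1 as the undo-log root fragment id, and both writeUndoHeader and srDoUndoLab assert (localFragId >> 1) == rootFragId. The sketch below only illustrates that arithmetic; the helper names are hypothetical and do not appear in the patch, which performs the shifts inline.

#include <cassert>
#include <cstdint>

// Hypothetical helpers illustrating the encoding the patch performs inline.
static uint32_t encodeLocalFragId(uint32_t fragId, uint32_t rootFragIndex)
{
  // rootFragIndex selects one of the two local fragments of a root fragment
  return (fragId << 1) | rootFragIndex;
}

static uint32_t decodeRootFragId(uint32_t localFragId)
{
  // Mirrors the added ndbrequire: (localFragId >> 1) == rootFragId
  return localFragId >> 1;
}

int main()
{
  const uint32_t fragId = 5;                      // example value only
  for (uint32_t rootFragIndex = 0; rootFragIndex < 2; rootFragIndex++) {
    const uint32_t myfid = encodeLocalFragId(fragId, rootFragIndex);
    assert(decodeRootFragId(myfid) == fragId);    // invariant asserted by the patch
  }
  return 0;
}

Both local fragments of a root fragment therefore map back to the same rootFragId in the undo log, which is what lets srDoUndoLab look up the root fragment record directly from the logged value.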
diff --git a/ndb/src/kernel/blocks/dbacc/Makefile.am b/ndb/src/kernel/blocks/dbacc/Makefile.am
index e44524c3edd..ca1b1efac37 100644
--- a/ndb/src/kernel/blocks/dbacc/Makefile.am
+++ b/ndb/src/kernel/blocks/dbacc/Makefile.am
@@ -3,6 +3,8 @@ noinst_LIBRARIES = libdbacc.a
libdbacc_a_SOURCES = DbaccInit.cpp DbaccMain.cpp
+INCLUDES_LOC = -I$(top_srcdir)/ndb/src/kernel/blocks/dbtup
+
include $(top_srcdir)/ndb/config/common.mk.am
include $(top_srcdir)/ndb/config/type_kernel.mk.am
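
The Dbdict diff that follows builds a per-table key descriptor (g_key_descriptor_pool, sized to tablerecSize) in execADD_FRAGREQ, and Dbacc::initFragAdd above reads its hasCharAttr flag. KeyDescriptor.hpp itself is not part of this excerpt (the new -I .../blocks/dbtup include path hints at where it is found); the struct below is only a rough approximation reconstructed from the fields this patch touches, with the array bound and member types assumed for illustration.

// Approximate shape of the key descriptor used by this patch; field names
// come from the usages in Dbdict::execADD_FRAGREQ and Dbacc::initFragAdd,
// everything else is an assumption.
struct CHARSET_INFO;                     // opaque MySQL charset handle

struct KeyDescriptorSketch {
  unsigned noOfKeyAttr;                  // number of primary key attributes
  unsigned noOfDistrKeys;                // key attributes with the DKey bit set
  unsigned hasCharAttr;                  // 1 if any key attribute has a charset
  struct KeyAttr {
    unsigned attributeDescriptor;        // copied from the AttributeRecord
    const CHARSET_INFO* charsetInfo;     // 0 when csNumber == 0
  } keyAttr[32];                         // 32 = assumed upper bound on key attrs
};

Dbdict fills one such record per table id when the fragments are added, so blocks like Dbacc can test hasCharAttr without re-parsing the attribute list.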
diff --git a/ndb/src/kernel/blocks/dbdict/Dbdict.cpp b/ndb/src/kernel/blocks/dbdict/Dbdict.cpp
index 5bd35812b47..7b440749e0c 100644
--- a/ndb/src/kernel/blocks/dbdict/Dbdict.cpp
+++ b/ndb/src/kernel/blocks/dbdict/Dbdict.cpp
@@ -27,6 +27,7 @@
#include <SectionReader.hpp>
#include <SimpleProperties.hpp>
#include <AttributeHeader.hpp>
+#include <KeyDescriptor.hpp>
#include <signaldata/DictSchemaInfo.hpp>
#include <signaldata/DictTabInfo.hpp>
#include <signaldata/DropTabFile.hpp>
@@ -228,7 +229,7 @@ void Dbdict::packTableIntoPages(Signal* signal, Uint32 tableId, Uint32 pageId)
8 * ZSIZE_OF_PAGES_IN_WORDS);
w.first();
- packTableIntoPagesImpl(w, tablePtr);
+ packTableIntoPagesImpl(w, tablePtr, signal);
Uint32 wordsOfTable = w.getWordsUsed();
Uint32 pagesUsed =
@@ -257,7 +258,8 @@ void Dbdict::packTableIntoPages(Signal* signal, Uint32 tableId, Uint32 pageId)
void
Dbdict::packTableIntoPagesImpl(SimpleProperties::Writer & w,
- TableRecordPtr tablePtr){
+ TableRecordPtr tablePtr,
+ Signal* signal){
w.add(DictTabInfo::TableName, tablePtr.p->tableName);
w.add(DictTabInfo::TableId, tablePtr.i);
@@ -278,9 +280,32 @@ Dbdict::packTableIntoPagesImpl(SimpleProperties::Writer & w,
w.add(DictTabInfo::MaxLoadFactor, tablePtr.p->maxLoadFactor);
w.add(DictTabInfo::TableKValue, tablePtr.p->kValue);
w.add(DictTabInfo::FragmentTypeVal, tablePtr.p->fragmentType);
- w.add(DictTabInfo::FragmentKeyTypeVal, tablePtr.p->fragmentKeyType);
w.add(DictTabInfo::TableTypeVal, tablePtr.p->tableType);
- w.add(DictTabInfo::FragmentCount, tablePtr.p->fragmentCount);
+
+ if(!signal)
+ {
+ w.add(DictTabInfo::FragmentCount, tablePtr.p->fragmentCount);
+ }
+ else
+ {
+ Uint32 * theData = signal->getDataPtrSend();
+ CreateFragmentationReq * const req = (CreateFragmentationReq*)theData;
+ req->senderRef = 0;
+ req->senderData = RNIL;
+ req->fragmentationType = tablePtr.p->fragmentType;
+ req->noOfFragments = 0;
+ req->fragmentNode = 0;
+ req->primaryTableId = tablePtr.i;
+ EXECUTE_DIRECT(DBDIH, GSN_CREATE_FRAGMENTATION_REQ, signal,
+ CreateFragmentationReq::SignalLength);
+ if(signal->theData[0] == 0)
+ {
+ Uint16 *data = (Uint16*)&signal->theData[25];
+ Uint32 count = 2 + data[0] * data[1];
+ w.add(DictTabInfo::FragmentDataLen, 2*count);
+ w.add(DictTabInfo::FragmentData, data, 2*count);
+ }
+ }
if (tablePtr.p->primaryTableId != RNIL){
TableRecordPtr primTab;
@@ -311,18 +336,14 @@ Dbdict::packTableIntoPagesImpl(SimpleProperties::Writer & w,
const Uint32 attrSize = AttributeDescriptor::getSize(desc);
const Uint32 arraySize = AttributeDescriptor::getArraySize(desc);
const Uint32 nullable = AttributeDescriptor::getNullable(desc);
- const Uint32 DGroup = AttributeDescriptor::getDGroup(desc);
const Uint32 DKey = AttributeDescriptor::getDKey(desc);
- const Uint32 attrStoredInd = AttributeDescriptor::getStoredInTup(desc);
- w.add(DictTabInfo::AttributeType, attrType);
+ // AttributeType deprecated
w.add(DictTabInfo::AttributeSize, attrSize);
w.add(DictTabInfo::AttributeArraySize, arraySize);
w.add(DictTabInfo::AttributeNullableFlag, nullable);
- w.add(DictTabInfo::AttributeDGroup, DGroup);
w.add(DictTabInfo::AttributeDKey, DKey);
- w.add(DictTabInfo::AttributeStoredInd, attrStoredInd);
- w.add(DictTabInfo::AttributeExtType, attrPtr.p->extType);
+ w.add(DictTabInfo::AttributeExtType, attrType);
w.add(DictTabInfo::AttributeExtPrecision, attrPtr.p->extPrecision);
w.add(DictTabInfo::AttributeExtScale, attrPtr.p->extScale);
w.add(DictTabInfo::AttributeExtLength, attrPtr.p->extLength);
@@ -370,7 +391,7 @@ void Dbdict::execFSCLOSECONF(Signal* signal)
closeWriteTableConf(signal, fsPtr);
break;
case FsConnectRecord::OPEN_READ_SCHEMA2:
- openSchemaFile(signal, 1, fsPtr.i, false);
+ openSchemaFile(signal, 1, fsPtr.i, false, false);
break;
default:
jamLine((fsPtr.p->fsState & 0xFFF));
@@ -631,7 +652,7 @@ void Dbdict::writeTableFile(Signal* signal, Uint32 filePtr, Uint32 fsConPtr)
FsReadWriteReq::setSyncFlag(fsRWReq->operationFlag, 1);
FsReadWriteReq::setFormatFlag(fsRWReq->operationFlag,
FsReadWriteReq::fsFormatArrayOfPages);
- fsRWReq->varIndex = ZALLOCATE;
+ fsRWReq->varIndex = ZBAT_TABLE_FILE;
fsRWReq->numberOfPages = c_writeTableRecord.noOfPages;
fsRWReq->data.arrayOfPages.varIndex = c_writeTableRecord.pageId;
fsRWReq->data.arrayOfPages.fileOffset = 0; // Write to file page 0
@@ -708,7 +729,7 @@ void Dbdict::readTableFile(Signal* signal, Uint32 filePtr, Uint32 fsConPtr)
FsReadWriteReq::setSyncFlag(fsRWReq->operationFlag, 0);
FsReadWriteReq::setFormatFlag(fsRWReq->operationFlag,
FsReadWriteReq::fsFormatArrayOfPages);
- fsRWReq->varIndex = ZALLOCATE;
+ fsRWReq->varIndex = ZBAT_TABLE_FILE;
fsRWReq->numberOfPages = c_readTableRecord.noOfPages;
fsRWReq->data.arrayOfPages.varIndex = c_readTableRecord.pageId;
fsRWReq->data.arrayOfPages.fileOffset = 0; // Write to file page 0
@@ -774,11 +795,9 @@ Dbdict::updateSchemaState(Signal* signal, Uint32 tableId,
SchemaFile::TableEntry* te, Callback* callback){
jam();
- PageRecordPtr pagePtr;
- c_pageRecordArray.getPtr(pagePtr, c_schemaRecord.schemaPage);
-
ndbrequire(tableId < c_tableRecordPool.getSize());
- SchemaFile::TableEntry * tableEntry = getTableEntry(pagePtr.p, tableId);
+ XSchemaFile * xsf = &c_schemaFile[c_schemaRecord.schemaPage != 0];
+ SchemaFile::TableEntry * tableEntry = getTableEntry(xsf, tableId);
SchemaFile::TableState newState =
(SchemaFile::TableState)te->m_tableState;
@@ -825,12 +844,15 @@ Dbdict::updateSchemaState(Signal* signal, Uint32 tableId,
ndbrequire(ok);
* tableEntry = * te;
- computeChecksum((SchemaFile*)pagePtr.p);
+ computeChecksum(xsf, tableId / NDB_SF_PAGE_ENTRIES);
ndbrequire(c_writeSchemaRecord.inUse == false);
c_writeSchemaRecord.inUse = true;
c_writeSchemaRecord.pageId = c_schemaRecord.schemaPage;
+ c_writeSchemaRecord.newFile = false;
+ c_writeSchemaRecord.firstPage = tableId / NDB_SF_PAGE_ENTRIES;
+ c_writeSchemaRecord.noOfPages = 1;
c_writeSchemaRecord.m_callback = * callback;
startWriteSchemaFile(signal);
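
In the hunk above, updateSchemaState now locates the table's entry in a multi-page schema file and rewrites only the affected page: the checksum is recomputed for page tableId / NDB_SF_PAGE_ENTRIES and the write record is set to firstPage = that page, noOfPages = 1. A minimal sketch of the implied table-id to (page, slot) mapping follows; the page-entry count used here is an assumed value for illustration, the real NDB_SF_PAGE_ENTRIES constant is defined with the other NDB_SF_* constants this patch uses.

#include <cstdio>

// Assumed value for illustration only; the real constant is NDB_SF_PAGE_ENTRIES.
static const unsigned kPageEntries = 1024;

int main()
{
  const unsigned tableId = 1500;                  // example value only
  const unsigned page = tableId / kPageEntries;   // page whose checksum is recomputed
  const unsigned slot = tableId % kPageEntries;   // entry within that page
  // updateSchemaState then queues a single-page write:
  //   firstPage = page, noOfPages = 1, fileOffset = firstPage
  std::printf("table %u -> schema page %u, entry %u\n", tableId, page, slot);
  return 0;
}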
@@ -841,14 +863,15 @@ void Dbdict::startWriteSchemaFile(Signal* signal)
FsConnectRecordPtr fsPtr;
c_fsConnectRecordPool.getPtr(fsPtr, getFsConnRecord());
fsPtr.p->fsState = FsConnectRecord::OPEN_WRITE_SCHEMA;
- openSchemaFile(signal, 0, fsPtr.i, true);
+ openSchemaFile(signal, 0, fsPtr.i, true, c_writeSchemaRecord.newFile);
c_writeSchemaRecord.noOfSchemaFilesHandled = 0;
}//Dbdict::startWriteSchemaFile()
void Dbdict::openSchemaFile(Signal* signal,
Uint32 fileNo,
Uint32 fsConPtr,
- bool writeFlag)
+ bool writeFlag,
+ bool newFile)
{
FsOpenReq * const fsOpenReq = (FsOpenReq *)&signal->theData[0];
fsOpenReq->userReference = reference();
@@ -857,9 +880,11 @@ void Dbdict::openSchemaFile(Signal* signal,
jam();
fsOpenReq->fileFlags =
FsOpenReq::OM_WRITEONLY |
- FsOpenReq::OM_TRUNCATE |
- FsOpenReq::OM_CREATE |
FsOpenReq::OM_SYNC;
+ if (newFile)
+ fsOpenReq->fileFlags |=
+ FsOpenReq::OM_TRUNCATE |
+ FsOpenReq::OM_CREATE;
} else {
jam();
fsOpenReq->fileFlags = FsOpenReq::OM_READONLY;
@@ -884,6 +909,12 @@ void Dbdict::writeSchemaFile(Signal* signal, Uint32 filePtr, Uint32 fsConPtr)
{
FsReadWriteReq * const fsRWReq = (FsReadWriteReq *)&signal->theData[0];
+ // check write record
+ WriteSchemaRecord & wr = c_writeSchemaRecord;
+ ndbrequire(wr.pageId == (wr.pageId != 0) * NDB_SF_MAX_PAGES);
+ ndbrequire(wr.noOfPages != 0);
+ ndbrequire(wr.firstPage + wr.noOfPages <= NDB_SF_MAX_PAGES);
+
fsRWReq->filePointer = filePtr;
fsRWReq->userReference = reference();
fsRWReq->userPointer = fsConPtr;
@@ -891,11 +922,11 @@ void Dbdict::writeSchemaFile(Signal* signal, Uint32 filePtr, Uint32 fsConPtr)
FsReadWriteReq::setSyncFlag(fsRWReq->operationFlag, 1);
FsReadWriteReq::setFormatFlag(fsRWReq->operationFlag,
FsReadWriteReq::fsFormatArrayOfPages);
- fsRWReq->varIndex = ZALLOCATE;
- fsRWReq->numberOfPages = 1;
-// Write from memory page
- fsRWReq->data.arrayOfPages.varIndex = c_writeSchemaRecord.pageId;
- fsRWReq->data.arrayOfPages.fileOffset = 0; // Write to file page 0
+ fsRWReq->varIndex = ZBAT_SCHEMA_FILE;
+ fsRWReq->numberOfPages = wr.noOfPages;
+ // Write from memory page
+ fsRWReq->data.arrayOfPages.varIndex = wr.pageId + wr.firstPage;
+ fsRWReq->data.arrayOfPages.fileOffset = wr.firstPage;
sendSignal(NDBFS_REF, GSN_FSWRITEREQ, signal, 8, JBA);
}//writeSchemaFile()
@@ -925,7 +956,7 @@ void Dbdict::closeWriteSchemaConf(Signal* signal,
if (c_writeSchemaRecord.noOfSchemaFilesHandled < 2) {
jam();
fsPtr.p->fsState = FsConnectRecord::OPEN_WRITE_SCHEMA;
- openSchemaFile(signal, 1, fsPtr.i, true);
+ openSchemaFile(signal, 1, fsPtr.i, true, c_writeSchemaRecord.newFile);
return;
}
ndbrequire(c_writeSchemaRecord.noOfSchemaFilesHandled == 2);
@@ -943,20 +974,26 @@ void Dbdict::startReadSchemaFile(Signal* signal)
FsConnectRecordPtr fsPtr;
c_fsConnectRecordPool.getPtr(fsPtr, getFsConnRecord());
fsPtr.p->fsState = FsConnectRecord::OPEN_READ_SCHEMA1;
- openSchemaFile(signal, 0, fsPtr.i, false);
+ openSchemaFile(signal, 0, fsPtr.i, false, false);
}//Dbdict::startReadSchemaFile()
void Dbdict::openReadSchemaRef(Signal* signal,
FsConnectRecordPtr fsPtr)
{
fsPtr.p->fsState = FsConnectRecord::OPEN_READ_SCHEMA2;
- openSchemaFile(signal, 1, fsPtr.i, false);
+ openSchemaFile(signal, 1, fsPtr.i, false, false);
}//Dbdict::openReadSchemaRef()
void Dbdict::readSchemaFile(Signal* signal, Uint32 filePtr, Uint32 fsConPtr)
{
FsReadWriteReq * const fsRWReq = (FsReadWriteReq *)&signal->theData[0];
+ // check read record
+ ReadSchemaRecord & rr = c_readSchemaRecord;
+ ndbrequire(rr.pageId == (rr.pageId != 0) * NDB_SF_MAX_PAGES);
+ ndbrequire(rr.noOfPages != 0);
+ ndbrequire(rr.firstPage + rr.noOfPages <= NDB_SF_MAX_PAGES);
+
fsRWReq->filePointer = filePtr;
fsRWReq->userReference = reference();
fsRWReq->userPointer = fsConPtr;
@@ -964,10 +1001,10 @@ void Dbdict::readSchemaFile(Signal* signal, Uint32 filePtr, Uint32 fsConPtr)
FsReadWriteReq::setSyncFlag(fsRWReq->operationFlag, 0);
FsReadWriteReq::setFormatFlag(fsRWReq->operationFlag,
FsReadWriteReq::fsFormatArrayOfPages);
- fsRWReq->varIndex = ZALLOCATE;
- fsRWReq->numberOfPages = 1;
- fsRWReq->data.arrayOfPages.varIndex = c_readSchemaRecord.pageId;
- fsRWReq->data.arrayOfPages.fileOffset = 0;
+ fsRWReq->varIndex = ZBAT_SCHEMA_FILE;
+ fsRWReq->numberOfPages = rr.noOfPages;
+ fsRWReq->data.arrayOfPages.varIndex = rr.pageId + rr.firstPage;
+ fsRWReq->data.arrayOfPages.fileOffset = rr.firstPage;
sendSignal(NDBFS_REF, GSN_FSREADREQ, signal, 8, JBA);
}//readSchemaFile()
@@ -985,20 +1022,61 @@ void Dbdict::readSchemaConf(Signal* signal,
jam();
crashInd = true;
}//if
- PageRecordPtr tmpPagePtr;
- c_pageRecordArray.getPtr(tmpPagePtr, c_readSchemaRecord.pageId);
- Uint32 sz = ZSIZE_OF_PAGES_IN_WORDS;
- Uint32 chk = computeChecksum((const Uint32*)tmpPagePtr.p, sz);
+ ReadSchemaRecord & rr = c_readSchemaRecord;
+ XSchemaFile * xsf = &c_schemaFile[rr.pageId != 0];
- ndbrequire((chk == 0) || !crashInd);
+ if (rr.schemaReadState == ReadSchemaRecord::INITIAL_READ_HEAD) {
+ jam();
+ ndbrequire(rr.firstPage == 0);
+ SchemaFile * sf = &xsf->schemaPage[0];
+ Uint32 noOfPages;
+ if (sf->NdbVersion < NDB_SF_VERSION_5_0_6) {
+ jam();
+ const Uint32 pageSize_old = 32 * 1024;
+ noOfPages = pageSize_old / NDB_SF_PAGE_SIZE - 1;
+ } else {
+ noOfPages = sf->FileSize / NDB_SF_PAGE_SIZE - 1;
+ }
+ rr.schemaReadState = ReadSchemaRecord::INITIAL_READ;
+ if (noOfPages != 0) {
+ rr.firstPage = 1;
+ rr.noOfPages = noOfPages;
+ readSchemaFile(signal, fsPtr.p->filePtr, fsPtr.i);
+ return;
+ }
+ }
+
+ SchemaFile * sf0 = &xsf->schemaPage[0];
+ xsf->noOfPages = sf0->FileSize / NDB_SF_PAGE_SIZE;
- if (chk != 0){
+ if (sf0->NdbVersion < NDB_SF_VERSION_5_0_6 &&
+ ! convertSchemaFileTo_5_0_6(xsf)) {
jam();
+ ndbrequire(! crashInd);
ndbrequire(fsPtr.p->fsState == FsConnectRecord::READ_SCHEMA1);
readSchemaRef(signal, fsPtr);
return;
- }//if
+ }
+
+ for (Uint32 n = 0; n < xsf->noOfPages; n++) {
+ SchemaFile * sf = &xsf->schemaPage[n];
+ bool ok =
+ memcmp(sf->Magic, NDB_SF_MAGIC, sizeof(sf->Magic)) == 0 &&
+ sf->FileSize != 0 &&
+ sf->FileSize % NDB_SF_PAGE_SIZE == 0 &&
+ sf->FileSize == sf0->FileSize &&
+ sf->PageNumber == n &&
+ computeChecksum((Uint32*)sf, NDB_SF_PAGE_SIZE_IN_WORDS) == 0;
+ ndbrequire(ok || !crashInd);
+ if (! ok) {
+ jam();
+ ndbrequire(fsPtr.p->fsState == FsConnectRecord::READ_SCHEMA1);
+ readSchemaRef(signal, fsPtr);
+ return;
+ }
+ }
+
fsPtr.p->fsState = FsConnectRecord::CLOSE_READ_SCHEMA;
closeFile(signal, fsPtr.p->filePtr, fsPtr.i);
return;
@@ -1025,7 +1103,27 @@ void Dbdict::closeReadSchemaConf(Signal* signal,
switch(state) {
case ReadSchemaRecord::INITIAL_READ :
jam();
- sendNDB_STTORRY(signal);
+ {
+ // write back both copies
+
+ ndbrequire(c_writeSchemaRecord.inUse == false);
+ XSchemaFile * xsf = &c_schemaFile[c_schemaRecord.oldSchemaPage != 0 ];
+ Uint32 noOfPages =
+ (c_tableRecordPool.getSize() + NDB_SF_PAGE_ENTRIES - 1) /
+ NDB_SF_PAGE_ENTRIES;
+ resizeSchemaFile(xsf, noOfPages);
+
+ c_writeSchemaRecord.inUse = true;
+ c_writeSchemaRecord.pageId = c_schemaRecord.oldSchemaPage;
+ c_writeSchemaRecord.newFile = true;
+ c_writeSchemaRecord.firstPage = 0;
+ c_writeSchemaRecord.noOfPages = xsf->noOfPages;
+
+ c_writeSchemaRecord.m_callback.m_callbackFunction =
+ safe_cast(&Dbdict::initSchemaFile_conf);
+
+ startWriteSchemaFile(signal);
+ }
break;
default :
@@ -1035,6 +1133,54 @@ void Dbdict::closeReadSchemaConf(Signal* signal,
}//switch
}//Dbdict::closeReadSchemaConf()
+bool
+Dbdict::convertSchemaFileTo_5_0_6(XSchemaFile * xsf)
+{
+ const Uint32 pageSize_old = 32 * 1024;
+ Uint32 page_old[pageSize_old >> 2];
+ SchemaFile * sf_old = (SchemaFile *)page_old;
+
+ if (xsf->noOfPages * NDB_SF_PAGE_SIZE != pageSize_old)
+ return false;
+ SchemaFile * sf0 = &xsf->schemaPage[0];
+ memcpy(sf_old, sf0, pageSize_old);
+
+ // init max number new pages needed
+ xsf->noOfPages = (sf_old->NoOfTableEntries + NDB_SF_PAGE_ENTRIES - 1) /
+ NDB_SF_PAGE_ENTRIES;
+ initSchemaFile(xsf, 0, xsf->noOfPages, true);
+
+ Uint32 noOfPages = 1;
+ Uint32 n, i, j;
+ for (n = 0; n < xsf->noOfPages; n++) {
+ jam();
+ for (i = 0; i < NDB_SF_PAGE_ENTRIES; i++) {
+ j = n * NDB_SF_PAGE_ENTRIES + i;
+ if (j >= sf_old->NoOfTableEntries)
+ continue;
+ const SchemaFile::TableEntry_old & te_old = sf_old->TableEntries_old[j];
+ if (te_old.m_tableState == SchemaFile::INIT ||
+ te_old.m_tableState == SchemaFile::DROP_TABLE_COMMITTED ||
+ te_old.m_noOfPages == 0)
+ continue;
+ SchemaFile * sf = &xsf->schemaPage[n];
+ SchemaFile::TableEntry & te = sf->TableEntries[i];
+ te.m_tableState = te_old.m_tableState;
+ te.m_tableVersion = te_old.m_tableVersion;
+ te.m_tableType = te_old.m_tableType;
+ te.m_info_words = te_old.m_noOfPages * ZSIZE_OF_PAGES_IN_WORDS -
+ ZPAGE_HEADER_SIZE;
+ te.m_gcp = te_old.m_gcp;
+ if (noOfPages < n)
+ noOfPages = n;
+ }
+ }
+ xsf->noOfPages = noOfPages;
+ initSchemaFile(xsf, 0, xsf->noOfPages, false);
+
+ return true;
+}
+
/* **************************************************************** */
/* ---------------------------------------------------------------- */
/* MODULE: INITIALISATION MODULE ------------------------- */
@@ -1306,6 +1452,7 @@ void Dbdict::initRetrieveRecord(Signal* signal, Uint32 i, Uint32 returnCode)
void Dbdict::initSchemaRecord()
{
c_schemaRecord.schemaPage = RNIL;
+ c_schemaRecord.oldSchemaPage = RNIL;
}//Dbdict::initSchemaRecord()
void Dbdict::initRestartRecord()
@@ -1327,10 +1474,10 @@ void Dbdict::initNodeRecords()
void Dbdict::initPageRecords()
{
- c_schemaRecord.schemaPage = ZMAX_PAGES_OF_TABLE_DEFINITION;
- c_schemaRecord.oldSchemaPage = ZMAX_PAGES_OF_TABLE_DEFINITION + 1;
- c_retrieveRecord.retrievePage = ZMAX_PAGES_OF_TABLE_DEFINITION + 2;
- ndbrequire(ZNUMBER_OF_PAGES >= (2 * ZMAX_PAGES_OF_TABLE_DEFINITION + 2));
+ c_retrieveRecord.retrievePage = ZMAX_PAGES_OF_TABLE_DEFINITION;
+ ndbrequire(ZNUMBER_OF_PAGES >= (ZMAX_PAGES_OF_TABLE_DEFINITION + 1));
+ c_schemaRecord.schemaPage = 0;
+ c_schemaRecord.oldSchemaPage = NDB_SF_MAX_PAGES;
}//Dbdict::initPageRecords()
void Dbdict::initTableRecords()
@@ -1360,9 +1507,7 @@ void Dbdict::initialiseTableRecord(TableRecordPtr tablePtr)
tablePtr.p->tableVersion = (Uint32)-1;
tablePtr.p->tabState = TableRecord::NOT_DEFINED;
tablePtr.p->tabReturnState = TableRecord::TRS_IDLE;
- tablePtr.p->storageType = DictTabInfo::MainMemory;
tablePtr.p->fragmentType = DictTabInfo::AllNodesSmallTable;
- tablePtr.p->fragmentKeyType = DictTabInfo::PrimaryKey;
memset(tablePtr.p->tableName, 0, sizeof(tablePtr.p->tableName));
tablePtr.p->gciTableCreated = 0;
tablePtr.p->noOfAttributes = ZNIL;
@@ -1599,8 +1744,10 @@ void Dbdict::execREAD_CONFIG_REQ(Signal* signal)
c_fsConnectRecordPool.setSize(ZFS_CONNECT_SIZE);
c_nodes.setSize(MAX_NODES);
c_pageRecordArray.setSize(ZNUMBER_OF_PAGES);
+ c_schemaPageRecordArray.setSize(2 * NDB_SF_MAX_PAGES);
c_tableRecordPool.setSize(tablerecSize);
c_tableRecordHash.setSize(tablerecSize);
+ g_key_descriptor_pool.setSize(tablerecSize);
c_triggerRecordPool.setSize(c_maxNoOfTriggers);
c_triggerRecordHash.setSize(c_maxNoOfTriggers);
c_opRecordPool.setSize(256); // XXX need config params
@@ -1617,12 +1764,23 @@ void Dbdict::execREAD_CONFIG_REQ(Signal* signal)
c_opCreateTrigger.setSize(8);
c_opDropTrigger.setSize(8);
c_opAlterTrigger.setSize(8);
+
+ // Initialize schema file copies
+ c_schemaFile[0].schemaPage =
+ (SchemaFile*)c_schemaPageRecordArray.getPtr(0 * NDB_SF_MAX_PAGES);
+ c_schemaFile[0].noOfPages = 0;
+ c_schemaFile[1].schemaPage =
+ (SchemaFile*)c_schemaPageRecordArray.getPtr(1 * NDB_SF_MAX_PAGES);
+ c_schemaFile[1].noOfPages = 0;
// Initialize BAT for interface to file system
- PageRecordPtr pageRecPtr;
- c_pageRecordArray.getPtr(pageRecPtr, 0);
NewVARIABLE* bat = allocateBat(2);
- bat[1].WA = &pageRecPtr.p->word[0];
+ bat[0].WA = &c_schemaPageRecordArray.getPtr(0)->word[0];
+ bat[0].nrr = 2 * NDB_SF_MAX_PAGES;
+ bat[0].ClusterSize = NDB_SF_PAGE_SIZE;
+ bat[0].bits.q = NDB_SF_PAGE_SIZE_IN_WORDS_LOG2;
+ bat[0].bits.v = 5; // 32 bits per element
+ bat[1].WA = &c_pageRecordArray.getPtr(0)->word[0];
bat[1].nrr = ZNUMBER_OF_PAGES;
bat[1].ClusterSize = ZSIZE_OF_PAGES_IN_WORDS * 4;
bat[1].bits.q = ZLOG_SIZE_OF_PAGES_IN_WORDS; // 2**13 = 8192 elements
@@ -1767,16 +1925,23 @@ void Dbdict::execHOT_SPAREREP(Signal* signal)
void Dbdict::initSchemaFile(Signal* signal)
{
- PageRecordPtr pagePtr;
- c_pageRecordArray.getPtr(pagePtr, c_schemaRecord.schemaPage);
- SchemaFile * schemaFile = (SchemaFile *)pagePtr.p;
- initSchemaFile(schemaFile, 4 * ZSIZE_OF_PAGES_IN_WORDS);
+ XSchemaFile * xsf = &c_schemaFile[c_schemaRecord.schemaPage != 0];
+ xsf->noOfPages = (c_tableRecordPool.getSize() + NDB_SF_PAGE_ENTRIES - 1)
+ / NDB_SF_PAGE_ENTRIES;
+ initSchemaFile(xsf, 0, xsf->noOfPages, true);
+ // init alt copy too for INR
+ XSchemaFile * oldxsf = &c_schemaFile[c_schemaRecord.oldSchemaPage != 0];
+ oldxsf->noOfPages = xsf->noOfPages;
+ memcpy(&oldxsf->schemaPage[0], &xsf->schemaPage[0], xsf->schemaPage[0].FileSize);
if (c_initialStart || c_initialNodeRestart) {
jam();
ndbrequire(c_writeSchemaRecord.inUse == false);
c_writeSchemaRecord.inUse = true;
c_writeSchemaRecord.pageId = c_schemaRecord.schemaPage;
+ c_writeSchemaRecord.newFile = true;
+ c_writeSchemaRecord.firstPage = 0;
+ c_writeSchemaRecord.noOfPages = xsf->noOfPages;
c_writeSchemaRecord.m_callback.m_callbackFunction =
safe_cast(&Dbdict::initSchemaFile_conf);
@@ -1786,7 +1951,9 @@ void Dbdict::initSchemaFile(Signal* signal)
jam();
ndbrequire(c_readSchemaRecord.schemaReadState == ReadSchemaRecord::IDLE);
c_readSchemaRecord.pageId = c_schemaRecord.oldSchemaPage;
- c_readSchemaRecord.schemaReadState = ReadSchemaRecord::INITIAL_READ;
+ c_readSchemaRecord.firstPage = 0;
+ c_readSchemaRecord.noOfPages = 1;
+ c_readSchemaRecord.schemaReadState = ReadSchemaRecord::INITIAL_READ_HEAD;
startReadSchemaFile(signal);
} else {
ndbrequire(false);
@@ -1925,7 +2092,7 @@ void Dbdict::execDICTSTARTREQ(Signal* signal)
safe_cast(&Dbdict::masterRestart_checkSchemaStatusComplete);
c_restartRecord.activeTable = 0;
- c_schemaRecord.schemaPage = c_schemaRecord.oldSchemaPage;
+ c_schemaRecord.schemaPage = c_schemaRecord.oldSchemaPage; // ugly
checkSchemaStatus(signal);
}//execDICTSTARTREQ()
@@ -1934,15 +2101,13 @@ Dbdict::masterRestart_checkSchemaStatusComplete(Signal* signal,
Uint32 callbackData,
Uint32 returnCode){
- c_schemaRecord.schemaPage = ZMAX_PAGES_OF_TABLE_DEFINITION;
+ c_schemaRecord.schemaPage = 0; // ugly
+ XSchemaFile * oldxsf = &c_schemaFile[c_schemaRecord.oldSchemaPage != 0];
+ ndbrequire(oldxsf->noOfPages != 0);
LinearSectionPtr ptr[3];
-
- PageRecordPtr pagePtr;
- c_pageRecordArray.getPtr(pagePtr, c_schemaRecord.oldSchemaPage);
-
- ptr[0].p = &pagePtr.p->word[0];
- ptr[0].sz = ZSIZE_OF_PAGES_IN_WORDS;
+ ptr[0].p = (Uint32*)&oldxsf->schemaPage[0];
+ ptr[0].sz = oldxsf->noOfPages * NDB_SF_PAGE_SIZE_IN_WORDS;
c_sendSchemaRecord.m_SCHEMAINFO_Counter = c_aliveNodes;
NodeReceiverGroup rg(DBDICT, c_aliveNodes);
@@ -1958,10 +2123,10 @@ Dbdict::masterRestart_checkSchemaStatusComplete(Signal* signal,
1,
c);
- PageRecordPtr newPagePtr;
- c_pageRecordArray.getPtr(newPagePtr, c_schemaRecord.schemaPage);
- memcpy(&newPagePtr.p->word[0], &pagePtr.p->word[0],
- 4 * ZSIZE_OF_PAGES_IN_WORDS);
+ XSchemaFile * newxsf = &c_schemaFile[c_schemaRecord.schemaPage != 0];
+ newxsf->noOfPages = oldxsf->noOfPages;
+ memcpy(&newxsf->schemaPage[0], &oldxsf->schemaPage[0],
+ oldxsf->noOfPages * NDB_SF_PAGE_SIZE);
signal->theData[0] = getOwnNodeId();
sendSignal(reference(), GSN_SCHEMA_INFOCONF, signal, 1, JBB);
@@ -1978,11 +2143,11 @@ Dbdict::execGET_SCHEMA_INFOREQ(Signal* signal){
LinearSectionPtr ptr[3];
- PageRecordPtr pagePtr;
- c_pageRecordArray.getPtr(pagePtr, c_schemaRecord.schemaPage);
+ XSchemaFile * xsf = &c_schemaFile[c_schemaRecord.schemaPage != 0];
+ ndbrequire(xsf->noOfPages != 0);
- ptr[0].p = &pagePtr.p->word[0];
- ptr[0].sz = ZSIZE_OF_PAGES_IN_WORDS;
+ ptr[0].p = (Uint32*)&xsf->schemaPage[0];
+ ptr[0].sz = xsf->noOfPages * NDB_SF_PAGE_SIZE_IN_WORDS;
Callback c = { safe_cast(&Dbdict::sendSchemaComplete), 0 };
sendFragmentedSignal(ref,
@@ -2024,12 +2189,22 @@ void Dbdict::execSCHEMA_INFO(Signal* signal)
SegmentedSectionPtr schemaDataPtr;
signal->getSection(schemaDataPtr, 0);
- PageRecordPtr pagePtr;
- c_pageRecordArray.getPtr(pagePtr, c_schemaRecord.schemaPage);
- copy(&pagePtr.p->word[0], schemaDataPtr);
+ XSchemaFile * xsf = &c_schemaFile[c_schemaRecord.schemaPage != 0];
+ ndbrequire(schemaDataPtr.sz % NDB_SF_PAGE_SIZE_IN_WORDS == 0);
+ xsf->noOfPages = schemaDataPtr.sz / NDB_SF_PAGE_SIZE_IN_WORDS;
+ copy((Uint32*)&xsf->schemaPage[0], schemaDataPtr);
releaseSections(signal);
+
+ SchemaFile * sf0 = &xsf->schemaPage[0];
+ if (sf0->NdbVersion < NDB_SF_VERSION_5_0_6) {
+ bool ok = convertSchemaFileTo_5_0_6(xsf);
+ ndbrequire(ok);
+ }
- validateChecksum((SchemaFile*)pagePtr.p);
+ validateChecksum(xsf);
+
+ XSchemaFile * oldxsf = &c_schemaFile[c_schemaRecord.oldSchemaPage != 0];
+ resizeSchemaFile(xsf, oldxsf->noOfPages);
ndbrequire(signal->getSendersBlockRef() != reference());
@@ -2054,7 +2229,11 @@ Dbdict::restart_checkSchemaStatusComplete(Signal * signal,
ndbrequire(c_writeSchemaRecord.inUse == false);
c_writeSchemaRecord.inUse = true;
+ XSchemaFile * xsf = &c_schemaFile[c_schemaRecord.schemaPage != 0];
c_writeSchemaRecord.pageId = c_schemaRecord.schemaPage;
+ c_writeSchemaRecord.newFile = true;
+ c_writeSchemaRecord.firstPage = 0;
+ c_writeSchemaRecord.noOfPages = xsf->noOfPages;
c_writeSchemaRecord.m_callback.m_callbackData = 0;
c_writeSchemaRecord.m_callback.m_callbackFunction =
safe_cast(&Dbdict::restart_writeSchemaConf);
@@ -2103,20 +2282,18 @@ void Dbdict::execSCHEMA_INFOCONF(Signal* signal)
void Dbdict::checkSchemaStatus(Signal* signal)
{
- PageRecordPtr pagePtr;
- c_pageRecordArray.getPtr(pagePtr, c_schemaRecord.schemaPage);
+ XSchemaFile * newxsf = &c_schemaFile[c_schemaRecord.schemaPage != 0];
+ XSchemaFile * oldxsf = &c_schemaFile[c_schemaRecord.oldSchemaPage != 0];
+ ndbrequire(newxsf->noOfPages == oldxsf->noOfPages);
+ const Uint32 noOfEntries = newxsf->noOfPages * NDB_SF_PAGE_ENTRIES;
- PageRecordPtr oldPagePtr;
- c_pageRecordArray.getPtr(oldPagePtr, c_schemaRecord.oldSchemaPage);
-
- for (; c_restartRecord.activeTable < MAX_TABLES;
+ for (; c_restartRecord.activeTable < noOfEntries;
c_restartRecord.activeTable++) {
jam();
Uint32 tableId = c_restartRecord.activeTable;
- SchemaFile::TableEntry *newEntry = getTableEntry(pagePtr.p, tableId);
- SchemaFile::TableEntry *oldEntry = getTableEntry(oldPagePtr.p, tableId,
- true);
+ SchemaFile::TableEntry *newEntry = getTableEntry(newxsf, tableId);
+ SchemaFile::TableEntry *oldEntry = getTableEntry(oldxsf, tableId);
SchemaFile::TableState schemaState =
(SchemaFile::TableState)newEntry->m_tableState;
SchemaFile::TableState oldSchemaState =
@@ -2247,7 +2424,7 @@ void Dbdict::checkSchemaStatus(Signal* signal)
return;
}//if
}
- ndbrequire(ok);
+ ndbrequire(ok);
break;
}
case SchemaFile::DROP_TABLE_STARTED:
@@ -2350,7 +2527,8 @@ Dbdict::restartCreateTab(Signal* signal, Uint32 tableId,
if(file && !ERROR_INSERTED(6002)){
jam();
- c_readTableRecord.noOfPages = te->m_noOfPages;
+ c_readTableRecord.noOfPages =
+ DIV(te->m_info_words + ZPAGE_HEADER_SIZE, ZSIZE_OF_PAGES_IN_WORDS);
c_readTableRecord.pageId = 0;
c_readTableRecord.m_callback.m_callbackData = createTabPtr.p->key;
c_readTableRecord.m_callback.m_callbackFunction =
@@ -3244,8 +3422,8 @@ Dbdict::execALTER_TAB_REQ(Signal * signal)
tabEntry.m_tableType = tablePtr.p->tableType;
tabEntry.m_tableState = SchemaFile::ALTER_TABLE_COMMITTED;
tabEntry.m_gcp = gci;
- tabEntry.m_noOfPages =
- DIV(tabInfoPtr.sz + ZPAGE_HEADER_SIZE, ZSIZE_OF_PAGES_IN_WORDS);
+ tabEntry.m_info_words = tabInfoPtr.sz;
+ memset(tabEntry.m_unused, 0, sizeof(tabEntry.m_unused));
Callback callback;
callback.m_callbackData = senderData;
@@ -3776,9 +3954,8 @@ Dbdict::execCREATE_FRAGMENTATION_CONF(Signal* signal){
/**
* Update table version
*/
- PageRecordPtr pagePtr;
- c_pageRecordArray.getPtr(pagePtr, c_schemaRecord.schemaPage);
- SchemaFile::TableEntry * tabEntry = getTableEntry(pagePtr.p, tabPtr.i);
+ XSchemaFile * xsf = &c_schemaFile[c_schemaRecord.schemaPage != 0];
+ SchemaFile::TableEntry * tabEntry = getTableEntry(xsf, tabPtr.i);
tabPtr.p->tableVersion = create_table_inc_schema_version(tabEntry->m_tableVersion);
@@ -4084,8 +4261,8 @@ Dbdict::createTab_prepare(Signal* signal, CreateTabReq * req){
tabEntry.m_tableType = tabPtr.p->tableType;
tabEntry.m_tableState = SchemaFile::ADD_STARTED;
tabEntry.m_gcp = gci;
- tabEntry.m_noOfPages =
- DIV(tabInfoPtr.sz + ZPAGE_HEADER_SIZE, ZSIZE_OF_PAGES_IN_WORDS);
+ tabEntry.m_info_words = tabInfoPtr.sz;
+ memset(tabEntry.m_unused, 0, sizeof(tabEntry.m_unused));
Callback callback;
callback.m_callbackData = createTabPtr.p->key;
@@ -4248,14 +4425,14 @@ Dbdict::execADD_FRAGREQ(Signal* signal) {
req->maxLoadFactor = tabPtr.p->maxLoadFactor;
req->minLoadFactor = tabPtr.p->minLoadFactor;
req->kValue = tabPtr.p->kValue;
- req->lh3DistrBits = lhDistrBits;
- req->lh3PageBits = lhPageBits;
+ req->lh3DistrBits = 0; //lhDistrBits;
+ req->lh3PageBits = 0; //lhPageBits;
req->noOfAttributes = tabPtr.p->noOfAttributes;
- req->noOfNullAttributes = tabPtr.p->noOfNullAttr;
+ req->noOfNullAttributes = tabPtr.p->noOfNullBits;
req->noOfPagesToPreAllocate = 0;
req->schemaVersion = tabPtr.p->tableVersion;
Uint32 keyLen = tabPtr.p->tupKeyLength;
- req->keyLength = keyLen > 8 ? 0 : keyLen; // Put this into ACC instead
+ req->keyLength = keyLen; // wl-2066 no more "long keys"
req->nextLCP = lcpNo;
req->noOfKeyAttr = tabPtr.p->noOfPrimkey;
@@ -4271,6 +4448,44 @@ Dbdict::execADD_FRAGREQ(Signal* signal) {
sendSignal(DBLQH_REF, GSN_LQHFRAGREQ, signal,
LqhFragReq::SignalLength, JBB);
}
+
+ /**
+ * Create KeyDescriptor
+ */
+ KeyDescriptor* desc= g_key_descriptor_pool.getPtr(tabPtr.i);
+ new (desc) KeyDescriptor();
+
+ Uint32 key = 0;
+ Uint32 tAttr = tabPtr.p->firstAttribute;
+ while (tAttr != RNIL)
+ {
+ jam();
+ AttributeRecord* aRec = c_attributeRecordPool.getPtr(tAttr);
+ if (aRec->tupleKey)
+ {
+ desc->noOfKeyAttr ++;
+ desc->keyAttr[key].attributeDescriptor = aRec->attributeDescriptor;
+
+ Uint32 csNumber = (aRec->extPrecision >> 16);
+ if(csNumber)
+ {
+ desc->keyAttr[key].charsetInfo = all_charsets[csNumber];
+ ndbrequire(all_charsets[csNumber]);
+ desc->hasCharAttr = 1;
+ }
+ else
+ {
+ desc->keyAttr[key].charsetInfo = 0;
+ }
+ if(AttributeDescriptor::getDKey(aRec->attributeDescriptor))
+ {
+ desc->noOfDistrKeys ++;
+ }
+ key++;
+ }
+ tAttr = aRec->nextAttrInTable;
+ }
+ ndbrequire(key == tabPtr.p->noOfPrimkey);
}
void
@@ -4322,7 +4537,7 @@ Dbdict::sendLQHADDATTRREQ(Signal* signal,
LqhAddAttrReq::Entry& entry = req->attributes[i];
entry.attrId = attrPtr.p->attributeId;
entry.attrDescriptor = attrPtr.p->attributeDescriptor;
- entry.extTypeInfo = attrPtr.p->extType;
+ entry.extTypeInfo = 0;
// charset number passed to TUP, TUX in upper half
entry.extTypeInfo |= (attrPtr.p->extPrecision & ~0xFFFF);
if (tabPtr.p->isIndex()) {
@@ -4464,10 +4679,12 @@ Dbdict::execTAB_COMMITCONF(Signal* signal){
signal->theData[3] = reference();
signal->theData[4] = (Uint32)tabPtr.p->tableType;
signal->theData[5] = createTabPtr.p->key;
- sendSignal(DBTC_REF, GSN_TC_SCHVERREQ, signal, 6, JBB);
+ signal->theData[6] = (Uint32)tabPtr.p->noOfPrimkey;
+
+ sendSignal(DBTC_REF, GSN_TC_SCHVERREQ, signal, 7, JBB);
return;
}
-
+
ndbrequire(false);
}
@@ -4520,8 +4737,8 @@ Dbdict::createTab_commit(Signal * signal, CreateTabReq * req){
tabEntry.m_tableType = tabPtr.p->tableType;
tabEntry.m_tableState = SchemaFile::TABLE_ADD_COMMITTED;
tabEntry.m_gcp = tabPtr.p->gciTableCreated;
- tabEntry.m_noOfPages =
- DIV(tabPtr.p->packedSize + ZPAGE_HEADER_SIZE, ZSIZE_OF_PAGES_IN_WORDS);
+ tabEntry.m_info_words = tabPtr.p->packedSize;
+ memset(tabEntry.m_unused, 0, sizeof(tabEntry.m_unused));
Callback callback;
callback.m_callbackData = createTabPtr.p->key;
@@ -4622,10 +4839,9 @@ Dbdict::createTab_dropComplete(Signal* signal,
c_tableRecordPool.getPtr(tabPtr, createTabPtr.p->m_tablePtrI);
releaseTableObject(tabPtr.i);
- PageRecordPtr pagePtr;
- c_pageRecordArray.getPtr(pagePtr, c_schemaRecord.schemaPage);
- SchemaFile::TableEntry * tableEntry = getTableEntry(pagePtr.p, tabPtr.i);
+ XSchemaFile * xsf = &c_schemaFile[c_schemaRecord.schemaPage != 0];
+ SchemaFile::TableEntry * tableEntry = getTableEntry(xsf, tabPtr.i);
tableEntry->m_tableState = SchemaFile::DROP_TABLE_COMMITTED;
//@todo check error
@@ -4823,7 +5039,6 @@ void Dbdict::handleTabInfoInit(SimpleProperties::Reader & it,
tablePtr.p->minLoadFactor = tableDesc.MinLoadFactor;
tablePtr.p->maxLoadFactor = tableDesc.MaxLoadFactor;
tablePtr.p->fragmentType = (DictTabInfo::FragmentType)tableDesc.FragmentType;
- tablePtr.p->fragmentKeyType = (DictTabInfo::FragmentKeyType)tableDesc.FragmentKeyType;
tablePtr.p->tableType = (DictTabInfo::TableType)tableDesc.TableType;
tablePtr.p->kValue = tableDesc.TableKValue;
tablePtr.p->fragmentCount = tableDesc.FragmentCount;
@@ -4872,6 +5087,7 @@ void Dbdict::handleTabInfo(SimpleProperties::Reader & it,
Uint32 keyLength = 0;
Uint32 attrCount = tablePtr.p->noOfAttributes;
Uint32 nullCount = 0;
+ Uint32 nullBits = 0;
Uint32 noOfCharsets = 0;
Uint16 charsets[128];
Uint32 recordLength = 0;
@@ -4924,19 +5140,24 @@ void Dbdict::handleTabInfo(SimpleProperties::Reader & it,
attrPtr.p->attributeId = attrDesc.AttributeId;
attrPtr.p->tupleKey = (keyCount + 1) * attrDesc.AttributeKeyFlag;
- attrPtr.p->extType = attrDesc.AttributeExtType;
attrPtr.p->extPrecision = attrDesc.AttributeExtPrecision;
attrPtr.p->extScale = attrDesc.AttributeExtScale;
attrPtr.p->extLength = attrDesc.AttributeExtLength;
// charset in upper half of precision
unsigned csNumber = (attrPtr.p->extPrecision >> 16);
if (csNumber != 0) {
+ /*
+ * A new charset is first accessed here on this node.
+       * TODO use a separate thread (e.g. via NDBFS) if it needs to be loaded from file
+ */
CHARSET_INFO* cs = get_charset(csNumber, MYF(0));
if (cs == NULL) {
parseP->errorCode = CreateTableRef::InvalidCharset;
parseP->errorLine = __LINE__;
return;
}
+ // XXX should be done somewhere in mysql
+ all_charsets[cs->number] = cs;
unsigned i = 0;
while (i < noOfCharsets) {
if (charsets[i] == csNumber)
@@ -4954,9 +5175,7 @@ void Dbdict::handleTabInfo(SimpleProperties::Reader & it,
}
}
- /**
- * Ignore incoming old-style type and recompute it.
- */
+ // compute attribute size and array size
bool translateOk = attrDesc.translateExtType();
tabRequire(translateOk, CreateTableRef::Inconsistency);
@@ -4969,15 +5188,12 @@ void Dbdict::handleTabInfo(SimpleProperties::Reader & it,
}
Uint32 desc = 0;
- AttributeDescriptor::setType(desc, attrDesc.AttributeType);
+ AttributeDescriptor::setType(desc, attrDesc.AttributeExtType);
AttributeDescriptor::setSize(desc, attrDesc.AttributeSize);
AttributeDescriptor::setArray(desc, attrDesc.AttributeArraySize);
AttributeDescriptor::setNullable(desc, attrDesc.AttributeNullableFlag);
- AttributeDescriptor::setDGroup(desc, attrDesc.AttributeDGroup);
AttributeDescriptor::setDKey(desc, attrDesc.AttributeDKey);
AttributeDescriptor::setPrimaryKey(desc, attrDesc.AttributeKeyFlag);
-
- AttributeDescriptor::setStoredInTup(desc, attrDesc.AttributeStoredInd);
attrPtr.p->attributeDescriptor = desc;
attrPtr.p->autoIncrement = attrDesc.AttributeAutoIncrement;
strcpy(attrPtr.p->defaultValue, attrDesc.AttributeDefaultValue);
@@ -4989,7 +5205,25 @@ void Dbdict::handleTabInfo(SimpleProperties::Reader & it,
nullCount += attrDesc.AttributeNullableFlag;
const Uint32 aSz = (1 << attrDesc.AttributeSize);
- const Uint32 sz = ((aSz * attrDesc.AttributeArraySize) + 31) >> 5;
+ Uint32 sz;
+ if(aSz != 1)
+ {
+ sz = ((aSz * attrDesc.AttributeArraySize) + 31) >> 5;
+ }
+ else
+ {
+ sz = 0;
+ nullBits += attrDesc.AttributeArraySize;
+ }
+
+ if(attrDesc.AttributeArraySize == 0)
+ {
+ parseP->errorCode = CreateTableRef::InvalidArraySize;
+ parseP->status = status;
+ parseP->errorKey = it.getKey();
+ parseP->errorLine = __LINE__;
+ return;
+ }
recordLength += sz;
if(attrDesc.AttributeKeyFlag){
@@ -5018,6 +5252,7 @@ void Dbdict::handleTabInfo(SimpleProperties::Reader & it,
tablePtr.p->noOfNullAttr = nullCount;
tablePtr.p->noOfCharsets = noOfCharsets;
tablePtr.p->tupKeyLength = keyLength;
+ tablePtr.p->noOfNullBits = nullCount + nullBits;
tabRequire(recordLength<= MAX_TUPLE_SIZE_IN_WORDS,
CreateTableRef::RecordTooBig);
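The new accounting above treats bit-sized attributes (aSz == 1) specially: they add no words of their own to the record length and instead contribute their array size to nullBits, which is later combined with the nullable-column count into noOfNullBits. A small worked example of that arithmetic (the column shapes are illustrative):

    #include <cstdio>

    // Words occupied by a non-bit attribute: round bits up to 32-bit words.
    static unsigned wordsFor(unsigned aSz, unsigned arraySize)
    {
      return (aSz * arraySize + 31) >> 5;
    }

    int main()
    {
      unsigned recordLength = 0, nullBits = 0, nullCount = 0;

      // 32-bit scalar: aSz = 32, arraySize = 1 -> 1 word
      recordLength += wordsFor(32, 1);
      // char(20): aSz = 8, arraySize = 20 -> 5 words
      recordLength += wordsFor(8, 20);
      // bit(3): aSz = 1 -> contributes 0 words, 3 bits go to nullBits
      nullBits += 3;
      // a nullable column adds one more bit
      nullCount += 1;

      std::printf("recordLength=%u noOfNullBits=%u\n",
                  recordLength, nullCount + nullBits);   // 6 words, 4 bits
      return 0;
    }
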
@@ -5480,21 +5715,22 @@ Dbdict::execPREP_DROP_TAB_REQ(Signal* signal){
/**
* Modify schema
*/
- PageRecordPtr pagePtr;
- c_pageRecordArray.getPtr(pagePtr, c_schemaRecord.schemaPage);
-
- SchemaFile::TableEntry * tableEntry = getTableEntry(pagePtr.p, tablePtr.i);
+ XSchemaFile * xsf = &c_schemaFile[c_schemaRecord.schemaPage != 0];
+ SchemaFile::TableEntry * tableEntry = getTableEntry(xsf, tablePtr.i);
SchemaFile::TableState tabState =
(SchemaFile::TableState)tableEntry->m_tableState;
ndbrequire(tabState == SchemaFile::TABLE_ADD_COMMITTED ||
tabState == SchemaFile::ALTER_TABLE_COMMITTED);
tableEntry->m_tableState = SchemaFile::DROP_TABLE_STARTED;
- computeChecksum((SchemaFile*)pagePtr.p);
+ computeChecksum(xsf, tablePtr.i / NDB_SF_PAGE_ENTRIES);
ndbrequire(c_writeSchemaRecord.inUse == false);
c_writeSchemaRecord.inUse = true;
c_writeSchemaRecord.pageId = c_schemaRecord.schemaPage;
+ c_writeSchemaRecord.newFile = false;
+ c_writeSchemaRecord.firstPage = tablePtr.i / NDB_SF_PAGE_ENTRIES;
+ c_writeSchemaRecord.noOfPages = 1;
c_writeSchemaRecord.m_callback.m_callbackData = dropTabPtr.p->key;
c_writeSchemaRecord.m_callback.m_callbackFunction =
safe_cast(&Dbdict::prepDropTab_writeSchemaConf);
@@ -5655,20 +5891,20 @@ Dbdict::dropTab_complete(Signal* signal,
/**
* Write to schema file
*/
- PageRecordPtr pagePtr;
- c_pageRecordArray.getPtr(pagePtr, c_schemaRecord.schemaPage);
-
- SchemaFile::TableEntry * tableEntry = getTableEntry(pagePtr.p, tableId);
+ XSchemaFile * xsf = &c_schemaFile[c_schemaRecord.schemaPage != 0];
+ SchemaFile::TableEntry * tableEntry = getTableEntry(xsf, tableId);
SchemaFile::TableState tabState =
(SchemaFile::TableState)tableEntry->m_tableState;
ndbrequire(tabState == SchemaFile::DROP_TABLE_STARTED);
tableEntry->m_tableState = SchemaFile::DROP_TABLE_COMMITTED;
- computeChecksum((SchemaFile*)pagePtr.p);
+ computeChecksum(xsf, tableId / NDB_SF_PAGE_ENTRIES);
ndbrequire(c_writeSchemaRecord.inUse == false);
c_writeSchemaRecord.inUse = true;
c_writeSchemaRecord.pageId = c_schemaRecord.schemaPage;
+ c_writeSchemaRecord.firstPage = tableId / NDB_SF_PAGE_ENTRIES;
+ c_writeSchemaRecord.noOfPages = 1;
c_writeSchemaRecord.m_callback.m_callbackData = dropTabPtr.p->key;
c_writeSchemaRecord.m_callback.m_callbackFunction =
safe_cast(&Dbdict::dropTab_writeSchemaConf);
@@ -5852,7 +6088,10 @@ void Dbdict::sendGET_TABLEID_REF(Signal* signal,
void Dbdict::execGET_TABINFOREQ(Signal* signal)
{
jamEntry();
- if(!assembleFragments(signal)) { return; }
+ if(!assembleFragments(signal))
+ {
+ return;
+ }
GetTabInfoReq * const req = (GetTabInfoReq *)&signal->theData[0];
@@ -6437,11 +6676,15 @@ void
Dbdict::createIndex_slavePrepare(Signal* signal, OpCreateIndexPtr opPtr)
{
jam();
+ if (ERROR_INSERTED(6006) && ! opPtr.p->m_isMaster) {
+ ndbrequire(false);
+ }
}
void
Dbdict::createIndex_toCreateTable(Signal* signal, OpCreateIndexPtr opPtr)
{
+ Uint32 attrid_map[MAX_ATTRIBUTES_IN_INDEX];
Uint32 k;
jam();
const CreateIndxReq* const req = &opPtr.p->m_request;
@@ -6511,39 +6754,49 @@ Dbdict::createIndex_toCreateTable(Signal* signal, OpCreateIndexPtr opPtr)
// tree node size in words (make configurable later)
indexPtr.p->tupKeyLength = MAX_TTREE_NODE_SIZE;
}
- // hash index attributes must currently be in table order
- Uint32 prevAttrId = RNIL;
+
+ AttributeMask mask;
+ mask.clear();
for (k = 0; k < opPtr.p->m_attrList.sz; k++) {
jam();
- bool found = false;
- for (Uint32 tAttr = tablePtr.p->firstAttribute; tAttr != RNIL; ) {
- AttributeRecord* aRec = c_attributeRecordPool.getPtr(tAttr);
- tAttr = aRec->nextAttrInTable;
- if (aRec->attributeId != opPtr.p->m_attrList.id[k])
+ unsigned current_id= opPtr.p->m_attrList.id[k];
+ AttributeRecord* aRec= NULL;
+ Uint32 tAttr= tablePtr.p->firstAttribute;
+ for (; tAttr != RNIL; tAttr= aRec->nextAttrInTable)
+ {
+ aRec = c_attributeRecordPool.getPtr(tAttr);
+ if (aRec->attributeId != current_id)
continue;
jam();
- found = true;
- const Uint32 a = aRec->attributeDescriptor;
- if (indexPtr.p->isHashIndex()) {
- const Uint32 s1 = AttributeDescriptor::getSize(a);
- const Uint32 s2 = AttributeDescriptor::getArraySize(a);
- indexPtr.p->tupKeyLength += ((1 << s1) * s2 + 31) >> 5;
- }
+ break;
}
- if (! found) {
+ if (tAttr == RNIL) {
jam();
opPtr.p->m_errorCode = CreateIndxRef::BadRequestType;
opPtr.p->m_errorLine = __LINE__;
return;
}
- if (indexPtr.p->isHashIndex() &&
- k > 0 && prevAttrId >= opPtr.p->m_attrList.id[k]) {
+ if (mask.get(current_id))
+ {
jam();
- opPtr.p->m_errorCode = CreateIndxRef::InvalidAttributeOrder;
+ opPtr.p->m_errorCode = CreateIndxRef::DuplicateAttributes;
opPtr.p->m_errorLine = __LINE__;
return;
}
- prevAttrId = opPtr.p->m_attrList.id[k];
+ mask.set(current_id);
+
+ const Uint32 a = aRec->attributeDescriptor;
+ unsigned kk= k;
+ if (indexPtr.p->isHashIndex()) {
+ const Uint32 s1 = AttributeDescriptor::getSize(a);
+ const Uint32 s2 = AttributeDescriptor::getArraySize(a);
+ indexPtr.p->tupKeyLength += ((1 << s1) * s2 + 31) >> 5;
+      // reorder the attributes into table attribute-id order
+      // for unique (hash) indexes
+ for (; kk > 0 && current_id < attrid_map[kk-1]>>16; kk--)
+ attrid_map[kk]= attrid_map[kk-1];
+ }
+ attrid_map[kk]= k | (current_id << 16);
}
indexPtr.p->noOfPrimkey = indexPtr.p->noOfAttributes;
// plus concatenated primary table key attribute
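The loop above packs, for each requested index attribute, its position in the request (low 16 bits) together with its table attribute id (high 16 bits) into attrid_map, and for hash (unique) indexes insertion-sorts the entries by attribute id so the index columns end up in table order. A standalone sketch of the packing and sort:

    #include <cstdio>

    // Pack request position (low 16 bits) and table attribute id (high 16 bits),
    // insertion-sorting by attribute id -- the same idea as attrid_map above.
    static void buildMap(const unsigned* attrIds, unsigned n, unsigned* map)
    {
      for (unsigned k = 0; k < n; k++) {
        unsigned id = attrIds[k];
        unsigned kk = k;
        for (; kk > 0 && id < (map[kk - 1] >> 16); kk--)
          map[kk] = map[kk - 1];
        map[kk] = k | (id << 16);
      }
    }

    int main()
    {
      const unsigned req[] = { 7, 2, 5 };          // index requested on columns 7, 2, 5
      unsigned map[3];
      buildMap(req, 3, map);
      for (unsigned k = 0; k < 3; k++)             // prints 1:2  2:5  0:7
        std::printf("%u:%u  ", map[k] & 0xffff, map[k] >> 16);
      std::printf("\n");
      return 0;
    }
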
@@ -6564,15 +6817,21 @@ Dbdict::createIndex_toCreateTable(Signal* signal, OpCreateIndexPtr opPtr)
AttributeRecordPtr aRecPtr;
c_attributeRecordPool.getPtr(aRecPtr, tablePtr.p->firstAttribute);
for (k = 0; k < opPtr.p->m_attrList.sz; k++) {
+ // insert the attributes in the order decided above in attrid_map
+    // k is the new position, current_id is the attribute id in the original request order
+ // ToDo: make sure "current_id" is stored with the table and
+ // passed up to NdbDictionary
+ unsigned current_id= opPtr.p->m_attrList.id[attrid_map[k] & 0xffff];
jam();
for (Uint32 tAttr = tablePtr.p->firstAttribute; tAttr != RNIL; ) {
AttributeRecord* aRec = c_attributeRecordPool.getPtr(tAttr);
tAttr = aRec->nextAttrInTable;
- if (aRec->attributeId != opPtr.p->m_attrList.id[k])
+ if (aRec->attributeId != current_id)
continue;
jam();
const Uint32 a = aRec->attributeDescriptor;
bool isNullable = AttributeDescriptor::getNullable(a);
+ Uint32 attrType = AttributeDescriptor::getType(a);
w.add(DictTabInfo::AttributeName, aRec->attributeName);
w.add(DictTabInfo::AttributeId, k);
if (indexPtr.p->isHashIndex()) {
@@ -6583,9 +6842,7 @@ Dbdict::createIndex_toCreateTable(Signal* signal, OpCreateIndexPtr opPtr)
w.add(DictTabInfo::AttributeKeyFlag, (Uint32)false);
w.add(DictTabInfo::AttributeNullableFlag, (Uint32)isNullable);
}
- w.add(DictTabInfo::AttributeStoredInd, (Uint32)DictTabInfo::Stored);
- // ext type overrides
- w.add(DictTabInfo::AttributeExtType, aRec->extType);
+ w.add(DictTabInfo::AttributeExtType, attrType);
w.add(DictTabInfo::AttributeExtPrecision, aRec->extPrecision);
w.add(DictTabInfo::AttributeExtScale, aRec->extScale);
w.add(DictTabInfo::AttributeExtLength, aRec->extLength);
@@ -6598,9 +6855,7 @@ Dbdict::createIndex_toCreateTable(Signal* signal, OpCreateIndexPtr opPtr)
w.add(DictTabInfo::AttributeName, "NDB$PK");
w.add(DictTabInfo::AttributeId, opPtr.p->m_attrList.sz);
w.add(DictTabInfo::AttributeKeyFlag, (Uint32)false);
- w.add(DictTabInfo::AttributeStoredInd, (Uint32)DictTabInfo::Stored);
w.add(DictTabInfo::AttributeNullableFlag, (Uint32)false);
- // ext type overrides
w.add(DictTabInfo::AttributeExtType, (Uint32)DictTabInfo::ExtUnsigned);
w.add(DictTabInfo::AttributeExtLength, tablePtr.p->tupKeyLength);
w.add(DictTabInfo::AttributeEnd, (Uint32)true);
@@ -6611,9 +6866,7 @@ Dbdict::createIndex_toCreateTable(Signal* signal, OpCreateIndexPtr opPtr)
w.add(DictTabInfo::AttributeName, "NDB$TNODE");
w.add(DictTabInfo::AttributeId, opPtr.p->m_attrList.sz);
w.add(DictTabInfo::AttributeKeyFlag, (Uint32)true);
- w.add(DictTabInfo::AttributeStoredInd, (Uint32)DictTabInfo::Stored);
w.add(DictTabInfo::AttributeNullableFlag, (Uint32)false);
- // ext type overrides
w.add(DictTabInfo::AttributeExtType, (Uint32)DictTabInfo::ExtUnsigned);
w.add(DictTabInfo::AttributeExtLength, indexPtr.p->tupKeyLength);
w.add(DictTabInfo::AttributeEnd, (Uint32)true);
@@ -8023,7 +8276,7 @@ void Dbdict::createEventUTIL_EXECUTE(Signal *signal,
break;
case ZALREADYEXIST:
jam();
- evntRecPtr.p->m_errorCode = CreateEvntRef::EventExists;
+ evntRecPtr.p->m_errorCode = CreateEvntRef::EventNameExists;
break;
default:
jam();
@@ -9627,7 +9880,7 @@ Dbdict::alterIndex_toDropTc(Signal* signal, OpAlterIndexPtr opPtr)
// broken index allowed if force
if (! (indexPtr.p->indexLocal & TableRecord::IL_CREATED_TC)) {
jam();
- ndbrequire(opPtr.p->m_requestFlag & RequestFlag::RF_FORCE);
+ ndbassert(opPtr.p->m_requestFlag & RequestFlag::RF_FORCE);
alterIndex_sendReply(signal, opPtr, false);
return;
}
@@ -11684,7 +11937,7 @@ Dbdict::alterTrigger_toDropLocal(Signal* signal, OpAlterTriggerPtr opPtr)
// broken trigger allowed if force
if (! (triggerPtr.p->triggerLocal & TriggerRecord::TL_CREATED_TC)) {
jam();
- ndbrequire(opPtr.p->m_requestFlag & RequestFlag::RF_FORCE);
+ ndbassert(opPtr.p->m_requestFlag & RequestFlag::RF_FORCE);
alterTrigger_sendReply(signal, opPtr, false);
return;
}
@@ -11694,7 +11947,7 @@ Dbdict::alterTrigger_toDropLocal(Signal* signal, OpAlterTriggerPtr opPtr)
// broken trigger allowed if force
if (! (triggerPtr.p->triggerLocal & TriggerRecord::TL_CREATED_LQH)) {
jam();
- ndbrequire(opPtr.p->m_requestFlag & RequestFlag::RF_FORCE);
+ ndbassert(opPtr.p->m_requestFlag & RequestFlag::RF_FORCE);
alterTrigger_sendReply(signal, opPtr, false);
return;
}
@@ -11913,36 +12166,75 @@ Dbdict::getIndexAttrMask(TableRecordPtr indexPtr, AttributeMask& mask)
/* **************************************************************** */
void
-Dbdict::initSchemaFile(SchemaFile * sf, Uint32 fileSz){
- memcpy(sf->Magic, "NDBSCHMA", sizeof(sf->Magic));
- sf->ByteOrder = 0x12345678;
- sf->NdbVersion = NDB_VERSION;
- sf->FileSize = fileSz;
- sf->CheckSum = 0;
-
- Uint32 headSz = (sizeof(SchemaFile)-sizeof(SchemaFile::TableEntry));
- Uint32 noEntries = (fileSz - headSz) / sizeof(SchemaFile::TableEntry);
- Uint32 slack = (fileSz - headSz) - noEntries * sizeof(SchemaFile::TableEntry);
-
- ndbrequire(noEntries > MAX_TABLES);
+Dbdict::initSchemaFile(XSchemaFile * xsf, Uint32 firstPage, Uint32 lastPage,
+ bool initEntries)
+{
+ ndbrequire(lastPage <= xsf->noOfPages);
+ for (Uint32 n = firstPage; n < lastPage; n++) {
+ SchemaFile * sf = &xsf->schemaPage[n];
+ if (initEntries)
+ memset(sf, 0, NDB_SF_PAGE_SIZE);
+
+ Uint32 ndb_version = NDB_VERSION;
+ if (ndb_version < NDB_SF_VERSION_5_0_6)
+ ndb_version = NDB_SF_VERSION_5_0_6;
- sf->NoOfTableEntries = noEntries;
- memset(sf->TableEntries, 0, noEntries*sizeof(SchemaFile::TableEntry));
- memset(&(sf->TableEntries[noEntries]), 0, slack);
- computeChecksum(sf);
+ memcpy(sf->Magic, NDB_SF_MAGIC, sizeof(sf->Magic));
+ sf->ByteOrder = 0x12345678;
+ sf->NdbVersion = ndb_version;
+ sf->FileSize = xsf->noOfPages * NDB_SF_PAGE_SIZE;
+ sf->PageNumber = n;
+ sf->CheckSum = 0;
+ sf->NoOfTableEntries = NDB_SF_PAGE_ENTRIES;
+
+ computeChecksum(xsf, n);
+ }
}
void
-Dbdict::computeChecksum(SchemaFile * sf){
+Dbdict::resizeSchemaFile(XSchemaFile * xsf, Uint32 noOfPages)
+{
+ ndbrequire(noOfPages <= NDB_SF_MAX_PAGES);
+ if (xsf->noOfPages < noOfPages) {
+ jam();
+ Uint32 firstPage = xsf->noOfPages;
+ xsf->noOfPages = noOfPages;
+ initSchemaFile(xsf, 0, firstPage, false);
+ initSchemaFile(xsf, firstPage, xsf->noOfPages, true);
+ }
+ if (xsf->noOfPages > noOfPages) {
+ jam();
+ Uint32 tableId = noOfPages * NDB_SF_PAGE_ENTRIES;
+ while (tableId < xsf->noOfPages * NDB_SF_PAGE_ENTRIES) {
+ SchemaFile::TableEntry * te = getTableEntry(xsf, tableId);
+ if (te->m_tableState != SchemaFile::INIT &&
+ te->m_tableState != SchemaFile::DROP_TABLE_COMMITTED) {
+ ndbrequire(false);
+ }
+ tableId++;
+ }
+ xsf->noOfPages = noOfPages;
+ initSchemaFile(xsf, 0, xsf->noOfPages, false);
+ }
+}
+
+void
+Dbdict::computeChecksum(XSchemaFile * xsf, Uint32 pageNo){
+ SchemaFile * sf = &xsf->schemaPage[pageNo];
sf->CheckSum = 0;
- sf->CheckSum = computeChecksum((const Uint32*)sf, sf->FileSize/4);
+ sf->CheckSum = computeChecksum((Uint32*)sf, NDB_SF_PAGE_SIZE_IN_WORDS);
}
bool
-Dbdict::validateChecksum(const SchemaFile * sf){
+Dbdict::validateChecksum(const XSchemaFile * xsf){
- Uint32 c = computeChecksum((const Uint32*)sf, sf->FileSize/4);
- return c == 0;
+ for (Uint32 n = 0; n < xsf->noOfPages; n++) {
+ SchemaFile * sf = &xsf->schemaPage[n];
+ Uint32 c = computeChecksum((Uint32*)sf, NDB_SF_PAGE_SIZE_IN_WORDS);
+ if ( c != 0)
+ return false;
+ }
+ return true;
}
Uint32
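computeChecksum above stores, in the CheckSum field, the XOR of all words of the page with that field temporarily zeroed; validateChecksum then only has to XOR the whole page and compare against zero. A minimal illustration of why this works, with the page size shrunk for brevity:

    #include <cassert>

    int main()
    {
      const unsigned WORDS = 8;               // stand-in for NDB_SF_PAGE_SIZE_IN_WORDS
      unsigned page[WORDS] = { 1, 2, 3, 4, 5, 6, 0 /* CheckSum slot */, 7 };

      // Write side: XOR of the page with the checksum word zeroed.
      unsigned cs = 0;
      for (unsigned i = 0; i < WORDS; i++) cs ^= page[i];
      page[6] = cs;

      // Read side: XOR over the whole page must now be zero.
      unsigned check = 0;
      for (unsigned i = 0; i < WORDS; i++) check ^= page[i];
      assert(check == 0);
      return 0;
    }
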
@@ -11954,11 +12246,14 @@ Dbdict::computeChecksum(const Uint32 * src, Uint32 len){
}
SchemaFile::TableEntry *
-Dbdict::getTableEntry(void * p, Uint32 tableId, bool allowTooBig){
- SchemaFile * sf = (SchemaFile*)p;
-
- ndbrequire(allowTooBig || tableId < sf->NoOfTableEntries);
- return &sf->TableEntries[tableId];
+Dbdict::getTableEntry(XSchemaFile * xsf, Uint32 tableId)
+{
+ Uint32 n = tableId / NDB_SF_PAGE_ENTRIES;
+ Uint32 i = tableId % NDB_SF_PAGE_ENTRIES;
+ ndbrequire(n < xsf->noOfPages);
+
+ SchemaFile * sf = &xsf->schemaPage[n];
+ return &sf->TableEntries[i];
}
// global metadata support
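With 127 entries per 4 kB page, getTableEntry above maps a table id to page tableId / NDB_SF_PAGE_ENTRIES and slot tableId % NDB_SF_PAGE_ENTRIES; the same quotient is what the write paths feed to computeChecksum and c_writeSchemaRecord.firstPage. A quick illustration of the mapping:

    #include <cstdio>

    int main()
    {
      const unsigned NDB_SF_PAGE_ENTRIES = 127;                // entries per 4k page
      const unsigned ids[] = { 0, 126, 127, 300 };
      for (unsigned i = 0; i < 4; i++) {
        unsigned tableId = ids[i];
        unsigned page = tableId / NDB_SF_PAGE_ENTRIES;         // which schema page
        unsigned slot = tableId % NDB_SF_PAGE_ENTRIES;         // entry within the page
        std::printf("table %3u -> page %u, slot %u\n", tableId, page, slot);
      }
      return 0;
    }
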
@@ -12063,3 +12358,5 @@ Dbdict::getMetaAttribute(MetaData::Attribute& attr, const MetaData::Table& table
new (&attr) MetaData::Attribute(*attrPtr.p);
return 0;
}
+
+CArray<KeyDescriptor> g_key_descriptor_pool;
diff --git a/ndb/src/kernel/blocks/dbdict/Dbdict.hpp b/ndb/src/kernel/blocks/dbdict/Dbdict.hpp
index bcee4a52b6a..6b78fb86534 100644
--- a/ndb/src/kernel/blocks/dbdict/Dbdict.hpp
+++ b/ndb/src/kernel/blocks/dbdict/Dbdict.hpp
@@ -78,7 +78,8 @@
/*--------------------------------------------------------------*/
// Page constants
/*--------------------------------------------------------------*/
-#define ZALLOCATE 1 //Variable number of page for NDBFS
+#define ZBAT_SCHEMA_FILE 0 //Variable number of page for NDBFS
+#define ZBAT_TABLE_FILE 1 //Variable number of page for NDBFS
#define ZPAGE_HEADER_SIZE 32
#define ZPOS_PAGE_SIZE 16
#define ZPOS_CHECKSUM 17
@@ -92,7 +93,7 @@
#define ZSIZE_OF_PAGES_IN_WORDS 8192
#define ZLOG_SIZE_OF_PAGES_IN_WORDS 13
#define ZMAX_PAGES_OF_TABLE_DEFINITION 8
-#define ZNUMBER_OF_PAGES (2 * ZMAX_PAGES_OF_TABLE_DEFINITION + 2)
+#define ZNUMBER_OF_PAGES (ZMAX_PAGES_OF_TABLE_DEFINITION + 1)
#define ZNO_OF_FRAGRECORD 5
/*--------------------------------------------------------------*/
@@ -212,7 +213,9 @@ public:
IL_CREATED_TC = 1 << 0 // created in TC
};
Uint32 indexLocal;
-
+
+ Uint32 noOfNullBits;
+
inline bool equal(TableRecord & rec) const {
return strcmp(tableName, rec.tableName) == 0;
}
@@ -426,6 +429,12 @@ public:
typedef Ptr<PageRecord> PageRecordPtr;
CArray<PageRecord> c_pageRecordArray;
+ struct SchemaPageRecord {
+ Uint32 word[NDB_SF_PAGE_SIZE_IN_WORDS];
+ };
+
+ CArray<SchemaPageRecord> c_schemaPageRecordArray;
+
/**
* A page for create index table signal.
*/
@@ -653,16 +662,20 @@ private:
struct ReadSchemaRecord {
/** Page Id of schema page */
Uint32 pageId;
+ /** First page to read */
+ Uint32 firstPage;
+ /** Number of pages to read */
+ Uint32 noOfPages;
/** State, indicates from where it was called */
enum SchemaReadState {
IDLE = 0,
- INITIAL_READ = 1
+ INITIAL_READ_HEAD = 1,
+ INITIAL_READ = 2
};
SchemaReadState schemaReadState;
};
ReadSchemaRecord c_readSchemaRecord;
-private:
/**
* This record stores all the state needed
* when a schema file is being written to disk
@@ -670,6 +683,12 @@ private:
struct WriteSchemaRecord {
/** Page Id of schema page */
Uint32 pageId;
+ /** Rewrite entire file */
+ Uint32 newFile;
+ /** First page to write */
+ Uint32 firstPage;
+ /** Number of pages to write */
+ Uint32 noOfPages;
/** Schema Files Handled, local state variable */
Uint32 noOfSchemaFilesHandled;
@@ -750,21 +769,33 @@ private:
* Word 4: Currently zero
****************************************************************************/
struct SchemaRecord {
- /** Schema page */
+ /** Schema file first page (0) */
Uint32 schemaPage;
- /** Old Schema page (used at node restart) */
+ /** Old Schema file first page (used at node restart) */
Uint32 oldSchemaPage;
Callback m_callback;
};
SchemaRecord c_schemaRecord;
- void initSchemaFile(SchemaFile *, Uint32 sz);
- void computeChecksum(SchemaFile *);
- bool validateChecksum(const SchemaFile *);
- SchemaFile::TableEntry * getTableEntry(void * buf, Uint32 tableId,
- bool allowTooBig = false);
+ /*
+ * Schema file, list of schema pages. Use an array until a pool
+ * exists and NDBFS interface can use it.
+ */
+ struct XSchemaFile {
+ SchemaFile* schemaPage;
+ Uint32 noOfPages;
+ };
+ // 0-normal 1-old
+ XSchemaFile c_schemaFile[2];
+
+ void initSchemaFile(XSchemaFile *, Uint32 firstPage, Uint32 lastPage,
+ bool initEntries);
+ void resizeSchemaFile(XSchemaFile * xsf, Uint32 noOfPages);
+ void computeChecksum(XSchemaFile *, Uint32 pageNo);
+ bool validateChecksum(const XSchemaFile *);
+ SchemaFile::TableEntry * getTableEntry(XSchemaFile *, Uint32 tableId);
Uint32 computeChecksum(const Uint32 * src, Uint32 len);
@@ -1713,7 +1744,8 @@ private:
bool getNewAttributeRecord(TableRecordPtr tablePtr,
AttributeRecordPtr & attrPtr);
void packTableIntoPages(Signal* signal, Uint32 tableId, Uint32 pageId);
- void packTableIntoPagesImpl(SimpleProperties::Writer &, TableRecordPtr);
+ void packTableIntoPagesImpl(SimpleProperties::Writer &, TableRecordPtr,
+ Signal* signal= 0);
void sendGET_TABINFOREQ(Signal* signal,
Uint32 tableId);
@@ -1761,7 +1793,8 @@ private:
void openSchemaFile(Signal* signal,
Uint32 fileNo,
Uint32 fsPtr,
- bool writeFlag);
+ bool writeFlag,
+ bool newFile);
void writeSchemaFile(Signal* signal, Uint32 filePtr, Uint32 fsPtr);
void writeSchemaConf(Signal* signal,
FsConnectRecordPtr fsPtr);
@@ -1803,6 +1836,7 @@ private:
void readSchemaRef(Signal* signal, FsConnectRecordPtr fsPtr);
void closeReadSchemaConf(Signal* signal,
FsConnectRecordPtr fsPtr);
+ bool convertSchemaFileTo_5_0_6(XSchemaFile*);
/* ------------------------------------------------------------ */
// Get table definitions
diff --git a/ndb/src/kernel/blocks/dbdict/SchemaFile.hpp b/ndb/src/kernel/blocks/dbdict/SchemaFile.hpp
index 7c3223d3d14..0226991a073 100644
--- a/ndb/src/kernel/blocks/dbdict/SchemaFile.hpp
+++ b/ndb/src/kernel/blocks/dbdict/SchemaFile.hpp
@@ -18,16 +18,35 @@
#define DBDICT_SCHEMA_FILE_HPP
#include <ndb_types.h>
+#include <ndb_version.h>
#include <string.h>
+#define NDB_SF_MAGIC "NDBSCHMA"
+
+// page size 4k
+#define NDB_SF_PAGE_SIZE_IN_WORDS_LOG2 10
+#define NDB_SF_PAGE_SIZE_IN_WORDS (1 << NDB_SF_PAGE_SIZE_IN_WORDS_LOG2)
+#define NDB_SF_PAGE_SIZE (NDB_SF_PAGE_SIZE_IN_WORDS << 2)
+
+// 4k = (1 + 127) * 32
+#define NDB_SF_PAGE_ENTRIES 127
+
+// 160 pages = 20320 objects
+#define NDB_SF_MAX_PAGES 160
+
+// versions where format changed
+#define NDB_SF_VERSION_5_0_6 MAKE_VERSION(5, 0, 6)
+
+// One page in schema file.
struct SchemaFile {
+ // header size 32 bytes
char Magic[8];
Uint32 ByteOrder;
Uint32 NdbVersion;
Uint32 FileSize; // In bytes
- Uint32 Unused;
-
- Uint32 CheckSum;
+ Uint32 PageNumber;
+ Uint32 CheckSum; // Of this page
+ Uint32 NoOfTableEntries; // On this page (NDB_SF_PAGE_ENTRIES)
enum TableState {
INIT = 0,
@@ -38,20 +57,33 @@ struct SchemaFile {
ALTER_TABLE_COMMITTED = 5
};
+ // entry size 32 bytes
struct TableEntry {
Uint32 m_tableState;
Uint32 m_tableVersion;
Uint32 m_tableType;
- Uint32 m_noOfPages;
+ Uint32 m_info_words;
Uint32 m_gcp;
+ Uint32 m_unused[3];
bool operator==(const TableEntry& o) const {
return memcmp(this, &o, sizeof(* this))== 0;
}
};
+
+ // pre-5.0.6
+ struct TableEntry_old {
+ Uint32 m_tableState;
+ Uint32 m_tableVersion;
+ Uint32 m_tableType;
+ Uint32 m_noOfPages;
+ Uint32 m_gcp;
+ };
- Uint32 NoOfTableEntries;
- TableEntry TableEntries[1];
+ union {
+ TableEntry TableEntries[NDB_SF_PAGE_ENTRIES];
+ TableEntry_old TableEntries_old[1];
+ };
};
#endif
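The constants above are chosen so that one schema page is exactly 4 kB: a 32-byte header followed by NDB_SF_PAGE_ENTRIES (127) table entries of 32 bytes each, and 160 such pages cover 20320 objects. A standalone restatement of that arithmetic as compile-time checks (local enum names, not the real macros):

    enum {
      PAGE_SIZE_IN_WORDS = 1 << 10,                 // NDB_SF_PAGE_SIZE_IN_WORDS_LOG2 = 10
      PAGE_SIZE          = PAGE_SIZE_IN_WORDS << 2, // 4096 bytes
      HEADER_BYTES       = 32,                      // Magic[8] + 6 * Uint32
      ENTRY_BYTES        = 32,                      // sizeof(SchemaFile::TableEntry)
      PAGE_ENTRIES       = 127                      // NDB_SF_PAGE_ENTRIES
    };

    static_assert(PAGE_SIZE == 4096, "page is 4k");
    static_assert(HEADER_BYTES + PAGE_ENTRIES * ENTRY_BYTES == PAGE_SIZE,
                  "4k = (1 + 127) * 32");
    static_assert(160 * PAGE_ENTRIES == 20320, "160 pages hold 20320 objects");

    int main() { return 0; }
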
diff --git a/ndb/src/kernel/blocks/dbdict/printSchemaFile.cpp b/ndb/src/kernel/blocks/dbdict/printSchemaFile.cpp
index a8b84298ebe..f73654fd9d5 100644
--- a/ndb/src/kernel/blocks/dbdict/printSchemaFile.cpp
+++ b/ndb/src/kernel/blocks/dbdict/printSchemaFile.cpp
@@ -16,19 +16,33 @@
#include <ndb_global.h>
+#include <ndb_version.h>
#include <NdbMain.h>
#include <NdbOut.hpp>
#include <SchemaFile.hpp>
-void
-usage(const char * prg){
- ndbout << "Usage " << prg
- << " P0.SchemaLog" << endl;
+static const char* progname = 0;
+static bool allflag = false;
+static bool checkonly = false;
+static bool equalcontents = false;
+static bool okquiet = false;
+
+static void
+usage()
+{
+ ndbout
+ << "Usage: " << progname << " [-aceq]" << " file ..." << endl
+ << "-a print also unused slots" << endl
+ << "-c check only (return status 1 on error)" << endl
+ << "-e check also that the files have identical contents" << endl
+ << "-q no output if file is ok" << endl
+ << "Example: " << progname << " -ceq ndb_*_fs/D[12]/DBDICT/P0.SchemaLog" << endl;
}
-void
-fill(const char * buf, int mod){
+static void
+fill(const char * buf, int mod)
+{
int len = strlen(buf)+1;
ndbout << buf << " ";
while((len % mod) != 0){
@@ -37,68 +51,222 @@ fill(const char * buf, int mod){
}
}
-void
-print(const char * filename, const SchemaFile * file){
- ndbout << "----- Schemafile: " << filename << " -----" << endl;
- ndbout_c("Magic: %.*s ByteOrder: %.8x NdbVersion: %d FileSize: %d",
- sizeof(file->Magic), file->Magic,
- file->ByteOrder,
- file->NdbVersion,
- file->FileSize);
-
- for(Uint32 i = 0; i<file->NoOfTableEntries; i++){
- SchemaFile::TableEntry te = file->TableEntries[i];
- if(te.m_tableState != SchemaFile::INIT){
- ndbout << "Table " << i << ": State = " << te.m_tableState
- << " version = " << te.m_tableVersion
- << " type = " << te.m_tableType
- << " noOfPages = " << te.m_noOfPages
- << " gcp: " << te.m_gcp << endl;
- }
+static const char*
+version(Uint32 v)
+{
+ static char buf[40];
+ sprintf(buf, "%d.%d.%d", v >> 16, (v >> 8) & 0xFF, v & 0xFF);
+ return buf;
+}
+
+static int
+print_head(const char * filename, const SchemaFile * sf)
+{
+ int retcode = 0;
+
+ if (! checkonly) {
+ ndbout << "----- Schemafile: " << filename << " -----" << endl;
+ ndbout_c("Magic: %.*s ByteOrder: %.8x NdbVersion: %s FileSize: %d",
+ sizeof(sf->Magic),
+ sf->Magic,
+ sf->ByteOrder,
+ version(sf->NdbVersion),
+ sf->FileSize);
}
+
+  if (memcmp(sf->Magic, "NDBSCHMA", sizeof(sf->Magic)) != 0) {
+ ndbout << filename << ": invalid header magic" << endl;
+ retcode = 1;
+ }
+
+ if ((sf->NdbVersion >> 16) < 4 || (sf->NdbVersion >> 16) > 9) {
+ ndbout << filename << ": impossible version " << hex << sf->NdbVersion << endl;
+ retcode = 1;
+ }
+
+ return retcode;
}
-NDB_COMMAND(printSchemafile,
- "printSchemafile", "printSchemafile", "Prints a schemafile", 16384){
- if(argc < 2){
- usage(argv[0]);
- return 0;
+static int
+print_old(const char * filename, const SchemaFile * sf, Uint32 sz)
+{
+ int retcode = 0;
+
+ if (print_head(filename, sf) != 0)
+ retcode = 1;
+
+ for (Uint32 i = 0; i < sf->NoOfTableEntries; i++) {
+ SchemaFile::TableEntry_old te = sf->TableEntries_old[i];
+ if (allflag ||
+ (te.m_tableState != SchemaFile::INIT &&
+ te.m_tableState != SchemaFile::DROP_TABLE_COMMITTED)) {
+ if (! checkonly)
+ ndbout << "Table " << i << ":"
+ << " State = " << te.m_tableState
+ << " version = " << te.m_tableVersion
+ << " type = " << te.m_tableType
+ << " noOfPages = " << te.m_noOfPages
+ << " gcp: " << te.m_gcp << endl;
+ }
}
+ return retcode;
+}
+
+static int
+print(const char * filename, const SchemaFile * xsf, Uint32 sz)
+{
+ int retcode = 0;
- const char * filename = argv[1];
+ if (print_head(filename, xsf) != 0)
+ retcode = 1;
- struct stat sbuf;
- const int res = stat(filename, &sbuf);
- if(res != 0){
- ndbout << "Could not find file: \"" << filename << "\"" << endl;
- return 0;
+ assert(sizeof(SchemaFile) == NDB_SF_PAGE_SIZE);
+ if (xsf->FileSize != sz || xsf->FileSize % NDB_SF_PAGE_SIZE != 0) {
+ ndbout << filename << ": invalid FileSize " << xsf->FileSize << endl;
+ retcode = 1;
}
- const Uint32 bytes = sbuf.st_size;
-
- Uint32 * buf = new Uint32[bytes/4+1];
-
- FILE * f = fopen(filename, "rb");
- if(f == 0){
- ndbout << "Failed to open file" << endl;
- delete [] buf;
- return 0;
+ Uint32 noOfPages = xsf->FileSize / NDB_SF_PAGE_SIZE;
+ for (Uint32 n = 0; n < noOfPages; n++) {
+ if (! checkonly) {
+ ndbout << "----- Page: " << n << " (" << noOfPages << ") -----" << endl;
+ }
+ const SchemaFile * sf = &xsf[n];
+ if (memcmp(sf->Magic, xsf->Magic, sizeof(sf->Magic)) != 0) {
+ ndbout << filename << ": page " << n << " invalid magic" << endl;
+ retcode = 1;
+ }
+ if (sf->FileSize != xsf->FileSize) {
+ ndbout << filename << ": page " << n << " FileSize changed to " << sf->FileSize << "!=" << xsf->FileSize << endl;
+ retcode = 1;
+ }
+ Uint32 cs = 0;
+ for (Uint32 j = 0; j < NDB_SF_PAGE_SIZE_IN_WORDS; j++)
+ cs ^= ((const Uint32*)sf)[j];
+ if (cs != 0) {
+ ndbout << filename << ": page " << n << " invalid CheckSum" << endl;
+ retcode = 1;
+ }
+ if (sf->NoOfTableEntries != NDB_SF_PAGE_ENTRIES) {
+ ndbout << filename << ": page " << n << " invalid NoOfTableEntries " << sf->NoOfTableEntries << endl;
+ retcode = 1;
+ }
+ for (Uint32 i = 0; i < NDB_SF_PAGE_ENTRIES; i++) {
+ SchemaFile::TableEntry te = sf->TableEntries[i];
+ Uint32 j = n * NDB_SF_PAGE_ENTRIES + i;
+ if (allflag ||
+ (te.m_tableState != SchemaFile::INIT &&
+ te.m_tableState != SchemaFile::DROP_TABLE_COMMITTED)) {
+ if (! checkonly)
+ ndbout << "Table " << j << ":"
+ << " State = " << te.m_tableState
+ << " version = " << te.m_tableVersion
+ << " type = " << te.m_tableType
+ << " noOfWords = " << te.m_info_words
+ << " gcp: " << te.m_gcp << endl;
+ }
+ if (te.m_unused[0] != 0 || te.m_unused[1] != 0 || te.m_unused[2] != 0) {
+ ndbout << filename << ": entry " << j << " garbage in m_unused[3]" << endl;
+ retcode = 1;
+ }
+ }
}
- Uint32 sz = fread(buf, 1, bytes, f);
- fclose(f);
- if(sz != bytes){
- ndbout << "Failure while reading file" << endl;
- delete [] buf;
- return 0;
+
+ return retcode;
+}
+
+NDB_COMMAND(printSchemafile,
+ "printSchemafile", "printSchemafile", "Prints a schemafile", 16384)
+{
+ progname = argv[0];
+ int exitcode = 0;
+
+ while (argc > 1 && argv[1][0] == '-') {
+ if (strchr(argv[1], 'a') != 0)
+ allflag = true;
+ if (strchr(argv[1], 'c') != 0)
+ checkonly = true;
+ if (strchr(argv[1], 'e') != 0)
+ equalcontents = true;
+ if (strchr(argv[1], 'q') != 0)
+ okquiet = true;
+ if (strchr(argv[1], 'h') != 0 || strchr(argv[1], '?') != 0) {
+ usage();
+ return 0;
+ }
+ argc--, argv++;
}
-
- print(filename, (SchemaFile *)&buf[0]);
- Uint32 chk = 0, i;
- for (i = 0; i < bytes/4; i++)
- chk ^= buf[i];
- if (chk != 0)
- ndbout << "Invalid checksum!" << endl;
+ const char * prevfilename = 0;
+ Uint32 * prevbuf = 0;
+ Uint32 prevbytes = 0;
+
+ while (argc > 1) {
+ const char * filename = argv[1];
+ argc--, argv++;
+
+ struct stat sbuf;
+ const int res = stat(filename, &sbuf);
+ if (res != 0) {
+ ndbout << filename << ": not found errno=" << errno << endl;
+ exitcode = 1;
+ continue;
+ }
+ const Uint32 bytes = sbuf.st_size;
+
+ Uint32 * buf = new Uint32[bytes/4+1];
+
+ FILE * f = fopen(filename, "rb");
+ if (f == 0) {
+ ndbout << filename << ": open failed errno=" << errno << endl;
+ delete [] buf;
+ exitcode = 1;
+ continue;
+ }
+ Uint32 sz = fread(buf, 1, bytes, f);
+ fclose(f);
+ if (sz != bytes) {
+ ndbout << filename << ": read failed errno=" << errno << endl;
+ delete [] buf;
+ exitcode = 1;
+ continue;
+ }
+
+ if (sz < 32) {
+ ndbout << filename << ": too short (no header)" << endl;
+ delete [] buf;
+ exitcode = 1;
+ continue;
+ }
+
+ SchemaFile* sf = (SchemaFile *)&buf[0];
+ int ret;
+ if (sf->NdbVersion < NDB_SF_VERSION_5_0_6)
+ ret = print_old(filename, sf, sz);
+ else
+ ret = print(filename, sf, sz);
+
+ if (ret != 0) {
+ ndbout << filename << ": check failed"
+ << " version=" << version(sf->NdbVersion) << endl;
+ exitcode = 1;
+ } else if (! okquiet) {
+ ndbout << filename << ": ok"
+ << " version=" << version(sf->NdbVersion) << endl;
+ }
+
+ if (equalcontents && prevfilename != 0) {
+ if (prevbytes != bytes || memcmp(prevbuf, buf, bytes) != 0) {
+ ndbout << filename << ": differs from " << prevfilename << endl;
+ exitcode = 1;
+ }
+ }
+
+ prevfilename = filename;
+ delete [] prevbuf;
+ prevbuf = buf;
+ prevbytes = bytes;
+ }
- delete [] buf;
- return 0;
+ delete [] prevbuf;
+ return exitcode;
}
diff --git a/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp b/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp
index ca066b588e7..72051777959 100644
--- a/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp
+++ b/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp
@@ -247,7 +247,7 @@ void Dbdih::sendSTART_RECREQ(Signal* signal, Uint32 nodeId)
req->newestGci = SYSFILE->newestRestorableGCI;
sendSignal(ref, GSN_START_RECREQ, signal, StartRecReq::SignalLength, JBB);
- signal->theData[0] = EventReport::StartREDOLog;
+ signal->theData[0] = NDB_LE_StartREDOLog;
signal->theData[1] = nodeId;
signal->theData[2] = SYSFILE->keepGCI;
signal->theData[3] = SYSFILE->lastCompletedGCI[nodeId];
@@ -1815,7 +1815,7 @@ void Dbdih::nodeDictStartConfLab(Signal* signal)
/*-----------------------------------------------------------------*/
// Report that node restart has completed copy of dictionary.
/*-----------------------------------------------------------------*/
- signal->theData[0] = EventReport::NR_CopyDict;
+ signal->theData[0] = NDB_LE_NR_CopyDict;
sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 1, JBB);
}//Dbdih::nodeDictStartConfLab()
@@ -1836,7 +1836,7 @@ void Dbdih::gcpBlockedLab(Signal* signal)
/*-----------------------------------------------------------------*/
// Report that node restart has completed copy of distribution info.
/*-----------------------------------------------------------------*/
- signal->theData[0] = EventReport::NR_CopyDistr;
+ signal->theData[0] = NDB_LE_NR_CopyDistr;
sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 1, JBB);
/**
@@ -1989,7 +1989,7 @@ void Dbdih::execSTART_COPYREQ(Signal* signal)
/*-------------------------------------------------------------------------*/
// REPORT Copy process of node restart is now about to start up.
/*-------------------------------------------------------------------------*/
- signal->theData[0] = EventReport::NR_CopyFragsStarted;
+ signal->theData[0] = NDB_LE_NR_CopyFragsStarted;
signal->theData[1] = startNodeId;
sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 2, JBB);
@@ -2954,7 +2954,7 @@ void Dbdih::execCREATE_FRAGCONF(Signal* signal)
/* --------------------------------------------------------------------- */
// REPORT that copy of fragment has been completed.
/* --------------------------------------------------------------------- */
- signal->theData[0] = EventReport::NR_CopyFragDone;
+ signal->theData[0] = NDB_LE_NR_CopyFragDone;
signal->theData[1] = takeOverPtr.p->toStartingNode;
signal->theData[2] = tabPtr.i;
signal->theData[3] = takeOverPtr.p->toCurrentFragid;
@@ -3192,7 +3192,7 @@ Dbdih::switchPrimaryMutex_locked(Signal* signal, Uint32 toPtrI, Uint32 retVal){
void Dbdih::toCopyCompletedLab(Signal * signal, TakeOverRecordPtr takeOverPtr)
{
- signal->theData[0] = EventReport::NR_CopyFragsCompleted;
+ signal->theData[0] = NDB_LE_NR_CopyFragsCompleted;
signal->theData[1] = takeOverPtr.p->toStartingNode;
sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 2, JBB);
@@ -4477,7 +4477,7 @@ void Dbdih::startGcpMasterTakeOver(Signal* signal, Uint32 oldMasterId){
sendLoopMacro(MASTER_GCPREQ, sendMASTER_GCPREQ);
cgcpMasterTakeOverState = GMTOS_INITIAL;
- signal->theData[0] = EventReport::GCP_TakeoverStarted;
+ signal->theData[0] = NDB_LE_GCP_TakeoverStarted;
sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 1, JBB);
setLocalNodefailHandling(signal, oldMasterId, NF_GCP_TAKE_OVER);
@@ -4986,7 +4986,7 @@ void Dbdih::MASTER_GCPhandling(Signal* signal, Uint32 failedNodeId)
break;
}//switch
- signal->theData[0] = EventReport::GCP_TakeoverCompleted;
+ signal->theData[0] = NDB_LE_GCP_TakeoverCompleted;
sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 1, JBB);
/*--------------------------------------------------*/
@@ -5419,7 +5419,7 @@ Dbdih::checkEmptyLcpComplete(Signal *signal){
if(isMaster()){
jam();
- signal->theData[0] = EventReport::LCP_TakeoverStarted;
+ signal->theData[0] = NDB_LE_LCP_TakeoverStarted;
sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 1, JBB);
signal->theData[0] = 7012;
@@ -5891,7 +5891,7 @@ void Dbdih::MASTER_LCPhandling(Signal* signal, Uint32 failedNodeId)
ndbrequire(false);
break;
}//switch
- signal->theData[0] = EventReport::LCP_TakeoverCompleted;
+ signal->theData[0] = NDB_LE_LCP_TakeoverCompleted;
signal->theData[1] = c_lcpMasterTakeOverState.state;
sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 2, JBB);
@@ -5926,7 +5926,7 @@ void Dbdih::execNF_COMPLETEREP(Signal* signal)
/* -------------------------------------------------------------------- */
// Report the event that DBTC completed node failure handling.
/* -------------------------------------------------------------------- */
- signal->theData[0] = EventReport::NodeFailCompleted;
+ signal->theData[0] = NDB_LE_NodeFailCompleted;
signal->theData[1] = DBTC;
signal->theData[2] = failedNodePtr.i;
sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 3, JBB);
@@ -5939,7 +5939,7 @@ void Dbdih::execNF_COMPLETEREP(Signal* signal)
/* --------------------------------------------------------------------- */
// Report the event that DBDICT completed node failure handling.
/* --------------------------------------------------------------------- */
- signal->theData[0] = EventReport::NodeFailCompleted;
+ signal->theData[0] = NDB_LE_NodeFailCompleted;
signal->theData[1] = DBDICT;
signal->theData[2] = failedNodePtr.i;
sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 3, JBB);
@@ -5952,7 +5952,7 @@ void Dbdih::execNF_COMPLETEREP(Signal* signal)
/* --------------------------------------------------------------------- */
// Report the event that DBDIH completed node failure handling.
/* --------------------------------------------------------------------- */
- signal->theData[0] = EventReport::NodeFailCompleted;
+ signal->theData[0] = NDB_LE_NodeFailCompleted;
signal->theData[1] = DBDIH;
signal->theData[2] = failedNodePtr.i;
sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 3, JBB);
@@ -5965,7 +5965,7 @@ void Dbdih::execNF_COMPLETEREP(Signal* signal)
/* --------------------------------------------------------------------- */
// Report the event that DBDIH completed node failure handling.
/* --------------------------------------------------------------------- */
- signal->theData[0] = EventReport::NodeFailCompleted;
+ signal->theData[0] = NDB_LE_NodeFailCompleted;
signal->theData[1] = DBLQH;
signal->theData[2] = failedNodePtr.i;
sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 3, JBB);
@@ -6000,7 +6000,7 @@ void Dbdih::execNF_COMPLETEREP(Signal* signal)
/* -------------------------------------------------------------------- */
// Report the event that nodeId has completed node failure handling.
/* -------------------------------------------------------------------- */
- signal->theData[0] = EventReport::NodeFailCompleted;
+ signal->theData[0] = NDB_LE_NodeFailCompleted;
signal->theData[1] = 0;
signal->theData[2] = failedNodePtr.i;
signal->theData[3] = nodeId;
@@ -6073,7 +6073,7 @@ void Dbdih::nodeFailCompletedCheckLab(Signal* signal,
/* ---------------------------------------------------------------------- */
// Report the event that all nodes completed node failure handling.
/* ---------------------------------------------------------------------- */
- signal->theData[0] = EventReport::NodeFailCompleted;
+ signal->theData[0] = NDB_LE_NodeFailCompleted;
signal->theData[1] = 0;
signal->theData[2] = failedNodePtr.i;
signal->theData[3] = 0;
@@ -6228,9 +6228,12 @@ void Dbdih::execCREATE_FRAGMENTATION_REQ(Signal * signal){
if (primaryTableId == RNIL) {
if(fragmentNode == 0){
jam();
- // needs to be fixed for single fragment tables
- NGPtr.i = 0; //c_nextNodeGroup;
- c_nextNodeGroup = (NGPtr.i + 1 == cnoOfNodeGroups ? 0 : NGPtr.i + 1);
+ NGPtr.i = 0;
+ if(noOfFragments < csystemnodes)
+ {
+ NGPtr.i = c_nextNodeGroup;
+ c_nextNodeGroup = (NGPtr.i + 1 == cnoOfNodeGroups ? 0 : NGPtr.i + 1);
+ }
} else if(! (fragmentNode < MAX_NDB_NODES)) {
jam();
err = CreateFragmentationRef::InvalidNodeId;
@@ -6283,33 +6286,28 @@ void Dbdih::execCREATE_FRAGMENTATION_REQ(Signal * signal){
}
}
- //@todo use section writer
Uint32 count = 2;
- Uint32 fragments[2 + 8*MAX_REPLICAS*MAX_NDB_NODES];
- Uint32 next_replica_node[MAX_NDB_NODES];
- memset(next_replica_node,0,sizeof(next_replica_node));
+ Uint16 *fragments = (Uint16*)(signal->theData+25);
if (primaryTableId == RNIL) {
jam();
+ Uint8 next_replica_node[MAX_NDB_NODES];
+ memset(next_replica_node,0,sizeof(next_replica_node));
for(Uint32 fragNo = 0; fragNo<noOfFragments; fragNo++){
jam();
ptrCheckGuard(NGPtr, MAX_NDB_NODES, nodeGroupRecord);
-
- Uint32 ind = next_replica_node[NGPtr.i];
const Uint32 max = NGPtr.p->nodeCount;
-
- //-------------------------------------------------------------------
- // We make an extra step to ensure that the primary replicas are
- // spread among the nodes.
- //-------------------------------------------------------------------
- next_replica_node[NGPtr.i] = (ind + 1 >= max ? 0 : ind + 1);
-
- for(Uint32 replicaNo = 0; replicaNo<noOfReplicas; replicaNo++){
+
+ Uint32 tmp= next_replica_node[NGPtr.i];
+ for(Uint32 replicaNo = 0; replicaNo<noOfReplicas; replicaNo++)
+ {
jam();
- const Uint32 nodeId = NGPtr.p->nodesInGroup[ind++];
+ const Uint32 nodeId = NGPtr.p->nodesInGroup[tmp++];
fragments[count++] = nodeId;
- ind = (ind == max ? 0 : ind);
+ tmp = (tmp >= max ? 0 : tmp);
}
-
+ tmp++;
+ next_replica_node[NGPtr.i]= (tmp >= max ? 0 : tmp);
+
/**
* Next node group for next fragment
*/
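For a table with no primary table (primaryTableId == RNIL), the loop above hands out noOfReplicas node ids per fragment from the fragment's node group, starting one slot further along on each visit so that the first replica listed rotates across the group's nodes. A small sketch of that rotation, assuming a hypothetical two-node group:

    #include <cstdio>

    int main()
    {
      const unsigned nodesInGroup[] = { 3, 4 };      // one node group with nodes 3 and 4
      const unsigned max = 2, noOfReplicas = 2, noOfFragments = 4;
      unsigned next = 0;                             // next_replica_node for this group

      for (unsigned frag = 0; frag < noOfFragments; frag++) {
        unsigned tmp = next;
        std::printf("frag %u:", frag);
        for (unsigned r = 0; r < noOfReplicas; r++) {
          std::printf(" %u", nodesInGroup[tmp++]);   // first printed node would be primary
          tmp = (tmp >= max ? 0 : tmp);
        }
        tmp++;
        next = (tmp >= max ? 0 : tmp);               // start one slot further next time
        std::printf("\n");                           // prints 3 4 / 4 3 / 3 4 / 4 3
      }
      return 0;
    }
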
@@ -6358,26 +6356,42 @@ void Dbdih::execCREATE_FRAGMENTATION_REQ(Signal * signal){
fragments[0] = noOfReplicas;
fragments[1] = noOfFragments;
- LinearSectionPtr ptr[3];
- ptr[0].p = &fragments[0];
- ptr[0].sz = count;
- sendSignal(senderRef,
- GSN_CREATE_FRAGMENTATION_CONF,
- signal,
- CreateFragmentationConf::SignalLength,
- JBB,
- ptr,
- 1);
+ if(senderRef != 0)
+ {
+ LinearSectionPtr ptr[3];
+ ptr[0].p = (Uint32*)&fragments[0];
+ ptr[0].sz = (count + 1) / 2;
+ sendSignal(senderRef,
+ GSN_CREATE_FRAGMENTATION_CONF,
+ signal,
+ CreateFragmentationConf::SignalLength,
+ JBB,
+ ptr,
+ 1);
+ }
+ else
+ {
+ // Execute direct
+ signal->theData[0] = 0;
+ }
return;
} while(false);
-
- CreateFragmentationRef * const ref =
- (CreateFragmentationRef*)signal->getDataPtrSend();
- ref->senderRef = reference();
- ref->senderData = senderData;
- ref->errorCode = err;
- sendSignal(senderRef, GSN_CREATE_FRAGMENTATION_REF, signal,
- CreateFragmentationRef::SignalLength, JBB);
+
+ if(senderRef != 0)
+ {
+ CreateFragmentationRef * const ref =
+ (CreateFragmentationRef*)signal->getDataPtrSend();
+ ref->senderRef = reference();
+ ref->senderData = senderData;
+ ref->errorCode = err;
+ sendSignal(senderRef, GSN_CREATE_FRAGMENTATION_REF, signal,
+ CreateFragmentationRef::SignalLength, JBB);
+ }
+ else
+ {
+ // Execute direct
+ signal->theData[0] = err;
+ }
}
void Dbdih::execDIADDTABREQ(Signal* signal)
@@ -6445,12 +6459,15 @@ void Dbdih::execDIADDTABREQ(Signal* signal)
tabPtr.p->method = TabRecord::HASH;
tabPtr.p->kvalue = req->kValue;
- Uint32 fragments[2 + 8*MAX_REPLICAS*MAX_NDB_NODES];
+ union {
+ Uint16 fragments[2 + MAX_FRAG_PER_NODE*MAX_REPLICAS*MAX_NDB_NODES];
+ Uint32 align;
+ };
SegmentedSectionPtr fragDataPtr;
signal->getSection(fragDataPtr, DiAddTabReq::FRAGMENTATION);
- copy(fragments, fragDataPtr);
+ copy((Uint32*)fragments, fragDataPtr);
releaseSections(signal);
-
+
const Uint32 noReplicas = fragments[0];
const Uint32 noFragments = fragments[1];
@@ -6459,6 +6476,7 @@ void Dbdih::execDIADDTABREQ(Signal* signal)
ndbrequire(noReplicas == cnoReplicas); // Only allowed
if (ERROR_INSERTED(7173)) {
+ CLEAR_ERROR_INSERT_VALUE;
addtabrefuseLab(signal, connectPtr, ZREPLERROR1);
return;
}
@@ -6878,8 +6896,7 @@ void Dbdih::execDIGETNODESREQ(Signal* signal)
TabRecord* regTabDesc = tabRecord;
jamEntry();
ptrCheckGuard(tabPtr, ttabFileSize, regTabDesc);
- hashValue = hashValue >> tabPtr.p->kvalue;
- Uint32 fragId = tabPtr.p->mask & hashValue;
+ Uint32 fragId = hashValue & tabPtr.p->mask;
ndbrequire(tabPtr.p->tabStatus == TabRecord::TS_ACTIVE);
if (fragId < tabPtr.p->hashpointer) {
jam();
@@ -7240,7 +7257,7 @@ void Dbdih::startGcpLab(Signal* signal, Uint32 aWaitTime)
/***************************************************************************/
// Report the event that a global checkpoint has started.
/***************************************************************************/
- signal->theData[0] = EventReport::GlobalCheckpointStarted; //Event type
+ signal->theData[0] = NDB_LE_GlobalCheckpointStarted; //Event type
signal->theData[1] = cnewgcp;
sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 2, JBB);
@@ -7485,6 +7502,22 @@ void Dbdih::execDIHNDBTAMPER(Signal* signal)
#ifdef ERROR_INSERT
case 5:
jam();
+ if(tuserpointer == 0)
+ {
+ jam();
+ signal->theData[0] = 0;
+ sendSignal(QMGR_REF, GSN_NDB_TAMPER, signal, 1, JBB);
+ sendSignal(NDBCNTR_REF, GSN_NDB_TAMPER, signal, 1, JBB);
+ sendSignal(NDBFS_REF, GSN_NDB_TAMPER, signal, 1, JBB);
+ sendSignal(DBACC_REF, GSN_NDB_TAMPER, signal, 1, JBB);
+ sendSignal(DBTUP_REF, GSN_NDB_TAMPER, signal, 1, JBB);
+ sendSignal(DBLQH_REF, GSN_NDB_TAMPER, signal, 1, JBB);
+ sendSignal(DBDICT_REF, GSN_NDB_TAMPER, signal, 1, JBB);
+ sendSignal(DBDIH_REF, GSN_NDB_TAMPER, signal, 1, JBB);
+ sendSignal(DBTC_REF, GSN_NDB_TAMPER, signal, 1, JBB);
+ sendSignal(CMVMI_REF, GSN_NDB_TAMPER, signal, 1, JBB);
+ return;
+ }
/*----------------------------------------------------------------------*/
// Insert errors.
/*----------------------------------------------------------------------*/
@@ -7703,7 +7736,7 @@ void Dbdih::execCOPY_GCICONF(Signal* signal)
// Report the event that a global checkpoint has completed.
/************************************************************************/
signal->setTrace(0);
- signal->theData[0] = EventReport::GlobalCheckpointCompleted; //Event type
+ signal->theData[0] = NDB_LE_GlobalCheckpointCompleted; //Event type
signal->theData[1] = coldgcp;
sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 2, JBB);
@@ -9206,7 +9239,7 @@ void Dbdih::execTCGETOPSIZECONF(Signal* signal)
ndbrequire(((int)c_lcpState.oldestRestorableGci) > 0);
if (ERROR_INSERTED(7011)) {
- signal->theData[0] = EventReport::LCPStoppedInCalcKeepGci;
+ signal->theData[0] = NDB_LE_LCPStoppedInCalcKeepGci;
signal->theData[1] = 0;
sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 2, JBB);
return;
@@ -9292,7 +9325,7 @@ void Dbdih::storeNewLcpIdLab(Signal* signal)
/***************************************************************************/
// Report the event that a local checkpoint has started.
/***************************************************************************/
- signal->theData[0] = EventReport::LocalCheckpointStarted; //Event type
+ signal->theData[0] = NDB_LE_LocalCheckpointStarted; //Event type
signal->theData[1] = SYSFILE->latestLCP_ID + 1;
signal->theData[2] = c_lcpState.keepGci;
signal->theData[3] = c_lcpState.oldestRestorableGci;
@@ -9672,7 +9705,7 @@ void Dbdih::execLCP_FRAG_REP(Signal* signal)
/* --------------------------------------------------------------------- */
// REPORT that local checkpoint have completed this fragment.
/* --------------------------------------------------------------------- */
- signal->theData[0] = EventReport::LCPFragmentCompleted;
+ signal->theData[0] = NDB_LE_LCPFragmentCompleted;
signal->theData[1] = nodeId;
signal->theData[2] = tableId;
signal->theData[3] = fragId;
@@ -10123,7 +10156,7 @@ void Dbdih::allNodesLcpCompletedLab(Signal* signal)
/***************************************************************************/
// Report the event that a local checkpoint has completed.
/***************************************************************************/
- signal->theData[0] = EventReport::LocalCheckpointCompleted; //Event type
+ signal->theData[0] = NDB_LE_LocalCheckpointCompleted; //Event type
signal->theData[1] = SYSFILE->latestLCP_ID;
sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 2, JBB);
@@ -10280,7 +10313,87 @@ void Dbdih::tableCloseLab(Signal* signal, FileRecordPtr filePtr)
* GCP stop detected,
* send SYSTEM_ERROR to all other alive nodes
*/
-void Dbdih::crashSystemAtGcpStop(Signal* signal){
+void Dbdih::crashSystemAtGcpStop(Signal* signal)
+{
+ switch(cgcpStatus){
+ case GCP_NODE_FINISHED:
+ {
+ /**
+ * We're waiting for a GCP save conf
+ */
+ ndbrequire(!c_GCP_SAVEREQ_Counter.done());
+ NodeReceiverGroup rg(DBLQH, c_GCP_SAVEREQ_Counter);
+ signal->theData[0] = 2305;
+ sendSignal(rg, GSN_DUMP_STATE_ORD, signal, 1, JBB);
+
+ infoEvent("Detected GCP stop...sending kill to %s",
+ c_GCP_SAVEREQ_Counter.getText());
+ ndbout_c("Detected GCP stop...sending kill to %s",
+ c_GCP_SAVEREQ_Counter.getText());
+ return;
+ }
+ case GCP_SAVE_LQH_FINISHED:
+ ndbout_c("m_copyReason: %d m_waiting: %d",
+ c_copyGCIMaster.m_copyReason,
+ c_copyGCIMaster.m_waiting);
+ break;
+ }
+
+ ndbout_c("c_copyGCISlave: sender{Data, Ref} %d %x reason: %d nextWord: %d",
+ c_copyGCISlave.m_senderData,
+ c_copyGCISlave.m_senderRef,
+ c_copyGCISlave.m_copyReason,
+ c_copyGCISlave.m_expectedNextWord);
+
+ FileRecordPtr file0Ptr;
+ file0Ptr.i = crestartInfoFile[0];
+ ptrCheckGuard(file0Ptr, cfileFileSize, fileRecord);
+ FileRecordPtr file1Ptr;
+ file1Ptr.i = crestartInfoFile[1];
+ ptrCheckGuard(file1Ptr, cfileFileSize, fileRecord);
+
+ ndbout_c("file[0] status: %d type: %d reqStatus: %d file1: %d %d %d",
+ file0Ptr.p->fileStatus, file0Ptr.p->fileType, file0Ptr.p->reqStatus,
+ file1Ptr.p->fileStatus, file1Ptr.p->fileType, file1Ptr.p->reqStatus
+ );
+
+ signal->theData[0] = 404;
+ signal->theData[1] = file0Ptr.p->fileRef;
+ EXECUTE_DIRECT(NDBFS, GSN_DUMP_STATE_ORD, signal, 2);
+
+ signal->theData[0] = 404;
+ signal->theData[1] = file1Ptr.p->fileRef;
+ EXECUTE_DIRECT(NDBFS, GSN_DUMP_STATE_ORD, signal, 2);
+
+ ndbout_c("c_COPY_GCIREQ_Counter = %s",
+ c_COPY_GCIREQ_Counter.getText());
+ ndbout_c("c_COPY_TABREQ_Counter = %s",
+ c_COPY_TABREQ_Counter.getText());
+ ndbout_c("c_CREATE_FRAGREQ_Counter = %s",
+ c_CREATE_FRAGREQ_Counter.getText());
+ ndbout_c("c_DIH_SWITCH_REPLICA_REQ_Counter = %s",
+ c_DIH_SWITCH_REPLICA_REQ_Counter.getText());
+ ndbout_c("c_EMPTY_LCP_REQ_Counter = %s",c_EMPTY_LCP_REQ_Counter.getText());
+ ndbout_c("c_END_TOREQ_Counter = %s", c_END_TOREQ_Counter.getText());
+ ndbout_c("c_GCP_COMMIT_Counter = %s", c_GCP_COMMIT_Counter.getText());
+ ndbout_c("c_GCP_PREPARE_Counter = %s", c_GCP_PREPARE_Counter.getText());
+ ndbout_c("c_GCP_SAVEREQ_Counter = %s", c_GCP_SAVEREQ_Counter.getText());
+ ndbout_c("c_INCL_NODEREQ_Counter = %s", c_INCL_NODEREQ_Counter.getText());
+ ndbout_c("c_MASTER_GCPREQ_Counter = %s",
+ c_MASTER_GCPREQ_Counter.getText());
+ ndbout_c("c_MASTER_LCPREQ_Counter = %s",
+ c_MASTER_LCPREQ_Counter.getText());
+ ndbout_c("c_START_INFOREQ_Counter = %s",
+ c_START_INFOREQ_Counter.getText());
+ ndbout_c("c_START_RECREQ_Counter = %s", c_START_RECREQ_Counter.getText());
+ ndbout_c("c_START_TOREQ_Counter = %s", c_START_TOREQ_Counter.getText());
+ ndbout_c("c_STOP_ME_REQ_Counter = %s", c_STOP_ME_REQ_Counter.getText());
+ ndbout_c("c_TC_CLOPSIZEREQ_Counter = %s",
+ c_TC_CLOPSIZEREQ_Counter.getText());
+ ndbout_c("c_TCGETOPSIZEREQ_Counter = %s",
+ c_TCGETOPSIZEREQ_Counter.getText());
+ ndbout_c("c_UPDATE_TOREQ_Counter = %s", c_UPDATE_TOREQ_Counter.getText());
+
NodeRecordPtr nodePtr;
for (nodePtr.i = 1; nodePtr.i < MAX_NDB_NODES; nodePtr.i++) {
jam();
@@ -11452,7 +11565,6 @@ void Dbdih::makeNodeGroups(Uint32 nodeArray[])
NodeRecordPtr mngNodeptr;
Uint32 tmngNode;
Uint32 tmngNodeGroup;
- Uint32 tmngReplica;
Uint32 tmngLimit;
Uint32 i;
@@ -11461,7 +11573,6 @@ void Dbdih::makeNodeGroups(Uint32 nodeArray[])
* TO NODE GROUP ZNIL
*-----------------------------------------------------------------------*/
tmngNodeGroup = 0;
- tmngReplica = 0;
tmngLimit = csystemnodes - cnoHotSpare;
ndbrequire(tmngLimit < MAX_NDB_NODES);
for (i = 0; i < tmngLimit; i++) {
@@ -11473,13 +11584,11 @@ void Dbdih::makeNodeGroups(Uint32 nodeArray[])
mngNodeptr.p->nodeGroup = tmngNodeGroup;
NGPtr.i = tmngNodeGroup;
ptrCheckGuard(NGPtr, MAX_NDB_NODES, nodeGroupRecord);
- arrGuard(tmngReplica, MAX_REPLICAS);
- NGPtr.p->nodesInGroup[tmngReplica] = mngNodeptr.i;
- tmngReplica++;
- if (tmngReplica == cnoReplicas) {
+ arrGuard(NGPtr.p->nodeCount, MAX_REPLICAS);
+ NGPtr.p->nodesInGroup[NGPtr.p->nodeCount++] = mngNodeptr.i;
+ if (NGPtr.p->nodeCount == cnoReplicas) {
jam();
tmngNodeGroup++;
- tmngReplica = 0;
}//if
}//for
cnoOfNodeGroups = tmngNodeGroup;
diff --git a/ndb/src/kernel/blocks/dblqh/Dblqh.hpp b/ndb/src/kernel/blocks/dblqh/Dblqh.hpp
index 951d1e90251..81fd61520cf 100644
--- a/ndb/src/kernel/blocks/dblqh/Dblqh.hpp
+++ b/ndb/src/kernel/blocks/dblqh/Dblqh.hpp
@@ -29,6 +29,9 @@
#include <signaldata/LqhTransConf.hpp>
#include <signaldata/LqhFrag.hpp>
+// primary key is stored in TUP
+#include <../dbtup/Dbtup.hpp>
+
#ifdef DBLQH_C
// Constants
/* ------------------------------------------------------------------------- */
@@ -511,9 +514,7 @@ public:
WAIT_DELETE_STORED_PROC_ID_COPY = 6,
WAIT_ACC_COPY = 7,
WAIT_ACC_SCAN = 8,
- WAIT_SCAN_KEYINFO = 9,
WAIT_SCAN_NEXTREQ = 10,
- WAIT_COPY_KEYINFO = 11,
WAIT_CLOSE_SCAN = 12,
WAIT_CLOSE_COPY = 13,
WAIT_RELEASE_LOCK = 14,
@@ -575,6 +576,9 @@ public:
NodeId scanNodeId;
Uint16 scanReleaseCounter;
Uint16 scanNumber;
+
+ // scan source block ACC TUX TUP
+ BlockReference scanBlockref;
Uint8 scanCompletedStatus;
Uint8 scanFlag;
@@ -582,6 +586,8 @@ public:
Uint8 scanLockMode;
Uint8 readCommitted;
Uint8 rangeScan;
+ Uint8 descending;
+ Uint8 tupScan;
Uint8 scanTcWaiting;
Uint8 scanKeyinfoFlag;
Uint8 m_last_row;
@@ -878,10 +884,6 @@ public:
*/
Uint8 fragDistributionKey;
/**
- * Used to calculate which local fragment to use.
- */
- Uint8 hashCheckBit;
- /**
* The identity of the next local checkpoint this fragment
* should perform.
*/
@@ -2021,8 +2023,10 @@ public:
BlockReference tcTuxBlockref;
BlockReference tcTupBlockref;
Uint32 commitAckMarker;
- UintR noFiredTriggers;
-
+ union {
+ Uint32 m_scan_curr_range_no;
+ UintR noFiredTriggers;
+ };
Uint16 errorCode;
Uint16 logStartPageIndex;
Uint16 logStartPageNo;
@@ -2044,6 +2048,7 @@ public:
Uint8 opExec;
Uint8 operation;
Uint8 reclenAiLqhkey;
+ Uint8 m_offset_current_keybuf;
Uint8 replicaType;
Uint8 simpleRead;
Uint8 seqNoReplica;
@@ -2147,8 +2152,6 @@ private:
void execACC_SCANREF(Signal* signal);
void execNEXT_SCANCONF(Signal* signal);
void execNEXT_SCANREF(Signal* signal);
- void execACC_SCAN_INFO(Signal* signal);
- void execACC_SCAN_INFO24(Signal* signal);
void execACC_TO_REF(Signal* signal);
void execSTORED_PROCCONF(Signal* signal);
void execSTORED_PROCREF(Signal* signal);
@@ -2238,7 +2241,7 @@ private:
void LQHKEY_abort(Signal* signal, int errortype);
void LQHKEY_error(Signal* signal, int errortype);
void nextRecordCopy(Signal* signal);
- void calculateHash(Signal* signal);
+ Uint32 calculateHash(Uint32 tableId, const Uint32* src);
void continueAfterCheckLcpStopBlocked(Signal* signal);
void checkLcpStopBlockedLab(Signal* signal);
void sendCommittedTc(Signal* signal, BlockReference atcBlockref);
@@ -2266,7 +2269,7 @@ private:
void finishScanrec(Signal* signal);
void releaseScanrec(Signal* signal);
void seizeScanrec(Signal* signal);
- void sendKeyinfo20(Signal* signal, ScanRecord *, TcConnectionrec *);
+ Uint32 sendKeyinfo20(Signal* signal, ScanRecord *, TcConnectionrec *);
void sendScanFragConf(Signal* signal, Uint32 scanCompleted);
void initCopyrec(Signal* signal);
void initCopyTc(Signal* signal);
@@ -2396,6 +2399,8 @@ private:
void seizeAttrinbuf(Signal* signal);
Uint32 seize_attrinbuf();
Uint32 release_attrinbuf(Uint32);
+ Uint32 copy_bounds(Uint32 * dst, TcConnectionrec*);
+
void seizeFragmentrec(Signal* signal);
void seizePageRef(Signal* signal);
void seizeTcrec();
@@ -2444,7 +2449,6 @@ private:
void localCommitLab(Signal* signal);
void abortErrorLab(Signal* signal);
void continueAfterReceivingAllAiLab(Signal* signal);
- void sendScanFragRefLateLab(Signal* signal);
void abortStateHandlerLab(Signal* signal);
void writeAttrinfoLab(Signal* signal);
void scanAttrinfoLab(Signal* signal, Uint32* dataPtr, Uint32 length);
@@ -2511,7 +2515,7 @@ private:
void nextScanConfScanLab(Signal* signal);
void nextScanConfCopyLab(Signal* signal);
void continueScanNextReqLab(Signal* signal);
- bool keyinfoLab(Signal* signal, Uint32* dataPtr, Uint32 length);
+ void keyinfoLab(const Uint32 * src, const Uint32 * end);
void copySendTupkeyReqLab(Signal* signal);
void storedProcConfScanLab(Signal* signal);
void storedProcConfCopyLab(Signal* signal);
@@ -2567,7 +2571,6 @@ private:
void accScanConfScanLab(Signal* signal);
void accScanConfCopyLab(Signal* signal);
void scanLockReleasedLab(Signal* signal);
- void accScanInfoEnterLab(Signal* signal, Uint32* dataPtr, Uint32 length);
void openSrFourthNextLab(Signal* signal);
void closingInitLab(Signal* signal);
void closeExecSrCompletedLab(Signal* signal);
@@ -2582,6 +2585,8 @@ private:
void initData();
void initRecords();
+ Dbtup* c_tup;
+ Uint32 readPrimaryKeys(ScanRecord*, TcConnectionrec*, Uint32 * dst);
// ----------------------------------------------------------------
// These are variables handling the records. For most records one
// pointer to the array of structs, one pointer-struct, a file size
diff --git a/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp b/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp
index f9dd63e782d..04400f75255 100644
--- a/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp
+++ b/ndb/src/kernel/blocks/dblqh/DblqhInit.cpp
@@ -273,8 +273,6 @@ Dblqh::Dblqh(const class Configuration & conf):
addRecSignal(GSN_ACC_SCANREF, &Dblqh::execACC_SCANREF);
addRecSignal(GSN_NEXT_SCANCONF, &Dblqh::execNEXT_SCANCONF);
addRecSignal(GSN_NEXT_SCANREF, &Dblqh::execNEXT_SCANREF);
- addRecSignal(GSN_ACC_SCAN_INFO, &Dblqh::execACC_SCAN_INFO);
- addRecSignal(GSN_ACC_SCAN_INFO24, &Dblqh::execACC_SCAN_INFO24);
addRecSignal(GSN_STORED_PROCCONF, &Dblqh::execSTORED_PROCCONF);
addRecSignal(GSN_STORED_PROCREF, &Dblqh::execSTORED_PROCREF);
addRecSignal(GSN_COPY_FRAGREQ, &Dblqh::execCOPY_FRAGREQ);
diff --git a/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp b/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
index b6178227d31..365c28f1229 100644
--- a/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
+++ b/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp
@@ -55,6 +55,7 @@
#include <signaldata/AlterTab.hpp>
#include <signaldata/LCP.hpp>
+#include <KeyDescriptor.hpp>
// Use DEBUG to print messages that should be
// seen only when we debug the product
@@ -169,6 +170,8 @@ void Dblqh::execTUP_COM_UNBLOCK(Signal* signal)
/* ------------------------------------------------------------------------- */
void Dblqh::systemError(Signal* signal)
{
+ signal->theData[0] = 2304;
+ execDUMP_STATE_ORD(signal);
progError(0, 0);
}//Dblqh::systemError()
@@ -420,7 +423,7 @@ void Dblqh::execCONTINUEB(Signal* signal)
// Report information about transaction activity once per second.
/* --------------------------------------------------------------------- */
if (signal->theData[1] == 0) {
- signal->theData[0] = EventReport::OperationReportCounters;
+ signal->theData[0] = NDB_LE_OperationReportCounters;
signal->theData[1] = c_Counters.operations;
sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 2, JBB);
}//if
@@ -490,6 +493,8 @@ void Dblqh::execSTTOR(Signal* signal)
jam();
cstartPhase = tstartPhase;
sttorStartphase1Lab(signal);
+ c_tup = (Dbtup*)globalData.getBlock(DBTUP);
+ ndbrequire(c_tup != 0);
return;
break;
default:
@@ -1087,8 +1092,8 @@ void Dblqh::execLQHFRAGREQ(Signal* signal)
if (DictTabInfo::isOrderedIndex(tableType)) {
jam();
// NOTE: next 2 lines stolen from ACC
- addfragptr.p->fragid1 = (0 << tlhstar) | fragId;
- addfragptr.p->fragid2 = (1 << tlhstar) | fragId;
+ addfragptr.p->fragid1 = (fragId << 1) | 0;
+ addfragptr.p->fragid2 = (fragId << 1) | 1;
addfragptr.p->addfragStatus = AddFragRecord::WAIT_TWO_TUP;
sendAddFragReq(signal);
return;
@@ -1108,7 +1113,6 @@ void Dblqh::execACCFRAGCONF(Signal* signal)
Uint32 fragId2 = signal->theData[3];
Uint32 accFragPtr1 = signal->theData[4];
Uint32 accFragPtr2 = signal->theData[5];
- Uint32 hashCheckBit = signal->theData[6];
ptrCheckGuard(addfragptr, caddfragrecFileSize, addFragRecord);
ndbrequire(addfragptr.p->addfragStatus == AddFragRecord::ACC_ADDFRAG);
@@ -1119,7 +1123,6 @@ void Dblqh::execACCFRAGCONF(Signal* signal)
ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
fragptr.p->accFragptr[0] = accFragPtr1;
fragptr.p->accFragptr[1] = accFragPtr2;
- fragptr.p->hashCheckBit = hashCheckBit;
addfragptr.p->addfragStatus = AddFragRecord::WAIT_TWO_TUP;
sendAddFragReq(signal);
@@ -1277,7 +1280,7 @@ Dblqh::sendAddFragReq(Signal* signal)
tuxreq->noOfAttr = addfragptr.p->noOfAttr - 1; /* skip NDB$TNODE */
tuxreq->fragId =
addfragptr.p->addfragStatus == AddFragRecord::WAIT_TWO_TUX
- ? addfragptr.p->fragid1 : addfragptr.p->fragid2;
+ ? addfragptr.p->fragid1: addfragptr.p->fragid2;
tuxreq->fragOff = addfragptr.p->lh3DistrBits;
tuxreq->tableType = addfragptr.p->tableType;
tuxreq->primaryTableId = addfragptr.p->primaryTableId;
@@ -2075,7 +2078,7 @@ void Dblqh::execTIME_SIGNAL(Signal* signal)
if ((cCounterAccCommitBlocked > 0) ||
(cCounterTupCommitBlocked > 0)) {
jam();
- signal->theData[0] = EventReport::UndoLogBlocked;
+ signal->theData[0] = NDB_LE_UndoLogBlocked;
signal->theData[1] = cCounterTupCommitBlocked;
signal->theData[2] = cCounterAccCommitBlocked;
sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 3, JBB);
@@ -2619,12 +2622,20 @@ Dblqh::execREAD_PSUEDO_REQ(Signal* signal){
regTcPtr.i = signal->theData[0];
ptrCheckGuard(regTcPtr, ctcConnectrecFileSize, tcConnectionrec);
- FragrecordPtr regFragptr;
- regFragptr.i = regTcPtr.p->fragmentptr;
- ptrCheckGuard(regFragptr, cfragrecFileSize, fragrecord);
-
- signal->theData[0] = regFragptr.p->accFragptr[regTcPtr.p->localFragptr];
- EXECUTE_DIRECT(DBACC, GSN_READ_PSUEDO_REQ, signal, 2);
+ if(signal->theData[1] != AttributeHeader::RANGE_NO)
+ {
+ jam();
+ FragrecordPtr regFragptr;
+ regFragptr.i = regTcPtr.p->fragmentptr;
+ ptrCheckGuard(regFragptr, cfragrecFileSize, fragrecord);
+
+ signal->theData[0] = regFragptr.p->accFragptr[regTcPtr.p->localFragptr];
+ EXECUTE_DIRECT(DBACC, GSN_READ_PSUEDO_REQ, signal, 2);
+ }
+ else
+ {
+ signal->theData[0] = regTcPtr.p->m_scan_curr_range_no;
+ }
}
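// A minimal standalone sketch of the dispatch above: a pseudo-column read
// is answered locally when the scan itself caches the value (the range
// number maintained per operation), and is forwarded to the block that
// owns the data otherwise. The types and names are illustrative stand-ins,
// not the NDB kernel definitions.
#include <cstdint>

enum class PseudoCol : uint32_t { Fragment, RowCount, RangeNo };

struct ScanState {
  uint32_t currRangeNo;                    // filled in when the bounds are copied
  uint32_t (*forwardToStorage)(PseudoCol); // stand-in for EXECUTE_DIRECT into DBACC
};

uint32_t readPseudoCol(const ScanState& s, PseudoCol col)
{
  if (col != PseudoCol::RangeNo)
    return s.forwardToStorage(col);  // value owned by the storage block
  return s.currRangeNo;              // answered directly from the scan state
}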
/* ************>> */
@@ -2639,11 +2650,11 @@ void Dblqh::execTUPKEYCONF(Signal* signal)
jamEntry();
tcConnectptr.i = tcIndex;
ptrCheckGuard(tcConnectptr, ttcConnectrecFileSize, regTcConnectionrec);
- if (tcConnectptr.p->seqNoReplica == 0) // Primary replica
- tcConnectptr.p->noFiredTriggers = tupKeyConf->noFiredTriggers;
switch (tcConnectptr.p->transactionState) {
case TcConnectionrec::WAIT_TUP:
jam();
+ if (tcConnectptr.p->seqNoReplica == 0) // Primary replica
+ tcConnectptr.p->noFiredTriggers = tupKeyConf->noFiredTriggers;
tupkeyConfLab(signal);
break;
case TcConnectionrec::COPY_TUPKEY:
@@ -3525,7 +3536,7 @@ void Dblqh::execLQHKEYREQ(Signal* signal)
LQHKEY_error(signal, 6);
return;
}//if
- regTcPtr->localFragptr = (regTcPtr->hashValue >> fragptr.p->hashCheckBit) & 1;
+ regTcPtr->localFragptr = regTcPtr->hashValue & 1;
Uint8 TcopyType = fragptr.p->fragCopy;
tfragDistKey = fragptr.p->fragDistributionKey;
if (fragptr.p->fragStatus == Fragrecord::ACTIVE_CREATION) {
@@ -6835,49 +6846,6 @@ Dblqh::scanMarkers(Signal* signal,
* ALL TUPLES IN THE FRAGMENT. TUP PERFORMS THE NECESSARY SEARCH CONDITIONS
* TO ENSURE THAT ONLY VALID TUPLES ARE RETURNED TO THE APPLICATION.
* ------------------------------------------------------------------------- */
-
-void Dblqh::execACC_SCAN_INFO(Signal* signal)
-{
- jamEntry();
- scanptr.i = signal->theData[0];
- c_scanRecordPool.getPtr(scanptr);
- Uint32 length = signal->theData[3];
- ndbrequire(length <= 4);
- accScanInfoEnterLab(signal, &signal->theData[4], length);
-}//Dblqh::execACC_SCAN_INFO()
-
-
-void Dblqh::execACC_SCAN_INFO24(Signal* signal)
-{
- jamEntry();
- scanptr.i = signal->theData[0];
- c_scanRecordPool.getPtr(scanptr);
- Uint32 length = signal->theData[3];
- ndbrequire(length <= 20);
- accScanInfoEnterLab(signal, &signal->theData[4], length);
-}//Dblqh::execACC_SCAN_INFO24()
-
-void Dblqh::accScanInfoEnterLab(Signal* signal,
- Uint32* dataPtr,
- Uint32 length)
-{
- ndbrequire(length != 0);
- if (scanptr.p->scanState == ScanRecord::WAIT_SCAN_KEYINFO) {
- jam();
- if (keyinfoLab(signal, dataPtr, length)) {
- jam();
- nextScanConfLoopLab(signal);
- }//if
- } else {
- ndbrequire(scanptr.p->scanState == ScanRecord::WAIT_COPY_KEYINFO);
- jam();
- if (keyinfoLab(signal, dataPtr, length)) {
- jam();
- copySendTupkeyReqLab(signal);
- }//if
- }//if
-}//Dblqh::accScanInfoEnterLab()
-
/* *************** */
/* ACC_SCANCONF > */
/* *************** */
@@ -7007,6 +6975,7 @@ void Dblqh::execSTORED_PROCREF(Signal* signal)
switch (scanptr.p->scanState) {
case ScanRecord::WAIT_STORED_PROC_SCAN:
jam();
+ scanptr.p->scanCompletedStatus = ZTRUE;
scanptr.p->scanStoredProcId = signal->theData[2];
tcConnectptr.p->errorCode = errorCode;
closeScanLab(signal);
@@ -7212,10 +7181,7 @@ void Dblqh::continueScanReleaseAfterBlockedLab(Signal* signal)
scanptr.p->scanReleaseCounter -1,
false);
signal->theData[2] = NextScanReq::ZSCAN_COMMIT;
- if (! scanptr.p->rangeScan)
- sendSignal(tcConnectptr.p->tcAccBlockref, GSN_NEXT_SCANREQ, signal, 3, JBB);
- else
- sendSignal(tcConnectptr.p->tcTuxBlockref, GSN_NEXT_SCANREQ, signal, 3, JBB);
+ sendSignal(scanptr.p->scanBlockref, GSN_NEXT_SCANREQ, signal, 3, JBB);
}//Dblqh::continueScanReleaseAfterBlockedLab()
/* -------------------------------------------------------------------------
@@ -7243,7 +7209,6 @@ void Dblqh::closeScanRequestLab(Signal* signal)
jam();
tupScanCloseConfLab(signal);
break;
- case ScanRecord::WAIT_SCAN_KEYINFO:
case ScanRecord::WAIT_NEXT_SCAN:
jam();
/* -------------------------------------------------------------------
@@ -7307,6 +7272,7 @@ void Dblqh::closeScanRequestLab(Signal* signal)
* WE ARE STILL WAITING FOR THE ATTRIBUTE INFORMATION THAT
* OBVIOUSLY WILL NOT ARRIVE. WE CAN QUIT IMMEDIATELY HERE.
* --------------------------------------------------------------------- */
+    //XXX jonas this has to be wrong...
releaseOprec(signal);
if (tcConnectptr.p->abortState == TcConnectionrec::NEW_FROM_TC) {
jam();
@@ -7528,6 +7494,7 @@ void Dblqh::execSCAN_FRAGREQ(Signal* signal)
const Uint32 scanLockMode = ScanFragReq::getLockMode(reqinfo);
const Uint8 keyinfo = ScanFragReq::getKeyinfoFlag(reqinfo);
const Uint8 rangeScan = ScanFragReq::getRangeScanFlag(reqinfo);
+ const Uint8 tupScan = ScanFragReq::getTupScanFlag(reqinfo);
ptrCheckGuard(tabptr, ctabrecFileSize, tablerec);
if(tabptr.p->tableStatus != Tablerec::TABLE_DEFINED){
@@ -7672,18 +7639,13 @@ void Dblqh::continueAfterReceivingAllAiLab(Signal* signal)
req->fragmentNo = tcConnectptr.p->fragmentid;
req->requestInfo = 0;
AccScanReq::setLockMode(req->requestInfo, scanptr.p->scanLockMode);
- AccScanReq::setKeyinfoFlag(req->requestInfo, scanptr.p->scanKeyinfoFlag);
AccScanReq::setReadCommittedFlag(req->requestInfo, scanptr.p->readCommitted);
+ AccScanReq::setDescendingFlag(req->requestInfo, scanptr.p->descending);
req->transId1 = tcConnectptr.p->transid[0];
req->transId2 = tcConnectptr.p->transid[1];
req->savePointId = tcConnectptr.p->savePointId;
- // always use if-stmt to switch (instead of setting a "scan block ref")
- if (! scanptr.p->rangeScan)
- sendSignal(tcConnectptr.p->tcAccBlockref, GSN_ACC_SCANREQ, signal,
- AccScanReq::SignalLength, JBB);
- else
- sendSignal(tcConnectptr.p->tcTuxBlockref, GSN_ACC_SCANREQ, signal,
- AccScanReq::SignalLength, JBB);
+ sendSignal(scanptr.p->scanBlockref, GSN_ACC_SCANREQ, signal,
+ AccScanReq::SignalLength, JBB);
}//Dblqh::continueAfterReceivingAllAiLab()
void Dblqh::scanAttrinfoLab(Signal* signal, Uint32* dataPtr, Uint32 length)
@@ -7777,11 +7739,6 @@ void Dblqh::execSCAN_HBREP(Signal* signal)
}
}
-void Dblqh::sendScanFragRefLateLab(Signal* signal)
-{
-}//Dblqh::sendScanFragRefLateLab()
-
-
void Dblqh::accScanConfScanLab(Signal* signal)
{
AccScanConf * const accScanConf = (AccScanConf *)&signal->theData[0];
@@ -7800,17 +7757,15 @@ void Dblqh::accScanConfScanLab(Signal* signal)
return;
}//if
scanptr.p->scanAccPtr = accScanConf->accPtr;
- Uint32 boundAiLength = tcConnectptr.p->primKeyLen - 4;
if (scanptr.p->rangeScan) {
jam();
- TuxBoundInfo* const req = (TuxBoundInfo*)signal->getDataPtrSend();
+ TuxBoundInfo* req = (TuxBoundInfo*)signal->getDataPtrSend();
req->errorCode = RNIL;
req->tuxScanPtrI = scanptr.p->scanAccPtr;
- req->boundAiLength = boundAiLength;
- if(boundAiLength > 0)
- sendKeyinfoAcc(signal, TuxBoundInfo::SignalLength);
- EXECUTE_DIRECT(DBTUX, GSN_TUX_BOUND_INFO,
- signal, TuxBoundInfo::SignalLength + boundAiLength);
+ Uint32 len = req->boundAiLength = copy_bounds(req->data, tcConnectptr.p);
+ EXECUTE_DIRECT(DBTUX, GSN_TUX_BOUND_INFO, signal,
+ TuxBoundInfo::SignalLength + len);
+
jamEntry();
if (req->errorCode != 0) {
jam();
@@ -7822,35 +7777,176 @@ void Dblqh::accScanConfScanLab(Signal* signal)
tcConnectptr.p->errorCode = req->errorCode;
}
}
- scanptr.p->scanState = ScanRecord::WAIT_STORED_PROC_SCAN;
- signal->theData[0] = tcConnectptr.p->tupConnectrec;
- signal->theData[1] = tcConnectptr.p->tableref;
- signal->theData[2] = scanptr.p->scanSchemaVersion;
- signal->theData[3] = ZSTORED_PROC_SCAN;
-
- signal->theData[4] = scanptr.p->scanAiLength;
- sendSignal(tcConnectptr.p->tcTupBlockref,
- GSN_STORED_PROCREQ, signal, 5, JBB);
- signal->theData[0] = tcConnectptr.p->tupConnectrec;
- AttrbufPtr regAttrinbufptr;
- regAttrinbufptr.i = tcConnectptr.p->firstAttrinbuf;
- while (regAttrinbufptr.i != RNIL) {
- ptrCheckGuard(regAttrinbufptr, cattrinbufFileSize, attrbuf);
+ scanptr.p->scanState = ScanRecord::WAIT_STORED_PROC_SCAN;
+ if(scanptr.p->scanStoredProcId == RNIL)
+ {
jam();
- Uint32 dataLen = regAttrinbufptr.p->attrbuf[ZINBUF_DATA_LEN];
- ndbrequire(dataLen != 0);
- // first 3 words already set in STORED_PROCREQ
- MEMCOPY_NO_WORDS(&signal->theData[3],
- &regAttrinbufptr.p->attrbuf[0],
- dataLen);
+ signal->theData[0] = tcConnectptr.p->tupConnectrec;
+ signal->theData[1] = tcConnectptr.p->tableref;
+ signal->theData[2] = scanptr.p->scanSchemaVersion;
+ signal->theData[3] = ZSTORED_PROC_SCAN;
+
+ signal->theData[4] = scanptr.p->scanAiLength;
sendSignal(tcConnectptr.p->tcTupBlockref,
- GSN_ATTRINFO, signal, dataLen + 3, JBB);
- regAttrinbufptr.i = regAttrinbufptr.p->attrbuf[ZINBUF_NEXT];
- }//while
- releaseOprec(signal);
+ GSN_STORED_PROCREQ, signal, 5, JBB);
+
+ signal->theData[0] = tcConnectptr.p->tupConnectrec;
+ AttrbufPtr regAttrinbufptr;
+ Uint32 firstAttr = regAttrinbufptr.i = tcConnectptr.p->firstAttrinbuf;
+ while (regAttrinbufptr.i != RNIL) {
+ ptrCheckGuard(regAttrinbufptr, cattrinbufFileSize, attrbuf);
+ jam();
+ Uint32 dataLen = regAttrinbufptr.p->attrbuf[ZINBUF_DATA_LEN];
+ ndbrequire(dataLen != 0);
+ // first 3 words already set in STORED_PROCREQ
+ MEMCOPY_NO_WORDS(&signal->theData[3],
+ &regAttrinbufptr.p->attrbuf[0],
+ dataLen);
+ sendSignal(tcConnectptr.p->tcTupBlockref,
+ GSN_ATTRINFO, signal, dataLen + 3, JBB);
+ regAttrinbufptr.i = regAttrinbufptr.p->attrbuf[ZINBUF_NEXT];
+ c_no_attrinbuf_recs++;
+ }//while
+
+ /**
+ * Release attr info
+ */
+ if(firstAttr != RNIL)
+ {
+ regAttrinbufptr.p->attrbuf[ZINBUF_NEXT] = cfirstfreeAttrinbuf;
+ cfirstfreeAttrinbuf = firstAttr;
+ tcConnectptr.p->firstAttrinbuf = tcConnectptr.p->lastAttrinbuf = RNIL;
+ }
+ }
+ else
+ {
+ jam();
+ storedProcConfScanLab(signal);
+ }
}//Dblqh::accScanConfScanLab()
+#define print_buf(s,idx,len) {\
+ printf(s); Uint32 t2=len; DatabufPtr t3; t3.i = idx; \
+ while(t3.i != RNIL && t2-- > 0){\
+ ptrCheckGuard(t3, cdatabufFileSize, databuf);\
+ printf("%d ", t3.i); t3.i= t3.p->nextDatabuf;\
+ } printf("\n"); }
+
+Uint32
+Dblqh::copy_bounds(Uint32 * dst, TcConnectionrec* tcPtrP)
+{
+  /**
+   * copy_bounds handles multiple bounds by storing the length of each
+   * specific bound in the upper 16 bits of its first word (the word that
+   * otherwise specifies the bound type); the range number of the bound is
+   * carried in bits 4-15 of the same word.
+   */
+
+ DatabufPtr regDatabufptr;
+ Uint32 left = 4 - tcPtrP->m_offset_current_keybuf; // left in buf
+ Uint32 totalLen = tcPtrP->primKeyLen - 4;
+ regDatabufptr.i = tcPtrP->firstTupkeybuf;
+
+ ndbassert(tcPtrP->primKeyLen >= 4);
+ ndbassert(tcPtrP->m_offset_current_keybuf < 4);
+ ndbassert(!(totalLen == 0 && regDatabufptr.i != RNIL));
+ ndbassert(!(totalLen != 0 && regDatabufptr.i == RNIL));
+
+ if(totalLen)
+ {
+ ptrCheckGuard(regDatabufptr, cdatabufFileSize, databuf);
+ Uint32 sig0 = regDatabufptr.p->data[0];
+ Uint32 sig1 = regDatabufptr.p->data[1];
+ Uint32 sig2 = regDatabufptr.p->data[2];
+ Uint32 sig3 = regDatabufptr.p->data[3];
+
+ switch(left){
+ case 4:
+ * dst++ = sig0;
+ case 3:
+ * dst++ = sig1;
+ case 2:
+ * dst++ = sig2;
+ case 1:
+ * dst++ = sig3;
+ }
+
+ Uint32 first = (* (dst - left)); // First word in range
+
+ // Length of this range
+ Uint8 offset;
+ const Uint32 len = (first >> 16) ? (first >> 16) : totalLen;
+ tcPtrP->m_scan_curr_range_no = (first & 0xFFF0) >> 4;
+ (* (dst - left)) = (first & 0xF); // Remove length & range no
+
+ if(len < left)
+ {
+ offset = len;
+ }
+ else
+ {
+ Databuf * lastP;
+ left = (len - left);
+ regDatabufptr.i = regDatabufptr.p->nextDatabuf;
+
+ while(left >= 4)
+ {
+ left -= 4;
+ lastP = regDatabufptr.p;
+ ptrCheckGuard(regDatabufptr, cdatabufFileSize, databuf);
+ sig0 = regDatabufptr.p->data[0];
+ sig1 = regDatabufptr.p->data[1];
+ sig2 = regDatabufptr.p->data[2];
+ sig3 = regDatabufptr.p->data[3];
+ regDatabufptr.i = regDatabufptr.p->nextDatabuf;
+
+ * dst++ = sig0;
+ * dst++ = sig1;
+ * dst++ = sig2;
+ * dst++ = sig3;
+ }
+
+ if(left > 0)
+ {
+ lastP = regDatabufptr.p;
+ ptrCheckGuard(regDatabufptr, cdatabufFileSize, databuf);
+ sig0 = regDatabufptr.p->data[0];
+ sig1 = regDatabufptr.p->data[1];
+ sig2 = regDatabufptr.p->data[2];
+ sig3 = regDatabufptr.p->data[3];
+ * dst++ = sig0;
+ * dst++ = sig1;
+ * dst++ = sig2;
+ * dst++ = sig3;
+ }
+ else
+ {
+ lastP = regDatabufptr.p;
+ }
+ offset = left & 3;
+ lastP->nextDatabuf = cfirstfreeDatabuf;
+ cfirstfreeDatabuf = tcPtrP->firstTupkeybuf;
+ ndbassert(cfirstfreeDatabuf != RNIL);
+ }
+
+ if(len == totalLen && regDatabufptr.i != RNIL)
+ {
+ regDatabufptr.p->nextDatabuf = cfirstfreeDatabuf;
+ cfirstfreeDatabuf = regDatabufptr.i;
+ tcPtrP->lastTupkeybuf = regDatabufptr.i = RNIL;
+ ndbassert(cfirstfreeDatabuf != RNIL);
+ }
+
+ tcPtrP->m_offset_current_keybuf = offset;
+ tcPtrP->firstTupkeybuf = regDatabufptr.i;
+ tcPtrP->primKeyLen = 4 + totalLen - len;
+
+ return len;
+ }
+ return totalLen;
+}
+
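// A standalone sketch of the header word decoded by copy_bounds() above,
// assuming the layout implied by the code: bound length in the upper 16
// bits, range number in bits 4-15, bound type in the low 4 bits. Names
// are illustrative, not taken from the NDB sources.
#include <cassert>
#include <cstdint>

struct BoundHeader {
  uint32_t length;   // words in this bound; 0 means "rest of the key buffer"
  uint32_t rangeNo;  // 12-bit range number
  uint32_t type;     // 4-bit bound type
};

inline uint32_t packBoundHeader(const BoundHeader& h)
{
  return (h.length << 16) | ((h.rangeNo & 0xFFF) << 4) | (h.type & 0xF);
}

inline BoundHeader unpackBoundHeader(uint32_t first, uint32_t totalLen)
{
  BoundHeader h;
  h.length  = (first >> 16) ? (first >> 16) : totalLen; // same fallback as copy_bounds
  h.rangeNo = (first & 0xFFF0) >> 4;
  h.type    = first & 0xF;
  return h;
}

int main()
{
  const uint32_t word = packBoundHeader({7, 3, 2});
  const BoundHeader h = unpackBoundHeader(word, 42);
  assert(h.length == 7 && h.rangeNo == 3 && h.type == 2);
  return 0;
}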
/* -------------------------------------------------------------------------
* ENTER STORED_PROCCONF WITH
* TC_CONNECTPTR,
@@ -7901,14 +7997,10 @@ void Dblqh::continueFirstScanAfterBlockedLab(Signal* signal)
scanptr.i = tcConnectptr.p->tcScanRec;
c_scanRecordPool.getPtr(scanptr);
scanptr.p->scanState = ScanRecord::WAIT_NEXT_SCAN;
- init_acc_ptr_list(scanptr.p);
signal->theData[0] = scanptr.p->scanAccPtr;
signal->theData[1] = RNIL;
signal->theData[2] = NextScanReq::ZSCAN_NEXT;
- if (! scanptr.p->rangeScan)
- sendSignal(tcConnectptr.p->tcAccBlockref, GSN_NEXT_SCANREQ, signal, 3, JBB);
- else
- sendSignal(tcConnectptr.p->tcTuxBlockref, GSN_NEXT_SCANREQ, signal, 3, JBB);
+ sendSignal(scanptr.p->scanBlockref, GSN_NEXT_SCANREQ, signal, 3, JBB);
return;
}//Dblqh::continueFirstScanAfterBlockedLab()
@@ -7978,10 +8070,8 @@ void Dblqh::continueAfterCheckLcpStopBlocked(Signal* signal)
c_scanRecordPool.getPtr(scanptr);
signal->theData[0] = scanptr.p->scanAccPtr;
signal->theData[1] = AccCheckScan::ZNOT_CHECK_LCP_STOP;
- if (! scanptr.p->rangeScan)
- EXECUTE_DIRECT(DBACC, GSN_ACC_CHECK_SCAN, signal, 2);
- else
- EXECUTE_DIRECT(DBTUX, GSN_ACC_CHECK_SCAN, signal, 2);
+ EXECUTE_DIRECT(refToBlock(scanptr.p->scanBlockref), GSN_ACC_CHECK_SCAN,
+ signal, 2);
}//Dblqh::continueAfterCheckLcpStopBlocked()
/* -------------------------------------------------------------------------
@@ -8029,7 +8119,10 @@ void Dblqh::nextScanConfScanLab(Signal* signal)
if (scanptr.p->m_curr_batch_size_rows > 0) {
jam();
- scanptr.p->scanCompletedStatus = ZTRUE;
+
+ if((tcConnectptr.p->primKeyLen - 4) == 0)
+ scanptr.p->scanCompletedStatus = ZTRUE;
+
scanptr.p->scanState = ScanRecord::WAIT_SCAN_NEXTREQ;
sendScanFragConf(signal, ZFALSE);
return;
@@ -8068,12 +8161,8 @@ void Dblqh::nextScanConfScanLab(Signal* signal)
signal->theData[0] = scanptr.p->scanAccPtr;
signal->theData[1] = AccCheckScan::ZCHECK_LCP_STOP;
- if (! scanptr.p->rangeScan)
- sendSignal(tcConnectptr.p->tcAccBlockref,
- GSN_ACC_CHECK_SCAN, signal, 2, JBB);
- else
- sendSignal(tcConnectptr.p->tcTuxBlockref,
- GSN_ACC_CHECK_SCAN, signal, 2, JBB);
+ sendSignal(scanptr.p->scanBlockref,
+ GSN_ACC_CHECK_SCAN, signal, 2, JBB);
return;
}//if
jam();
@@ -8084,22 +8173,6 @@ void Dblqh::nextScanConfScanLab(Signal* signal)
scanptr.p->scanLocalref[0] = nextScanConf->localKey[0];
scanptr.p->scanLocalref[1] = nextScanConf->localKey[1];
scanptr.p->scanLocalFragid = nextScanConf->fragId;
- if (scanptr.p->scanKeyinfoFlag) {
- jam();
- tcConnectptr.p->primKeyLen = nextScanConf->keyLength;
- seizeTupkeybuf(signal);
- databufptr.p->data[0] = nextScanConf->key[0];
- databufptr.p->data[1] = nextScanConf->key[1];
- databufptr.p->data[2] = nextScanConf->key[2];
- databufptr.p->data[3] = nextScanConf->key[3];
- if (nextScanConf->keyLength > 4) {
- jam();
- tcConnectptr.p->save1 = 4;
- scanptr.p->scanState = ScanRecord::WAIT_SCAN_KEYINFO;
- return;
- }//if
- }//if
- jam();
nextScanConfLoopLab(signal);
}//Dblqh::nextScanConfScanLab()
@@ -8111,7 +8184,6 @@ void Dblqh::nextScanConfLoopLab(Signal* signal)
if (scanptr.p->scanCompletedStatus == ZTRUE) {
jam();
releaseActiveFrag(signal);
- releaseOprec(signal);
if ((scanptr.p->scanLockHold == ZTRUE) &&
(scanptr.p->m_curr_batch_size_rows > 0)) {
jam();
@@ -8133,13 +8205,7 @@ void Dblqh::nextScanConfLoopLab(Signal* signal)
ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
if (! scanptr.p->rangeScan) {
tableRef = tcConnectptr.p->tableref;
- if (fragptr.p->fragId == scanptr.p->scanLocalFragid) {
- jam();
- tupFragPtr = fragptr.p->tupFragptr[0];
- } else {
- jam();
- tupFragPtr = fragptr.p->tupFragptr[1];
- }//if
+ tupFragPtr = fragptr.p->tupFragptr[scanptr.p->scanLocalFragid & 1];
} else {
jam();
// for ordered index use primary table
@@ -8147,13 +8213,7 @@ void Dblqh::nextScanConfLoopLab(Signal* signal)
tFragPtr.i = fragptr.p->tableFragptr;
ptrCheckGuard(tFragPtr, cfragrecFileSize, fragrecord);
tableRef = tFragPtr.p->tabRef;
- if (tFragPtr.p->fragId == scanptr.p->scanLocalFragid) {
- jam();
- tupFragPtr = tFragPtr.p->tupFragptr[0];
- } else {
- jam();
- tupFragPtr = tFragPtr.p->tupFragptr[1];
- }//if
+ tupFragPtr = tFragPtr.p->tupFragptr[scanptr.p->scanLocalFragid & 1];
}
{
jam();
@@ -8188,33 +8248,46 @@ void Dblqh::nextScanConfLoopLab(Signal* signal)
* -------------------------------------------------------------------------
* PRECONDITION: SCAN_STATE = WAIT_SCAN_KEYINFO
* ------------------------------------------------------------------------- */
-bool Dblqh::keyinfoLab(Signal* signal, Uint32* dataPtr, Uint32 length)
+void
+Dblqh::keyinfoLab(const Uint32 * src, const Uint32 * end)
{
- tcConnectptr.i = scanptr.p->scanTcrec;
- ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
- Uint32 index = 0;
do {
jam();
- seizeTupkeybuf(signal);
- databufptr.p->data[0] = dataPtr[index];
- databufptr.p->data[1] = dataPtr[index + 1];
- databufptr.p->data[2] = dataPtr[index + 2];
- databufptr.p->data[3] = dataPtr[index + 3];
- index += 4;
- tcConnectptr.p->save1 = tcConnectptr.p->save1 + 4;
- if (tcConnectptr.p->save1 >= tcConnectptr.p->primKeyLen) {
- jam();
- return true;
- }//if
- if (index >= length) {
- jam();
- return false;
- }//if
- } while (index < 20);
- ndbrequire(false);
- return false;
+ seizeTupkeybuf(0);
+ databufptr.p->data[0] = * src ++;
+ databufptr.p->data[1] = * src ++;
+ databufptr.p->data[2] = * src ++;
+ databufptr.p->data[3] = * src ++;
+ } while (src < end);
}//Dblqh::keyinfoLab()
+Uint32
+Dblqh::readPrimaryKeys(ScanRecord *scanP, TcConnectionrec *tcConP, Uint32 *dst)
+{
+ Uint32 tableId = tcConP->tableref;
+ Uint32 fragId = scanP->scanLocalFragid;
+ Uint32 fragPageId = scanP->scanLocalref[0];
+ Uint32 pageIndex = scanP->scanLocalref[1];
+
+ if(scanP->rangeScan)
+ {
+ jam();
+ // for ordered index use primary table
+ FragrecordPtr tFragPtr;
+ tFragPtr.i = fragptr.p->tableFragptr;
+ ptrCheckGuard(tFragPtr, cfragrecFileSize, fragrecord);
+ tableId = tFragPtr.p->tabRef;
+ }
+
+ int ret = c_tup->accReadPk(tableId, fragId, fragPageId, pageIndex, dst, false);
+ if(0)
+    ndbout_c("readPrimaryKeys(table: %d fragment: %d [ %d %d ]) -> %d",
+ tableId, fragId, fragPageId, pageIndex, ret);
+ ndbassert(ret > 0);
+
+ return ret;
+}
+
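// A standalone sketch of the buffering pattern keyinfoLab() now uses: the
// key is kept as a chain of fixed 4-word buffers and the copy loop walks
// [src, end) in 4-word steps, the caller having padded the key to a
// multiple of 4 words. The container is an assumption for illustration;
// the kernel uses its own Databuf pool.
#include <cassert>
#include <cstdint>
#include <vector>

struct KeyBuf { uint32_t data[4]; };

void appendKeyWords(std::vector<KeyBuf>& chain,
                    const uint32_t* src, const uint32_t* end)
{
  do {
    KeyBuf b;
    b.data[0] = *src++;
    b.data[1] = *src++;
    b.data[2] = *src++;
    b.data[3] = *src++;
    chain.push_back(b);
  } while (src < end);
}

int main()
{
  const uint32_t key[8] = {1, 2, 3, 4, 5, 6, 7, 8};
  std::vector<KeyBuf> chain;
  appendKeyWords(chain, key, key + 8);
  assert(chain.size() == 2 && chain[1].data[3] == 8);
  return 0;
}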
/* -------------------------------------------------------------------------
* ENTER TUPKEYCONF
* -------------------------------------------------------------------------
@@ -8234,7 +8307,6 @@ void Dblqh::scanTupkeyConfLab(Signal* signal)
/* ---------------------------------------------------------------------
* STOP THE SCAN PROCESS IF THIS HAS BEEN REQUESTED.
* --------------------------------------------------------------------- */
- releaseOprec(signal);
if ((scanptr.p->scanLockHold == ZTRUE) &&
(scanptr.p->m_curr_batch_size_rows > 0)) {
jam();
@@ -8248,10 +8320,8 @@ void Dblqh::scanTupkeyConfLab(Signal* signal)
}//if
if (scanptr.p->scanKeyinfoFlag) {
jam();
- sendKeyinfo20(signal, scanptr.p, tcConnectptr.p);
- releaseOprec(signal);
-
- tdata4 += tcConnectptr.p->primKeyLen;// Inform API about keyinfo len aswell
+    // Inform API about keyinfo len as well
+ tdata4 += sendKeyinfo20(signal, scanptr.p, tcConnectptr.p);
}//if
ndbrequire(scanptr.p->m_curr_batch_size_rows < MAX_PARALLEL_OP_PER_SCAN);
scanptr.p->m_curr_batch_size_bytes+= tdata4;
@@ -8335,10 +8405,7 @@ void Dblqh::continueScanAfterBlockedLab(Signal* signal)
signal->theData[0] = scanptr.p->scanAccPtr;
signal->theData[1] = accOpPtr;
signal->theData[2] = scanptr.p->scanFlag;
- if (! scanptr.p->rangeScan)
- sendSignal(tcConnectptr.p->tcAccBlockref, GSN_NEXT_SCANREQ, signal, 3,JBB);
- else
- sendSignal(tcConnectptr.p->tcTuxBlockref, GSN_NEXT_SCANREQ, signal, 3,JBB);
+ sendSignal(scanptr.p->scanBlockref, GSN_NEXT_SCANREQ, signal, 3, JBB);
}//Dblqh::continueScanAfterBlockedLab()
/* -------------------------------------------------------------------------
@@ -8353,7 +8420,6 @@ void Dblqh::scanTupkeyRefLab(Signal* signal)
tcConnectptr.p->transactionState = TcConnectionrec::SCAN_STATE_USED;
scanptr.i = tcConnectptr.p->tcScanRec;
releaseActiveFrag(signal);
- releaseOprec(signal);
c_scanRecordPool.getPtr(scanptr);
if (scanptr.p->scanCompletedStatus == ZTRUE) {
/* ---------------------------------------------------------------------
@@ -8461,10 +8527,7 @@ void Dblqh::continueCloseScanAfterBlockedLab(Signal* signal)
signal->theData[0] = scanptr.p->scanAccPtr;
signal->theData[1] = RNIL;
signal->theData[2] = NextScanReq::ZSCAN_CLOSE;
- if (! scanptr.p->rangeScan)
- sendSignal(tcConnectptr.p->tcAccBlockref, GSN_NEXT_SCANREQ, signal, 3, JBB);
- else
- sendSignal(tcConnectptr.p->tcTuxBlockref, GSN_NEXT_SCANREQ, signal, 3, JBB);
+ sendSignal(scanptr.p->scanBlockref, GSN_NEXT_SCANREQ, signal, 3, JBB);
}//Dblqh::continueCloseScanAfterBlockedLab()
/* -------------------------------------------------------------------------
@@ -8475,8 +8538,18 @@ void Dblqh::continueCloseScanAfterBlockedLab(Signal* signal)
void Dblqh::accScanCloseConfLab(Signal* signal)
{
tcConnectptr.i = scanptr.p->scanTcrec;
- scanptr.p->scanState = ScanRecord::WAIT_DELETE_STORED_PROC_ID_SCAN;
ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec);
+
+ if((tcConnectptr.p->primKeyLen - 4) > 0 &&
+ scanptr.p->scanCompletedStatus != ZTRUE)
+ {
+ jam();
+ releaseActiveFrag(signal);
+ continueAfterReceivingAllAiLab(signal);
+ return;
+ }
+
+ scanptr.p->scanState = ScanRecord::WAIT_DELETE_STORED_PROC_ID_SCAN;
signal->theData[0] = tcConnectptr.p->tupConnectrec;
signal->theData[1] = tcConnectptr.p->tableref;
signal->theData[2] = scanptr.p->scanSchemaVersion;
@@ -8538,7 +8611,9 @@ Uint32 Dblqh::initScanrec(const ScanFragReq* scanFragReq)
const Uint32 scanLockHold = ScanFragReq::getHoldLockFlag(reqinfo);
const Uint32 keyinfo = ScanFragReq::getKeyinfoFlag(reqinfo);
const Uint32 readCommitted = ScanFragReq::getReadCommittedFlag(reqinfo);
- const Uint32 idx = ScanFragReq::getRangeScanFlag(reqinfo);
+ const Uint32 rangeScan = ScanFragReq::getRangeScanFlag(reqinfo);
+ const Uint32 descending = ScanFragReq::getDescendingFlag(reqinfo);
+ const Uint32 tupScan = ScanFragReq::getTupScanFlag(reqinfo);
const Uint32 attrLen = ScanFragReq::getAttrLen(reqinfo);
const Uint32 scanPrio = ScanFragReq::getScanPrio(reqinfo);
@@ -8556,10 +8631,19 @@ Uint32 Dblqh::initScanrec(const ScanFragReq* scanFragReq)
scanptr.p->m_max_batch_size_rows = max_rows;
scanptr.p->m_max_batch_size_bytes = max_bytes;
+ if (! rangeScan && ! tupScan)
+ scanptr.p->scanBlockref = tcConnectptr.p->tcAccBlockref;
+ else if (! tupScan)
+ scanptr.p->scanBlockref = tcConnectptr.p->tcTuxBlockref;
+ else
+ scanptr.p->scanBlockref = tcConnectptr.p->tcTupBlockref;
+
scanptr.p->scanErrorCounter = 0;
scanptr.p->scanLockMode = scanLockMode;
scanptr.p->readCommitted = readCommitted;
- scanptr.p->rangeScan = idx;
+ scanptr.p->rangeScan = rangeScan;
+ scanptr.p->descending = descending;
+ scanptr.p->tupScan = tupScan;
scanptr.p->scanState = ScanRecord::SCAN_FREE;
scanptr.p->scanFlag = ZFALSE;
scanptr.p->scanLocalref[0] = 0;
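// A sketch of the selection above, as a standalone helper: the scan record
// now stores a single scan block reference chosen once from the
// (rangeScan, tupScan) flags, so the later NEXT_SCANREQ/ACC_CHECK_SCAN
// sends need no per-call branching. The enum is an illustrative stand-in
// for the real block references.
#include <cassert>
#include <cstdint>

enum class ScanBlock : uint8_t { ACC, TUX, TUP };

ScanBlock pickScanBlock(bool rangeScan, bool tupScan)
{
  if (!rangeScan && !tupScan) return ScanBlock::ACC; // plain table scan
  if (!tupScan)               return ScanBlock::TUX; // ordered index (range) scan
  return ScanBlock::TUP;                             // full tuple scan
}

int main()
{
  assert(pickScanBlock(false, false) == ScanBlock::ACC);
  assert(pickScanBlock(true,  false) == ScanBlock::TUX);
  assert(pickScanBlock(false, true)  == ScanBlock::TUP);
  return 0;
}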
@@ -8569,6 +8653,7 @@ Uint32 Dblqh::initScanrec(const ScanFragReq* scanFragReq)
scanptr.p->scanNumber = ~0;
scanptr.p->scanApiOpPtr = scanFragReq->clientOpPtr;
scanptr.p->m_last_row = 0;
+ scanptr.p->scanStoredProcId = RNIL;
if (max_rows == 0 || (max_bytes > 0 && max_rows > max_bytes)){
jam();
@@ -8590,8 +8675,8 @@ Uint32 Dblqh::initScanrec(const ScanFragReq* scanFragReq)
* !idx uses 1 - (MAX_PARALLEL_SCANS_PER_FRAG - 1) = 1-11
* idx uses from MAX_PARALLEL_SCANS_PER_FRAG - MAX = 12-42)
*/
- Uint32 start = (idx ? MAX_PARALLEL_SCANS_PER_FRAG : 1 );
- Uint32 stop = (idx ? MAX_PARALLEL_INDEX_SCANS_PER_FRAG : MAX_PARALLEL_SCANS_PER_FRAG - 1);
+ Uint32 start = (rangeScan || tupScan ? MAX_PARALLEL_SCANS_PER_FRAG : 1 );
+ Uint32 stop = (rangeScan || tupScan ? MAX_PARALLEL_INDEX_SCANS_PER_FRAG : MAX_PARALLEL_SCANS_PER_FRAG - 1);
stop += start;
Uint32 free = tFragPtr.p->m_scanNumberMask.find(start);
@@ -8627,7 +8712,8 @@ Uint32 Dblqh::initScanrec(const ScanFragReq* scanFragReq)
#ifdef TRACE_SCAN_TAKEOVER
ndbout_c("adding (%d %d) table: %d fragId: %d frag.i: %d tableFragptr: %d",
scanptr.p->scanNumber, scanptr.p->fragPtrI,
- tabptr.i, scanFragReq->fragmentNo, fragptr.i, fragptr.p->tableFragptr);
+ tabptr.i, scanFragReq->fragmentNoKeyLen & 0xFFFF,
+ fragptr.i, fragptr.p->tableFragptr);
#endif
c_scanTakeOverHash.add(scanptr);
}
@@ -8663,6 +8749,8 @@ void Dblqh::initScanTc(Signal* signal,
tcConnectptr.p->operation = ZREAD;
tcConnectptr.p->listState = TcConnectionrec::NOT_IN_LIST;
tcConnectptr.p->commitAckMarker = RNIL;
+ tcConnectptr.p->m_offset_current_keybuf = 0;
+ tcConnectptr.p->m_scan_curr_range_no = 0;
tabptr.p->usageCount++;
}//Dblqh::initScanTc()
@@ -8777,23 +8865,17 @@ void Dblqh::releaseScanrec(Signal* signal)
* ------- SEND KEYINFO20 TO API -------
*
* ------------------------------------------------------------------------ */
-void Dblqh::sendKeyinfo20(Signal* signal,
- ScanRecord * scanP,
- TcConnectionrec * tcConP)
+Uint32 Dblqh::sendKeyinfo20(Signal* signal,
+ ScanRecord * scanP,
+ TcConnectionrec * tcConP)
{
ndbrequire(scanP->m_curr_batch_size_rows < MAX_PARALLEL_OP_PER_SCAN);
KeyInfo20 * keyInfo = (KeyInfo20 *)&signal->theData[0];
- DatabufPtr TdataBuf;
- TdataBuf.i = tcConP->firstTupkeybuf;
- Uint32 keyLen = tcConP->primKeyLen;
- const Uint32 dataBufSz = cdatabufFileSize;
-
/**
* Note that this code requires signal->theData to be big enough for
   * an entire key
*/
- ndbrequire(keyLen * 4 <= sizeof(signal->theData));
const BlockReference ref = scanP->scanApiBlockref;
const Uint32 scanOp = scanP->m_curr_batch_size_rows;
const Uint32 nodeId = refToNode(ref);
@@ -8806,24 +8888,12 @@ void Dblqh::sendKeyinfo20(Signal* signal,
Uint32 * dst = keyInfo->keyData;
dst += nodeId == getOwnNodeId() ? 0 : KeyInfo20::DataLength;
- /**
- * Copy keydata from data buffer into signal
- *
- */
- for(Uint32 i = 0; i < keyLen; i += 4){
- ptrCheckGuard(TdataBuf, dataBufSz, databuf);
- * dst++ = TdataBuf.p->data[0];
- * dst++ = TdataBuf.p->data[1];
- * dst++ = TdataBuf.p->data[2];
- * dst++ = TdataBuf.p->data[3];
- TdataBuf.i = TdataBuf.p->nextDatabuf;
- }
-
+ Uint32 keyLen = readPrimaryKeys(scanP, tcConP, dst);
+ Uint32 fragId = tcConP->fragmentid;
keyInfo->clientOpPtr = scanP->scanApiOpPtr;
keyInfo->keyLen = keyLen;
- keyInfo->scanInfo_Node = KeyInfo20::setScanInfo(scanOp,
- scanP->scanNumber)+
- (getOwnNodeId() << 20);
+ keyInfo->scanInfo_Node =
+ KeyInfo20::setScanInfo(scanOp, scanP->scanNumber) + (fragId << 20);
keyInfo->transId1 = tcConP->transid[0];
keyInfo->transId2 = tcConP->transid[1];
@@ -8846,7 +8916,7 @@ void Dblqh::sendKeyinfo20(Signal* signal,
MEMCOPY_NO_WORDS(keyInfo->keyData, src, keyLen);
sendSignal(ref, GSN_KEYINFO20, signal,
KeyInfo20::HeaderLength+keyLen, JBB);
- return;
+ return keyLen;
}
LinearSectionPtr ptr[3];
@@ -8854,13 +8924,13 @@ void Dblqh::sendKeyinfo20(Signal* signal,
ptr[0].sz = keyLen;
sendSignal(ref, GSN_KEYINFO20, signal, KeyInfo20::HeaderLength,
JBB, ptr, 1);
- return;
+ return keyLen;
}
EXECUTE_DIRECT(refToBlock(ref), GSN_KEYINFO20, signal,
KeyInfo20::HeaderLength + keyLen);
jamEntry();
- return;
+ return keyLen;
}
/**
@@ -8886,7 +8956,7 @@ void Dblqh::sendKeyinfo20(Signal* signal,
keyInfo->keyData[keyLen] = ref;
sendSignal(routeBlockref, GSN_KEYINFO20_R, signal,
KeyInfo20::HeaderLength+keyLen+1, JBB);
- return;
+ return keyLen;
}
keyInfo->keyData[0] = ref;
@@ -8895,7 +8965,7 @@ void Dblqh::sendKeyinfo20(Signal* signal,
ptr[0].sz = keyLen;
sendSignal(routeBlockref, GSN_KEYINFO20_R, signal,
KeyInfo20::HeaderLength+1, JBB, ptr, 1);
- return;
+ return keyLen;
}
/* ------------------------------------------------------------------------
@@ -8944,44 +9014,17 @@ void Dblqh::sendScanFragConf(Signal* signal, Uint32 scanCompleted)
/* FRAGMENT TO A NEW REPLICA OF THE FRAGMENT. IT DOES ALSO SHUT DOWN ALL */
/* CONNECTIONS TO THE FAILED NODE. */
/*---------------------------------------------------------------------------*/
-void Dblqh::calculateHash(Signal* signal)
-{
- DatabufPtr locDatabufptr;
- UintR Ti;
- UintR Tdata0;
- UintR Tdata1;
- UintR Tdata2;
- UintR Tdata3;
- UintR* Tdata32;
- Uint64 Tdata[512];
-
- Tdata32 = (UintR*)&Tdata[0];
-
- Tdata0 = tcConnectptr.p->tupkeyData[0];
- Tdata1 = tcConnectptr.p->tupkeyData[1];
- Tdata2 = tcConnectptr.p->tupkeyData[2];
- Tdata3 = tcConnectptr.p->tupkeyData[3];
- Tdata32[0] = Tdata0;
- Tdata32[1] = Tdata1;
- Tdata32[2] = Tdata2;
- Tdata32[3] = Tdata3;
- locDatabufptr.i = tcConnectptr.p->firstTupkeybuf;
- Ti = 4;
- while (locDatabufptr.i != RNIL) {
- ptrCheckGuard(locDatabufptr, cdatabufFileSize, databuf);
- Tdata0 = locDatabufptr.p->data[0];
- Tdata1 = locDatabufptr.p->data[1];
- Tdata2 = locDatabufptr.p->data[2];
- Tdata3 = locDatabufptr.p->data[3];
- Tdata32[Ti ] = Tdata0;
- Tdata32[Ti + 1] = Tdata1;
- Tdata32[Ti + 2] = Tdata2;
- Tdata32[Ti + 3] = Tdata3;
- locDatabufptr.i = locDatabufptr.p->nextDatabuf;
- Ti += 4;
- }//while
- tcConnectptr.p->hashValue =
- md5_hash((Uint64*)&Tdata32[0], (UintR)tcConnectptr.p->primKeyLen);
+Uint32
+Dblqh::calculateHash(Uint32 tableId, const Uint32* src)
+{
+ jam();
+ Uint64 Tmp[(MAX_KEY_SIZE_IN_WORDS*MAX_XFRM_MULTIPLY) >> 1];
+ Uint32 keyPartLen[MAX_ATTRIBUTES_IN_INDEX];
+ Uint32 keyLen = xfrm_key(tableId, src, (Uint32*)Tmp, sizeof(Tmp) >> 2,
+ keyPartLen);
+ ndbrequire(keyLen);
+
+ return md5_hash(Tmp, keyLen);
}//Dblqh::calculateHash()
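// A sketch matching the shape of calculateHash() above: the key is first
// run through a normalizing transform (the collation-aware xfrm step in
// the kernel) into a scratch buffer, and the hash is taken over the
// transformed words. The transform and hash below are trivial stand-ins
// that only show the structure, not the NDB implementations.
#include <cstddef>
#include <cstdint>
#include <vector>

size_t normalizeKey(const uint32_t* src, size_t len, uint32_t* dst)
{
  for (size_t i = 0; i < len; i++)
    dst[i] = src[i] | 0x20202020;      // crude case folding, stand-in for xfrm_key
  return len;                          // transformed length in words
}

uint32_t hashWords(const uint32_t* words, size_t len)
{
  uint32_t h = 2166136261u;            // FNV-1a, stand-in for md5_hash
  for (size_t i = 0; i < len; i++) {
    h ^= words[i];
    h *= 16777619u;
  }
  return h;
}

uint32_t hashNormalizedKey(const uint32_t* key, size_t keyLen)
{
  std::vector<uint32_t> scratch(keyLen); // the kernel uses a fixed max-size buffer
  const size_t n = normalizeKey(key, keyLen, scratch.data());
  return hashWords(scratch.data(), n);
}

int main()
{
  const uint32_t key[3] = {0x41424344, 0x45464748, 0x494A0000};
  return hashNormalizedKey(key, 3) == hashNormalizedKey(key, 3) ? 0 : 1;
}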
/* *************************************** */
@@ -9033,6 +9076,7 @@ void Dblqh::execCOPY_FRAGREQ(Signal* signal)
/* ------------------------------------------------------------------------- */
scanptr.p->m_max_batch_size_rows = 0;
scanptr.p->rangeScan = 0;
+ scanptr.p->tupScan = 0;
seizeTcrec();
/**
@@ -9051,6 +9095,7 @@ void Dblqh::execCOPY_FRAGREQ(Signal* signal)
scanptr.p->scanKeyinfoFlag = 0; // Don't put into hash
scanptr.p->fragPtrI = fragptr.i;
fragptr.p->m_scanNumberMask.clear(NR_ScanNo);
+ scanptr.p->scanBlockref = DBACC_REF;
initScanTc(signal,
0,
@@ -9071,7 +9116,6 @@ void Dblqh::execCOPY_FRAGREQ(Signal* signal)
req->fragmentNo = fragId;
req->requestInfo = 0;
AccScanReq::setLockMode(req->requestInfo, 0);
- AccScanReq::setKeyinfoFlag(req->requestInfo, 1);
AccScanReq::setReadCommittedFlag(req->requestInfo, 0);
req->transId1 = tcConnectptr.p->transid[0];
req->transId2 = tcConnectptr.p->transid[1];
@@ -9224,12 +9268,6 @@ void Dblqh::nextScanConfCopyLab(Signal* signal)
set_acc_ptr_in_scan_record(scanptr.p, 0, nextScanConf->accOperationPtr);
initCopyTc(signal);
- if (tcConnectptr.p->primKeyLen > 4) {
- jam();
- tcConnectptr.p->save1 = 4;
- scanptr.p->scanState = ScanRecord::WAIT_COPY_KEYINFO;
- return;
- }//if
copySendTupkeyReqLab(signal);
return;
}//Dblqh::nextScanConfCopyLab()
@@ -9245,13 +9283,7 @@ void Dblqh::copySendTupkeyReqLab(Signal* signal)
scanptr.p->scanState = ScanRecord::WAIT_TUPKEY_COPY;
fragptr.i = tcConnectptr.p->fragmentptr;
ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
- if (fragptr.p->fragId == scanptr.p->scanLocalFragid) {
- jam();
- tupFragPtr = fragptr.p->tupFragptr[0];
- } else {
- jam();
- tupFragPtr = fragptr.p->tupFragptr[1];
- }//if
+ tupFragPtr = fragptr.p->tupFragptr[scanptr.p->scanLocalFragid & 1];
{
TupKeyReq * const tupKeyReq = (TupKeyReq *)signal->getDataPtrSend();
@@ -9326,9 +9358,10 @@ void Dblqh::copyTupkeyConfLab(Signal* signal)
const TupKeyConf * const tupKeyConf = (TupKeyConf *)signal->getDataPtr();
UintR readLength = tupKeyConf->readLength;
-
+ Uint32 tableId = tcConnectptr.p->tableref;
scanptr.i = tcConnectptr.p->tcScanRec;
c_scanRecordPool.getPtr(scanptr);
+ ScanRecord* scanP = scanptr.p;
releaseActiveFrag(signal);
if (tcConnectptr.p->errorCode != 0) {
jam();
@@ -9343,9 +9376,30 @@ void Dblqh::copyTupkeyConfLab(Signal* signal)
closeCopyLab(signal);
return;
}//if
+ TcConnectionrec * tcConP = tcConnectptr.p;
tcConnectptr.p->totSendlenAi = readLength;
tcConnectptr.p->connectState = TcConnectionrec::COPY_CONNECTED;
- calculateHash(signal);
+
+  // Read primary keys (previously received here via scan keyinfo)
+ Uint32* tmp = signal->getDataPtrSend()+24;
+ Uint32 len= tcConnectptr.p->primKeyLen = readPrimaryKeys(scanP, tcConP, tmp);
+
+  // Calculate hash (no need to linearize the key)
+ if (g_key_descriptor_pool.getPtr(tableId)->hasCharAttr)
+ {
+ tcConnectptr.p->hashValue = calculateHash(tableId, tmp);
+ }
+ else
+ {
+ tcConnectptr.p->hashValue = md5_hash((Uint64*)tmp, len);
+ }
+
+ // Move into databuffer to make packLqhkeyreqLab happy
+ memcpy(tcConP->tupkeyData, tmp, 4*4);
+ if(len > 4)
+ keyinfoLab(tmp+4, tmp + len);
+ LqhKeyReq::setKeyLen(tcConP->reqinfo, len);
+
/*---------------------------------------------------------------------------*/
// To avoid using up to many operation records in ACC we will increase the
// constant to ensure that we never send more than 40 records at a time.
@@ -9356,7 +9410,7 @@ void Dblqh::copyTupkeyConfLab(Signal* signal)
// records to ensure that node recovery does not fail because of simultaneous
// scanning.
/*---------------------------------------------------------------------------*/
- UintR TnoOfWords = readLength + tcConnectptr.p->primKeyLen;
+ UintR TnoOfWords = readLength + len;
TnoOfWords = TnoOfWords + MAGIC_CONSTANT;
TnoOfWords = TnoOfWords + (TnoOfWords >> 2);
@@ -9678,7 +9732,6 @@ void Dblqh::closeCopyRequestLab(Signal* signal)
scanptr.p->scanErrorCounter++;
switch (scanptr.p->scanState) {
case ScanRecord::WAIT_TUPKEY_COPY:
- case ScanRecord::WAIT_COPY_KEYINFO:
case ScanRecord::WAIT_NEXT_SCAN_COPY:
jam();
/*---------------------------------------------------------------------------*/
@@ -9909,11 +9962,6 @@ void Dblqh::execCOPY_STATEREQ(Signal* signal)
void Dblqh::initCopyTc(Signal* signal)
{
const NextScanConf * const nextScanConf = (NextScanConf *)&signal->theData[0];
- tcConnectptr.p->primKeyLen = nextScanConf->keyLength;
- tcConnectptr.p->tupkeyData[0] = nextScanConf->key[0];
- tcConnectptr.p->tupkeyData[1] = nextScanConf->key[1];
- tcConnectptr.p->tupkeyData[2] = nextScanConf->key[2];
- tcConnectptr.p->tupkeyData[3] = nextScanConf->key[3];
scanptr.p->scanLocalref[0] = nextScanConf->localKey[0];
scanptr.p->scanLocalref[1] = nextScanConf->localKey[1];
scanptr.p->scanLocalFragid = nextScanConf->fragId;
@@ -9922,7 +9970,6 @@ void Dblqh::initCopyTc(Signal* signal)
tcConnectptr.p->opExec = 0; /* NOT INTERPRETED MODE */
tcConnectptr.p->schemaVersion = scanptr.p->scanSchemaVersion;
Uint32 reqinfo = 0;
- LqhKeyReq::setKeyLen(reqinfo, nextScanConf->keyLength);
LqhKeyReq::setLockType(reqinfo, ZINSERT);
LqhKeyReq::setDirtyFlag(reqinfo, 1);
LqhKeyReq::setSimpleFlag(reqinfo, 1);
@@ -12462,6 +12509,22 @@ void Dblqh::lastWriteInFileLab(Signal* signal)
void Dblqh::writePageZeroLab(Signal* signal)
{
+ if (false && logPartPtr.p->logPartState == LogPartRecord::FILE_CHANGE_PROBLEM)
+ {
+ if (logPartPtr.p->firstLogQueue == RNIL)
+ {
+ jam();
+ logPartPtr.p->logPartState = LogPartRecord::IDLE;
+ ndbout_c("resetting logPartState to IDLE");
+ }
+ else
+ {
+ jam();
+ logPartPtr.p->logPartState = LogPartRecord::ACTIVE;
+ ndbout_c("resetting logPartState to ACTIVE");
+ }
+ }
+
logFilePtr.p->fileChangeState = LogFileRecord::NOT_ONGOING;
/*---------------------------------------------------------------------------*/
/* IT COULD HAVE ARRIVED PAGE WRITES TO THE CURRENT FILE WHILE WE WERE */
@@ -13425,7 +13488,6 @@ void Dblqh::execSR_FRAGIDCONF(Signal* signal)
ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord);
fragptr.p->accFragptr[0] = srFragidConf->fragPtr[0];
fragptr.p->accFragptr[1] = srFragidConf->fragPtr[1];
- fragptr.p->hashCheckBit = srFragidConf->hashCheckBit;
Uint32 noLocFrag = srFragidConf->noLocFrag;
ndbrequire(noLocFrag == 2);
Uint32 fragid[2];
@@ -15528,6 +15590,7 @@ void Dblqh::warningHandlerLab(Signal* signal)
void Dblqh::systemErrorLab(Signal* signal)
{
+ systemError(signal);
progError(0, 0);
/*************************************************************************>*/
/* WE WANT TO INVOKE AN IMMEDIATE ERROR HERE SO WE GET THAT BY */
@@ -18402,8 +18465,65 @@ Dblqh::execDUMP_STATE_ORD(Signal* signal)
return;
}
+ Uint32 arg= dumpState->args[0];
+ if(arg == 2304 || arg == 2305)
+ {
+ jam();
+ Uint32 i;
+ GcpRecordPtr gcp; gcp.i = RNIL;
+ for(i = 0; i<4; i++)
+ {
+ logPartPtr.i = i;
+ ptrCheckGuard(logPartPtr, clogPartFileSize, logPartRecord);
+ ndbout_c("LP %d state: %d WW_Gci: %d gcprec: %d flq: %d currfile: %d tailFileNo: %d logTailMbyte: %d",
+ i,
+ logPartPtr.p->logPartState,
+ logPartPtr.p->waitWriteGciLog,
+ logPartPtr.p->gcprec,
+ logPartPtr.p->firstLogQueue,
+ logPartPtr.p->currentLogfile,
+ logPartPtr.p->logTailFileNo,
+ logPartPtr.p->logTailMbyte);
+
+ if(gcp.i == RNIL && logPartPtr.p->gcprec != RNIL)
+ gcp.i = logPartPtr.p->gcprec;
+ LogFileRecordPtr logFilePtr;
+ Uint32 first= logFilePtr.i= logPartPtr.p->firstLogfile;
+ do
+ {
+ ptrCheckGuard(logFilePtr, clogFileFileSize, logFileRecord);
+      ndbout_c("  file %d(%d) FileChangeState: %d logFileStatus: %d currentMbyte: %d currentFilepage: %d",
+ logFilePtr.p->fileNo,
+ logFilePtr.i,
+ logFilePtr.p->fileChangeState,
+ logFilePtr.p->logFileStatus,
+ logFilePtr.p->currentMbyte,
+ logFilePtr.p->currentFilepage);
+ logFilePtr.i = logFilePtr.p->nextLogFile;
+ } while(logFilePtr.i != first);
+ }
+
+ if(gcp.i != RNIL)
+ {
+ ptrCheckGuard(gcp, cgcprecFileSize, gcpRecord);
+ for(i = 0; i<4; i++)
+ {
+ ndbout_c(" GCP %d file: %d state: %d sync: %d page: %d word: %d",
+ i, gcp.p->gcpFilePtr[i], gcp.p->gcpLogPartState[i],
+ gcp.p->gcpSyncReady[i],
+ gcp.p->gcpPageNo[i],
+ gcp.p->gcpWordNo[i]);
+ }
+ }
+ if(arg== 2305)
+ {
+ progError(__LINE__, ERR_SYSTEM_ERROR,
+ "Shutting down node due to failed handling of GCP_SAVEREQ");
+
+ }
+ }
}//Dblqh::execDUMP_STATE_ORD()
void Dblqh::execSET_VAR_REQ(Signal* signal)
diff --git a/ndb/src/kernel/blocks/dbtc/Dbtc.hpp b/ndb/src/kernel/blocks/dbtc/Dbtc.hpp
index 61afef30b43..bf9f421a0e3 100644
--- a/ndb/src/kernel/blocks/dbtc/Dbtc.hpp
+++ b/ndb/src/kernel/blocks/dbtc/Dbtc.hpp
@@ -561,7 +561,7 @@ public:
Uint32 expectedTransIdAI;
AttributeBuffer transIdAI; // For accumulating TransId_AI
- TcIndxReq tcIndxReq;
+ TcKeyReq tcIndxReq;
UintR connectionIndex;
UintR indexReadTcConnect; //
@@ -871,11 +871,11 @@ public:
UintR hashValue; /* THE HASH VALUE USED TO LOCATE FRAGMENT */
Uint8 distributionKeyIndicator;
- Uint8 distributionGroupIndicator;
- Uint8 distributionGroupType;
+ Uint8 m_special_hash; // collation or distribution key
+ Uint8 unused2;
Uint8 lenAiInTckeyreq; /* LENGTH OF ATTRIBUTE INFORMATION IN TCKEYREQ */
- Uint8 distributionKey;
+ Uint8 fragmentDistributionKey; /* DIH generation no */
/**
* EXECUTION MODE OF OPERATION
@@ -899,16 +899,16 @@ public:
// Second 16 byte cache line in second 64 byte cache
// line. Diverse use.
//---------------------------------------------------
- UintR distributionGroup;
+ UintR distributionKey;
UintR nextCacheRec;
- UintR distributionKeySize;
+ UintR unused3;
Uint32 scanInfo;
//---------------------------------------------------
// Third 16 byte cache line in second 64
// byte cache line. Diverse use.
//---------------------------------------------------
- Uint32 scanNode;
+ Uint32 unused4;
Uint32 scanTakeOverInd;
UintR firstKeybuf; /* POINTER THE LINKED LIST OF KEY BUFFERS */
UintR lastKeybuf; /* VARIABLE POINTING TO THE LAST KEY BUFFER */
@@ -946,6 +946,7 @@ public:
typedef Ptr<HostRecord> HostRecordPtr;
/* *********** TABLE RECORD ********************************************* */
+
/********************************************************/
/* THIS RECORD CONTAINS THE CURRENT SCHEMA VERSION OF */
/* ALL TABLES IN THE SYSTEM. */
@@ -956,14 +957,18 @@ public:
Uint8 dropping;
Uint8 tableType;
Uint8 storedTable;
+
+ Uint8 noOfKeyAttr;
+ Uint8 hasCharAttr;
+ Uint8 noOfDistrKeys;
bool checkTable(Uint32 schemaVersion) const {
return enabled && !dropping &&
(table_version_major(schemaVersion) == table_version_major(currentSchemaVersion));
}
-
+
Uint32 getErrorCode(Uint32 schemaVersion) const;
-
+
struct DropTable {
Uint32 senderRef;
Uint32 senderData;
@@ -1142,7 +1147,7 @@ public:
Uint32 nextScan;
// Length of expected attribute information
- Uint32 scanAiLength;
+ union { Uint32 scanAiLength; Uint32 m_booked_fragments_count; };
Uint32 scanKeyLen;
@@ -1411,6 +1416,10 @@ private:
void gcpTcfinished(Signal* signal);
void handleGcp(Signal* signal);
void hash(Signal* signal);
+ bool handle_special_hash(Uint32 dstHash[4],
+ Uint32* src, Uint32 srcLen,
+ Uint32 tabPtrI, bool distr);
+
void initApiConnect(Signal* signal);
void initApiConnectRec(Signal* signal,
ApiConnectRecord * const regApiPtr,
@@ -1663,7 +1672,7 @@ private:
c_scan_count = c_range_scan_count = 0;
}
Uint32 report(Signal* signal){
- signal->theData[0] = EventReport::TransReportCounters;
+ signal->theData[0] = NDB_LE_TransReportCounters;
signal->theData[1] = ctransCount;
signal->theData[2] = ccommitCount;
signal->theData[3] = creadCount;
diff --git a/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp b/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp
index 04b40dafcb5..e61ec45b18d 100644
--- a/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp
+++ b/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp
@@ -20,6 +20,7 @@
#include "md5_hash.hpp"
#include <RefConvert.hpp>
#include <ndb_limits.h>
+#include <my_sys.h>
#include <signaldata/EventReport.hpp>
#include <signaldata/TcKeyReq.hpp>
@@ -63,6 +64,9 @@
#include <signaldata/PackedSignal.hpp>
#include <AttributeHeader.hpp>
#include <signaldata/DictTabInfo.hpp>
+#include <AttributeDescriptor.hpp>
+#include <SectionReader.hpp>
+#include <KeyDescriptor.hpp>
#include <NdbOut.hpp>
#include <DebuggerNames.hpp>
@@ -313,6 +317,10 @@ void Dbtc::execREAD_NODESREF(Signal* signal)
void Dbtc::execTC_SCHVERREQ(Signal* signal)
{
jamEntry();
+ if (! assembleFragments(signal)) {
+ jam();
+ return;
+ }
tabptr.i = signal->theData[0];
ptrCheckGuard(tabptr, ctabrecFilesize, tableRecord);
tabptr.p->currentSchemaVersion = signal->theData[1];
@@ -320,10 +328,18 @@ void Dbtc::execTC_SCHVERREQ(Signal* signal)
BlockReference retRef = signal->theData[3];
tabptr.p->tableType = (Uint8)signal->theData[4];
BlockReference retPtr = signal->theData[5];
+ Uint32 noOfKeyAttr = signal->theData[6];
+ ndbrequire(noOfKeyAttr <= MAX_ATTRIBUTES_IN_INDEX);
+
+ const KeyDescriptor* desc = g_key_descriptor_pool.getPtr(tabptr.i);
+ ndbrequire(noOfKeyAttr == desc->noOfKeyAttr);
ndbrequire(tabptr.p->enabled == false);
tabptr.p->enabled = true;
tabptr.p->dropping = false;
+ tabptr.p->noOfKeyAttr = desc->noOfKeyAttr;
+ tabptr.p->hasCharAttr = desc->hasCharAttr;
+ tabptr.p->noOfDistrKeys = desc->noOfDistrKeys;
signal->theData[0] = tabptr.i;
signal->theData[1] = retPtr;
@@ -2224,11 +2240,10 @@ void Dbtc::hash(Signal* signal)
UintR Tdata2;
UintR Tdata3;
UintR* Tdata32;
- Uint64 Tdata[512];
-
+
CacheRecord * const regCachePtr = cachePtr.p;
- Tdata32 = (UintR*)&Tdata[0];
-
+ Tdata32 = signal->theData;
+
Tdata0 = regCachePtr->keydata[0];
Tdata1 = regCachePtr->keydata[1];
Tdata2 = regCachePtr->keydata[2];
@@ -2254,31 +2269,71 @@ void Dbtc::hash(Signal* signal)
ti += 4;
}//while
}//if
- UintR ThashValue;
- UintR TdistrHashValue;
- ThashValue = md5_hash((Uint64*)&Tdata32[0], (UintR)regCachePtr->keylen);
- if (regCachePtr->distributionGroupIndicator == 1) {
- if (regCachePtr->distributionGroupType == 1) {
- jam();
- TdistrHashValue = (regCachePtr->distributionGroup << 6);
- } else {
- jam();
- Tdata32[0] = regCachePtr->distributionGroup;
- TdistrHashValue = md5_hash((Uint64*)&Tdata32[0], (UintR)1);
- }//if
- } else if (regCachePtr->distributionKeyIndicator == 1) {
+ UintR keylen = (UintR)regCachePtr->keylen;
+ Uint32 distKey = regCachePtr->distributionKeyIndicator;
+
+ Uint32 tmp[4];
+ if(!regCachePtr->m_special_hash)
+ {
+ md5_hash(tmp, (Uint64*)&Tdata32[0], keylen);
+ }
+ else
+ {
+ handle_special_hash(tmp, Tdata32, keylen, regCachePtr->tableref, !distKey);
+ }
+
+ thashValue = tmp[0];
+ if (distKey){
jam();
- TdistrHashValue = md5_hash((Uint64*)&Tdata32[0],
- (UintR)regCachePtr->distributionKeySize);
+ tdistrHashValue = regCachePtr->distributionKey;
} else {
jam();
- TdistrHashValue = ThashValue;
+ tdistrHashValue = tmp[1];
}//if
- thashValue = ThashValue;
- tdistrHashValue = TdistrHashValue;
}//Dbtc::hash()
+bool
+Dbtc::handle_special_hash(Uint32 dstHash[4], Uint32* src, Uint32 srcLen,
+ Uint32 tabPtrI,
+ bool distr)
+{
+ Uint64 Tmp[MAX_KEY_SIZE_IN_WORDS * MAX_XFRM_MULTIPLY];
+ const TableRecord* tabPtrP = &tableRecord[tabPtrI];
+ const bool hasCharAttr = tabPtrP->hasCharAttr;
+ const bool hasDistKeys = tabPtrP->noOfDistrKeys > 0;
+
+ Uint32 *dst = (Uint32*)Tmp;
+ Uint32 dstPos = 0;
+ Uint32 keyPartLen[MAX_ATTRIBUTES_IN_INDEX];
+ Uint32 * keyPartLenPtr;
+ if(hasCharAttr)
+ {
+ keyPartLenPtr = keyPartLen;
+ dstPos = xfrm_key(tabPtrI, src, dst, sizeof(Tmp) >> 2, keyPartLenPtr);
+ ndbrequire(dstPos);
+ }
+ else
+ {
+ dst = src;
+ dstPos = srcLen;
+ keyPartLenPtr = 0;
+ }
+
+ md5_hash(dstHash, (Uint64*)dst, dstPos);
+
+ if(distr && hasDistKeys)
+ {
+ jam();
+
+ Uint32 tmp[4];
+ Uint32 len = create_distr_key(tabPtrI, dst, keyPartLenPtr);
+ md5_hash(tmp, (Uint64*)dst, len);
+ dstHash[1] = tmp[1];
+ }
+ return true; // success
+}
+
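// A sketch of the idea behind handle_special_hash() above: one hash is
// computed over the whole (possibly normalized) key to identify the row,
// and a second hash over only the distribution-key columns picks the
// fragment, replacing the old distribution-group variants. The column
// layout and the hash function are illustrative assumptions.
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

uint32_t hash32(const std::vector<uint32_t>& words)
{
  uint32_t h = 2166136261u;               // stand-in for md5_hash
  for (uint32_t w : words) { h ^= w; h *= 16777619u; }
  return h;
}

struct KeyHashes { uint32_t rowHash; uint32_t distrHash; };

KeyHashes computeHashes(const std::vector<uint32_t>& key,
                        const std::vector<size_t>& distrKeyPos)
{
  KeyHashes out;
  out.rowHash = hash32(key);              // over the full key
  if (distrKeyPos.empty()) {
    out.distrHash = out.rowHash;          // no separate distribution keys
  } else {
    std::vector<uint32_t> dk;
    for (size_t pos : distrKeyPos) dk.push_back(key.at(pos));
    out.distrHash = hash32(dk);           // over the distribution-key words only
  }
  return out;
}

int main()
{
  const KeyHashes plain = computeHashes({10, 20, 30}, {});
  assert(plain.rowHash == plain.distrHash);
  const KeyHashes pruned = computeHashes({10, 20, 30}, {0});
  (void)pruned;
  return 0;
}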
/*
INIT_API_CONNECT_REC
---------------------------
@@ -2670,18 +2725,13 @@ void Dbtc::execTCKEYREQ(Signal* signal)
Uint8 TSimpleFlag = tcKeyReq->getSimpleFlag(Treqinfo);
Uint8 TDirtyFlag = tcKeyReq->getDirtyFlag(Treqinfo);
Uint8 TInterpretedFlag = tcKeyReq->getInterpretedFlag(Treqinfo);
- Uint8 TDistrGroupFlag = tcKeyReq->getDistributionGroupFlag(Treqinfo);
- Uint8 TDistrGroupTypeFlag = tcKeyReq->getDistributionGroupTypeFlag(Treqinfo);
Uint8 TDistrKeyFlag = tcKeyReq->getDistributionKeyFlag(Treqinfo);
Uint8 TexecuteFlag = TexecFlag;
regCachePtr->opSimple = TSimpleFlag;
regCachePtr->opExec = TInterpretedFlag;
regTcPtr->dirtyOp = TDirtyFlag;
-
- regCachePtr->distributionGroupIndicator = TDistrGroupFlag;
- regCachePtr->distributionGroupType = TDistrGroupTypeFlag;
- regCachePtr->distributionKeyIndicator = TDistrKeyFlag;
+ regCachePtr->distributionKeyIndicator = TDistrKeyFlag;
//-------------------------------------------------------------
// The next step is to read the upto three conditional words.
@@ -2690,17 +2740,14 @@ void Dbtc::execTCKEYREQ(Signal* signal)
Uint32* TOptionalDataPtr = (Uint32*)&tcKeyReq->scanInfo;
{
Uint32 TDistrGHIndex = tcKeyReq->getScanIndFlag(Treqinfo);
- Uint32 TDistrKeyIndex = TDistrGHIndex + TDistrGroupFlag;
+ Uint32 TDistrKeyIndex = TDistrGHIndex;
- Uint32 TscanNode = tcKeyReq->getTakeOverScanNode(TOptionalDataPtr[0]);
Uint32 TscanInfo = tcKeyReq->getTakeOverScanInfo(TOptionalDataPtr[0]);
regCachePtr->scanTakeOverInd = TDistrGHIndex;
- regCachePtr->scanNode = TscanNode;
regCachePtr->scanInfo = TscanInfo;
- regCachePtr->distributionGroup = TOptionalDataPtr[TDistrGHIndex];
- regCachePtr->distributionKeySize = TOptionalDataPtr[TDistrKeyIndex];
+ regCachePtr->distributionKey = TOptionalDataPtr[TDistrKeyIndex];
TkeyIndex = TDistrKeyIndex + TDistrKeyFlag;
}
@@ -2734,7 +2781,8 @@ void Dbtc::execTCKEYREQ(Signal* signal)
regCachePtr->keylen = TkeyLength;
regCachePtr->lenAiInTckeyreq = titcLenAiInTckeyreq;
regCachePtr->currReclenAi = titcLenAiInTckeyreq;
-
+ regCachePtr->m_special_hash =
+ localTabptr.p->hasCharAttr | (localTabptr.p->noOfDistrKeys > 0);
Tdata1 = TAIDataPtr[0];
Tdata2 = TAIDataPtr[1];
Tdata3 = TAIDataPtr[2];
@@ -2937,6 +2985,15 @@ void Dbtc::tckeyreq050Lab(Signal* signal)
execDIGETNODESREF(signal);
return;
}
+
+ if(ERROR_INSERTED(8050) && signal->theData[3] != getOwnNodeId())
+ {
+ ndbassert(false);
+ signal->theData[1] = 626;
+ execDIGETNODESREF(signal);
+ return;
+ }
+
/****************>>*/
/* DIGETNODESCONF >*/
/* ***************>*/
@@ -2961,7 +3018,7 @@ void Dbtc::tckeyreq050Lab(Signal* signal)
tnoOfBackup = tnodeinfo & 3;
tnoOfStandby = (tnodeinfo >> 8) & 3;
- regCachePtr->distributionKey = (tnodeinfo >> 16) & 255;
+ regCachePtr->fragmentDistributionKey = (tnodeinfo >> 16) & 255;
if (Toperation == ZREAD) {
if (Tdirty == 1) {
jam();
@@ -3051,8 +3108,6 @@ void Dbtc::attrinfoDihReceivedLab(Signal* signal)
CacheRecord * const regCachePtr = cachePtr.p;
TcConnectRecord * const regTcPtr = tcConnectptr.p;
Uint16 Tnode = regTcPtr->tcNodedata[0];
- Uint16 TscanTakeOverInd = regCachePtr->scanTakeOverInd;
- Uint16 TscanNode = regCachePtr->scanNode;
TableRecordPtr localTabptr;
localTabptr.i = regCachePtr->tableref;
@@ -3065,11 +3120,6 @@ void Dbtc::attrinfoDihReceivedLab(Signal* signal)
TCKEY_abort(signal, 58);
return;
}
- if ((TscanTakeOverInd == 1) &&
- (Tnode != TscanNode)) {
- TCKEY_abort(signal, 15);
- return;
- }//if
arrGuard(Tnode, MAX_NDB_NODES);
packLqhkeyreq(signal, calcLqhBlockRef(Tnode));
}//Dbtc::attrinfoDihReceivedLab()
@@ -3131,7 +3181,7 @@ void Dbtc::sendlqhkeyreq(Signal* signal,
/* ---------------------------------------------------------------------- */
// Bit16 == 0 since StoredProcedures are not yet supported.
/* ---------------------------------------------------------------------- */
- LqhKeyReq::setDistributionKey(tslrAttrLen, regCachePtr->distributionKey);
+ LqhKeyReq::setDistributionKey(tslrAttrLen, regCachePtr->fragmentDistributionKey);
LqhKeyReq::setScanTakeOverFlag(tslrAttrLen, regCachePtr->scanTakeOverInd);
Tdata10 = 0;
@@ -4539,8 +4589,9 @@ void Dbtc::sendApiCommit(Signal* signal)
}
commitConf->transId1 = regApiPtr->transid[0];
commitConf->transId2 = regApiPtr->transid[1];
-
- sendSignal(regApiPtr->ndbapiBlockref, GSN_TC_COMMITCONF, signal, 3, JBB);
+ commitConf->gci = regApiPtr->globalcheckpointid;
+ sendSignal(regApiPtr->ndbapiBlockref, GSN_TC_COMMITCONF, signal,
+ TcCommitConf::SignalLength, JBB);
} else if (regApiPtr->returnsignal == RS_NO_RETURN) {
jam();
} else {
@@ -5233,8 +5284,9 @@ void Dbtc::execTC_COMMITREQ(Signal* signal)
commitConf->apiConnectPtr = apiConnectPtr;
commitConf->transId1 = transId1;
commitConf->transId2 = transId2;
-
- sendSignal(apiBlockRef, GSN_TC_COMMITCONF, signal, 3, JBB);
+ commitConf->gci = 0;
+ sendSignal(apiBlockRef, GSN_TC_COMMITCONF, signal,
+ TcCommitConf::SignalLength, JBB);
regApiPtr->returnsignal = RS_NO_RETURN;
releaseAbortResources(signal);
@@ -8490,7 +8542,7 @@ void Dbtc::systemErrorLab(Signal* signal)
void Dbtc::execSCAN_TABREQ(Signal* signal)
{
const ScanTabReq * const scanTabReq = (ScanTabReq *)&signal->theData[0];
- const Uint32 reqinfo = scanTabReq->requestInfo;
+ const Uint32 ri = scanTabReq->requestInfo;
const Uint32 aiLength = (scanTabReq->attrLenKeyLen & 0xFFFF);
const Uint32 keyLen = scanTabReq->attrLenKeyLen >> 16;
const Uint32 schemaVersion = scanTabReq->tableSchemaVersion;
@@ -8500,8 +8552,8 @@ void Dbtc::execSCAN_TABREQ(Signal* signal)
const Uint32 buddyPtr = (tmpXX == 0xFFFFFFFF ? RNIL : tmpXX);
Uint32 currSavePointId = 0;
- Uint32 scanConcurrency = scanTabReq->getParallelism(reqinfo);
- Uint32 noOprecPerFrag = ScanTabReq::getScanBatch(reqinfo);
+ Uint32 scanConcurrency = scanTabReq->getParallelism(ri);
+ Uint32 noOprecPerFrag = ScanTabReq::getScanBatch(ri);
Uint32 scanParallel = scanConcurrency;
Uint32 errCode;
ScanRecordPtr scanptr;
@@ -8585,6 +8637,8 @@ void Dbtc::execSCAN_TABREQ(Signal* signal)
seizeCacheRecord(signal);
cachePtr.p->keylen = keyLen;
cachePtr.p->save1 = 0;
+ cachePtr.p->distributionKey = scanTabReq->distributionKey;
+ cachePtr.p->distributionKeyIndicator= ScanTabReq::getDistributionKeyFlag(ri);
scanptr = seizeScanrec(signal);
ndbrequire(transP->apiScanRec == RNIL);
@@ -8671,6 +8725,7 @@ void Dbtc::initScanrec(ScanRecordPtr scanptr,
UintR scanParallel,
UintR noOprecPerFrag)
{
+ const UintR ri = scanTabReq->requestInfo;
scanptr.p->scanTcrec = tcConnectptr.i;
scanptr.p->scanApiRec = apiConnectptr.i;
scanptr.p->scanAiLength = scanTabReq->attrLenKeyLen & 0xFFFF;
@@ -8683,12 +8738,13 @@ void Dbtc::initScanrec(ScanRecordPtr scanptr,
scanptr.p->batch_size_rows = noOprecPerFrag;
Uint32 tmp = 0;
- const UintR ri = scanTabReq->requestInfo;
ScanFragReq::setLockMode(tmp, ScanTabReq::getLockMode(ri));
ScanFragReq::setHoldLockFlag(tmp, ScanTabReq::getHoldLockFlag(ri));
ScanFragReq::setKeyinfoFlag(tmp, ScanTabReq::getKeyinfoFlag(ri));
ScanFragReq::setReadCommittedFlag(tmp,ScanTabReq::getReadCommittedFlag(ri));
ScanFragReq::setRangeScanFlag(tmp, ScanTabReq::getRangeScanFlag(ri));
+ ScanFragReq::setDescendingFlag(tmp, ScanTabReq::getDescendingFlag(ri));
+ ScanFragReq::setTupScanFlag(tmp, ScanTabReq::getTupScanFlag(ri));
ScanFragReq::setAttrLen(tmp, scanTabReq->attrLenKeyLen & 0xFFFF);
scanptr.p->scanRequestInfo = tmp;
@@ -8798,14 +8854,43 @@ void Dbtc::diFcountReqLab(Signal* signal, ScanRecordPtr scanptr)
return;
}
+ scanptr.p->scanNextFragId = 0;
+ scanptr.p->m_booked_fragments_count= 0;
scanptr.p->scanState = ScanRecord::WAIT_FRAGMENT_COUNT;
- /*************************************************
- * THE FIRST STEP TO RECEIVE IS SUCCESSFULLY COMPLETED.
- * WE MUST FIRST GET THE NUMBER OF FRAGMENTS IN THE TABLE.
- ***************************************************/
- signal->theData[0] = tcConnectptr.p->dihConnectptr;
- signal->theData[1] = scanptr.p->scanTableref;
- sendSignal(cdihblockref, GSN_DI_FCOUNTREQ, signal, 2, JBB);
+
+ if(!cachePtr.p->distributionKeyIndicator)
+ {
+ jam();
+ /*************************************************
+ * THE FIRST STEP TO RECEIVE IS SUCCESSFULLY COMPLETED.
+ * WE MUST FIRST GET THE NUMBER OF FRAGMENTS IN THE TABLE.
+ ***************************************************/
+ signal->theData[0] = tcConnectptr.p->dihConnectptr;
+ signal->theData[1] = scanptr.p->scanTableref;
+ sendSignal(cdihblockref, GSN_DI_FCOUNTREQ, signal, 2, JBB);
+ }
+ else
+ {
+ signal->theData[0] = tcConnectptr.p->dihConnectptr;
+ signal->theData[1] = tabPtr.i;
+ signal->theData[2] = cachePtr.p->distributionKey;
+ EXECUTE_DIRECT(DBDIH, GSN_DIGETNODESREQ, signal, 3);
+ UintR TerrorIndicator = signal->theData[0];
+ jamEntry();
+ if (TerrorIndicator != 0) {
+ signal->theData[0] = tcConnectptr.i;
+ //signal->theData[1] Contains error
+ execDI_FCOUNTREF(signal);
+ return;
+ }
+
+ UintR Tdata1 = signal->theData[1];
+ scanptr.p->scanNextFragId = Tdata1;
+
+ signal->theData[0] = tcConnectptr.i;
+ signal->theData[1] = 1; // Frag count
+ execDI_FCOUNTCONF(signal);
+ }
return;
}//Dbtc::diFcountReqLab()
@@ -8822,7 +8907,7 @@ void Dbtc::execDI_FCOUNTCONF(Signal* signal)
{
jamEntry();
tcConnectptr.i = signal->theData[0];
- const UintR tfragCount = signal->theData[1];
+ Uint32 tfragCount = signal->theData[1];
ptrCheckGuard(tcConnectptr, ctcConnectFilesize, tcConnectRecord);
apiConnectptr.i = tcConnectptr.p->apiConnect;
ptrCheckGuard(apiConnectptr, capiConnectFilesize, apiConnectRecord);
@@ -8856,24 +8941,17 @@ void Dbtc::execDI_FCOUNTCONF(Signal* signal)
return;
}
- if(scanptr.p->scanParallel > tfragCount){
- jam();
- abortScanLab(signal, scanptr, ZTOO_HIGH_CONCURRENCY_ERROR);
- return;
- }
-
scanptr.p->scanParallel = tfragCount;
scanptr.p->scanNoFrag = tfragCount;
- scanptr.p->scanNextFragId = 0;
scanptr.p->scanState = ScanRecord::RUNNING;
setApiConTimer(apiConnectptr.i, 0, __LINE__);
updateBuddyTimer(apiConnectptr);
ScanFragRecPtr ptr;
- ScanFragList list(c_scan_frag_pool,
- scanptr.p->m_running_scan_frags);
- for (list.first(ptr); !ptr.isNull(); list.next(ptr)){
+ ScanFragList list(c_scan_frag_pool, scanptr.p->m_running_scan_frags);
+ for (list.first(ptr); !ptr.isNull() && tfragCount;
+ list.next(ptr), tfragCount--){
jam();
ptr.p->lqhBlockref = 0;
@@ -8888,6 +8966,22 @@ void Dbtc::execDI_FCOUNTCONF(Signal* signal)
signal->theData[3] = ptr.p->scanFragId;
sendSignal(cdihblockref, GSN_DIGETPRIMREQ, signal, 4, JBB);
}//for
+
+ ScanFragList queued(c_scan_frag_pool, scanptr.p->m_queued_scan_frags);
+ for (; !ptr.isNull();)
+ {
+ ptr.p->m_ops = 0;
+ ptr.p->m_totalLen = 0;
+ ptr.p->m_scan_frag_conf_status = 1;
+ ptr.p->scanFragState = ScanFragRec::QUEUED_FOR_DELIVERY;
+ ptr.p->stopFragTimer();
+
+ ScanFragRecPtr tmp = ptr;
+ list.next(ptr);
+ list.remove(tmp);
+ queued.add(tmp);
+ scanptr.p->m_queued_count++;
+ }
}//Dbtc::execDI_FCOUNTCONF()
/******************************************************
@@ -9238,7 +9332,7 @@ void Dbtc::execSCAN_FRAGCONF(Signal* signal)
}
if(noCompletedOps == 0 && status != 0 &&
- scanptr.p->scanNextFragId < scanptr.p->scanNoFrag){
+ scanptr.p->scanNextFragId+scanptr.p->m_booked_fragments_count < scanptr.p->scanNoFrag){
/**
* Start on next fragment
*/
@@ -9408,6 +9502,9 @@ void Dbtc::execSCAN_NEXTREQ(Signal* signal)
*/
jam();
ndbrequire(scanptr.p->scanNextFragId < scanptr.p->scanNoFrag);
+ jam();
+ ndbassert(scanptr.p->m_booked_fragments_count);
+ scanptr.p->m_booked_fragments_count--;
scanFragptr.p->scanFragState = ScanFragRec::WAIT_GET_PRIMCONF;
tcConnectptr.i = scanptr.p->scanTcrec;
@@ -9649,8 +9746,9 @@ void Dbtc::sendScanTabConf(Signal* signal, ScanRecordPtr scanPtr) {
jam();
ops += 21;
}
-
- Uint32 left = scanPtr.p->scanNoFrag - scanPtr.p->scanNextFragId;
+
+ int left = scanPtr.p->scanNoFrag - scanPtr.p->scanNextFragId;
+ Uint32 booked = scanPtr.p->m_booked_fragments_count;
ScanTabConf * conf = (ScanTabConf*)&signal->theData[0];
conf->apiConnectPtr = apiConnectptr.p->ndbapiConnect;
@@ -9666,8 +9764,10 @@ void Dbtc::sendScanTabConf(Signal* signal, ScanRecordPtr scanPtr) {
ScanFragRecPtr curr = ptr; // Remove while iterating...
queued.next(ptr);
- bool done = curr.p->m_scan_frag_conf_status && --left;
-
+ bool done = curr.p->m_scan_frag_conf_status && (left <= (int)booked);
+ if(curr.p->m_scan_frag_conf_status)
+ booked++;
+
* ops++ = curr.p->m_apiPtr;
* ops++ = done ? RNIL : curr.i;
* ops++ = (curr.p->m_totalLen << 10) + curr.p->m_ops;
@@ -9685,8 +9785,10 @@ void Dbtc::sendScanTabConf(Signal* signal, ScanRecordPtr scanPtr) {
}
}
+ scanPtr.p->m_booked_fragments_count = booked;
if(scanPtr.p->m_delivered_scan_frags.isEmpty() &&
- scanPtr.p->m_running_scan_frags.isEmpty()){
+ scanPtr.p->m_running_scan_frags.isEmpty())
+ {
conf->requestInfo = op_count | ScanTabConf::EndOfData;
releaseScanResources(scanPtr);
}
@@ -9998,6 +10100,9 @@ void Dbtc::initTable(Signal* signal)
tabptr.p->tableType = 0;
tabptr.p->enabled = false;
tabptr.p->dropping = false;
+ tabptr.p->noOfKeyAttr = 0;
+ tabptr.p->hasCharAttr = 0;
+ tabptr.p->noOfDistrKeys = 0;
}//for
}//Dbtc::initTable()
@@ -11144,7 +11249,7 @@ void Dbtc::execTCINDXREQ(Signal* signal)
{
jamEntry();
- TcIndxReq * const tcIndxReq = (TcIndxReq *)signal->getDataPtr();
+ TcKeyReq * const tcIndxReq = (TcKeyReq *)signal->getDataPtr();
const UintR TapiIndex = tcIndxReq->apiConnectPtr;
Uint32 tcIndxRequestInfo = tcIndxReq->requestInfo;
Uint32 startFlag = tcIndxReq->getStartFlag(tcIndxRequestInfo);
@@ -11195,7 +11300,7 @@ void Dbtc::execTCINDXREQ(Signal* signal)
// If operation is readTupleExclusive or updateTuple then read index
// table with exclusive lock
- Uint32 indexLength = TcIndxReq::getIndexLength(tcIndxRequestInfo);
+ Uint32 indexLength = TcKeyReq::getKeyLength(tcIndxRequestInfo);
Uint32 attrLength = tcIndxReq->attrLen;
indexOp->expectedKeyInfo = indexLength;
Uint32 includedIndexLength = MIN(indexLength, indexBufSize);
@@ -11500,14 +11605,14 @@ void Dbtc::execTCKEYCONF(Signal* signal)
case(IOS_NOOP): {
jam();
// Should never happen, abort
- TcIndxRef * const tcIndxRef = (TcIndxRef *)signal->getDataPtrSend();
+ TcKeyRef * const tcIndxRef = (TcKeyRef *)signal->getDataPtrSend();
tcIndxRef->connectPtr = indexOp->tcIndxReq.senderData;
tcIndxRef->transId[0] = regApiPtr->transid[0];
tcIndxRef->transId[1] = regApiPtr->transid[1];
tcIndxRef->errorCode = 4349;
sendSignal(regApiPtr->ndbapiBlockref, GSN_TCINDXREF, signal,
- TcIndxRef::SignalLength, JBB);
+ TcKeyRef::SignalLength, JBB);
return;
}
case(IOS_INDEX_ACCESS): {
@@ -11519,14 +11624,14 @@ void Dbtc::execTCKEYCONF(Signal* signal)
case(IOS_INDEX_ACCESS_WAIT_FOR_TRANSID_AI): {
jam();
// Double TCKEYCONF, should never happen, abort
- TcIndxRef * const tcIndxRef = (TcIndxRef *)signal->getDataPtrSend();
+ TcKeyRef * const tcIndxRef = (TcKeyRef *)signal->getDataPtrSend();
tcIndxRef->connectPtr = indexOp->tcIndxReq.senderData;
tcIndxRef->transId[0] = regApiPtr->transid[0];
tcIndxRef->transId[1] = regApiPtr->transid[1];
tcIndxRef->errorCode = 4349;
sendSignal(regApiPtr->ndbapiBlockref, GSN_TCINDXREF, signal,
- TcIndxRef::SignalLength, JBB);
+ TcKeyRef::SignalLength, JBB);
return;
}
case(IOS_INDEX_ACCESS_WAIT_FOR_TCKEYCONF): {
@@ -11612,8 +11717,8 @@ void Dbtc::execTCKEYREF(Signal* signal)
// Send TCINDXREF
jam();
- TcIndxReq * const tcIndxReq = &indexOp->tcIndxReq;
- TcIndxRef * const tcIndxRef = (TcIndxRef *)signal->getDataPtrSend();
+ TcKeyReq * const tcIndxReq = &indexOp->tcIndxReq;
+ TcKeyRef * const tcIndxRef = (TcKeyRef *)signal->getDataPtrSend();
ndbassert(regApiPtr->noIndexOp);
regApiPtr->noIndexOp--; // Decrease count
@@ -11622,7 +11727,7 @@ void Dbtc::execTCKEYREF(Signal* signal)
tcIndxRef->transId[1] = tcKeyRef->transId[1];
tcIndxRef->errorCode = tcKeyRef->errorCode;
sendSignal(regApiPtr->ndbapiBlockref,
- GSN_TCINDXREF, signal, TcIndxRef::SignalLength, JBB);
+ GSN_TCINDXREF, signal, TcKeyRef::SignalLength, JBB);
return;
}
}
@@ -11689,14 +11794,14 @@ void Dbtc::execTRANSID_AI(Signal* signal)
signal->getLength() - TransIdAI::HeaderLength)) {
jam();
// Failed to allocate space for TransIdAI
- TcIndxRef * const tcIndxRef = (TcIndxRef *)signal->getDataPtrSend();
+ TcKeyRef * const tcIndxRef = (TcKeyRef *)signal->getDataPtrSend();
tcIndxRef->connectPtr = indexOp->tcIndxReq.senderData;
tcIndxRef->transId[0] = regApiPtr->transid[0];
tcIndxRef->transId[1] = regApiPtr->transid[1];
tcIndxRef->errorCode = 4000;
sendSignal(regApiPtr->ndbapiBlockref, GSN_TCINDXREF, signal,
- TcIndxRef::SignalLength, JBB);
+ TcKeyRef::SignalLength, JBB);
return;
}
@@ -11704,14 +11809,14 @@ void Dbtc::execTRANSID_AI(Signal* signal)
case(IOS_NOOP): {
jam();
// Should never happen, abort
- TcIndxRef * const tcIndxRef = (TcIndxRef *)signal->getDataPtrSend();
+ TcKeyRef * const tcIndxRef = (TcKeyRef *)signal->getDataPtrSend();
tcIndxRef->connectPtr = indexOp->tcIndxReq.senderData;
tcIndxRef->transId[0] = regApiPtr->transid[0];
tcIndxRef->transId[1] = regApiPtr->transid[1];
tcIndxRef->errorCode = 4349;
sendSignal(regApiPtr->ndbapiBlockref, GSN_TCINDXREF, signal,
- TcIndxRef::SignalLength, JBB);
+ TcKeyRef::SignalLength, JBB);
return;
break;
}
@@ -11732,14 +11837,14 @@ void Dbtc::execTRANSID_AI(Signal* signal)
#endif
/*
// Too many TRANSID_AI
- TcIndxRef * const tcIndxRef = (TcIndxRef *)signal->getDataPtrSend();
+ TcKeyRef * const tcIndxRef = (TcKeyRef *)signal->getDataPtrSend();
tcIndexRef->connectPtr = indexOp->tcIndxReq.senderData;
tcIndxRef->transId[0] = regApiPtr->transid[0];
tcIndxRef->transId[1] = regApiPtr->transid[1];
tcIndxRef->errorCode = 4349;
sendSignal(regApiPtr->ndbapiBlockref, GSN_TCINDXREF, signal,
- TcIndxRef::SignalLength, JBB);
+ TcKeyRef::SignalLength, JBB);
*/
break;
}
@@ -11757,14 +11862,14 @@ void Dbtc::execTRANSID_AI(Signal* signal)
case(IOS_INDEX_OPERATION): {
// Should never receive TRANSID_AI in this state!!
jam();
- TcIndxRef * const tcIndxRef = (TcIndxRef *)signal->getDataPtrSend();
+ TcKeyRef * const tcIndxRef = (TcKeyRef *)signal->getDataPtrSend();
tcIndxRef->connectPtr = indexOp->tcIndxReq.senderData;
tcIndxRef->transId[0] = regApiPtr->transid[0];
tcIndxRef->transId[1] = regApiPtr->transid[1];
tcIndxRef->errorCode = 4349;
sendSignal(regApiPtr->ndbapiBlockref, GSN_TCINDXREF, signal,
- TcIndxRef::SignalLength, JBB);
+ TcKeyRef::SignalLength, JBB);
return;
}
}
@@ -11807,24 +11912,24 @@ void Dbtc::readIndexTable(Signal* signal,
(Operation_t)TcKeyReq::getOperationType(tcKeyRequestInfo);
// Find index table
- if ((indexData = c_theIndexes.getPtr(indexOp->tcIndxReq.indexId)) == NULL) {
+ if ((indexData = c_theIndexes.getPtr(indexOp->tcIndxReq.tableId)) == NULL) {
jam();
// Failed to find index record
- TcIndxRef * const tcIndxRef = (TcIndxRef *)signal->getDataPtrSend();
+ TcKeyRef * const tcIndxRef = (TcKeyRef *)signal->getDataPtrSend();
tcIndxRef->connectPtr = indexOp->tcIndxReq.senderData;
tcIndxRef->transId[0] = regApiPtr->transid[0];
tcIndxRef->transId[1] = regApiPtr->transid[1];
tcIndxRef->errorCode = 4000;
sendSignal(regApiPtr->ndbapiBlockref, GSN_TCINDXREF, signal,
- TcIndxRef::SignalLength, JBB);
+ TcKeyRef::SignalLength, JBB);
return;
}
tcKeyReq->transId1 = transId1;
tcKeyReq->transId2 = transId2;
tcKeyReq->tableId = indexData->indexId;
tcKeyLength += MIN(keyLength, keyBufSize);
- tcKeyReq->tableSchemaVersion = indexOp->tcIndxReq.indexSchemaVersion;
+ tcKeyReq->tableSchemaVersion = indexOp->tcIndxReq.tableSchemaVersion;
TcKeyReq::setOperationType(tcKeyRequestInfo,
opType == ZREAD ? ZREAD : ZREAD_EX);
TcKeyReq::setAIInTcKeyReq(tcKeyRequestInfo, 1); // Always send one AttrInfo
@@ -11916,7 +12021,7 @@ void Dbtc::executeIndexOperation(Signal* signal,
Uint32 keyBufSize = 8; // Maximum for key in TCKEYREQ
Uint32 attrBufSize = 5;
Uint32 dataPos = 0;
- TcIndxReq * const tcIndxReq = &indexOp->tcIndxReq;
+ TcKeyReq * const tcIndxReq = &indexOp->tcIndxReq;
TcKeyReq * const tcKeyReq = (TcKeyReq *)signal->getDataPtrSend();
Uint32 * dataPtr = &tcKeyReq->scanInfo;
Uint32 tcKeyLength = TcKeyReq::StaticLength;
@@ -11927,17 +12032,17 @@ void Dbtc::executeIndexOperation(Signal* signal,
bool moreKeyData = indexOp->transIdAI.first(aiIter);
// Find index table
- if ((indexData = c_theIndexes.getPtr(tcIndxReq->indexId)) == NULL) {
+ if ((indexData = c_theIndexes.getPtr(tcIndxReq->tableId)) == NULL) {
jam();
// Failed to find index record
- TcIndxRef * const tcIndxRef = (TcIndxRef *)signal->getDataPtrSend();
+ TcKeyRef * const tcIndxRef = (TcKeyRef *)signal->getDataPtrSend();
tcIndxRef->connectPtr = indexOp->tcIndxReq.senderData;
tcIndxRef->transId[0] = regApiPtr->transid[0];
tcIndxRef->transId[1] = regApiPtr->transid[1];
tcIndxRef->errorCode = 4349;
sendSignal(regApiPtr->ndbapiBlockref, GSN_TCINDXREF, signal,
- TcIndxRef::SignalLength, JBB);
+ TcKeyRef::SignalLength, JBB);
return;
}
// Find schema version of primary table
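
The Dbtc changes above let a scan carry a distribution key: instead of asking DIH for the fragment count, diFcountReqLab() resolves the key to a single fragment via DIGETNODESREQ and then proceeds as if the table had exactly one fragment to scan. Below is a minimal standalone sketch of that pruning decision; planScan, the std::hash-based fragment choice and the fragment numbering are illustrative stand-ins, not the DBDIH interface.

// Standalone sketch: prune a table scan to a single fragment when the client
// supplies a distribution key. Illustration only; the real code asks DBDIH
// (DIGETNODESREQ) and uses NDB's own hash function, not std::hash.
#include <cstdint>
#include <functional>
#include <iostream>

struct ScanPlan {
  uint32_t firstFragId;   // fragment the scan starts at (cf. scanNextFragId)
  uint32_t fragCount;     // how many fragments the scan will visit
};

static ScanPlan planScan(uint32_t tableFragments,
                         bool hasDistributionKey,
                         uint64_t distributionKey)
{
  if (!hasDistributionKey) {
    // No key given: visit every fragment of the table.
    return ScanPlan{0, tableFragments};
  }
  // Key given: hash it to one fragment and report a fragment count of 1,
  // mirroring the "Frag count = 1" shortcut in diFcountReqLab() above.
  uint32_t frag = static_cast<uint32_t>(
      std::hash<uint64_t>{}(distributionKey) % tableFragments);
  return ScanPlan{frag, 1};
}

int main() {
  ScanPlan full   = planScan(8, false, 0);
  ScanPlan pruned = planScan(8, true, 12345);
  std::cout << "full scan:   " << full.fragCount << " fragments\n";
  std::cout << "pruned scan: fragment " << pruned.firstFragId
            << ", " << pruned.fragCount << " fragment\n";
  return 0;
}
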
diff --git a/ndb/src/kernel/blocks/dbtup/Dbtup.hpp b/ndb/src/kernel/blocks/dbtup/Dbtup.hpp
index 1cb3bd89997..cf3c6056d65 100644
--- a/ndb/src/kernel/blocks/dbtup/Dbtup.hpp
+++ b/ndb/src/kernel/blocks/dbtup/Dbtup.hpp
@@ -64,6 +64,7 @@
// DbtupSystemRestart.cpp 26000
// DbtupIndex.cpp 28000
// DbtupDebug.cpp 30000
+// DbtupScan.cpp 32000
//------------------------------------------------------------------
/*
@@ -207,6 +208,8 @@
#define ZTUPLE_DELETED_ERROR 626
#define ZINSERT_ERROR 630
+#define ZINVALID_CHAR_FORMAT 744
+
/* SOME WORD POSITIONS OF FIELDS IN SOME HEADERS */
#define ZPAGE_STATE_POS 0 /* POSITION OF PAGE STATE */
@@ -500,7 +503,8 @@ struct Fragoperrec {
Uint32 tableidFrag;
Uint32 fragPointer;
Uint32 attributeCount;
- Uint32 freeNullBit;
+ Uint32 currNullBit;
+ Uint32 noOfNullBits;
Uint32 noOfNewAttrCount;
Uint32 charsetIndex;
BlockReference lqhBlockrefFrag;
@@ -508,6 +512,49 @@ struct Fragoperrec {
};
typedef Ptr<Fragoperrec> FragoperrecPtr;
+ // Position for use by scan
+ struct PagePos {
+ Uint32 m_fragId; // "base" fragment id
+ Uint32 m_fragBit; // two fragments in 5.0
+ Uint32 m_pageId;
+ Uint32 m_tupleNo;
+ bool m_match;
+ };
+
+ // Tup scan op (compare Dbtux::ScanOp)
+ struct ScanOp {
+ enum {
+ Undef = 0,
+ First = 1, // before first entry
+ Locked = 4, // at current entry (no lock needed)
+ Next = 5, // looking for next entry
+ Last = 6, // after last entry
+ Invalid = 9 // cannot return REF to LQH currently
+ };
+ Uint16 m_state;
+ Uint16 m_lockwait; // unused
+ Uint32 m_userPtr; // scanptr.i in LQH
+ Uint32 m_userRef;
+ Uint32 m_tableId;
+ Uint32 m_fragId; // "base" fragment id
+ Uint32 m_fragPtrI[2];
+ Uint32 m_transId1;
+ Uint32 m_transId2;
+ PagePos m_scanPos;
+ union {
+ Uint32 nextPool;
+ Uint32 nextList;
+ };
+ Uint32 prevList;
+ };
+ typedef Ptr<ScanOp> ScanOpPtr;
+ ArrayPool<ScanOp> c_scanOpPool;
+
+ void scanFirst(Signal* signal, ScanOpPtr scanPtr);
+ void scanNext(Signal* signal, ScanOpPtr scanPtr);
+ void scanClose(Signal* signal, ScanOpPtr scanPtr);
+ void releaseScanOp(ScanOpPtr& scanPtr);
+
struct Fragrecord {
Uint32 nextStartRange;
Uint32 currentPageRange;
@@ -529,6 +576,9 @@ struct Fragrecord {
Uint32 fragTableId;
Uint32 fragmentId;
Uint32 nextfreefrag;
+
+ DLList<ScanOp> m_scanList;
+ Fragrecord(ArrayPool<ScanOp> & scanOpPool) : m_scanList(scanOpPool) {}
};
typedef Ptr<Fragrecord> FragrecordPtr;
@@ -1019,7 +1069,14 @@ public:
* for md5 summing and when returning keyinfo. Returns number of
* words or negative (-terrorCode) on error.
*/
- int tuxReadPk(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32* dataOut);
+ int tuxReadPk(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32* dataOut, bool xfrmFlag);
+
+ /*
+ * ACC reads primary key without headers into an array of words. At
+ * this point in ACC deconstruction, ACC still uses logical references
+ * to fragment and tuple.
+ */
+ int accReadPk(Uint32 tableId, Uint32 fragId, Uint32 fragPageId, Uint32 pageIndex, Uint32* dataOut, bool xfrmFlag);
/*
* TUX checks if tuple is visible to scan.
@@ -1069,6 +1126,11 @@ private:
void buildIndex(Signal* signal, Uint32 buildPtrI);
void buildIndexReply(Signal* signal, const BuildIndexRec* buildRec);
+ // Tup scan
+ void execACC_SCANREQ(Signal* signal);
+ void execNEXT_SCANREQ(Signal* signal);
+ void execACC_CHECK_SCAN(Signal* signal);
+
//------------------------------------------------------------------
//------------------------------------------------------------------
// Methods to handle execution of TUPKEYREQ + ATTRINFO.
@@ -1618,19 +1680,11 @@ private:
Uint32 attrDescriptor,
Uint32 attrDes2);
-// *****************************************************************
-// Read char routines optionally (tXfrmFlag) apply strxfrm
-// *****************************************************************
-
- bool readCharNotNULL(Uint32* outBuffer,
- AttributeHeader* ahOut,
- Uint32 attrDescriptor,
- Uint32 attrDes2);
- bool readCharNULLable(Uint32* outBuffer,
- AttributeHeader* ahOut,
- Uint32 attrDescriptor,
- Uint32 attrDes2);
+ bool readBitsNULLable(Uint32* outBuffer, AttributeHeader*, Uint32, Uint32);
+ bool updateBitsNULLable(Uint32* inBuffer, Uint32, Uint32);
+ bool readBitsNotNULL(Uint32* outBuffer, AttributeHeader*, Uint32, Uint32);
+ bool updateBitsNotNULL(Uint32* inBuffer, Uint32, Uint32);
//------------------------------------------------------------------
//------------------------------------------------------------------
@@ -1737,8 +1791,7 @@ private:
Uint32* const mainBuffer,
Uint32& noMainWords,
Uint32* const copyBuffer,
- Uint32& noCopyWords,
- bool xfrm);
+ Uint32& noCopyWords);
void sendTrigAttrInfo(Signal* signal,
Uint32* data,
diff --git a/ndb/src/kernel/blocks/dbtup/DbtupDebug.cpp b/ndb/src/kernel/blocks/dbtup/DbtupDebug.cpp
index 808cfd33696..8c43de52a75 100644
--- a/ndb/src/kernel/blocks/dbtup/DbtupDebug.cpp
+++ b/ndb/src/kernel/blocks/dbtup/DbtupDebug.cpp
@@ -68,7 +68,7 @@ struct Chunk {
void
Dbtup::reportMemoryUsage(Signal* signal, int incDec){
- signal->theData[0] = EventReport::MemoryUsage;
+ signal->theData[0] = NDB_LE_MemoryUsage;
signal->theData[1] = incDec;
signal->theData[2] = sizeof(Page);
signal->theData[3] = cnoOfAllocatedPages;
diff --git a/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp b/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp
index 8e3ca6528c2..761f959acdc 100644
--- a/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp
+++ b/ndb/src/kernel/blocks/dbtup/DbtupExecQuery.cpp
@@ -858,6 +858,8 @@ void Dbtup::sendTUPKEYCONF(Signal* signal,
return;
}//Dbtup::sendTUPKEYCONF()
+#define MAX_READ (sizeof(signal->theData) > MAX_MESSAGE_SIZE ? MAX_MESSAGE_SIZE : sizeof(signal->theData))
+
/* ---------------------------------------------------------------- */
/* ----------------------------- READ ---------------------------- */
/* ---------------------------------------------------------------- */
@@ -878,7 +880,7 @@ int Dbtup::handleReadReq(Signal* signal,
}//if
Uint32 * dst = &signal->theData[25];
- Uint32 dstLen = (sizeof(signal->theData) / 4) - 25;
+ Uint32 dstLen = (MAX_READ / 4) - 25;
const Uint32 node = refToNode(sendBref);
if(node != 0 && node != getOwnNodeId()) {
;
@@ -888,7 +890,7 @@ int Dbtup::handleReadReq(Signal* signal,
* execute direct
*/
dst = &signal->theData[3];
- dstLen = (sizeof(signal->theData) / 4) - 3;
+ dstLen = (MAX_READ / 4) - 3;
}
if (regOperPtr->interpretedExec != 1) {
@@ -1228,7 +1230,7 @@ int Dbtup::interpreterStartLab(Signal* signal,
const BlockReference sendBref = regOperPtr->recBlockref;
Uint32 * dst = &signal->theData[25];
- Uint32 dstLen = (sizeof(signal->theData) / 4) - 25;
+ Uint32 dstLen = (MAX_READ / 4) - 25;
const Uint32 node = refToNode(sendBref);
if(node != 0 && node != getOwnNodeId()) {
;
@@ -1238,7 +1240,7 @@ int Dbtup::interpreterStartLab(Signal* signal,
* execute direct
*/
dst = &signal->theData[3];
- dstLen = (sizeof(signal->theData) / 4) - 3;
+ dstLen = (MAX_READ / 4) - 3;
}
RtotalLen = RinitReadLen;
@@ -1538,13 +1540,8 @@ int Dbtup::interpreterNextLab(Signal* signal,
// Calculate the number of words of this attribute.
// We allow writes into arrays as long as they fit into the 64 bit
// register size.
- //TEST_MR See to that TattrNoOfWords can be
- // read faster from attribute description.
/* --------------------------------------------------------------- */
- Uint32 TarraySize = (TattrDesc1 >> 16);
- Uint32 TattrLogLen = (TattrDesc1 >> 4) & 0xf;
- Uint32 TattrNoOfBits = TarraySize << TattrLogLen;
- Uint32 TattrNoOfWords = (TattrNoOfBits + 31) >> 5;
+ Uint32 TattrNoOfWords = AttributeDescriptor::getSizeInWords(TattrDesc1);
Uint32 Toptype = operPtr.p->optype;
Uint32 TdataForUpdate[3];
@@ -1821,9 +1818,6 @@ int Dbtup::interpreterNextLab(Signal* signal,
case Interpreter::BRANCH_ATTR_OP_ARG:{
jam();
Uint32 cond = Interpreter::getBinaryCondition(theInstruction);
- Uint32 diff = Interpreter::getArrayLengthDiff(theInstruction);
- Uint32 vchr = Interpreter::isVarchar(theInstruction);
- Uint32 nopad =Interpreter::isNopad(theInstruction);
Uint32 ins2 = TcurrentProgram[TprogramCounter];
Uint32 attrId = Interpreter::getBranchCol_AttrId(ins2) << 16;
Uint32 argLen = Interpreter::getBranchCol_Len(ins2);
@@ -1842,84 +1836,93 @@ int Dbtup::interpreterNextLab(Signal* signal,
}
tmpHabitant = attrId;
}
-
- AttributeHeader ah(tmpArea[0]);
+ // get type
+ attrId >>= 16;
+ Uint32 TattrDescrIndex = tabptr.p->tabDescriptor +
+ (attrId << ZAD_LOG_SIZE);
+ Uint32 TattrDesc1 = tableDescriptor[TattrDescrIndex].tabDescr;
+ Uint32 TattrDesc2 = tableDescriptor[TattrDescrIndex+1].tabDescr;
+ Uint32 typeId = AttributeDescriptor::getType(TattrDesc1);
+ void * cs = 0;
+ if(AttributeOffset::getCharsetFlag(TattrDesc2))
+ {
+ Uint32 pos = AttributeOffset::getCharsetPos(TattrDesc2);
+ cs = tabptr.p->charsetArray[pos];
+ }
+ const NdbSqlUtil::Type& sqlType = NdbSqlUtil::getType(typeId);
+
+ // get data
+ AttributeHeader ah(tmpArea[0]);
const char* s1 = (char*)&tmpArea[1];
const char* s2 = (char*)&TcurrentProgram[TprogramCounter+1];
- Uint32 attrLen = (4 * ah.getDataSize()) - diff;
- if (vchr) {
-#if NDB_VERSION_MAJOR >= 3
- bool vok = false;
- if (attrLen >= 2) {
- Uint32 vlen = (s1[0] << 8) | s1[1]; // big-endian
- s1 += 2;
- attrLen -= 2;
- if (attrLen >= vlen) {
- attrLen = vlen;
- vok = true;
- }
+ // fixed length in 5.0
+ Uint32 attrLen = AttributeDescriptor::getSizeInBytes(TattrDesc1);
+
+ bool r1_null = ah.isNULL();
+ bool r2_null = argLen == 0;
+ int res1;
+ if (cond != Interpreter::LIKE &&
+ cond != Interpreter::NOT_LIKE) {
+ if (r1_null || r2_null) {
+ // NULL==NULL and NULL<not-NULL
+ res1 = r1_null && r2_null ? 0 : r1_null ? -1 : 1;
+ } else {
+ res1 = (*sqlType.m_cmp)(cs, s1, attrLen, s2, argLen, true);
}
- if (!vok) {
- terrorCode = ZREGISTER_INIT_ERROR;
- tupkeyErrorLab(signal);
- return -1;
- }
-#else
- Uint32 tmp;
- if (attrLen >= 2) {
- unsigned char* ss = (unsigned char*)&s1[attrLen - 2];
- tmp = (ss[0] << 8) | ss[1];
- if (tmp <= attrLen - 2)
- attrLen = tmp;
+ } else {
+ if (r1_null || r2_null) {
+ // NULL like NULL is true (has no practical use)
+ res1 = r1_null && r2_null ? 0 : -1;
+ } else {
+ res1 = (*sqlType.m_like)(cs, s1, attrLen, s2, argLen);
}
- // XXX handle bad data
-#endif
}
- bool res = false;
+ int res = 0;
switch ((Interpreter::BinaryCondition)cond) {
case Interpreter::EQ:
- res = NdbSqlUtil::char_compare(s1, attrLen, s2, argLen, !nopad) == 0;
+ res = (res1 == 0);
break;
case Interpreter::NE:
- res = NdbSqlUtil::char_compare(s1, attrLen, s2, argLen, !nopad) != 0;
+ res = (res1 != 0);
break;
// note the condition is backwards
case Interpreter::LT:
- res = NdbSqlUtil::char_compare(s1, attrLen, s2, argLen, !nopad) > 0;
+ res = (res1 > 0);
break;
case Interpreter::LE:
- res = NdbSqlUtil::char_compare(s1, attrLen, s2, argLen, !nopad) >= 0;
+ res = (res1 >= 0);
break;
case Interpreter::GT:
- res = NdbSqlUtil::char_compare(s1, attrLen, s2, argLen, !nopad) < 0;
+ res = (res1 < 0);
break;
case Interpreter::GE:
- res = NdbSqlUtil::char_compare(s1, attrLen, s2, argLen, !nopad) <= 0;
+ res = (res1 <= 0);
break;
case Interpreter::LIKE:
- res = NdbSqlUtil::char_like(s1, attrLen, s2, argLen, !nopad);
+ res = (res1 == 0);
break;
case Interpreter::NOT_LIKE:
- res = ! NdbSqlUtil::char_like(s1, attrLen, s2, argLen, !nopad);
+ res = (res1 == 1);
break;
- // XXX handle invalid value
+ // XXX handle invalid value
}
#ifdef TRACE_INTERPRETER
- ndbout_c("cond=%u diff=%d vc=%d nopad=%d attr(%d) = >%.*s<(%d) str=>%.*s<(%d) -> res = %d",
- cond, diff, vchr, nopad,
- attrId >> 16, attrLen, s1, attrLen, argLen, s2, argLen, res);
+ ndbout_c("cond=%u attr(%d)='%.*s'(%d) str='%.*s'(%d) res1=%d res=%d",
+ cond, attrId >> 16,
+ attrLen, s1, attrLen, argLen, s2, argLen, res1, res);
#endif
if (res)
TprogramCounter = brancher(theInstruction, TprogramCounter);
- else {
- Uint32 tmp = (Interpreter::mod4(argLen) >> 2) + 1;
+ else
+ {
+ Uint32 tmp = ((argLen + 3) >> 2) + 1;
TprogramCounter += tmp;
}
break;
}
-
+
case Interpreter::BRANCH_ATTR_EQ_NULL:{
jam();
Uint32 ins2 = TcurrentProgram[TprogramCounter];
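
The interpreter hunk above replaces the padded char_compare/char_like calls with a single per-type comparison: the attribute's type id selects an NdbSqlUtil::Type whose m_cmp (or m_like) function does the collation-aware work, and NULL is ordered before any non-NULL value. A rough standalone model of that dispatch follows; the type table and comparators are simplified stand-ins.

// Standalone sketch of per-type comparison dispatch with NULL ordering,
// modelled loosely on the m_cmp function-pointer table used above.
#include <cctype>
#include <cstring>
#include <iostream>
#include <optional>
#include <string>

using CmpFn = int (*)(const void* a, unsigned alen,
                      const void* b, unsigned blen);

static int cmpBinary(const void* a, unsigned alen,
                     const void* b, unsigned blen) {
  unsigned n = alen < blen ? alen : blen;
  int r = std::memcmp(a, b, n);
  return r != 0 ? r : (int)alen - (int)blen;
}

static int cmpCaseInsensitive(const void* a, unsigned alen,
                              const void* b, unsigned blen) {
  // Stand-in for a collation-aware comparison (e.g. a *_ci collation).
  std::string x((const char*)a, alen), y((const char*)b, blen);
  for (auto& c : x) c = (char)std::tolower((unsigned char)c);
  for (auto& c : y) c = (char)std::tolower((unsigned char)c);
  return x.compare(y);
}

struct Type { CmpFn m_cmp; };
static const Type g_types[] = { {cmpBinary}, {cmpCaseInsensitive} };

// NULL sorts before any value and NULL == NULL, as in the hunk above.
static int compareValues(unsigned typeId,
                         const std::optional<std::string>& a,
                         const std::optional<std::string>& b) {
  if (!a || !b) return (!a && !b) ? 0 : (!a ? -1 : 1);
  return g_types[typeId].m_cmp(a->data(), (unsigned)a->size(),
                               b->data(), (unsigned)b->size());
}

int main() {
  std::cout << compareValues(1, std::string("ABC"), std::string("abc")) << "\n"; // 0
  std::cout << compareValues(0, std::nullopt, std::string("x")) << "\n";         // -1
  return 0;
}
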
diff --git a/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp b/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp
index af516d53a24..52ac96bc5d3 100644
--- a/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp
+++ b/ndb/src/kernel/blocks/dbtup/DbtupGen.cpp
@@ -137,6 +137,11 @@ Dbtup::Dbtup(const class Configuration & conf)
// Ordered index related
addRecSignal(GSN_BUILDINDXREQ, &Dbtup::execBUILDINDXREQ);
+ // Tup scan
+ addRecSignal(GSN_ACC_SCANREQ, &Dbtup::execACC_SCANREQ);
+ addRecSignal(GSN_NEXT_SCANREQ, &Dbtup::execNEXT_SCANREQ);
+ addRecSignal(GSN_ACC_CHECK_SCAN, &Dbtup::execACC_CHECK_SCAN);
+
initData();
}//Dbtup::Dbtup()
@@ -622,6 +627,10 @@ void Dbtup::execREAD_CONFIG_REQ(Signal* signal)
c_buildIndexPool.setSize(c_noOfBuildIndexRec);
c_triggerPool.setSize(noOfTriggers);
+ Uint32 nScanOp; // use TUX config for now
+ ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_TUX_SCAN_OP, &nScanOp));
+ c_scanOpPool.setSize(nScanOp);
+
initRecords();
czero = 0;
cminusOne = czero - 1;
@@ -644,6 +653,8 @@ void Dbtup::execREAD_CONFIG_REQ(Signal* signal)
void Dbtup::initRecords()
{
+ unsigned i;
+
// Records with dynamic sizes
attrbufrec = (Attrbufrec*)allocRecord("Attrbufrec",
sizeof(Attrbufrec),
@@ -665,6 +676,11 @@ void Dbtup::initRecords()
fragrecord = (Fragrecord*)allocRecord("Fragrecord",
sizeof(Fragrecord),
cnoOfFragrec);
+
+ for (i = 0; i<cnoOfFragrec; i++) {
+ void * p = &fragrecord[i];
+ new (p) Fragrecord(c_scanOpPool);
+ }
hostBuffer = (HostBuffer*)allocRecord("HostBuffer",
sizeof(HostBuffer),
@@ -702,7 +718,7 @@ void Dbtup::initRecords()
sizeof(Tablerec),
cnoOfTablerec);
- for(unsigned i = 0; i<cnoOfTablerec; i++) {
+ for (i = 0; i<cnoOfTablerec; i++) {
void * p = &tablerec[i];
new (p) Tablerec(c_triggerPool);
}
diff --git a/ndb/src/kernel/blocks/dbtup/DbtupIndex.cpp b/ndb/src/kernel/blocks/dbtup/DbtupIndex.cpp
index 5a8642c4d2e..ab6e0642e11 100644
--- a/ndb/src/kernel/blocks/dbtup/DbtupIndex.cpp
+++ b/ndb/src/kernel/blocks/dbtup/DbtupIndex.cpp
@@ -173,7 +173,7 @@ Dbtup::tuxReadAttrs(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32 tu
}
int
-Dbtup::tuxReadPk(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32* dataOut)
+Dbtup::tuxReadPk(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32* dataOut, bool xfrmFlag)
{
ljamEntry();
// use own variables instead of globals
@@ -200,8 +200,7 @@ Dbtup::tuxReadPk(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32* data
operPtr.i = RNIL;
operPtr.p = NULL;
// do it
- int ret = readAttributes(pagePtr.p, pageOffset, attrIds,
- numAttrs, dataOut, ZNIL, true);
+ int ret = readAttributes(pagePtr.p, pageOffset, attrIds, numAttrs, dataOut, ZNIL, xfrmFlag);
// restore globals
tabptr = tabptr_old;
fragptr = fragptr_old;
@@ -229,6 +228,27 @@ Dbtup::tuxReadPk(Uint32 fragPtrI, Uint32 pageId, Uint32 pageOffset, Uint32* data
return ret;
}
+int
+Dbtup::accReadPk(Uint32 tableId, Uint32 fragId, Uint32 fragPageId, Uint32 pageIndex, Uint32* dataOut, bool xfrmFlag)
+{
+ ljamEntry();
+ // get table
+ TablerecPtr tablePtr;
+ tablePtr.i = tableId;
+ ptrCheckGuard(tablePtr, cnoOfTablerec, tablerec);
+ // get fragment
+ FragrecordPtr fragPtr;
+ getFragmentrec(fragPtr, fragId, tablePtr.p);
+ // get real page id and tuple offset
+ PagePtr pagePtr;
+ Uint32 pageId = getRealpid(fragPtr.p, fragPageId);
+ ndbrequire((pageIndex & 0x1) == 0);
+ Uint32 pageOffset = ZPAGE_HEADER_SIZE + (pageIndex >> 1) * tablePtr.p->tupheadsize;
+ // use TUX routine - optimize later
+ int ret = tuxReadPk(fragPtr.i, pageId, pageOffset, dataOut, xfrmFlag);
+ return ret;
+}
+
bool
Dbtup::tuxQueryTh(Uint32 fragPtrI, Uint32 tupAddr, Uint32 tupVersion, Uint32 transId1, Uint32 transId2, Uint32 savePointId)
{
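
tuxReadPk() now takes an xfrmFlag, and the new accReadPk() forwards it, so callers can read a primary key either as stored or normalized through the collation transform (the form used when md5-summing character keys, per the header comment above). A toy illustration of why the flag matters is below; foldForHash stands in for the real strxfrm step.

// Toy illustration of reading a key either raw or normalized for hashing.
// foldForHash stands in for the collation strxfrm step used in NDB.
#include <cctype>
#include <functional>
#include <iostream>
#include <string>

static std::string foldForHash(const std::string& key) {
  std::string out = key;
  for (auto& c : out) c = (char)std::tolower((unsigned char)c); // pretend collation
  return out;
}

static std::string readPk(const std::string& stored, bool xfrmFlag) {
  return xfrmFlag ? foldForHash(stored) : stored;
}

int main() {
  // Two keys equal under a case-insensitive collation must hash alike,
  // so the hashing path reads with xfrmFlag = true.
  std::string a = readPk("Smith", true), b = readPk("SMITH", true);
  std::cout << (std::hash<std::string>{}(a) == std::hash<std::string>{}(b)) << "\n"; // 1
  // A key value returned to the API keeps its stored form.
  std::cout << readPk("Smith", false) << "\n";
  return 0;
}
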
diff --git a/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp b/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp
index c6e33bdc92b..bacba2a880c 100644
--- a/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp
+++ b/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp
@@ -96,9 +96,10 @@ void Dbtup::execTUPFRAGREQ(Signal* signal)
fragOperPtr.p->fragidFrag = fragId;
fragOperPtr.p->tableidFrag = regTabPtr.i;
fragOperPtr.p->attributeCount = noOfAttributes;
- fragOperPtr.p->freeNullBit = noOfNullAttr;
+ fragOperPtr.p->noOfNullBits = noOfNullAttr;
fragOperPtr.p->noOfNewAttrCount = noOfNewAttr;
fragOperPtr.p->charsetIndex = 0;
+ fragOperPtr.p->currNullBit = 0;
ndbrequire(reqinfo == ZADDFRAG);
@@ -287,8 +288,7 @@ void Dbtup::execTUP_ADD_ATTRREQ(Signal* signal)
ptrCheckGuard(fragOperPtr, cnoOfFragoprec, fragoperrec);
Uint32 attrId = signal->theData[2];
Uint32 attrDescriptor = signal->theData[3];
- // DICT sends extended type (ignored) and charset number
- Uint32 extType = (signal->theData[4] & 0xFF);
+ // DICT sends charset number in upper half
Uint32 csNumber = (signal->theData[4] >> 16);
regTabPtr.i = fragOperPtr.p->tableidFrag;
@@ -309,13 +309,13 @@ void Dbtup::execTUP_ADD_ATTRREQ(Signal* signal)
Uint32 firstTabDesIndex = regTabPtr.p->tabDescriptor + (attrId * ZAD_SIZE);
setTabDescrWord(firstTabDesIndex, attrDescriptor);
Uint32 attrLen = AttributeDescriptor::getSize(attrDescriptor);
- Uint32 nullBitPos = 0; /* Default pos for NOT NULL attributes */
+ Uint32 nullBitPos = fragOperPtr.p->currNullBit;
+ Uint32 bitCount = 0;
+
if (AttributeDescriptor::getNullable(attrDescriptor)) {
if (!AttributeDescriptor::getDynamic(attrDescriptor)) {
- ljam(); /* NULL ATTR */
- fragOperPtr.p->freeNullBit--; /* STORE NULL BIT POSTITION */
- nullBitPos = fragOperPtr.p->freeNullBit;
- ndbrequire(fragOperPtr.p->freeNullBit < ZNIL); /* Check not below zero */
+ ljam(); /* NULL ATTR */
+ fragOperPtr.p->currNullBit++;
}//if
} else {
ljam();
@@ -331,27 +331,40 @@ void Dbtup::execTUP_ADD_ATTRREQ(Signal* signal)
case 2:
{
ljam();
- Uint32 bitsUsed = AttributeDescriptor::getArraySize(attrDescriptor) * (1 << attrLen);
- regTabPtr.p->tupheadsize += ((bitsUsed + 31) >> 5);
- break;
+ if(attrLen != 0)
+ {
+ ljam();
+ Uint32 bitsUsed =
+ AttributeDescriptor::getArraySize(attrDescriptor) * (1 << attrLen);
+ regTabPtr.p->tupheadsize += ((bitsUsed + 31) >> 5);
+ break;
+ }
+ else
+ {
+ ljam();
+ bitCount = AttributeDescriptor::getArraySize(attrDescriptor);
+ fragOperPtr.p->currNullBit += bitCount;
+ break;
+ }
}
default:
ndbrequire(false);
break;
}//switch
+ if(nullBitPos + bitCount + 1 >= MAX_NULL_BITS)
+ {
+ terrorCode = TupAddAttrRef::TooManyBitsUsed;
+ addattrrefuseLab(signal, regFragPtr, fragOperPtr, regTabPtr.p, fragId);
+ return;
+ }
AttributeOffset::setOffset(attrDes2, attributePos);
AttributeOffset::setNullFlagPos(attrDes2, nullBitPos);
} else {
ndbrequire(false);
}//if
if (csNumber != 0) {
- CHARSET_INFO* cs = get_charset(csNumber, MYF(0));
- if (cs == NULL) {
- ljam();
- terrorCode = TupAddAttrRef::InvalidCharset;
- addattrrefuseLab(signal, regFragPtr, fragOperPtr, regTabPtr.p, fragId);
- return;
- }
+ CHARSET_INFO* cs = all_charsets[csNumber];
+ ndbrequire(cs != NULL);
Uint32 i = 0;
while (i < fragOperPtr.p->charsetIndex) {
ljam();
@@ -375,7 +388,9 @@ void Dbtup::execTUP_ADD_ATTRREQ(Signal* signal)
addattrrefuseLab(signal, regFragPtr, fragOperPtr, regTabPtr.p, fragId);
return;
}//if
- if (lastAttr && (fragOperPtr.p->freeNullBit != 0)) {
+ if (lastAttr &&
+ (fragOperPtr.p->currNullBit != fragOperPtr.p->noOfNullBits))
+ {
ljam();
terrorCode = ZINCONSISTENT_NULL_ATTRIBUTE_COUNT;
addattrrefuseLab(signal, regFragPtr, fragOperPtr, regTabPtr.p, fragId);
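
The DbtupMeta changes replace the old decrementing freeNullBit with a forward-running currNullBit, so a nullable column reserves one null-indicator position and a BIT(N) column reserves N further positions, with the total checked against MAX_NULL_BITS. A small standalone allocator that mirrors that bookkeeping is sketched here; the names and the limit are illustrative.

// Standalone mirror of the null/bit position bookkeeping added above:
// each nullable column takes 1 bit, a BIT(N) column also reserves N bits,
// and the total is checked against a fixed maximum.
#include <cstdint>
#include <iostream>
#include <stdexcept>

struct NullBitAllocator {
  uint32_t curr = 0;                              // next free bit position
  static constexpr uint32_t kMaxBits = 4096;      // stand-in for MAX_NULL_BITS

  // Returns the first bit position reserved for this column.
  uint32_t addColumn(bool nullable, uint32_t bitFieldWidth /*0 = not a BIT column*/) {
    uint32_t pos = curr;
    uint32_t need = (nullable ? 1 : 0) + bitFieldWidth;
    if (pos + need >= kMaxBits)
      throw std::runtime_error("too many null/bit positions");
    curr += need;
    return pos;
  }
};

int main() {
  NullBitAllocator alloc;
  std::cout << alloc.addColumn(true, 0)  << "\n"; // nullable INT      -> 0
  std::cout << alloc.addColumn(true, 24) << "\n"; // nullable BIT(24)  -> 1
  std::cout << alloc.addColumn(false, 8) << "\n"; // NOT NULL BIT(8)   -> 26
  return 0;
}
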
diff --git a/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp b/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp
index cbb165c3eb1..acdad3f9f1a 100644
--- a/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp
+++ b/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp
@@ -40,7 +40,11 @@ Dbtup::setUpQueryRoutines(Tablerec* const regTabPtr)
if ((AttributeDescriptor::getArrayType(attrDescriptor) == ZNON_ARRAY) ||
(AttributeDescriptor::getArrayType(attrDescriptor) == ZFIXED_ARRAY)) {
if (!AttributeDescriptor::getNullable(attrDescriptor)) {
- if (AttributeDescriptor::getSizeInWords(attrDescriptor) == 1) {
+ if (AttributeDescriptor::getSize(attrDescriptor) == 0){
+ ljam();
+ regTabPtr->readFunctionArray[i] = &Dbtup::readBitsNotNULL;
+ regTabPtr->updateFunctionArray[i] = &Dbtup::updateBitsNotNULL;
+ } else if (AttributeDescriptor::getSizeInWords(attrDescriptor) == 1){
ljam();
regTabPtr->readFunctionArray[i] = &Dbtup::readFixedSizeTHOneWordNotNULL;
regTabPtr->updateFunctionArray[i] = &Dbtup::updateFixedSizeTHOneWordNotNULL;
@@ -55,13 +59,18 @@ Dbtup::setUpQueryRoutines(Tablerec* const regTabPtr)
} else {
ndbrequire(false);
}//if
- // replace read function of char attribute
+ // replace functions for char attribute
if (AttributeOffset::getCharsetFlag(attrOffset)) {
ljam();
- regTabPtr->readFunctionArray[i] = &Dbtup::readCharNotNULL;
+ regTabPtr->readFunctionArray[i] = &Dbtup::readFixedSizeTHManyWordNotNULL;
+ regTabPtr->updateFunctionArray[i] = &Dbtup::updateFixedSizeTHManyWordNotNULL;
}
} else {
- if (AttributeDescriptor::getSizeInWords(attrDescriptor) == 1) {
+ if (AttributeDescriptor::getSize(attrDescriptor) == 0){
+ ljam();
+ regTabPtr->readFunctionArray[i] = &Dbtup::readBitsNULLable;
+ regTabPtr->updateFunctionArray[i] = &Dbtup::updateBitsNULLable;
+ } else if (AttributeDescriptor::getSizeInWords(attrDescriptor) == 1){
ljam();
regTabPtr->readFunctionArray[i] = &Dbtup::readFixedSizeTHOneWordNULLable;
regTabPtr->updateFunctionArray[i] = &Dbtup::updateFixedSizeTHManyWordNULLable;
@@ -78,10 +87,11 @@ Dbtup::setUpQueryRoutines(Tablerec* const regTabPtr)
regTabPtr->readFunctionArray[i] = &Dbtup::readFixedSizeTHZeroWordNULLable;
regTabPtr->updateFunctionArray[i] = &Dbtup::updateFixedSizeTHManyWordNULLable;
}//if
- // replace read function of char attribute
+ // replace functions for char attribute
if (AttributeOffset::getCharsetFlag(attrOffset)) {
ljam();
- regTabPtr->readFunctionArray[i] = &Dbtup::readCharNULLable;
+ regTabPtr->readFunctionArray[i] = &Dbtup::readFixedSizeTHManyWordNULLable;
+ regTabPtr->updateFunctionArray[i] = &Dbtup::updateFixedSizeTHManyWordNULLable;
}
}//if
} else if (AttributeDescriptor::getArrayType(attrDescriptor) == ZVAR_ARRAY) {
@@ -329,25 +339,68 @@ Dbtup::readFixedSizeTHManyWordNotNULL(Uint32* outBuffer,
Uint32 attrDes2)
{
Uint32 indexBuf = tOutBufIndex;
+ Uint32 charsetFlag = AttributeOffset::getCharsetFlag(attrDes2);
Uint32 readOffset = AttributeOffset::getOffset(attrDes2);
Uint32 attrNoOfWords = AttributeDescriptor::getSizeInWords(attrDescriptor);
- Uint32 newIndexBuf = indexBuf + attrNoOfWords;
Uint32 maxRead = tMaxRead;
ndbrequire((readOffset + attrNoOfWords - 1) < tCheckOffset);
- if (newIndexBuf <= maxRead) {
- ljam();
- ahOut->setDataSize(attrNoOfWords);
- MEMCOPY_NO_WORDS(&outBuffer[indexBuf],
- &tTupleHeader[readOffset],
- attrNoOfWords);
- tOutBufIndex = newIndexBuf;
- return true;
+ if (! charsetFlag || ! tXfrmFlag) {
+ Uint32 newIndexBuf = indexBuf + attrNoOfWords;
+ if (newIndexBuf <= maxRead) {
+ ljam();
+ ahOut->setDataSize(attrNoOfWords);
+ MEMCOPY_NO_WORDS(&outBuffer[indexBuf],
+ &tTupleHeader[readOffset],
+ attrNoOfWords);
+ tOutBufIndex = newIndexBuf;
+ return true;
+ } else {
+ ljam();
+ terrorCode = ZTRY_TO_READ_TOO_MUCH_ERROR;
+ }//if
} else {
ljam();
- terrorCode = ZTRY_TO_READ_TOO_MUCH_ERROR;
- return false;
- }//if
+ Tablerec* regTabPtr = tabptr.p;
+ Uint32 srcBytes = AttributeDescriptor::getSizeInBytes(attrDescriptor);
+ uchar* dstPtr = (uchar*)&outBuffer[indexBuf];
+ const uchar* srcPtr = (uchar*)&tTupleHeader[readOffset];
+ Uint32 i = AttributeOffset::getCharsetPos(attrDes2);
+ ndbrequire(i < regTabPtr->noOfCharsets);
+ CHARSET_INFO* cs = regTabPtr->charsetArray[i];
+ Uint32 typeId = AttributeDescriptor::getType(attrDescriptor);
+ Uint32 lb, len;
+ bool ok = NdbSqlUtil::get_var_length(typeId, srcPtr, srcBytes, lb, len);
+ if (ok) {
+ Uint32 xmul = cs->strxfrm_multiply;
+ if (xmul == 0)
+ xmul = 1;
+ // see comment in DbtcMain.cpp
+ Uint32 dstLen = xmul * (srcBytes - lb);
+ Uint32 maxIndexBuf = indexBuf + (dstLen >> 2);
+ if (maxIndexBuf <= maxRead) {
+ ljam();
+ int n = NdbSqlUtil::strnxfrm_bug7284(cs, dstPtr, dstLen, srcPtr + lb, len);
+ ndbrequire(n != -1);
+ while ((n & 3) != 0) {
+ dstPtr[n++] = 0;
+ }
+ Uint32 dstWords = (n >> 2);
+ ahOut->setDataSize(dstWords);
+ Uint32 newIndexBuf = indexBuf + dstWords;
+ ndbrequire(newIndexBuf <= maxRead);
+ tOutBufIndex = newIndexBuf;
+ return true;
+ } else {
+ ljam();
+ terrorCode = ZTRY_TO_READ_TOO_MUCH_ERROR;
+ }
+ } else {
+ ljam();
+ terrorCode = ZTUPLE_CORRUPTED_ERROR;
+ }
+ }
+ return false;
}//Dbtup::readFixedSizeTHManyWordNotNULL()
bool
@@ -394,7 +447,6 @@ Dbtup::readFixedSizeTHManyWordNULLable(Uint32* outBuffer,
Uint32 attrDescriptor,
Uint32 attrDes2)
{
-ljam();
if (!nullFlagCheck(attrDes2)) {
ljam();
return readFixedSizeTHManyWordNotNULL(outBuffer,
@@ -555,74 +607,6 @@ Dbtup::readDynSmallVarSize(Uint32* outBuffer,
return false;
}//Dbtup::readDynSmallVarSize()
-
-bool
-Dbtup::readCharNotNULL(Uint32* outBuffer,
- AttributeHeader* ahOut,
- Uint32 attrDescriptor,
- Uint32 attrDes2)
-{
- Uint32 indexBuf = tOutBufIndex;
- Uint32 readOffset = AttributeOffset::getOffset(attrDes2);
- Uint32 attrNoOfWords = AttributeDescriptor::getSizeInWords(attrDescriptor);
- Uint32 newIndexBuf = indexBuf + attrNoOfWords;
- Uint32 maxRead = tMaxRead;
-
- ndbrequire((readOffset + attrNoOfWords - 1) < tCheckOffset);
- if (newIndexBuf <= maxRead) {
- ljam();
- ahOut->setDataSize(attrNoOfWords);
- if (! tXfrmFlag) {
- MEMCOPY_NO_WORDS(&outBuffer[indexBuf],
- &tTupleHeader[readOffset],
- attrNoOfWords);
- } else {
- ljam();
- Tablerec* regTabPtr = tabptr.p;
- Uint32 i = AttributeOffset::getCharsetPos(attrDes2);
- ndbrequire(i < tabptr.p->noOfCharsets);
- // not const in MySQL
- CHARSET_INFO* cs = tabptr.p->charsetArray[i];
- // XXX should strip Uint32 null padding
- const unsigned nBytes = attrNoOfWords << 2;
- unsigned n =
- (*cs->coll->strnxfrm)(cs,
- (uchar*)&outBuffer[indexBuf],
- nBytes,
- (const uchar*)&tTupleHeader[readOffset],
- nBytes);
- // pad with ascii spaces
- while (n < nBytes)
- ((uchar*)&outBuffer[indexBuf])[n++] = 0x20;
- }
- tOutBufIndex = newIndexBuf;
- return true;
- } else {
- ljam();
- terrorCode = ZTRY_TO_READ_TOO_MUCH_ERROR;
- return false;
- }
-}
-
-bool
-Dbtup::readCharNULLable(Uint32* outBuffer,
- AttributeHeader* ahOut,
- Uint32 attrDescriptor,
- Uint32 attrDes2)
-{
- if (!nullFlagCheck(attrDes2)) {
- ljam();
- return readCharNotNULL(outBuffer,
- ahOut,
- attrDescriptor,
- attrDes2);
- } else {
- ljam();
- ahOut->setNULL();
- return true;
- }
-}
-
/* ---------------------------------------------------------------------- */
/* THIS ROUTINE IS USED TO UPDATE A NUMBER OF ATTRIBUTES. IT IS */
/* USED BY THE INSERT ROUTINE, THE UPDATE ROUTINE AND IT CAN BE */
@@ -810,6 +794,7 @@ Dbtup::updateFixedSizeTHManyWordNotNULL(Uint32* inBuffer,
Uint32 indexBuf = tInBufIndex;
Uint32 inBufLen = tInBufLen;
Uint32 updateOffset = AttributeOffset::getOffset(attrDes2);
+ Uint32 charsetFlag = AttributeOffset::getCharsetFlag(attrDes2);
AttributeHeader ahIn(inBuffer[indexBuf]);
Uint32 nullIndicator = ahIn.isNULL();
Uint32 noOfWords = AttributeDescriptor::getSizeInWords(attrDescriptor);
@@ -819,6 +804,31 @@ Dbtup::updateFixedSizeTHManyWordNotNULL(Uint32* inBuffer,
if (newIndex <= inBufLen) {
if (!nullIndicator) {
ljam();
+ if (charsetFlag) {
+ ljam();
+ Tablerec* regTabPtr = tabptr.p;
+ Uint32 typeId = AttributeDescriptor::getType(attrDescriptor);
+ Uint32 bytes = AttributeDescriptor::getSizeInBytes(attrDescriptor);
+ Uint32 i = AttributeOffset::getCharsetPos(attrDes2);
+ ndbrequire(i < regTabPtr->noOfCharsets);
+ // not const in MySQL
+ CHARSET_INFO* cs = regTabPtr->charsetArray[i];
+ int not_used;
+ const char* ssrc = (const char*)&inBuffer[tInBufIndex + 1];
+ Uint32 lb, len;
+ if (! NdbSqlUtil::get_var_length(typeId, ssrc, bytes, lb, len)) {
+ ljam();
+ terrorCode = ZINVALID_CHAR_FORMAT;
+ return false;
+ }
+ // fast fix bug#7340
+ if (typeId != NDB_TYPE_TEXT &&
+ (*cs->cset->well_formed_len)(cs, ssrc + lb, ssrc + lb + len, ZNIL, &not_used) != len) {
+ ljam();
+ terrorCode = ZINVALID_CHAR_FORMAT;
+ return false;
+ }
+ }
tInBufIndex = newIndex;
MEMCOPY_NO_WORDS(&tTupleHeader[updateOffset],
&inBuffer[indexBuf + 1],
@@ -990,18 +1000,198 @@ Dbtup::read_psuedo(Uint32 attrId, Uint32* outBuffer){
Signal * signal = (Signal*)&tmp;
switch(attrId){
case AttributeHeader::FRAGMENT:
- * outBuffer = operPtr.p->fragId;
+ * outBuffer = operPtr.p->fragId >> 1; // remove "hash" bit
+ return 1;
+ case AttributeHeader::FRAGMENT_MEMORY:
+ {
+ Uint64 tmp = 0;
+ tmp += fragptr.p->noOfPages;
+ {
+ /**
+ * Each fragment is split into 2...get #pages from other as well
+ */
+ Uint32 twin = fragptr.p->fragmentId ^ 1;
+ FragrecordPtr twinPtr;
+ getFragmentrec(twinPtr, twin, tabptr.p);
+ ndbrequire(twinPtr.p != 0);
+ tmp += twinPtr.p->noOfPages;
+ }
+ tmp *= 32768;
+ memcpy(outBuffer,&tmp,8);
+ }
+ return 2;
+ case AttributeHeader::ROW_SIZE:
+ * outBuffer = tabptr.p->tupheadsize << 2;
return 1;
case AttributeHeader::ROW_COUNT:
case AttributeHeader::COMMIT_COUNT:
signal->theData[0] = operPtr.p->userpointer;
signal->theData[1] = attrId;
-
+
EXECUTE_DIRECT(DBLQH, GSN_READ_PSUEDO_REQ, signal, 2);
outBuffer[0] = signal->theData[0];
outBuffer[1] = signal->theData[1];
return 2;
+ case AttributeHeader::RANGE_NO:
+ signal->theData[0] = operPtr.p->userpointer;
+ signal->theData[1] = attrId;
+
+ EXECUTE_DIRECT(DBLQH, GSN_READ_PSUEDO_REQ, signal, 2);
+ outBuffer[0] = signal->theData[0];
+ return 1;
default:
return 0;
}
}
+
+bool
+Dbtup::readBitsNotNULL(Uint32* outBuffer,
+ AttributeHeader* ahOut,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2)
+{
+ Tablerec* const regTabPtr = tabptr.p;
+ Uint32 pos = AttributeOffset::getNullFlagPos(attrDes2);
+ Uint32 bitCount = AttributeDescriptor::getArraySize(attrDescriptor);
+ Uint32 indexBuf = tOutBufIndex;
+ Uint32 newIndexBuf = indexBuf + ((bitCount + 31) >> 5);
+ Uint32 maxRead = tMaxRead;
+
+ if (newIndexBuf <= maxRead) {
+ ljam();
+ ahOut->setDataSize((bitCount + 31) >> 5);
+ tOutBufIndex = newIndexBuf;
+
+ BitmaskImpl::getField(regTabPtr->tupNullWords,
+ tTupleHeader+regTabPtr->tupNullIndex,
+ pos,
+ bitCount,
+ outBuffer+indexBuf);
+
+ return true;
+ } else {
+ ljam();
+ terrorCode = ZTRY_TO_READ_TOO_MUCH_ERROR;
+ return false;
+ }//if
+}
+
+bool
+Dbtup::readBitsNULLable(Uint32* outBuffer,
+ AttributeHeader* ahOut,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2)
+{
+ Tablerec* const regTabPtr = tabptr.p;
+ Uint32 pos = AttributeOffset::getNullFlagPos(attrDes2);
+ Uint32 bitCount = AttributeDescriptor::getArraySize(attrDescriptor);
+
+ Uint32 indexBuf = tOutBufIndex;
+ Uint32 newIndexBuf = indexBuf + ((bitCount + 31) >> 5);
+ Uint32 maxRead = tMaxRead;
+
+ if(BitmaskImpl::get(regTabPtr->tupNullWords,
+ tTupleHeader+regTabPtr->tupNullIndex,
+ pos))
+ {
+ ljam();
+ ahOut->setNULL();
+ return true;
+ }
+
+
+ if (newIndexBuf <= maxRead) {
+ ljam();
+ ahOut->setDataSize((bitCount + 31) >> 5);
+ tOutBufIndex = newIndexBuf;
+ BitmaskImpl::getField(regTabPtr->tupNullWords,
+ tTupleHeader+regTabPtr->tupNullIndex,
+ pos+1,
+ bitCount,
+ outBuffer+indexBuf);
+ return true;
+ } else {
+ ljam();
+ terrorCode = ZTRY_TO_READ_TOO_MUCH_ERROR;
+ return false;
+ }//if
+}
+
+bool
+Dbtup::updateBitsNotNULL(Uint32* inBuffer,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2)
+{
+ Tablerec* const regTabPtr = tabptr.p;
+ Uint32 indexBuf = tInBufIndex;
+ Uint32 inBufLen = tInBufLen;
+ AttributeHeader ahIn(inBuffer[indexBuf]);
+ Uint32 nullIndicator = ahIn.isNULL();
+ Uint32 pos = AttributeOffset::getNullFlagPos(attrDes2);
+ Uint32 bitCount = AttributeDescriptor::getArraySize(attrDescriptor);
+ Uint32 newIndex = indexBuf + 1 + ((bitCount + 31) >> 5);
+
+ if (newIndex <= inBufLen) {
+ if (!nullIndicator) {
+ BitmaskImpl::setField(regTabPtr->tupNullWords,
+ tTupleHeader+regTabPtr->tupNullIndex,
+ pos,
+ bitCount,
+ inBuffer+indexBuf+1);
+ tInBufIndex = newIndex;
+ return true;
+ } else {
+ ljam();
+ terrorCode = ZNOT_NULL_ATTR;
+ return false;
+ }//if
+ } else {
+ ljam();
+ terrorCode = ZAI_INCONSISTENCY_ERROR;
+ return false;
+ }//if
+ return true;
+}
+
+bool
+Dbtup::updateBitsNULLable(Uint32* inBuffer,
+ Uint32 attrDescriptor,
+ Uint32 attrDes2)
+{
+ Tablerec* const regTabPtr = tabptr.p;
+ AttributeHeader ahIn(inBuffer[tInBufIndex]);
+ Uint32 indexBuf = tInBufIndex;
+ Uint32 nullIndicator = ahIn.isNULL();
+ Uint32 pos = AttributeOffset::getNullFlagPos(attrDes2);
+ Uint32 bitCount = AttributeDescriptor::getArraySize(attrDescriptor);
+
+ if (!nullIndicator) {
+ BitmaskImpl::clear(regTabPtr->tupNullWords,
+ tTupleHeader+regTabPtr->tupNullIndex,
+ pos);
+ BitmaskImpl::setField(regTabPtr->tupNullWords,
+ tTupleHeader+regTabPtr->tupNullIndex,
+ pos+1,
+ bitCount,
+ inBuffer+indexBuf+1);
+
+ Uint32 newIndex = indexBuf + 1 + ((bitCount + 31) >> 5);
+ tInBufIndex = newIndex;
+ return true;
+ } else {
+ Uint32 newIndex = tInBufIndex + 1;
+ if (newIndex <= tInBufLen) {
+ ljam();
+ BitmaskImpl::set(regTabPtr->tupNullWords,
+ tTupleHeader+regTabPtr->tupNullIndex,
+ pos);
+
+ tInBufIndex = newIndex;
+ return true;
+ } else {
+ ljam();
+ terrorCode = ZAI_INCONSISTENCY_ERROR;
+ return false;
+ }//if
+ }//if
+}
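
The new readBits*/updateBits* routines round the value up to whole 32-bit words in the attribute stream, but inside the tuple they pack it at an arbitrary bit position in the null-word area via BitmaskImpl::getField/setField. A self-contained sketch of that packed get/set follows, assuming an LSB-first layout within each word.

// Self-contained sketch of storing a bit field at an arbitrary bit offset
// inside an array of 32-bit words, in the spirit of the getField/setField
// calls above. The LSB-first-within-word layout is an assumption.
#include <cstdint>
#include <iostream>

static void setBits(uint32_t* words, uint32_t pos, uint32_t count,
                    const uint32_t* src) {
  for (uint32_t i = 0; i < count; i++) {
    uint32_t bit = (src[i >> 5] >> (i & 31)) & 1;
    uint32_t p = pos + i;
    if (bit) words[p >> 5] |=  (1u << (p & 31));
    else     words[p >> 5] &= ~(1u << (p & 31));
  }
}

static void getBits(const uint32_t* words, uint32_t pos, uint32_t count,
                    uint32_t* dst) {
  for (uint32_t i = 0; i < count; i++) {
    uint32_t p = pos + i;
    uint32_t bit = (words[p >> 5] >> (p & 31)) & 1;
    if (bit) dst[i >> 5] |=  (1u << (i & 31));
    else     dst[i >> 5] &= ~(1u << (i & 31));
  }
}

int main() {
  uint32_t tupleNullArea[4] = {0, 0, 0, 0};   // per-tuple null/bit word area
  uint32_t value[1] = {0x2AB};                // a BIT(10) value
  setBits(tupleNullArea, 7, 10, value);       // store at bit position 7
  uint32_t out[1] = {0};
  getBits(tupleNullArea, 7, 10, out);
  std::cout << std::hex << out[0] << "\n";    // 2ab
  return 0;
}
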
diff --git a/ndb/src/kernel/blocks/dbtup/DbtupScan.cpp b/ndb/src/kernel/blocks/dbtup/DbtupScan.cpp
new file mode 100644
index 00000000000..396404faa8c
--- /dev/null
+++ b/ndb/src/kernel/blocks/dbtup/DbtupScan.cpp
@@ -0,0 +1,315 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#define DBTUP_C
+#include "Dbtup.hpp"
+#include <signaldata/AccScan.hpp>
+#include <signaldata/NextScan.hpp>
+
+#undef jam
+#undef jamEntry
+#define jam() { jamLine(32000 + __LINE__); }
+#define jamEntry() { jamEntryLine(32000 + __LINE__); }
+
+void
+Dbtup::execACC_SCANREQ(Signal* signal)
+{
+ jamEntry();
+ const AccScanReq reqCopy = *(const AccScanReq*)signal->getDataPtr();
+ const AccScanReq* const req = &reqCopy;
+ ScanOpPtr scanPtr;
+ scanPtr.i = RNIL;
+ do {
+ // find table and fragments
+ TablerecPtr tablePtr;
+ tablePtr.i = req->tableId;
+ ptrCheckGuard(tablePtr, cnoOfTablerec, tablerec);
+ FragrecordPtr fragPtr[2];
+ Uint32 fragId = req->fragmentNo << 1;
+ fragPtr[0].i = fragPtr[1].i = RNIL;
+ getFragmentrec(fragPtr[0], fragId | 0, tablePtr.p);
+ getFragmentrec(fragPtr[1], fragId | 1, tablePtr.p);
+ ndbrequire(fragPtr[0].i != RNIL && fragPtr[1].i != RNIL);
+ Fragrecord& frag = *fragPtr[0].p;
+ // seize from pool and link to per-fragment list
+ if (! frag.m_scanList.seize(scanPtr)) {
+ jam();
+ break;
+ }
+ new (scanPtr.p) ScanOp();
+ ScanOp& scan = *scanPtr.p;
+ scan.m_state = ScanOp::First;
+ scan.m_userPtr = req->senderData;
+ scan.m_userRef = req->senderRef;
+ scan.m_tableId = tablePtr.i;
+ scan.m_fragId = frag.fragmentId;
+ scan.m_fragPtrI[0] = fragPtr[0].i;
+ scan.m_fragPtrI[1] = fragPtr[1].i;
+ scan.m_transId1 = req->transId1;
+ scan.m_transId2 = req->transId2;
+ // conf
+ AccScanConf* const conf = (AccScanConf*)signal->getDataPtrSend();
+ conf->scanPtr = req->senderData;
+ conf->accPtr = scanPtr.i;
+ conf->flag = AccScanConf::ZNOT_EMPTY_FRAGMENT;
+ sendSignal(req->senderRef, GSN_ACC_SCANCONF, signal,
+ AccScanConf::SignalLength, JBB);
+ return;
+ } while (0);
+ if (scanPtr.i != RNIL) {
+ jam();
+ releaseScanOp(scanPtr);
+ }
+ // LQH does not handle REF
+ signal->theData[0] = 0x313;
+ sendSignal(req->senderRef, GSN_ACC_SCANREF, signal, 1, JBB);
+}
+
+void
+Dbtup::execNEXT_SCANREQ(Signal* signal)
+{
+ jamEntry();
+ const NextScanReq reqCopy = *(const NextScanReq*)signal->getDataPtr();
+ const NextScanReq* const req = &reqCopy;
+ ScanOpPtr scanPtr;
+ c_scanOpPool.getPtr(scanPtr, req->accPtr);
+ ScanOp& scan = *scanPtr.p;
+ FragrecordPtr fragPtr;
+ fragPtr.i = scan.m_fragPtrI[0];
+ ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord);
+ Fragrecord& frag = *fragPtr.p;
+ switch (req->scanFlag) {
+ case NextScanReq::ZSCAN_NEXT:
+ jam();
+ break;
+ case NextScanReq::ZSCAN_NEXT_COMMIT:
+ jam();
+ break;
+ case NextScanReq::ZSCAN_COMMIT:
+ jam();
+ {
+ NextScanConf* const conf = (NextScanConf*)signal->getDataPtrSend();
+ conf->scanPtr = scan.m_userPtr;
+ unsigned signalLength = 1;
+ sendSignal(scanPtr.p->m_userRef, GSN_NEXT_SCANCONF,
+ signal, signalLength, JBB);
+ return;
+ }
+ break;
+ case NextScanReq::ZSCAN_CLOSE:
+ jam();
+ scanClose(signal, scanPtr);
+ return;
+ case NextScanReq::ZSCAN_NEXT_ABORT:
+ jam();
+ default:
+ jam();
+ ndbrequire(false);
+ break;
+ }
+ // start looking for next scan result
+ AccCheckScan* checkReq = (AccCheckScan*)signal->getDataPtrSend();
+ checkReq->accPtr = scanPtr.i;
+ checkReq->checkLcpStop = AccCheckScan::ZNOT_CHECK_LCP_STOP;
+ EXECUTE_DIRECT(DBTUP, GSN_ACC_CHECK_SCAN, signal, AccCheckScan::SignalLength);
+ jamEntry();
+}
+
+void
+Dbtup::execACC_CHECK_SCAN(Signal* signal)
+{
+ jamEntry();
+ const AccCheckScan reqCopy = *(const AccCheckScan*)signal->getDataPtr();
+ const AccCheckScan* const req = &reqCopy;
+ ScanOpPtr scanPtr;
+ c_scanOpPool.getPtr(scanPtr, req->accPtr);
+ ScanOp& scan = *scanPtr.p;
+ FragrecordPtr fragPtr;
+ fragPtr.i = scan.m_fragPtrI[0];
+ ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord);
+ Fragrecord& frag = *fragPtr.p;
+ if (req->checkLcpStop == AccCheckScan::ZCHECK_LCP_STOP) {
+ jam();
+ signal->theData[0] = scan.m_userPtr;
+ signal->theData[1] = true;
+ EXECUTE_DIRECT(DBLQH, GSN_CHECK_LCP_STOP, signal, 2);
+ jamEntry();
+ return;
+ }
+ if (scan.m_state == ScanOp::First) {
+ jam();
+ scanFirst(signal, scanPtr);
+ }
+ if (scan.m_state == ScanOp::Next) {
+ jam();
+ scanNext(signal, scanPtr);
+ }
+ if (scan.m_state == ScanOp::Locked) {
+ jam();
+ const PagePos& pos = scan.m_scanPos;
+ NextScanConf* const conf = (NextScanConf*)signal->getDataPtrSend();
+ conf->scanPtr = scan.m_userPtr;
+ conf->accOperationPtr = (Uint32)-1; // no lock returned
+ conf->fragId = frag.fragmentId | pos.m_fragBit;
+ conf->localKey[0] = (pos.m_pageId << MAX_TUPLES_BITS) |
+ (pos.m_tupleNo << 1);
+ conf->localKey[1] = 0;
+ conf->localKeyLength = 1;
+ unsigned signalLength = 6;
+ Uint32 blockNo = refToBlock(scan.m_userRef);
+ EXECUTE_DIRECT(blockNo, GSN_NEXT_SCANCONF, signal, signalLength);
+ jamEntry();
+ // next time look for next entry
+ scan.m_state = ScanOp::Next;
+ return;
+ }
+ if (scan.m_state == ScanOp::Last ||
+ scan.m_state == ScanOp::Invalid) {
+ jam();
+ NextScanConf* const conf = (NextScanConf*)signal->getDataPtrSend();
+ conf->scanPtr = scan.m_userPtr;
+ conf->accOperationPtr = RNIL;
+ conf->fragId = RNIL;
+ unsigned signalLength = 3;
+ sendSignal(scanPtr.p->m_userRef, GSN_NEXT_SCANCONF,
+ signal, signalLength, JBB);
+ return;
+ }
+ ndbrequire(false);
+}
+
+void
+Dbtup::scanFirst(Signal* signal, ScanOpPtr scanPtr)
+{
+ ScanOp& scan = *scanPtr.p;
+ // set to first fragment, first page, first tuple
+ PagePos& pos = scan.m_scanPos;
+ pos.m_fragId = scan.m_fragId;
+ pos.m_fragBit = 0;
+ pos.m_pageId = 0;
+ pos.m_tupleNo = 0;
+ // just before
+ pos.m_match = false;
+ // let scanNext() do the work
+ scan.m_state = ScanOp::Next;
+}
+
+// TODO optimize this + index build
+void
+Dbtup::scanNext(Signal* signal, ScanOpPtr scanPtr)
+{
+ ScanOp& scan = *scanPtr.p;
+ PagePos& pos = scan.m_scanPos;
+ TablerecPtr tablePtr;
+ tablePtr.i = scan.m_tableId;
+ ptrCheckGuard(tablePtr, cnoOfTablerec, tablerec);
+ while (true) {
+ // TODO time-slice here after X loops
+ jam();
+ // get fragment
+ if (pos.m_fragBit == 2) {
+ jam();
+ scan.m_state = ScanOp::Last;
+ break;
+ }
+ ndbrequire(pos.m_fragBit <= 1);
+ FragrecordPtr fragPtr;
+ fragPtr.i = scan.m_fragPtrI[pos.m_fragBit];
+ ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord);
+ Fragrecord& frag = *fragPtr.p;
+ // get page
+ PagePtr pagePtr;
+ if (pos.m_pageId >= frag.noOfPages) {
+ jam();
+ pos.m_fragBit++;
+ pos.m_pageId = 0;
+ pos.m_tupleNo = 0;
+ pos.m_match = false;
+ continue;
+ }
+ Uint32 realPageId = getRealpid(fragPtr.p, pos.m_pageId);
+ pagePtr.i = realPageId;
+ ptrCheckGuard(pagePtr, cnoOfPage, page);
+ const Uint32 pageState = pagePtr.p->pageWord[ZPAGE_STATE_POS];
+ if (pageState != ZTH_MM_FREE &&
+ pageState != ZTH_MM_FULL) {
+ jam();
+ pos.m_pageId++;
+ pos.m_tupleNo = 0;
+ pos.m_match = false;
+ continue;
+ }
+ // get next tuple
+ if (pos.m_match)
+ pos.m_tupleNo++;
+ pos.m_match = true;
+ const Uint32 tupheadsize = tablePtr.p->tupheadsize;
+ Uint32 pageOffset = ZPAGE_HEADER_SIZE + pos.m_tupleNo * tupheadsize;
+ if (pageOffset + tupheadsize > ZWORDS_ON_PAGE) {
+ jam();
+ pos.m_pageId++;
+ pos.m_tupleNo = 0;
+ pos.m_match = false;
+ continue;
+ }
+ // skip over free tuple
+ bool isFree = false;
+ if (pageState == ZTH_MM_FREE) {
+ jam();
+ if ((pagePtr.p->pageWord[pageOffset] >> 16) == tupheadsize) {
+ Uint32 nextTuple = pagePtr.p->pageWord[ZFREELIST_HEADER_POS] >> 16;
+ while (nextTuple != 0) {
+ jam();
+ if (nextTuple == pageOffset) {
+ jam();
+ isFree = true;
+ break;
+ }
+ nextTuple = pagePtr.p->pageWord[nextTuple] & 0xffff;
+ }
+ }
+ }
+ if (isFree) {
+ jam();
+ continue;
+ }
+ // TODO check for operation and return latest in own tx
+ scan.m_state = ScanOp::Locked;
+ break;
+ }
+}
+
+void
+Dbtup::scanClose(Signal* signal, ScanOpPtr scanPtr)
+{
+ NextScanConf* const conf = (NextScanConf*)signal->getDataPtrSend();
+ conf->scanPtr = scanPtr.p->m_userPtr;
+ conf->accOperationPtr = RNIL;
+ conf->fragId = RNIL;
+ unsigned signalLength = 3;
+ sendSignal(scanPtr.p->m_userRef, GSN_NEXT_SCANCONF,
+ signal, signalLength, JBB);
+ releaseScanOp(scanPtr);
+}
+
+void
+Dbtup::releaseScanOp(ScanOpPtr& scanPtr)
+{
+ FragrecordPtr fragPtr;
+ fragPtr.i = scanPtr.p->m_fragPtrI[0];
+ ptrCheckGuard(fragPtr, cnoOfFragrec, fragrecord);
+ fragPtr.p->m_scanList.release(scanPtr);
+}
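
DbtupScan.cpp drives a cursor through First -> Next -> Locked -> Last states over (fragment half, page, tuple slot) positions, skipping free slots and pages that do not hold in-memory tuples. The compact standalone cursor below follows the same shape; the two-halves-per-fragment split is kept, but the page model is invented for illustration.

// Compact standalone model of the scan cursor added in DbtupScan.cpp:
// iterate (fragment half, page, tuple slot), skipping free slots.
#include <cstdint>
#include <iostream>
#include <vector>

struct Page { std::vector<bool> used; };              // one flag per tuple slot
struct FragHalf { std::vector<Page> pages; };

struct ScanPos { uint32_t half = 0, page = 0, slot = 0; bool match = false; };

// Advance to the next used tuple; returns false when the scan is done ("Last").
static bool scanNext(const FragHalf frag[2], ScanPos& pos) {
  while (true) {
    if (pos.half == 2) return false;                  // past both halves
    const FragHalf& f = frag[pos.half];
    if (pos.page >= f.pages.size()) {                 // move to next half
      pos.half++; pos.page = 0; pos.slot = 0; pos.match = false; continue;
    }
    const Page& p = f.pages[pos.page];
    if (pos.match) pos.slot++;                        // step past returned tuple
    pos.match = true;
    if (pos.slot >= p.used.size()) {                  // move to next page
      pos.page++; pos.slot = 0; pos.match = false; continue;
    }
    if (!p.used[pos.slot]) continue;                  // skip free slot
    return true;                                      // "Locked": position to return
  }
}

int main() {
  FragHalf frag[2];
  frag[0].pages = { Page{{true, false, true}} };
  frag[1].pages = { Page{{false, true}} };
  ScanPos pos;
  while (scanNext(frag, pos))
    std::cout << "half " << pos.half << " page " << pos.page
              << " slot " << pos.slot << "\n";
  return 0;
}
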
diff --git a/ndb/src/kernel/blocks/dbtup/DbtupSystemRestart.cpp b/ndb/src/kernel/blocks/dbtup/DbtupSystemRestart.cpp
index ed835dc057a..33d63e8ce49 100644
--- a/ndb/src/kernel/blocks/dbtup/DbtupSystemRestart.cpp
+++ b/ndb/src/kernel/blocks/dbtup/DbtupSystemRestart.cpp
@@ -418,7 +418,7 @@ void Dbtup::xlcRestartCompletedLab(Signal* signal)
{
cnoOfLocalLogInfo = 0;
- signal->theData[0] = EventReport::UNDORecordsExecuted;
+ signal->theData[0] = NDB_LE_UNDORecordsExecuted;
signal->theData[1] = DBTUP; // From block
signal->theData[2] = 0; // Total records executed
for (int i = 0; i < 10; i++) {
diff --git a/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp b/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp
index 59a31475617..6652464dc0f 100644
--- a/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp
+++ b/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp
@@ -607,7 +607,7 @@ void Dbtup::executeTrigger(Signal* signal,
for everybody else.
*/
signal->theData[0] = trigPtr->triggerId;
- signal->theData[1] = regOperPtr->fragId;
+ signal->theData[1] = regOperPtr->fragId >> 1; // send "real" frag id
EXECUTE_DIRECT(BACKUP, GSN_BACKUP_TRIG_REQ, signal, 2);
ljamEntry();
if (signal->theData[0] == 0) {
@@ -622,8 +622,7 @@ void Dbtup::executeTrigger(Signal* signal,
mainBuffer,
noMainWords,
copyBuffer,
- noCopyWords,
- (ref == BACKUP ? false : true))) {
+ noCopyWords)) {
ljam();
return;
}//if
@@ -728,8 +727,7 @@ bool Dbtup::readTriggerInfo(TupTriggerData* const trigPtr,
Uint32* const mainBuffer,
Uint32& noMainWords,
Uint32* const copyBuffer,
- Uint32& noCopyWords,
- bool xfrm)
+ Uint32& noCopyWords)
{
noCopyWords = 0;
noMainWords = 0;
@@ -759,7 +757,7 @@ bool Dbtup::readTriggerInfo(TupTriggerData* const trigPtr,
regTabPtr->noOfKeyAttr,
keyBuffer,
ZATTR_BUFFER_SIZE,
- xfrm);
+ false);
ndbrequire(ret != -1);
noPrimKey= ret;
@@ -802,7 +800,7 @@ bool Dbtup::readTriggerInfo(TupTriggerData* const trigPtr,
numAttrsToRead,
mainBuffer,
ZATTR_BUFFER_SIZE,
- xfrm);
+ false);
ndbrequire(ret != -1);
noMainWords= ret;
} else {
@@ -828,7 +826,7 @@ bool Dbtup::readTriggerInfo(TupTriggerData* const trigPtr,
numAttrsToRead,
copyBuffer,
ZATTR_BUFFER_SIZE,
- xfrm);
+ false);
ndbrequire(ret != -1);
noCopyWords = ret;
diff --git a/ndb/src/kernel/blocks/dbtup/Makefile.am b/ndb/src/kernel/blocks/dbtup/Makefile.am
index e51410e6be3..2d14ad41025 100644
--- a/ndb/src/kernel/blocks/dbtup/Makefile.am
+++ b/ndb/src/kernel/blocks/dbtup/Makefile.am
@@ -18,6 +18,7 @@ libdbtup_a_SOURCES = \
DbtupGen.cpp \
DbtupSystemRestart.cpp \
DbtupIndex.cpp \
+ DbtupScan.cpp \
DbtupDebug.cpp
include $(top_srcdir)/ndb/config/common.mk.am
diff --git a/ndb/src/kernel/blocks/dbtux/Dbtux.hpp b/ndb/src/kernel/blocks/dbtux/Dbtux.hpp
index 3d78fccb780..d4a44b9e641 100644
--- a/ndb/src/kernel/blocks/dbtux/Dbtux.hpp
+++ b/ndb/src/kernel/blocks/dbtux/Dbtux.hpp
@@ -404,7 +404,7 @@ private:
Uint32 m_accLockOp;
Uint8 m_readCommitted; // no locking
Uint8 m_lockMode;
- Uint8 m_keyInfo;
+ Uint8 m_descending;
ScanBound m_boundMin;
ScanBound m_boundMax;
ScanBound* m_bound[2]; // pointers to above 2
@@ -447,7 +447,7 @@ private:
State m_state;
DictTabInfo::TableType m_tableType;
Uint32 m_tableId;
- Uint16 m_fragOff; // offset for duplicate fragId bits
+ Uint16 unused;
Uint16 m_numFrags;
Uint32 m_fragId[MaxIndexFragments];
Uint32 m_fragPtrI[MaxIndexFragments];
@@ -473,7 +473,7 @@ private:
struct Frag {
Uint32 m_tableId; // copy from index level
Uint32 m_indexId;
- Uint16 m_fragOff;
+ Uint16 unused;
Uint16 m_fragId;
Uint32 m_descPage; // copy from index level
Uint16 m_descOff;
@@ -637,7 +637,7 @@ private:
void execACCKEYREF(Signal* signal);
void execACC_ABORTCONF(Signal* signal);
void scanFirst(ScanOpPtr scanPtr);
- void scanNext(ScanOpPtr scanPtr);
+ void scanNext(ScanOpPtr scanPtr, bool fromMaintReq);
bool scanVisible(ScanOpPtr scanPtr, TreeEnt ent);
void scanClose(Signal* signal, ScanOpPtr scanPtr);
void addAccLockOp(ScanOp& scan, Uint32 accLockOp);
@@ -649,7 +649,9 @@ private:
*/
void searchToAdd(Frag& frag, ConstData searchKey, TreeEnt searchEnt, TreePos& treePos);
void searchToRemove(Frag& frag, ConstData searchKey, TreeEnt searchEnt, TreePos& treePos);
- void searchToScan(Frag& frag, ConstData boundInfo, unsigned boundCount, TreePos& treePos);
+ void searchToScan(Frag& frag, ConstData boundInfo, unsigned boundCount, bool descending, TreePos& treePos);
+ void searchToScanAscending(Frag& frag, ConstData boundInfo, unsigned boundCount, TreePos& treePos);
+ void searchToScanDescending(Frag& frag, ConstData boundInfo, unsigned boundCount, TreePos& treePos);
/*
* DbtuxCmp.cpp
@@ -1028,7 +1030,7 @@ Dbtux::ScanOp::ScanOp(ScanBoundPool& scanBoundPool) :
m_accLockOp(RNIL),
m_readCommitted(0),
m_lockMode(0),
- m_keyInfo(0),
+ m_descending(0),
m_boundMin(scanBoundPool),
m_boundMax(scanBoundPool),
m_scanPos(),
@@ -1072,7 +1074,6 @@ inline
Dbtux::Frag::Frag(ArrayPool<ScanOp>& scanOpPool) :
m_tableId(RNIL),
m_indexId(RNIL),
- m_fragOff(ZNIL),
m_fragId(ZNIL),
m_descPage(RNIL),
m_descOff(0),
diff --git a/ndb/src/kernel/blocks/dbtux/DbtuxCmp.cpp b/ndb/src/kernel/blocks/dbtux/DbtuxCmp.cpp
index ddab77b97b5..cf815b14c1a 100644
--- a/ndb/src/kernel/blocks/dbtux/DbtuxCmp.cpp
+++ b/ndb/src/kernel/blocks/dbtux/DbtuxCmp.cpp
@@ -18,24 +18,26 @@
#include "Dbtux.hpp"
/*
- * Search key vs node prefix or entry
+ * Search key vs node prefix or entry.
*
* The comparison starts at given attribute position. The position is
* updated by number of equal initial attributes found. The entry data
* may be partial in which case CmpUnknown may be returned.
+ *
+ * The attributes are normalized and have variable size given in words.
*/
int
Dbtux::cmpSearchKey(const Frag& frag, unsigned& start, ConstData searchKey, ConstData entryData, unsigned maxlen)
{
const unsigned numAttrs = frag.m_numAttrs;
const DescEnt& descEnt = getDescEnt(frag.m_descPage, frag.m_descOff);
- // number of words of attribute data left
- unsigned len2 = maxlen;
// skip to right position in search key only
for (unsigned i = 0; i < start; i++) {
jam();
searchKey += AttributeHeaderSize + searchKey.ah().getDataSize();
}
+ // number of words of entry data left
+ unsigned len2 = maxlen;
int ret = 0;
while (start < numAttrs) {
if (len2 <= AttributeHeaderSize) {
@@ -47,18 +49,20 @@ Dbtux::cmpSearchKey(const Frag& frag, unsigned& start, ConstData searchKey, Cons
if (! searchKey.ah().isNULL()) {
if (! entryData.ah().isNULL()) {
jam();
- // current attribute
+ // verify attribute id
const DescAttr& descAttr = descEnt.m_descAttr[start];
- // full data size
- const unsigned size1 = AttributeDescriptor::getSizeInWords(descAttr.m_attrDesc);
- ndbrequire(size1 != 0 && size1 == entryData.ah().getDataSize());
- const unsigned size2 = min(size1, len2);
+ ndbrequire(searchKey.ah().getAttributeId() == descAttr.m_primaryAttrId);
+ ndbrequire(entryData.ah().getAttributeId() == descAttr.m_primaryAttrId);
+ // sizes
+ const unsigned size1 = searchKey.ah().getDataSize();
+ const unsigned size2 = min(entryData.ah().getDataSize(), len2);
len2 -= size2;
// compare
NdbSqlUtil::Cmp* const cmp = c_sqlCmp[start];
const Uint32* const p1 = &searchKey[AttributeHeaderSize];
const Uint32* const p2 = &entryData[AttributeHeaderSize];
- ret = (*cmp)(0, p1, p2, size1, size2);
+ const bool full = (maxlen == MaxAttrDataSize);
+ ret = (*cmp)(0, p1, size1 << 2, p2, size2 << 2, full);
if (ret != 0) {
jam();
break;
@@ -99,18 +103,20 @@ Dbtux::cmpSearchKey(const Frag& frag, unsigned& start, ConstData searchKey, Cons
*
* Following example illustrates this. We are at (a=2, b=3).
*
- * dir bounds strict return
+ * idir bounds strict return
* 0 a >= 2 and b >= 3 no -1
* 0 a >= 2 and b > 3 yes +1
* 1 a <= 2 and b <= 3 no +1
* 1 a <= 2 and b < 3 yes -1
+ *
+ * The attributes are normalized and have variable size given in words.
*/
int
-Dbtux::cmpScanBound(const Frag& frag, unsigned dir, ConstData boundInfo, unsigned boundCount, ConstData entryData, unsigned maxlen)
+Dbtux::cmpScanBound(const Frag& frag, unsigned idir, ConstData boundInfo, unsigned boundCount, ConstData entryData, unsigned maxlen)
{
const DescEnt& descEnt = getDescEnt(frag.m_descPage, frag.m_descOff);
// direction 0-lower 1-upper
- ndbrequire(dir <= 1);
+ ndbrequire(idir <= 1);
// number of words of data left
unsigned len2 = maxlen;
// in case of no bounds, init last type to something non-strict
@@ -127,21 +133,21 @@ Dbtux::cmpScanBound(const Frag& frag, unsigned dir, ConstData boundInfo, unsigne
if (! boundInfo.ah().isNULL()) {
if (! entryData.ah().isNULL()) {
jam();
- // current attribute
- const unsigned index = boundInfo.ah().getAttributeId();
+ // verify attribute id
+ const Uint32 index = boundInfo.ah().getAttributeId();
ndbrequire(index < frag.m_numAttrs);
const DescAttr& descAttr = descEnt.m_descAttr[index];
ndbrequire(entryData.ah().getAttributeId() == descAttr.m_primaryAttrId);
- // full data size
+ // sizes
const unsigned size1 = boundInfo.ah().getDataSize();
- ndbrequire(size1 != 0 && size1 == entryData.ah().getDataSize());
- const unsigned size2 = min(size1, len2);
+ const unsigned size2 = min(entryData.ah().getDataSize(), len2);
len2 -= size2;
// compare
NdbSqlUtil::Cmp* const cmp = c_sqlCmp[index];
const Uint32* const p1 = &boundInfo[AttributeHeaderSize];
const Uint32* const p2 = &entryData[AttributeHeaderSize];
- int ret = (*cmp)(0, p1, p2, size1, size2);
+ const bool full = (maxlen == MaxAttrDataSize);
+ int ret = (*cmp)(0, p1, size1 << 2, p2, size2 << 2, full);
if (ret != 0) {
jam();
return ret;
@@ -165,5 +171,5 @@ Dbtux::cmpScanBound(const Frag& frag, unsigned dir, ConstData boundInfo, unsigne
}
// all attributes were equal
const int strict = (type & 0x1);
- return (dir == 0 ? (strict == 0 ? -1 : +1) : (strict == 0 ? +1 : -1));
+ return (idir == 0 ? (strict == 0 ? -1 : +1) : (strict == 0 ? +1 : -1));
}
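
The comparator changes above replace the old word-count interface with byte lengths taken from the attribute headers plus a "full" flag (true only when the complete entry, not just the node prefix, is available). The sketch below is not NdbSqlUtil code, only an illustration of why a comparison over a truncated prefix must be able to answer CmpUnknown: if the available bytes compare equal but the entry side was cut off, the order cannot be decided from the prefix alone. Values are assumed to be already normalized so that plain byte order is index order; the result codes are illustrative.

    #include <cstring>
    #include <cstdio>

    // Illustrative result codes; the real comparators use NdbSqlUtil::CmpUnknown.
    enum { CmpLess = -1, CmpEqual = 0, CmpGreater = 1, CmpUnknown = 2 };

    // Compare a complete search-key value against an entry value that may have
    // been truncated at the node prefix (full == false). Both values are assumed
    // to be already normalized, so plain byte order is the index order.
    static int cmpNormalized(const unsigned char* p1, unsigned n1,
                             const unsigned char* p2, unsigned n2, bool full)
    {
      const unsigned n = n1 < n2 ? n1 : n2;
      const int c = std::memcmp(p1, p2, n);
      if (c != 0)
        return c < 0 ? CmpLess : CmpGreater;
      if (n1 == n2)
        return CmpEqual;
      if (!full && n2 < n1)
        return CmpUnknown;          // entry bytes ran out inside the prefix
      return n1 < n2 ? CmpLess : CmpGreater;
    }

    int main()
    {
      const unsigned char key[]  = { 0x41, 0x42, 0x43 };   // full search key
      const unsigned char pref[] = { 0x41, 0x42 };         // entry cut at 2 bytes
      std::printf("%d\n", cmpNormalized(key, 3, pref, 2, false)); // 2 = CmpUnknown
      std::printf("%d\n", cmpNormalized(key, 3, pref, 2, true));  // 1 = CmpGreater
      return 0;
    }
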
diff --git a/ndb/src/kernel/blocks/dbtux/DbtuxDebug.cpp b/ndb/src/kernel/blocks/dbtux/DbtuxDebug.cpp
index 1e1b0d1d5b6..ed29dc57915 100644
--- a/ndb/src/kernel/blocks/dbtux/DbtuxDebug.cpp
+++ b/ndb/src/kernel/blocks/dbtux/DbtuxDebug.cpp
@@ -340,14 +340,14 @@ operator<<(NdbOut& out, const Dbtux::ScanOp& scan)
out << " [savePointId " << dec << scan.m_savePointId << "]";
out << " [accLockOp " << hex << scan.m_accLockOp << "]";
out << " [accLockOps";
- for (unsigned i = 0; i < Dbtux::MaxAccLockOps; i++) {
+ for (unsigned i = 0; i < scan.m_maxAccLockOps; i++) {
if (scan.m_accLockOps[i] != RNIL)
out << " " << hex << scan.m_accLockOps[i];
}
out << "]";
out << " [readCommitted " << dec << scan.m_readCommitted << "]";
out << " [lockMode " << dec << scan.m_lockMode << "]";
- out << " [keyInfo " << dec << scan.m_keyInfo << "]";
+ out << " [descending " << dec << scan.m_descending << "]";
out << " [pos " << scan.m_scanPos << "]";
out << " [ent " << scan.m_scanEnt << "]";
for (unsigned i = 0; i <= 1; i++) {
@@ -370,7 +370,6 @@ operator<<(NdbOut& out, const Dbtux::Index& index)
{
out << "[Index " << hex << &index;
out << " [tableId " << dec << index.m_tableId << "]";
- out << " [fragOff " << dec << index.m_fragOff << "]";
out << " [numFrags " << dec << index.m_numFrags << "]";
for (unsigned i = 0; i < index.m_numFrags; i++) {
out << " [frag " << dec << i << " ";
@@ -393,7 +392,6 @@ operator<<(NdbOut& out, const Dbtux::Frag& frag)
out << "[Frag " << hex << &frag;
out << " [tableId " << dec << frag.m_tableId << "]";
out << " [indexId " << dec << frag.m_indexId << "]";
- out << " [fragOff " << dec << frag.m_fragOff << "]";
out << " [fragId " << dec << frag.m_fragId << "]";
out << " [descPage " << hex << frag.m_descPage << "]";
out << " [descOff " << dec << frag.m_descOff << "]";
diff --git a/ndb/src/kernel/blocks/dbtux/DbtuxGen.cpp b/ndb/src/kernel/blocks/dbtux/DbtuxGen.cpp
index 8990d6c86b6..5640fdf2899 100644
--- a/ndb/src/kernel/blocks/dbtux/DbtuxGen.cpp
+++ b/ndb/src/kernel/blocks/dbtux/DbtuxGen.cpp
@@ -217,6 +217,7 @@ Dbtux::setKeyAttrs(const Frag& frag)
const unsigned numAttrs = frag.m_numAttrs;
const DescEnt& descEnt = getDescEnt(frag.m_descPage, frag.m_descOff);
for (unsigned i = 0; i < numAttrs; i++) {
+ jam();
const DescAttr& descAttr = descEnt.m_descAttr[i];
Uint32 size = AttributeDescriptor::getSizeInWords(descAttr.m_attrDesc);
// set attr id and fixed size
@@ -244,6 +245,26 @@ Dbtux::readKeyAttrs(const Frag& frag, TreeEnt ent, unsigned start, Data keyData)
jamEntry();
// TODO handle error
ndbrequire(ret > 0);
+#ifdef VM_TRACE
+ if (debugFlags & (DebugMaint | DebugScan)) {
+ debugOut << "readKeyAttrs:" << endl;
+ ConstData data = keyData;
+ Uint32 totalSize = 0;
+ for (Uint32 i = start; i < frag.m_numAttrs; i++) {
+ Uint32 attrId = data.ah().getAttributeId();
+ Uint32 dataSize = data.ah().getDataSize();
+ debugOut << i << " attrId=" << attrId << " size=" << dataSize;
+ data += 1;
+ for (Uint32 j = 0; j < dataSize; j++) {
+ debugOut << " " << hex << data[0];
+ data += 1;
+ }
+ debugOut << endl;
+ totalSize += 1 + dataSize;
+ }
+ ndbassert((int)totalSize == ret);
+ }
+#endif
}
void
@@ -251,7 +272,7 @@ Dbtux::readTablePk(const Frag& frag, TreeEnt ent, Data pkData, unsigned& pkSize)
{
const Uint32 tableFragPtrI = frag.m_tupTableFragPtrI[ent.m_fragBit];
const TupLoc tupLoc = ent.m_tupLoc;
- int ret = c_tup->tuxReadPk(tableFragPtrI, tupLoc.getPageId(), tupLoc.getPageOffset(), pkData);
+ int ret = c_tup->tuxReadPk(tableFragPtrI, tupLoc.getPageId(), tupLoc.getPageOffset(), pkData, true);
jamEntry();
// TODO handle error
ndbrequire(ret > 0);
diff --git a/ndb/src/kernel/blocks/dbtux/DbtuxMaint.cpp b/ndb/src/kernel/blocks/dbtux/DbtuxMaint.cpp
index 30afb51e7d7..4b568badc67 100644
--- a/ndb/src/kernel/blocks/dbtux/DbtuxMaint.cpp
+++ b/ndb/src/kernel/blocks/dbtux/DbtuxMaint.cpp
@@ -57,9 +57,8 @@ Dbtux::execTUX_MAINT_REQ(Signal* signal)
c_indexPool.getPtr(indexPtr, req->indexId);
ndbrequire(indexPtr.p->m_tableId == req->tableId);
// get base fragment id and extra bits
- const Uint32 fragOff = indexPtr.p->m_fragOff;
- const Uint32 fragId = req->fragId & ((1 << fragOff) - 1);
- const Uint32 fragBit = req->fragId >> fragOff;
+ const Uint32 fragId = req->fragId & ~1;
+ const Uint32 fragBit = req->fragId & 1;
// get the fragment
FragPtr fragPtr;
fragPtr.i = RNIL;
diff --git a/ndb/src/kernel/blocks/dbtux/DbtuxMeta.cpp b/ndb/src/kernel/blocks/dbtux/DbtuxMeta.cpp
index b7526593a08..93c4a583624 100644
--- a/ndb/src/kernel/blocks/dbtux/DbtuxMeta.cpp
+++ b/ndb/src/kernel/blocks/dbtux/DbtuxMeta.cpp
@@ -16,6 +16,7 @@
#define DBTUX_META_CPP
#include "Dbtux.hpp"
+#include <my_sys.h>
/*
* Create index.
@@ -84,7 +85,6 @@ Dbtux::execTUXFRAGREQ(Signal* signal)
new (fragPtr.p) Frag(c_scanOpPool);
fragPtr.p->m_tableId = req->primaryTableId;
fragPtr.p->m_indexId = req->tableId;
- fragPtr.p->m_fragOff = req->fragOff;
fragPtr.p->m_fragId = req->fragId;
fragPtr.p->m_numAttrs = req->noOfAttr;
fragPtr.p->m_storeNullKey = true; // not yet configurable
@@ -112,7 +112,6 @@ Dbtux::execTUXFRAGREQ(Signal* signal)
indexPtr.p->m_state = Index::Defining;
indexPtr.p->m_tableType = (DictTabInfo::TableType)req->tableType;
indexPtr.p->m_tableId = req->primaryTableId;
- indexPtr.p->m_fragOff = req->fragOff;
indexPtr.p->m_numAttrs = req->noOfAttr;
indexPtr.p->m_storeNullKey = true; // not yet configurable
// allocate attribute descriptors
@@ -128,7 +127,6 @@ Dbtux::execTUXFRAGREQ(Signal* signal)
indexPtr.p->m_state == Index::Defining &&
indexPtr.p->m_tableType == (DictTabInfo::TableType)req->tableType &&
indexPtr.p->m_tableId == req->primaryTableId &&
- indexPtr.p->m_fragOff == req->fragOff &&
indexPtr.p->m_numAttrs == req->noOfAttr);
}
// copy metadata address to each fragment
@@ -203,7 +201,7 @@ Dbtux::execTUX_ADD_ATTRREQ(Signal* signal)
DescAttr& descAttr = descEnt.m_descAttr[attrId];
descAttr.m_attrDesc = req->attrDescriptor;
descAttr.m_primaryAttrId = req->primaryAttrId;
- descAttr.m_typeId = req->extTypeInfo & 0xFF;
+ descAttr.m_typeId = AttributeDescriptor::getType(req->attrDescriptor);
descAttr.m_charset = (req->extTypeInfo >> 16);
#ifdef VM_TRACE
if (debugFlags & DebugMeta) {
@@ -218,17 +216,15 @@ Dbtux::execTUX_ADD_ATTRREQ(Signal* signal)
errorCode = TuxAddAttrRef::InvalidAttributeType;
break;
}
-#ifdef dbtux_uses_charset
if (descAttr.m_charset != 0) {
- CHARSET_INFO *cs = get_charset(descAttr.m_charset, MYF(0));
- // here use the non-binary type
+ CHARSET_INFO *cs = all_charsets[descAttr.m_charset];
+ ndbrequire(cs != 0);
if (! NdbSqlUtil::usable_in_ordered_index(descAttr.m_typeId, cs)) {
jam();
errorCode = TuxAddAttrRef::InvalidCharset;
break;
}
}
-#endif
const bool lastAttr = (indexPtr.p->m_numAttrs == fragOpPtr.p->m_numAttrsRecvd);
if (ERROR_INSERTED(12003) && fragOpPtr.p->m_fragNo == 0 && attrId == 0 ||
ERROR_INSERTED(12004) && fragOpPtr.p->m_fragNo == 0 && lastAttr ||
diff --git a/ndb/src/kernel/blocks/dbtux/DbtuxNode.cpp b/ndb/src/kernel/blocks/dbtux/DbtuxNode.cpp
index 9f9d4cb68e3..68a3e78ce9e 100644
--- a/ndb/src/kernel/blocks/dbtux/DbtuxNode.cpp
+++ b/ndb/src/kernel/blocks/dbtux/DbtuxNode.cpp
@@ -502,7 +502,7 @@ Dbtux::moveScanList(NodeHandle& node, unsigned pos)
debugOut << "At pos=" << pos << " " << node << endl;
}
#endif
- scanNext(scanPtr);
+ scanNext(scanPtr, true);
ndbrequire(! (scanPos.m_loc == node.m_loc && scanPos.m_pos == pos));
}
scanPtr.i = nextPtrI;
diff --git a/ndb/src/kernel/blocks/dbtux/DbtuxScan.cpp b/ndb/src/kernel/blocks/dbtux/DbtuxScan.cpp
index 8677ae741b3..a61b7c1f5ca 100644
--- a/ndb/src/kernel/blocks/dbtux/DbtuxScan.cpp
+++ b/ndb/src/kernel/blocks/dbtux/DbtuxScan.cpp
@@ -16,6 +16,7 @@
#define DBTUX_SCAN_CPP
#include "Dbtux.hpp"
+#include <my_sys.h>
void
Dbtux::execACC_SCANREQ(Signal* signal)
@@ -34,7 +35,7 @@ Dbtux::execACC_SCANREQ(Signal* signal)
fragPtr.i = RNIL;
for (unsigned i = 0; i < indexPtr.p->m_numFrags; i++) {
jam();
- if (indexPtr.p->m_fragId[i] == req->fragmentNo) {
+ if (indexPtr.p->m_fragId[i] == req->fragmentNo << 1) {
jam();
c_fragPool.getPtr(fragPtr, indexPtr.p->m_fragPtrI[i]);
break;
@@ -43,7 +44,6 @@ Dbtux::execACC_SCANREQ(Signal* signal)
ndbrequire(fragPtr.i != RNIL);
Frag& frag = *fragPtr.p;
// must be normal DIH/TC fragment
- ndbrequire(frag.m_fragId < (1 << frag.m_fragOff));
TreeHead& tree = frag.m_tree;
// check for empty fragment
if (tree.m_root == NullTupLoc) {
@@ -74,18 +74,18 @@ Dbtux::execACC_SCANREQ(Signal* signal)
scanPtr.p->m_savePointId = req->savePointId;
scanPtr.p->m_readCommitted = AccScanReq::getReadCommittedFlag(req->requestInfo);
scanPtr.p->m_lockMode = AccScanReq::getLockMode(req->requestInfo);
- scanPtr.p->m_keyInfo = AccScanReq::getKeyinfoFlag(req->requestInfo);
-#ifdef VM_TRACE
- if (debugFlags & DebugScan) {
- debugOut << "Seize scan " << scanPtr.i << " " << *scanPtr.p << endl;
- }
-#endif
+ scanPtr.p->m_descending = AccScanReq::getDescendingFlag(req->requestInfo);
/*
* readCommitted lockMode keyInfo
* 1 0 0 - read committed (no lock)
* 0 0 0 - read latest (read lock)
* 0 1 1 - read exclusive (write lock)
*/
+#ifdef VM_TRACE
+ if (debugFlags & DebugScan) {
+ debugOut << "Seize scan " << scanPtr.i << " " << *scanPtr.p << endl;
+ }
+#endif
// conf
AccScanConf* const conf = (AccScanConf*)signal->getDataPtrSend();
conf->scanPtr = req->senderData;
@@ -114,50 +114,100 @@ Dbtux::execACC_SCANREQ(Signal* signal)
* keys and that all but possibly last bound is non-strict.
*
* Finally save the sets of lower and upper bounds (i.e. start key and
- * end key). Full bound type (< 4) is included but only the strict bit
- * is used since lower and upper have now been separated.
+ * end key). Full bound type is included but only the strict bit is
+ * used since lower and upper have now been separated.
*/
void
Dbtux::execTUX_BOUND_INFO(Signal* signal)
{
jamEntry();
- struct BoundInfo {
- int type;
- unsigned offset;
- unsigned size;
- };
- TuxBoundInfo* const sig = (TuxBoundInfo*)signal->getDataPtrSend();
- const TuxBoundInfo reqCopy = *(const TuxBoundInfo*)sig;
- const TuxBoundInfo* const req = &reqCopy;
// get records
+ TuxBoundInfo* const sig = (TuxBoundInfo*)signal->getDataPtrSend();
+ const TuxBoundInfo* const req = (const TuxBoundInfo*)sig;
ScanOp& scan = *c_scanOpPool.getPtr(req->tuxScanPtrI);
- Index& index = *c_indexPool.getPtr(scan.m_indexId);
- // collect lower and upper bounds
+ const Index& index = *c_indexPool.getPtr(scan.m_indexId);
+ const DescEnt& descEnt = getDescEnt(index.m_descPage, index.m_descOff);
+ // collect normalized lower and upper bounds
+ struct BoundInfo {
+ int type2; // with EQ -> LE/GE
+ Uint32 offset; // offset in xfrmData
+ Uint32 size;
+ };
BoundInfo boundInfo[2][MaxIndexAttributes];
+ const unsigned dstSize = 1024 * MAX_XFRM_MULTIPLY;
+ Uint32 xfrmData[dstSize];
+ Uint32 dstPos = 0;
// largest attrId seen plus one
Uint32 maxAttrId[2] = { 0, 0 };
- unsigned offset = 0;
- const Uint32* const data = (Uint32*)sig + TuxBoundInfo::SignalLength;
// walk through entries
+ const Uint32* const data = (Uint32*)sig + TuxBoundInfo::SignalLength;
+ Uint32 offset = 0;
while (offset + 2 <= req->boundAiLength) {
jam();
const unsigned type = data[offset];
- if (type > 4) {
- jam();
- scan.m_state = ScanOp::Invalid;
- sig->errorCode = TuxBoundInfo::InvalidAttrInfo;
- return;
- }
const AttributeHeader* ah = (const AttributeHeader*)&data[offset + 1];
const Uint32 attrId = ah->getAttributeId();
const Uint32 dataSize = ah->getDataSize();
- if (attrId >= index.m_numAttrs) {
+ if (type > 4 || attrId >= index.m_numAttrs || dstPos + 2 + dataSize > dstSize) {
jam();
scan.m_state = ScanOp::Invalid;
sig->errorCode = TuxBoundInfo::InvalidAttrInfo;
return;
}
+ // copy header
+ xfrmData[dstPos + 0] = data[offset + 0];
+ xfrmData[dstPos + 1] = data[offset + 1];
+ // copy bound value
+ Uint32 dstWords = 0;
+ if (! ah->isNULL()) {
+ jam();
+ const DescAttr& descAttr = descEnt.m_descAttr[attrId];
+ Uint32 srcBytes = AttributeDescriptor::getSizeInBytes(descAttr.m_attrDesc);
+ Uint32 srcWords = (srcBytes + 3) / 4;
+ if (srcWords != dataSize) {
+ jam();
+ scan.m_state = ScanOp::Invalid;
+ sig->errorCode = TuxBoundInfo::InvalidAttrInfo;
+ return;
+ }
+ uchar* dstPtr = (uchar*)&xfrmData[dstPos + 2];
+ const uchar* srcPtr = (const uchar*)&data[offset + 2];
+ if (descAttr.m_charset == 0) {
+ memcpy(dstPtr, srcPtr, srcWords << 2);
+ dstWords = srcWords;
+ } else {
+ jam();
+ Uint32 typeId = descAttr.m_typeId;
+ Uint32 lb, len;
+ bool ok = NdbSqlUtil::get_var_length(typeId, srcPtr, srcBytes, lb, len);
+ if (! ok) {
+ jam();
+ scan.m_state = ScanOp::Invalid;
+ sig->errorCode = TuxBoundInfo::InvalidCharFormat;
+ return;
+ }
+ CHARSET_INFO* cs = all_charsets[descAttr.m_charset];
+ Uint32 xmul = cs->strxfrm_multiply;
+ if (xmul == 0)
+ xmul = 1;
+ // see comment in DbtcMain.cpp
+ Uint32 dstLen = xmul * (srcBytes - lb);
+ if (dstLen > ((dstSize - dstPos) << 2)) {
+ jam();
+ scan.m_state = ScanOp::Invalid;
+ sig->errorCode = TuxBoundInfo::TooMuchAttrInfo;
+ return;
+ }
+ int n = NdbSqlUtil::strnxfrm_bug7284(cs, dstPtr, dstLen, srcPtr + lb, len);
+ ndbrequire(n != -1);
+ while ((n & 3) != 0) {
+ dstPtr[n++] = 0;
+ }
+ dstWords = n / 4;
+ }
+ }
for (unsigned j = 0; j <= 1; j++) {
+ jam();
// check if lower/upper bit matches
const unsigned luBit = (j << 1);
if ((type & 0x2) != luBit && type != 4)
@@ -166,29 +216,35 @@ Dbtux::execTUX_BOUND_INFO(Signal* signal)
const unsigned type2 = (type & 0x1) | luBit;
// fill in any gap
while (maxAttrId[j] <= attrId) {
+ jam();
BoundInfo& b = boundInfo[j][maxAttrId[j]++];
- b.type = -1;
+ b.type2 = -1;
}
BoundInfo& b = boundInfo[j][attrId];
- if (b.type != -1) {
- // compare with previous bound
- if (b.type != (int)type2 ||
- b.size != 2 + dataSize ||
- memcmp(&data[b.offset + 2], &data[offset + 2], dataSize << 2) != 0) {
+ if (b.type2 != -1) {
+ // compare with previously defined bound
+ if (b.type2 != (int)type2 ||
+ b.size != 2 + dstWords ||
+ memcmp(&xfrmData[b.offset + 2], &xfrmData[dstPos + 2], dstWords << 2) != 0) {
jam();
scan.m_state = ScanOp::Invalid;
sig->errorCode = TuxBoundInfo::InvalidBounds;
return;
}
} else {
+ // fix length
+ AttributeHeader* ah = (AttributeHeader*)&xfrmData[dstPos + 1];
+ ah->setDataSize(dstWords);
// enter new bound
- b.type = type2;
- b.offset = offset;
- b.size = 2 + dataSize;
+ jam();
+ b.type2 = type2;
+ b.offset = dstPos;
+ b.size = 2 + dstWords;
}
}
// jump to next
offset += 2 + dataSize;
+ dstPos += 2 + dstWords;
}
if (offset != req->boundAiLength) {
jam();
@@ -202,13 +258,13 @@ Dbtux::execTUX_BOUND_INFO(Signal* signal)
jam();
const BoundInfo& b = boundInfo[j][i];
// check for gap or strict bound before last
- if (b.type == -1 || (i + 1 < maxAttrId[j] && (b.type & 0x1))) {
+ if (b.type2 == -1 || (i + 1 < maxAttrId[j] && (b.type2 & 0x1))) {
jam();
scan.m_state = ScanOp::Invalid;
sig->errorCode = TuxBoundInfo::InvalidBounds;
return;
}
- bool ok = scan.m_bound[j]->append(&data[b.offset], b.size);
+ bool ok = scan.m_bound[j]->append(&xfrmData[b.offset], b.size);
if (! ok) {
jam();
scan.m_state = ScanOp::Invalid;
@@ -354,7 +410,7 @@ Dbtux::execACC_CHECK_SCAN(Signal* signal)
NextScanConf* const conf = (NextScanConf*)signal->getDataPtrSend();
conf->scanPtr = scan.m_userPtr;
conf->accOperationPtr = RNIL; // no tuple returned
- conf->fragId = frag.m_fragId | (ent.m_fragBit << frag.m_fragOff);
+ conf->fragId = frag.m_fragId | ent.m_fragBit;
unsigned signalLength = 3;
// if TC has ordered scan close, it will be detected here
sendSignal(scan.m_userRef, GSN_NEXT_SCANCONF,
@@ -374,7 +430,7 @@ Dbtux::execACC_CHECK_SCAN(Signal* signal)
if (scan.m_state == ScanOp::Next) {
jam();
// look for next
- scanNext(scanPtr);
+ scanNext(scanPtr, false);
}
// for reading tuple key in Current or Locked state
Data pkData = c_dataBuffer;
@@ -397,7 +453,7 @@ Dbtux::execACC_CHECK_SCAN(Signal* signal)
lockReq->userPtr = scanPtr.i;
lockReq->userRef = reference();
lockReq->tableId = scan.m_tableId;
- lockReq->fragId = frag.m_fragId | (ent.m_fragBit << frag.m_fragOff);
+ lockReq->fragId = frag.m_fragId | ent.m_fragBit;
lockReq->fragPtrI = frag.m_accTableFragPtrI[ent.m_fragBit];
const Uint32* const buf32 = static_cast<Uint32*>(pkData);
const Uint64* const buf64 = reinterpret_cast<const Uint64*>(buf32);
@@ -474,13 +530,6 @@ Dbtux::execACC_CHECK_SCAN(Signal* signal)
jam();
// read keys if not already done (uses signal)
const TreeEnt ent = scan.m_scanEnt;
- if (scan.m_keyInfo) {
- jam();
- if (pkSize == 0) {
- jam();
- readTablePk(frag, ent, pkData, pkSize);
- }
- }
// conf signal
NextScanConf* const conf = (NextScanConf*)signal->getDataPtrSend();
conf->scanPtr = scan.m_userPtr;
@@ -496,21 +545,12 @@ Dbtux::execACC_CHECK_SCAN(Signal* signal)
accLockOp = (Uint32)-1;
}
conf->accOperationPtr = accLockOp;
- conf->fragId = frag.m_fragId | (ent.m_fragBit << frag.m_fragOff);
+ conf->fragId = frag.m_fragId | ent.m_fragBit;
conf->localKey[0] = getTupAddr(frag, ent);
conf->localKey[1] = 0;
conf->localKeyLength = 1;
unsigned signalLength = 6;
// add key info
- if (scan.m_keyInfo) {
- jam();
- conf->keyLength = pkSize;
- // piggy-back first 4 words of key data
- for (unsigned i = 0; i < 4; i++) {
- conf->key[i] = i < pkSize ? pkData[i] : 0;
- }
- signalLength = 11;
- }
if (! scan.m_readCommitted) {
sendSignal(scan.m_userRef, GSN_NEXT_SCANCONF,
signal, signalLength, JBB);
@@ -518,24 +558,6 @@ Dbtux::execACC_CHECK_SCAN(Signal* signal)
Uint32 blockNo = refToBlock(scan.m_userRef);
EXECUTE_DIRECT(blockNo, GSN_NEXT_SCANCONF, signal, signalLength);
}
- // send rest of key data
- if (scan.m_keyInfo && pkSize > 4) {
- unsigned total = 4;
- while (total < pkSize) {
- jam();
- unsigned length = pkSize - total;
- if (length > 20)
- length = 20;
- signal->theData[0] = scan.m_userPtr;
- signal->theData[1] = 0;
- signal->theData[2] = 0;
- signal->theData[3] = length;
- memcpy(&signal->theData[4], &pkData[total], length << 2);
- sendSignal(scan.m_userRef, GSN_ACC_SCAN_INFO24,
- signal, 4 + length, JBB);
- total += length;
- }
- }
// next time look for next entry
scan.m_state = ScanOp::Next;
return;
@@ -687,8 +709,10 @@ Dbtux::scanFirst(ScanOpPtr scanPtr)
TreeHead& tree = frag.m_tree;
// set up index keys for this operation
setKeyAttrs(frag);
- // unpack lower bound into c_dataBuffer
- const ScanBound& bound = *scan.m_bound[0];
+ // scan direction 0, 1
+ const unsigned idir = scan.m_descending;
+ // unpack start key into c_dataBuffer
+ const ScanBound& bound = *scan.m_bound[idir];
ScanBoundIterator iter;
bound.first(iter);
for (unsigned j = 0; j < bound.getSize(); j++) {
@@ -696,11 +720,10 @@ Dbtux::scanFirst(ScanOpPtr scanPtr)
c_dataBuffer[j] = *iter.data;
bound.next(iter);
}
- // search for scan start position
TreePos treePos;
- searchToScan(frag, c_dataBuffer, scan.m_boundCnt[0], treePos);
+ searchToScan(frag, c_dataBuffer, scan.m_boundCnt[idir], scan.m_descending, treePos);
if (treePos.m_loc == NullTupLoc) {
- // empty tree
+ // empty result set
jam();
scan.m_state = ScanOp::Last;
return;
@@ -718,7 +741,8 @@ Dbtux::scanFirst(ScanOpPtr scanPtr)
* Move to next entry. The scan is already linked to some node. When
* we leave, if an entry was found, it will be linked to a possibly
* different node. The scan has a position, and a direction which tells
- * from where we came to this position. This is one of:
+ * from where we came to this position. This is one of (all comments
+ * are in terms of ascending scan):
*
* 0 - up from left child (scan this node next)
* 1 - up from right child (proceed to parent)
@@ -730,7 +754,7 @@ Dbtux::scanFirst(ScanOpPtr scanPtr)
* re-organizations need not worry about scan direction.
*/
void
-Dbtux::scanNext(ScanOpPtr scanPtr)
+Dbtux::scanNext(ScanOpPtr scanPtr, bool fromMaintReq)
{
ScanOp& scan = *scanPtr.p;
Frag& frag = *c_fragPool.getPtr(scan.m_fragPtrI);
@@ -743,8 +767,11 @@ Dbtux::scanNext(ScanOpPtr scanPtr)
ndbrequire(scan.m_state != ScanOp::Locked);
// set up index keys for this operation
setKeyAttrs(frag);
- // unpack upper bound into c_dataBuffer
- const ScanBound& bound = *scan.m_bound[1];
+ // scan direction
+ const unsigned idir = scan.m_descending; // 0, 1
+ const int jdir = 1 - 2 * (int)idir; // 1, -1
+ // unpack end key into c_dataBuffer
+ const ScanBound& bound = *scan.m_bound[1 - idir];
ScanBoundIterator iter;
bound.first(iter);
for (unsigned j = 0; j < bound.getSize(); j++) {
@@ -764,6 +791,11 @@ Dbtux::scanNext(ScanOpPtr scanPtr)
TreeEnt ent;
while (true) {
jam();
+#ifdef VM_TRACE
+ if (debugFlags & DebugScan) {
+ debugOut << "Scan next pos " << pos << " " << node << endl;
+ }
+#endif
if (pos.m_dir == 2) {
// coming up from root ends the scan
jam();
@@ -778,7 +810,7 @@ Dbtux::scanNext(ScanOpPtr scanPtr)
if (pos.m_dir == 4) {
// coming down from parent proceed to left child
jam();
- TupLoc loc = node.getLink(0);
+ TupLoc loc = node.getLink(idir);
if (loc != NullTupLoc) {
jam();
pos.m_loc = loc;
@@ -786,34 +818,42 @@ Dbtux::scanNext(ScanOpPtr scanPtr)
continue;
}
// pretend we came from left child
- pos.m_dir = 0;
+ pos.m_dir = idir;
+ }
+ const unsigned occup = node.getOccup();
+ if (occup == 0) {
+ jam();
+ ndbrequire(fromMaintReq);
+ // move back to parent - see comment in treeRemoveInner
+ pos.m_loc = node.getLink(2);
+ pos.m_dir = node.getSide();
+ continue;
}
- if (pos.m_dir == 0) {
+ if (pos.m_dir == idir) {
// coming up from left child scan current node
jam();
- pos.m_pos = 0;
+ pos.m_pos = idir == 0 ? 0 : occup - 1;
pos.m_match = false;
pos.m_dir = 3;
}
if (pos.m_dir == 3) {
// within node
jam();
- unsigned occup = node.getOccup();
- ndbrequire(occup >= 1);
// advance position
if (! pos.m_match)
pos.m_match = true;
else
- pos.m_pos++;
+ // if m_pos is 0 and the scan is descending, this wraps to ZNIL (> occup)
+ pos.m_pos += jdir;
if (pos.m_pos < occup) {
jam();
ent = node.getEnt(pos.m_pos);
pos.m_dir = 3; // unchanged
// read and compare all attributes
readKeyAttrs(frag, ent, 0, c_entryKey);
- int ret = cmpScanBound(frag, 1, c_dataBuffer, scan.m_boundCnt[1], c_entryKey);
+ int ret = cmpScanBound(frag, 1 - idir, c_dataBuffer, scan.m_boundCnt[1 - idir], c_entryKey);
ndbrequire(ret != NdbSqlUtil::CmpUnknown);
- if (ret < 0) {
+ if (jdir * ret < 0) {
jam();
// hit upper bound of single range scan
pos.m_loc = NullTupLoc;
@@ -830,7 +870,7 @@ Dbtux::scanNext(ScanOpPtr scanPtr)
break;
}
// after node proceed to right child
- TupLoc loc = node.getLink(1);
+ TupLoc loc = node.getLink(1 - idir);
if (loc != NullTupLoc) {
jam();
pos.m_loc = loc;
@@ -838,9 +878,9 @@ Dbtux::scanNext(ScanOpPtr scanPtr)
continue;
}
// pretend we came from right child
- pos.m_dir = 1;
+ pos.m_dir = 1 - idir;
}
- if (pos.m_dir == 1) {
+ if (pos.m_dir == 1 - idir) {
// coming up from right child proceed to parent
jam();
pos.m_loc = node.getLink(2);
@@ -890,7 +930,7 @@ Dbtux::scanVisible(ScanOpPtr scanPtr, TreeEnt ent)
const Frag& frag = *c_fragPool.getPtr(scan.m_fragPtrI);
Uint32 fragBit = ent.m_fragBit;
Uint32 tableFragPtrI = frag.m_tupTableFragPtrI[fragBit];
- Uint32 fragId = frag.m_fragId | (fragBit << frag.m_fragOff);
+ Uint32 fragId = frag.m_fragId | fragBit;
Uint32 tupAddr = getTupAddr(frag, ent);
Uint32 tupVersion = ent.m_tupVersion;
// check for same tuple twice in row
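
Most of the descending-scan support in DbtuxScan.cpp above reduces to direction arithmetic: idir = m_descending picks which child link to follow and which saved bound acts as the start/end key, and jdir = 1 - 2*idir turns position increments into decrements while a single sign test ("jdir * ret < 0") detects the end key for both directions. A minimal sketch of the same convention over a plain sorted array (illustrative names, inclusive end bound) follows.

    #include <cstdio>

    // Walk a sorted array in either direction using the idir/jdir convention
    // from DbtuxScan.cpp: idir is 0 for ascending, 1 for descending, and
    // jdir = 1 - 2*idir is the step (+1 or -1). One comparison, multiplied by
    // jdir, serves as the "past the end key" test for both directions.
    static void scanRange(const int* entries, int occup, bool descending, int endBound)
    {
      const unsigned idir = descending ? 1 : 0;
      const int jdir = 1 - 2 * (int)idir;               // +1 or -1
      int pos = (idir == 0) ? 0 : occup - 1;            // first entry in scan order
      while (pos >= 0 && pos < occup) {
        const int ent = entries[pos];
        if (jdir * (ent - endBound) > 0)                // entry is past the end key
          break;
        std::printf("%d ", ent);
        pos += jdir;
      }
      std::printf("\n");
    }

    int main()
    {
      const int entries[] = { 1, 3, 5, 7, 9 };
      scanRange(entries, 5, false, 7);   // ascending up to 7:    1 3 5 7
      scanRange(entries, 5, true, 3);    // descending down to 3: 9 7 5 3
      return 0;
    }
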
diff --git a/ndb/src/kernel/blocks/dbtux/DbtuxSearch.cpp b/ndb/src/kernel/blocks/dbtux/DbtuxSearch.cpp
index 7057d74c3ad..b0e2a664bfd 100644
--- a/ndb/src/kernel/blocks/dbtux/DbtuxSearch.cpp
+++ b/ndb/src/kernel/blocks/dbtux/DbtuxSearch.cpp
@@ -253,22 +253,33 @@ Dbtux::searchToRemove(Frag& frag, ConstData searchKey, TreeEnt searchEnt, TreePo
/*
* Search for scan start position.
*
- * Similar to searchToAdd.
+ * Similar to searchToAdd. The routines differ somewhat depending on
+ * scan direction and are done by separate methods.
*/
void
-Dbtux::searchToScan(Frag& frag, ConstData boundInfo, unsigned boundCount, TreePos& treePos)
+Dbtux::searchToScan(Frag& frag, ConstData boundInfo, unsigned boundCount, bool descending, TreePos& treePos)
{
const TreeHead& tree = frag.m_tree;
- NodeHandle currNode(frag);
- currNode.m_loc = tree.m_root;
- if (currNode.m_loc == NullTupLoc) {
- // empty tree
- jam();
- treePos.m_match = false;
+ if (tree.m_root != NullTupLoc) {
+ if (! descending)
+ searchToScanAscending(frag, boundInfo, boundCount, treePos);
+ else
+ searchToScanDescending(frag, boundInfo, boundCount, treePos);
return;
}
+ // empty tree
+}
+
+void
+Dbtux::searchToScanAscending(Frag& frag, ConstData boundInfo, unsigned boundCount, TreePos& treePos)
+{
+ const TreeHead& tree = frag.m_tree;
+ NodeHandle currNode(frag);
+ currNode.m_loc = tree.m_root;
NodeHandle glbNode(frag); // potential g.l.b of final node
NodeHandle bottomNode(frag);
+ // always before entry
+ treePos.m_match = false;
while (true) {
jam();
selectNode(currNode, currNode.m_loc);
@@ -283,6 +294,7 @@ Dbtux::searchToScan(Frag& frag, ConstData boundInfo, unsigned boundCount, TreePo
ndbrequire(ret != NdbSqlUtil::CmpUnknown);
}
if (ret < 0) {
+ // bound is left of this node
jam();
const TupLoc loc = currNode.getLink(0);
if (loc != NullTupLoc) {
@@ -300,11 +312,11 @@ Dbtux::searchToScan(Frag& frag, ConstData boundInfo, unsigned boundCount, TreePo
// start scanning this node
treePos.m_loc = currNode.m_loc;
treePos.m_pos = 0;
- treePos.m_match = false;
treePos.m_dir = 3;
return;
}
} else if (ret > 0) {
+ // bound is at or right of this node
jam();
const TupLoc loc = currNode.getLink(1);
if (loc != NullTupLoc) {
@@ -316,7 +328,7 @@ Dbtux::searchToScan(Frag& frag, ConstData boundInfo, unsigned boundCount, TreePo
continue;
}
} else {
- ndbassert(false);
+ ndbrequire(false);
}
break;
}
@@ -328,20 +340,19 @@ Dbtux::searchToScan(Frag& frag, ConstData boundInfo, unsigned boundCount, TreePo
ret = cmpScanBound(frag, 0, boundInfo, boundCount, c_entryKey);
ndbrequire(ret != NdbSqlUtil::CmpUnknown);
if (ret < 0) {
- // start scanning from current entry
+ // found first entry satisfying the bound
treePos.m_loc = currNode.m_loc;
treePos.m_pos = j;
- treePos.m_match = false;
treePos.m_dir = 3;
return;
}
}
+ // bound is to right of this node
if (! bottomNode.isNull()) {
jam();
// start scanning the l.u.b
treePos.m_loc = bottomNode.m_loc;
treePos.m_pos = 0;
- treePos.m_match = false;
treePos.m_dir = 3;
return;
}
@@ -349,3 +360,90 @@ Dbtux::searchToScan(Frag& frag, ConstData boundInfo, unsigned boundCount, TreePo
treePos.m_loc = currNode.m_loc;
treePos.m_dir = 1;
}
+
+void
+Dbtux::searchToScanDescending(Frag& frag, ConstData boundInfo, unsigned boundCount, TreePos& treePos)
+{
+ const TreeHead& tree = frag.m_tree;
+ NodeHandle currNode(frag);
+ currNode.m_loc = tree.m_root;
+ NodeHandle glbNode(frag); // potential g.l.b of final node
+ NodeHandle bottomNode(frag);
+ // always before entry
+ treePos.m_match = false;
+ while (true) {
+ jam();
+ selectNode(currNode, currNode.m_loc);
+ int ret;
+ // compare prefix
+ ret = cmpScanBound(frag, 1, boundInfo, boundCount, currNode.getPref(), tree.m_prefSize);
+ if (ret == NdbSqlUtil::CmpUnknown) {
+ jam();
+ // read and compare all attributes
+ readKeyAttrs(frag, currNode.getMinMax(0), 0, c_entryKey);
+ ret = cmpScanBound(frag, 1, boundInfo, boundCount, c_entryKey);
+ ndbrequire(ret != NdbSqlUtil::CmpUnknown);
+ }
+ if (ret < 0) {
+ // bound is left of this node
+ jam();
+ const TupLoc loc = currNode.getLink(0);
+ if (loc != NullTupLoc) {
+ jam();
+ // continue to left subtree
+ currNode.m_loc = loc;
+ continue;
+ }
+ if (! glbNode.isNull()) {
+ jam();
+ // move up to the g.l.b but remember the bottom node
+ bottomNode = currNode;
+ currNode = glbNode;
+ } else {
+ // empty result set
+ return;
+ }
+ } else if (ret > 0) {
+ // bound is at or right of this node
+ jam();
+ const TupLoc loc = currNode.getLink(1);
+ if (loc != NullTupLoc) {
+ jam();
+ // save potential g.l.b
+ glbNode = currNode;
+ // continue to right subtree
+ currNode.m_loc = loc;
+ continue;
+ }
+ } else {
+ ndbrequire(false);
+ }
+ break;
+ }
+ for (unsigned j = 0, occup = currNode.getOccup(); j < occup; j++) {
+ jam();
+ int ret;
+ // read and compare attributes
+ readKeyAttrs(frag, currNode.getEnt(j), 0, c_entryKey);
+ ret = cmpScanBound(frag, 1, boundInfo, boundCount, c_entryKey);
+ ndbrequire(ret != NdbSqlUtil::CmpUnknown);
+ if (ret < 0) {
+ if (j > 0) {
+ // start scanning from previous entry
+ treePos.m_loc = currNode.m_loc;
+ treePos.m_pos = j - 1;
+ treePos.m_dir = 3;
+ return;
+ }
+ // start scanning upwards (pretend we came from left child)
+ treePos.m_loc = currNode.m_loc;
+ treePos.m_pos = 0;
+ treePos.m_dir = 0;
+ return;
+ }
+ }
+ // start scanning this node
+ treePos.m_loc = currNode.m_loc;
+ treePos.m_pos = currNode.getOccup() - 1;
+ treePos.m_dir = 3;
+}
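
On a flat sorted container the job of searchToScanDescending above, finding the entry where a descending scan should begin, is just "last entry not above the upper start bound". The sketch below (std::vector instead of the T-tree, inclusive bound, hypothetical function name) shows the result the tree search is aiming for; the real routine additionally has to cope with node prefixes, strict bounds and positioning between nodes.

    #include <algorithm>
    #include <cstdio>
    #include <vector>

    // Return the index of the entry where a descending scan with the given
    // (inclusive) upper start bound should begin, or -1 if every entry is
    // above the bound (empty result set). This mirrors what
    // searchToScanDescending computes on the T-tree, but on a sorted vector.
    static int descendingStart(const std::vector<int>& sorted, int upperBound)
    {
      // first element strictly greater than the bound
      std::vector<int>::const_iterator it =
          std::upper_bound(sorted.begin(), sorted.end(), upperBound);
      if (it == sorted.begin())
        return -1;                              // all entries violate the bound
      return (int)(it - sorted.begin()) - 1;    // last entry <= bound
    }

    int main()
    {
      std::vector<int> v;
      v.push_back(2); v.push_back(4); v.push_back(6); v.push_back(8);
      std::printf("%d\n", descendingStart(v, 5));   // prints 1 (entry 4)
      std::printf("%d\n", descendingStart(v, 9));   // prints 3 (entry 8)
      std::printf("%d\n", descendingStart(v, 1));   // prints -1 (empty)
      return 0;
    }
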
diff --git a/ndb/src/kernel/blocks/dbtux/DbtuxTree.cpp b/ndb/src/kernel/blocks/dbtux/DbtuxTree.cpp
index b9e3b593a00..5107a8d8e31 100644
--- a/ndb/src/kernel/blocks/dbtux/DbtuxTree.cpp
+++ b/ndb/src/kernel/blocks/dbtux/DbtuxTree.cpp
@@ -226,6 +226,9 @@ Dbtux::treeRemoveInner(Frag& frag, NodeHandle lubNode, unsigned pos)
// borrow max entry from semi/leaf
Uint32 scanList = RNIL;
nodePopDown(glbNode, glbNode.getOccup() - 1, ent, &scanList);
+ // g.l.b may be empty now
+ // a descending scan may try to enter the empty g.l.b
+ // we prevent this in scanNext
nodePopUp(lubNode, pos, ent, scanList);
if (glbNode.getLink(0) != NullTupLoc) {
jam();
diff --git a/ndb/src/kernel/blocks/dbtux/Times.txt b/ndb/src/kernel/blocks/dbtux/Times.txt
index 1e6d0a0a329..68120084846 100644
--- a/ndb/src/kernel/blocks/dbtux/Times.txt
+++ b/ndb/src/kernel/blocks/dbtux/Times.txt
@@ -138,6 +138,14 @@ before mc02/c 5 ms 13 ms 126 pct
after mc02/c 5 ms 10 ms 70 pct
mc02/d 178 ms 242 ms 69 pct
-[ prelim preformance fix for max batch size 16 -> 992 ]
+[ prelim performance fix for max batch size 16 -> 992 ]
+
+wl-2066 mc02/c 5 ms 10 ms 87 pct
+before mc02/d 140 ms 237 ms 69 pct
+
+wl-2066 mc02/c 5 ms 10 ms 69 pct
+after mc02/d 150 ms 229 ms 52 pct
+
+[ wl-2066 = remove ACC storage, use TUX test to see effect ]
vim: set et:
diff --git a/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp b/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp
index e3ec1f9723e..e50e6bd242b 100644
--- a/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp
+++ b/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp
@@ -1458,7 +1458,7 @@ void Ndbcntr::execNODE_FAILREP(Signal* signal)
while(!allFailed.isclear()){
nodeId = allFailed.find(nodeId + 1);
allFailed.clear(nodeId);
- signal->theData[0] = EventReport::NODE_FAILREP;
+ signal->theData[0] = NDB_LE_NODE_FAILREP;
signal->theData[1] = nodeId;
signal->theData[2] = 0;
sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 3, JBB);
@@ -1574,13 +1574,9 @@ void Ndbcntr::createSystableLab(Signal* signal, unsigned index)
ndbassert(column.pos == i);
w.add(DictTabInfo::AttributeName, column.name);
w.add(DictTabInfo::AttributeId, (Uint32)column.pos);
- //w.add(DictTabInfo::AttributeType, DictTabInfo::UnSignedType);
- //w.add(DictTabInfo::AttributeSize, DictTabInfo::a32Bit);
- //w.add(DictTabInfo::AttributeArraySize, 1);
w.add(DictTabInfo::AttributeKeyFlag, (Uint32)column.keyFlag);
//w.add(DictTabInfo::AttributeStorage, (Uint32)DictTabInfo::MainMemory);
w.add(DictTabInfo::AttributeNullableFlag, (Uint32)column.nullable);
- // ext type overrides
w.add(DictTabInfo::AttributeExtType, (Uint32)column.type);
w.add(DictTabInfo::AttributeExtLength, (Uint32)column.length);
w.add(DictTabInfo::AttributeEnd, (Uint32)true);
@@ -1967,7 +1963,7 @@ Ndbcntr::execRESUME_REQ(Signal* signal){
jamEntry();
- signal->theData[0] = EventReport::SingleUser;
+ signal->theData[0] = NDB_LE_SingleUser;
signal->theData[1] = 2;
sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 2, JBB);
@@ -2046,13 +2042,13 @@ Ndbcntr::execSTOP_REQ(Signal* signal){
jam();
return;
}
- signal->theData[0] = EventReport::NDBStopStarted;
+ signal->theData[0] = NDB_LE_NDBStopStarted;
signal->theData[1] = StopReq::getSystemStop(c_stopRec.stopReq.requestInfo) ? 1 : 0;
sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 2, JBB);
}
else
{
- signal->theData[0] = EventReport::SingleUser;
+ signal->theData[0] = NDB_LE_SingleUser;
signal->theData[1] = 0;
sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 2, JBB);
}
@@ -2144,7 +2140,7 @@ Ndbcntr::StopRecord::checkNodeFail(Signal* signal){
cntr.updateNodeState(signal, newState);
}
- signal->theData[0] = EventReport::NDBStopAborted;
+ signal->theData[0] = NDB_LE_NDBStopAborted;
cntr.sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 1, JBB);
return false;
@@ -2252,7 +2248,7 @@ void Ndbcntr::execABORT_ALL_CONF(Signal* signal){
c_stopRec.stopReq.senderRef = 0; // the command is done
- signal->theData[0] = EventReport::SingleUser;
+ signal->theData[0] = NDB_LE_SingleUser;
signal->theData[1] = 1;
signal->theData[2] = c_stopRec.stopReq.singleUserApi;
sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 3, JBB);
@@ -2440,7 +2436,7 @@ Ndbcntr::execFSREMOVECONF(Signal* signal){
}
void Ndbcntr::Missra::execSTART_ORD(Signal* signal){
- signal->theData[0] = EventReport::NDBStartStarted;
+ signal->theData[0] = NDB_LE_NDBStartStarted;
signal->theData[1] = NDB_VERSION;
cntr.sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 2, JBB);
@@ -2570,14 +2566,14 @@ void Ndbcntr::Missra::sendNextSTTOR(Signal* signal){
* At least one wanted this start phase, report it
*/
jam();
- signal->theData[0] = EventReport::StartPhaseCompleted;
+ signal->theData[0] = NDB_LE_StartPhaseCompleted;
signal->theData[1] = currentStartPhase;
signal->theData[2] = cntr.ctypeOfStart;
cntr.sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 3, JBB);
}
}
- signal->theData[0] = EventReport::NDBStartCompleted;
+ signal->theData[0] = NDB_LE_NDBStartCompleted;
signal->theData[1] = NDB_VERSION;
cntr.sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 2, JBB);
diff --git a/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp b/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp
index c0ffa722f1c..45073b63a5d 100644
--- a/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp
+++ b/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp
@@ -96,6 +96,7 @@ AsyncFile::AsyncFile() :
theReportTo(0),
theMemoryChannelPtr(NULL)
{
+ m_current_request= m_last_request= 0;
}
void
@@ -177,6 +178,7 @@ AsyncFile::run()
endReq();
return;
}//if
+ m_current_request= request;
switch (request->action) {
case Request:: open:
openReq(request);
@@ -226,6 +228,8 @@ AsyncFile::run()
abort();
break;
}//switch
+ m_last_request= request;
+ m_current_request= 0;
// No need to signal as ndbfs only uses tryRead
theReportTo->writeChannelNoSignal(request);
@@ -1033,3 +1037,60 @@ void printErrorAndFlags(Uint32 used_flags) {
}
#endif
+
+NdbOut&
+operator<<(NdbOut& out, const Request& req)
+{
+ out << "[ Request: file: " << hex << req.file
+ << " userRef: " << hex << req.theUserReference
+ << " userData: " << dec << req.theUserPointer
+ << " theFilePointer: " << req.theFilePointer
+ << " action: ";
+ switch(req.action){
+ case Request::open:
+ out << "open";
+ break;
+ case Request::close:
+ out << "close";
+ break;
+ case Request::closeRemove:
+ out << "closeRemove";
+ break;
+ case Request::read: // Always leave readv directly after
+ out << "read";
+ break;
+ case Request::readv:
+ out << "readv";
+ break;
+ case Request::write: // Always leave writev directly after
+ out << "write";
+ break;
+ case Request::writev:
+ out << "writev";
+ break;
+ case Request::writeSync: // Always leave writevSync directly after
+ out << "writeSync";
+ break;
+ // writeSync because SimblockAsyncFileSystem depends on it
+ case Request::writevSync:
+ out << "writevSync";
+ break;
+ case Request::sync:
+ out << "sync";
+ break;
+ case Request::end:
+ out << "end";
+ break;
+ case Request::append:
+ out << "append";
+ break;
+ case Request::rmrf:
+ out << "rmrf";
+ break;
+ default:
+ out << (Uint32)req.action;
+ break;
+ }
+ out << " ]";
+ return out;
+}
diff --git a/ndb/src/kernel/blocks/ndbfs/AsyncFile.hpp b/ndb/src/kernel/blocks/ndbfs/AsyncFile.hpp
index 2176c93c5d5..997bf40fe2a 100644
--- a/ndb/src/kernel/blocks/ndbfs/AsyncFile.hpp
+++ b/ndb/src/kernel/blocks/ndbfs/AsyncFile.hpp
@@ -160,6 +160,7 @@ public:
Uint32 theTrace;
};
+NdbOut& operator <<(NdbOut&, const Request&);
inline
void
@@ -173,6 +174,7 @@ Request::set(BlockReference userReference,
class AsyncFile
{
+ friend class Ndbfs;
public:
AsyncFile();
~AsyncFile();
@@ -188,6 +190,7 @@ public:
bool isOpen();
Filename theFileName;
+ Request *m_current_request, *m_last_request;
private:
void openReq(Request *request);
diff --git a/ndb/src/kernel/blocks/ndbfs/CircularIndex.hpp b/ndb/src/kernel/blocks/ndbfs/CircularIndex.hpp
index 349cccdbcb4..460ad3f614a 100644
--- a/ndb/src/kernel/blocks/ndbfs/CircularIndex.hpp
+++ b/ndb/src/kernel/blocks/ndbfs/CircularIndex.hpp
@@ -68,7 +68,7 @@ class CircularIndex
{
public:
inline CircularIndex( int start= 0,int size=256 );
- operator int ();
+ operator int () const;
CircularIndex& operator ++ ();
friend int full( const CircularIndex& write, const CircularIndex& read );
friend int empty( const CircularIndex& write, const CircularIndex& read );
@@ -77,7 +77,7 @@ private:
int theIndex;
};
-inline CircularIndex::operator int ()
+inline CircularIndex::operator int () const
{
return theIndex;
}
diff --git a/ndb/src/kernel/blocks/ndbfs/MemoryChannel.hpp b/ndb/src/kernel/blocks/ndbfs/MemoryChannel.hpp
index 6bb9684f3ca..9037bbad765 100644
--- a/ndb/src/kernel/blocks/ndbfs/MemoryChannel.hpp
+++ b/ndb/src/kernel/blocks/ndbfs/MemoryChannel.hpp
@@ -97,8 +97,20 @@ private:
NdbMutex* theMutexPtr;
NdbCondition* theConditionPtr;
+ template<class U>
+ friend NdbOut& operator<<(NdbOut& out, const MemoryChannel<U> & chn);
};
+template <class T>
+NdbOut& operator<<(NdbOut& out, const MemoryChannel<T> & chn)
+{
+ NdbMutex_Lock(chn.theMutexPtr);
+ out << "[ theSize: " << chn.theSize
+ << " theReadIndex: " << (int)chn.theReadIndex
+ << " theWriteIndex: " << (int)chn.theWriteIndex << " ]";
+ NdbMutex_Unlock(chn.theMutexPtr);
+ return out;
+}
template <class T> MemoryChannel<T>::MemoryChannel( int size):
theSize(size),
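
Two C++ details above are worth spelling out: the operator<< is a function template declared as a friend (so it can take the channel's private mutex and read its indexes), and because it is a template, Ndbfs.cpp later adds an explicit instantiation for MemoryChannel<Request> (see the last hunk of the Ndbfs.cpp diff below). A stripped-down version of the pattern, using std::ostream and hypothetical names and omitting the mutex, looks like this:

    #include <iostream>

    template <class T> class Channel;

    // declare the operator template before the class so the friend
    // declaration refers to it
    template <class T>
    std::ostream& operator<<(std::ostream& out, const Channel<T>& chn);

    template <class T>
    class Channel {
    public:
      explicit Channel(int size) : theSize(size), theCount(0) {}
    private:
      int theSize;
      int theCount;
      // befriend every instantiation of the operator template so it can read
      // private members (the real code also locks the channel mutex there)
      template <class U>
      friend std::ostream& operator<<(std::ostream& out, const Channel<U>& chn);
    };

    template <class T>
    std::ostream& operator<<(std::ostream& out, const Channel<T>& chn)
    {
      return out << "[ size: " << chn.theSize << " count: " << chn.theCount << " ]";
    }

    // explicit instantiation, as done for MemoryChannel<Request> in Ndbfs.cpp
    template std::ostream& operator<<(std::ostream&, const Channel<int>&);

    int main()
    {
      Channel<int> c(256);
      std::cout << c << std::endl;   // prints [ size: 256 count: 0 ]
      return 0;
    }
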
diff --git a/ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp b/ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp
index 6f848d7fe16..d6b19c8f872 100644
--- a/ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp
+++ b/ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp
@@ -1006,6 +1006,30 @@ Ndbfs::execDUMP_STATE_ORD(Signal* signal)
}
return;
}
+
+ if(signal->theData[0] == 404)
+ {
+ ndbrequire(signal->getLength() == 2);
+ Uint32 file= signal->theData[1];
+ AsyncFile* openFile = theOpenFiles.find(file);
+ ndbrequire(openFile);
+ ndbout_c("File: %s %p", openFile->theFileName.c_str(), openFile);
+ Request* curr = openFile->m_current_request;
+ Request* last = openFile->m_last_request;
+ if(curr)
+ ndbout << "Current request: " << *curr << endl;
+ if(last)
+ ndbout << "Last request: " << *last << endl;
+
+ ndbout << "theReportTo " << *openFile->theReportTo << endl;
+ ndbout << "theMemoryChannelPtr" << *openFile->theMemoryChannelPtr << endl;
+
+ ndbout << "All files: " << endl;
+ for (unsigned i = 0; i < theFiles.size(); i++){
+ AsyncFile* file = theFiles[i];
+ ndbout_c("%2d (0x%x): %s", i,file, file->isOpen()?"OPEN":"CLOSED");
+ }
+ }
}//Ndbfs::execDUMP_STATE_ORD()
@@ -1016,3 +1040,4 @@ template class Vector<AsyncFile*>;
template class Vector<OpenFiles::OpenFileItem>;
template class MemoryChannel<Request>;
template class Pool<Request>;
+template NdbOut& operator<<(NdbOut&, const MemoryChannel<Request>&);
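
Taken together, the AsyncFile and Ndbfs changes above add simple post-mortem instrumentation: each file thread records the request it is currently executing and the last one it completed, and the new DUMP 404 handler prints both along with the file's memory channel, which helps when a file thread appears hung. A condensed, single-threaded sketch of that bookkeeping with hypothetical types:

    #include <cstdio>

    struct Request { const char* action; };

    // Condensed version of the m_current_request / m_last_request bookkeeping
    // added to AsyncFile: record what is being executed before dispatching and
    // what finished afterwards, so a later dump can show both.
    class FileWorker {
    public:
      FileWorker() : m_current_request(0), m_last_request(0) {}

      void run(Request* req)
      {
        m_current_request = req;           // a dump taken now shows this request
        std::printf("executing %s\n", req->action);
        m_last_request = req;              // completed
        m_current_request = 0;
      }

      void dump() const                    // roughly what the DUMP handler prints
      {
        if (m_current_request)
          std::printf("Current request: %s\n", m_current_request->action);
        if (m_last_request)
          std::printf("Last request: %s\n", m_last_request->action);
      }

    private:
      Request* m_current_request;
      Request* m_last_request;
    };

    int main()
    {
      Request open = { "open" };
      Request read = { "read" };
      FileWorker w;
      w.run(&open);
      w.run(&read);
      w.dump();                            // prints only "Last request: read"
      return 0;
    }
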
diff --git a/ndb/src/kernel/blocks/qmgr/Qmgr.hpp b/ndb/src/kernel/blocks/qmgr/Qmgr.hpp
index f6fafdae594..f84fae02fc4 100644
--- a/ndb/src/kernel/blocks/qmgr/Qmgr.hpp
+++ b/ndb/src/kernel/blocks/qmgr/Qmgr.hpp
@@ -118,8 +118,7 @@ public:
struct NodeRec {
UintR ndynamicId;
Phase phase;
- UintR alarmCount;
-
+
QmgrState sendPrepFailReqStatus;
QmgrState sendCommitFailReqStatus;
QmgrState sendPresToStatus;
@@ -304,7 +303,7 @@ private:
void stateArbitChoose(Signal* signal);
void stateArbitCrash(Signal* signal);
void computeArbitNdbMask(NodeBitmask& aMask);
- void reportArbitEvent(Signal* signal, EventReport::EventType type);
+ void reportArbitEvent(Signal* signal, Ndb_logevent_type type);
// Initialisation
void initData();
diff --git a/ndb/src/kernel/blocks/qmgr/QmgrInit.cpp b/ndb/src/kernel/blocks/qmgr/QmgrInit.cpp
index 43d8f0971ed..4061455092d 100644
--- a/ndb/src/kernel/blocks/qmgr/QmgrInit.cpp
+++ b/ndb/src/kernel/blocks/qmgr/QmgrInit.cpp
@@ -35,6 +35,9 @@ void Qmgr::initData()
Uint32 hbDBAPI = 500;
setHbApiDelay(hbDBAPI);
+
+ c_connectedNodes.clear();
+ c_connectedNodes.set(getOwnNodeId());
}//Qmgr::initData()
void Qmgr::initRecords()
diff --git a/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp b/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp
index 6095895e7c2..0f736c54555 100644
--- a/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp
+++ b/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp
@@ -67,7 +67,7 @@ void Qmgr::execCM_HEARTBEAT(Signal* signal)
jamEntry();
hbNodePtr.i = signal->theData[0];
ptrCheckGuard(hbNodePtr, MAX_NDB_NODES, nodeRec);
- hbNodePtr.p->alarmCount = 0;
+ setNodeInfo(hbNodePtr.i).m_heartbeat_cnt= 0;
return;
}//Qmgr::execCM_HEARTBEAT()
@@ -609,7 +609,7 @@ void Qmgr::execCM_REGCONF(Signal* signal)
// Send this as an EVENT REPORT to inform about hearing about
// other NDB node proclaiming to be president.
/*--------------------------------------------------------------*/
- signal->theData[0] = EventReport::CM_REGCONF;
+ signal->theData[0] = NDB_LE_CM_REGCONF;
signal->theData[1] = getOwnNodeId();
signal->theData[2] = cpresident;
signal->theData[3] = TdynamicId;
@@ -735,7 +735,7 @@ void Qmgr::execCM_REGREF(Signal* signal)
// Send this as an EVENT REPORT to inform about hearing about
// other NDB node proclaiming not to be president.
/*--------------------------------------------------------------*/
- signal->theData[0] = EventReport::CM_REGREF;
+ signal->theData[0] = NDB_LE_CM_REGREF;
signal->theData[1] = getOwnNodeId();
signal->theData[2] = TaddNodeno;
//-----------------------------------------
@@ -1041,7 +1041,7 @@ void Qmgr::execCM_ADD(Signal* signal)
jam();
ndbrequire(addNodePtr.p->phase == ZSTARTING);
addNodePtr.p->phase = ZRUNNING;
- addNodePtr.p->alarmCount = 0;
+ setNodeInfo(addNodePtr.i).m_heartbeat_cnt= 0;
c_clusterNodes.set(addNodePtr.i);
findNeighbours(signal);
@@ -1079,7 +1079,7 @@ Qmgr::joinedCluster(Signal* signal, NodeRecPtr nodePtr){
* NODES IN THE CLUSTER.
*/
nodePtr.p->phase = ZRUNNING;
- nodePtr.p->alarmCount = 0;
+ setNodeInfo(nodePtr.i).m_heartbeat_cnt= 0;
findNeighbours(signal);
c_clusterNodes.set(nodePtr.i);
c_start.reset();
@@ -1300,11 +1300,11 @@ void Qmgr::findNeighbours(Signal* signal)
*---------------------------------------------------------------------*/
fnNodePtr.i = cneighbourl;
ptrCheckGuard(fnNodePtr, MAX_NDB_NODES, nodeRec);
- fnNodePtr.p->alarmCount = 0;
+ setNodeInfo(fnNodePtr.i).m_heartbeat_cnt= 0;
}//if
}//if
- signal->theData[0] = EventReport::FIND_NEIGHBOURS;
+ signal->theData[0] = NDB_LE_FIND_NEIGHBOURS;
signal->theData[1] = getOwnNodeId();
signal->theData[2] = cneighbourl;
signal->theData[3] = cneighbourh;
@@ -1348,8 +1348,8 @@ void Qmgr::initData(Signal* signal)
} else {
nodePtr.p->phase = ZAPI_INACTIVE;
}
-
- nodePtr.p->alarmCount = 0;
+
+ setNodeInfo(nodePtr.i).m_heartbeat_cnt= 0;
nodePtr.p->sendPrepFailReqStatus = Q_NOT_ACTIVE;
nodePtr.p->sendCommitFailReqStatus = Q_NOT_ACTIVE;
nodePtr.p->sendPresToStatus = Q_NOT_ACTIVE;
@@ -1530,7 +1530,7 @@ void Qmgr::sendHeartbeat(Signal* signal)
sendSignal(localNodePtr.p->blockRef, GSN_CM_HEARTBEAT, signal, 1, JBA);
#ifdef VM_TRACE
- signal->theData[0] = EventReport::SentHeartbeat;
+ signal->theData[0] = NDB_LE_SentHeartbeat;
signal->theData[1] = localNodePtr.i;
sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 2, JBB);
#endif
@@ -1551,24 +1551,24 @@ void Qmgr::checkHeartbeat(Signal* signal)
}//if
ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRec);
- nodePtr.p->alarmCount ++;
+ setNodeInfo(nodePtr.i).m_heartbeat_cnt++;
ndbrequire(nodePtr.p->phase == ZRUNNING);
ndbrequire(getNodeInfo(nodePtr.i).m_type == NodeInfo::DB);
- if(nodePtr.p->alarmCount > 2){
- signal->theData[0] = EventReport::MissedHeartbeat;
+ if(getNodeInfo(nodePtr.i).m_heartbeat_cnt > 2){
+ signal->theData[0] = NDB_LE_MissedHeartbeat;
signal->theData[1] = nodePtr.i;
- signal->theData[2] = nodePtr.p->alarmCount - 1;
+ signal->theData[2] = getNodeInfo(nodePtr.i).m_heartbeat_cnt - 1;
sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 3, JBB);
}
- if (nodePtr.p->alarmCount > 4) {
+ if (getNodeInfo(nodePtr.i).m_heartbeat_cnt > 4) {
jam();
/**----------------------------------------------------------------------
* OUR LEFT NEIGHBOUR HAVE KEPT QUIET FOR THREE CONSECUTIVE HEARTBEAT
* PERIODS. THUS WE DECLARE HIM DOWN.
*----------------------------------------------------------------------*/
- signal->theData[0] = EventReport::DeadDueToHeartbeat;
+ signal->theData[0] = NDB_LE_DeadDueToHeartbeat;
signal->theData[1] = nodePtr.i;
sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 2, JBB);
@@ -1594,16 +1594,16 @@ void Qmgr::apiHbHandlingLab(Signal* signal)
if (TnodePtr.p->phase == ZAPI_ACTIVE){
jam();
- TnodePtr.p->alarmCount ++;
+ setNodeInfo(TnodePtr.i).m_heartbeat_cnt++;
- if(TnodePtr.p->alarmCount > 2){
- signal->theData[0] = EventReport::MissedHeartbeat;
+ if(getNodeInfo(TnodePtr.i).m_heartbeat_cnt > 2){
+ signal->theData[0] = NDB_LE_MissedHeartbeat;
signal->theData[1] = nodeId;
- signal->theData[2] = TnodePtr.p->alarmCount - 1;
+ signal->theData[2] = getNodeInfo(TnodePtr.i).m_heartbeat_cnt - 1;
sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 3, JBB);
}
- if (TnodePtr.p->alarmCount > 4) {
+ if (getNodeInfo(TnodePtr.i).m_heartbeat_cnt > 4) {
jam();
/*------------------------------------------------------------------*/
/* THE API NODE HAS NOT SENT ANY HEARTBEAT FOR THREE SECONDS.
@@ -1612,7 +1612,7 @@ void Qmgr::apiHbHandlingLab(Signal* signal)
/*------------------------------------------------------------------*/
/* We call node_failed to release all connections for this api node */
/*------------------------------------------------------------------*/
- signal->theData[0] = EventReport::DeadDueToHeartbeat;
+ signal->theData[0] = NDB_LE_DeadDueToHeartbeat;
signal->theData[1] = nodeId;
sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 2, JBB);
@@ -1635,16 +1635,17 @@ void Qmgr::checkStartInterface(Signal* signal)
ptrAss(nodePtr, nodeRec);
if (nodePtr.p->phase == ZFAIL_CLOSING) {
jam();
- nodePtr.p->alarmCount = nodePtr.p->alarmCount + 1;
+ setNodeInfo(nodePtr.i).m_heartbeat_cnt++;
if (c_connectedNodes.get(nodePtr.i)){
jam();
/*-------------------------------------------------------------------*/
// We need to ensure that the connection is not restored until it has
// been disconnected for at least three seconds.
/*-------------------------------------------------------------------*/
- nodePtr.p->alarmCount = 0;
+ setNodeInfo(nodePtr.i).m_heartbeat_cnt= 0;
}//if
- if ((nodePtr.p->alarmCount > 3) && (nodePtr.p->failState == NORMAL)) {
+ if ((getNodeInfo(nodePtr.i).m_heartbeat_cnt > 3)
+ && (nodePtr.p->failState == NORMAL)) {
/**------------------------------------------------------------------
* WE HAVE DISCONNECTED THREE SECONDS AGO. WE ARE NOW READY TO
* CONNECT AGAIN AND ACCEPT NEW REGISTRATIONS FROM THIS NODE.
@@ -1660,18 +1661,18 @@ void Qmgr::checkStartInterface(Signal* signal)
nodePtr.p->phase = ZINIT;
}//if
- nodePtr.p->alarmCount = 0;
+ setNodeInfo(nodePtr.i).m_heartbeat_cnt= 0;
signal->theData[0] = 0;
signal->theData[1] = nodePtr.i;
sendSignal(CMVMI_REF, GSN_OPEN_COMREQ, signal, 2, JBA);
} else {
- if(((nodePtr.p->alarmCount + 1) % 60) == 0){
+ if(((getNodeInfo(nodePtr.i).m_heartbeat_cnt + 1) % 60) == 0){
char buf[100];
BaseString::snprintf(buf, sizeof(buf),
"Failure handling of node %d has not completed in %d min."
" - state = %d",
nodePtr.i,
- (nodePtr.p->alarmCount + 1)/60,
+ (getNodeInfo(nodePtr.i).m_heartbeat_cnt + 1)/60,
nodePtr.p->failState);
warningEvent(buf);
}
@@ -1709,7 +1710,7 @@ void Qmgr::sendApiFailReq(Signal* signal, Uint16 failedNodeNo)
* WE ONLY NEED TO SET PARAMETERS TO ENABLE A NEW CONNECTION IN A FEW
* SECONDS.
*-------------------------------------------------------------------------*/
- failedNodePtr.p->alarmCount = 0;
+ setNodeInfo(failedNodePtr.i).m_heartbeat_cnt= 0;
CloseComReqConf * const closeCom = (CloseComReqConf *)&signal->theData[0];
@@ -1862,7 +1863,7 @@ void Qmgr::node_failed(Signal* signal, Uint16 aFailedNode)
/*---------------------------------------------------------------------*/
failedNodePtr.p->failState = NORMAL;
failedNodePtr.p->phase = ZFAIL_CLOSING;
- failedNodePtr.p->alarmCount = 0;
+ setNodeInfo(failedNodePtr.i).m_heartbeat_cnt= 0;
CloseComReqConf * const closeCom =
(CloseComReqConf *)&signal->theData[0];
@@ -1956,8 +1957,8 @@ void Qmgr::execAPI_REGREQ(Signal* signal)
}
setNodeInfo(apiNodePtr.i).m_version = version;
-
- apiNodePtr.p->alarmCount = 0;
+
+ setNodeInfo(apiNodePtr.i).m_heartbeat_cnt= 0;
ApiRegConf * const apiRegConf = (ApiRegConf *)&signal->theData[0];
apiRegConf->qmgrRef = reference();
@@ -2477,7 +2478,7 @@ void Qmgr::execCOMMIT_FAILREQ(Signal* signal)
ptrCheckGuard(nodePtr, MAX_NDB_NODES, nodeRec);
nodePtr.p->phase = ZFAIL_CLOSING;
nodePtr.p->failState = WAITING_FOR_NDB_FAILCONF;
- nodePtr.p->alarmCount = 0;
+ setNodeInfo(nodePtr.i).m_heartbeat_cnt= 0;
c_clusterNodes.clear(nodePtr.i);
}//for
/*----------------------------------------------------------------------*/
@@ -2735,7 +2736,7 @@ void Qmgr::failReport(Signal* signal,
failedNodePtr.p->sendPrepFailReqStatus = Q_NOT_ACTIVE;
failedNodePtr.p->sendCommitFailReqStatus = Q_NOT_ACTIVE;
failedNodePtr.p->sendPresToStatus = Q_NOT_ACTIVE;
- failedNodePtr.p->alarmCount = 0;
+ setNodeInfo(failedNodePtr.i).m_heartbeat_cnt= 0;
if (aSendFailRep == ZTRUE) {
jam();
if (failedNodePtr.i != getOwnNodeId()) {
@@ -3059,7 +3060,7 @@ Qmgr::handleArbitApiFail(Signal* signal, Uint16 nodeId)
jam();
return;
}
- reportArbitEvent(signal, EventReport::ArbitState);
+ reportArbitEvent(signal, NDB_LE_ArbitState);
arbitRec.node = 0;
switch (arbitRec.state) {
case ARBIT_NULL: // should not happen
@@ -3230,7 +3231,7 @@ Qmgr::handleArbitCheck(Signal* signal)
arbitRec.newstate = true;
break;
}
- reportArbitEvent(signal, EventReport::ArbitResult);
+ reportArbitEvent(signal, NDB_LE_ArbitResult);
switch (arbitRec.state) {
default:
jam();
@@ -3257,7 +3258,7 @@ Qmgr::startArbitThread(Signal* signal)
jam();
ndbrequire(cpresident == getOwnNodeId());
arbitRec.code = ArbitCode::ThreadStart;
- reportArbitEvent(signal, EventReport::ArbitState);
+ reportArbitEvent(signal, NDB_LE_ArbitState);
signal->theData[1] = ++arbitRec.thread;
runArbitThread(signal);
}
@@ -3501,7 +3502,7 @@ Qmgr::execARBIT_PREPREQ(Signal* signal)
arbitRec.node = sd->node;
arbitRec.ticket = sd->ticket;
arbitRec.code = sd->code;
- reportArbitEvent(signal, EventReport::ArbitState);
+ reportArbitEvent(signal, NDB_LE_ArbitState);
arbitRec.state = ARBIT_RUN;
arbitRec.newstate = true;
if (sd->code == ArbitCode::PrepAtrun) {
@@ -3588,7 +3589,7 @@ Qmgr::stateArbitStart(Signal* signal)
}
if (arbitRec.recvCount) {
jam();
- reportArbitEvent(signal, EventReport::ArbitState);
+ reportArbitEvent(signal, NDB_LE_ArbitState);
if (arbitRec.code == ArbitCode::ApiStart) {
jam();
arbitRec.state = ARBIT_RUN;
@@ -3602,7 +3603,7 @@ Qmgr::stateArbitStart(Signal* signal)
if (arbitRec.getTimediff() > getArbitTimeout()) {
jam();
arbitRec.code = ArbitCode::ErrTimeout;
- reportArbitEvent(signal, EventReport::ArbitState);
+ reportArbitEvent(signal, NDB_LE_ArbitState);
arbitRec.state = ARBIT_INIT;
arbitRec.newstate = true;
return;
@@ -3709,7 +3710,7 @@ Qmgr::stateArbitChoose(Signal* signal)
}
if (arbitRec.recvCount) {
jam();
- reportArbitEvent(signal, EventReport::ArbitResult);
+ reportArbitEvent(signal, NDB_LE_ArbitResult);
if (arbitRec.code == ArbitCode::WinChoose) {
jam();
sendCommitFailReq(signal); // start commit of failed nodes
@@ -3725,7 +3726,7 @@ Qmgr::stateArbitChoose(Signal* signal)
if (arbitRec.getTimediff() > getArbitTimeout()) {
jam();
arbitRec.code = ArbitCode::ErrTimeout;
- reportArbitEvent(signal, EventReport::ArbitState);
+ reportArbitEvent(signal, NDB_LE_ArbitState);
arbitRec.state = ARBIT_CRASH;
arbitRec.newstate = true;
stateArbitCrash(signal); // do it at once
@@ -3826,7 +3827,7 @@ Qmgr::computeArbitNdbMask(NodeBitmask& aMask)
* where sender (word 0) is event type.
*/
void
-Qmgr::reportArbitEvent(Signal* signal, EventReport::EventType type)
+Qmgr::reportArbitEvent(Signal* signal, Ndb_logevent_type type)
{
ArbitSignalData* sd = (ArbitSignalData*)&signal->theData[0];
sd->sender = type;
diff --git a/ndb/src/kernel/blocks/suma/Suma.cpp b/ndb/src/kernel/blocks/suma/Suma.cpp
index 84a59f440d9..c4225ad2a4c 100644
--- a/ndb/src/kernel/blocks/suma/Suma.cpp
+++ b/ndb/src/kernel/blocks/suma/Suma.cpp
@@ -50,6 +50,17 @@
//#define EVENT_DEBUG
//#define EVENT_PH3_DEBUG
//#define EVENT_DEBUG2
+#if 0
+#undef DBUG_ENTER
+#undef DBUG_PRINT
+#undef DBUG_RETURN
+#undef DBUG_VOID_RETURN
+
+#define DBUG_ENTER(a) {ndbout_c("%s:%d >%s", __FILE__, __LINE__, a);}
+#define DBUG_PRINT(a,b) {ndbout << __FILE__ << ":" << __LINE__ << " " << a << ": "; ndbout_c b ;}
+#define DBUG_RETURN(a) { ndbout_c("%s:%d <", __FILE__, __LINE__); return(a); }
+#define DBUG_VOID_RETURN { ndbout_c("%s:%d <", __FILE__, __LINE__); return; }
+#endif
/**
* @todo:
@@ -112,15 +123,12 @@ Suma::getNodeGroupMembers(Signal* signal) {
void
Suma::execSTTOR(Signal* signal) {
jamEntry();
-
+
+ DBUG_ENTER("Suma::execSTTOR");
const Uint32 startphase = signal->theData[1];
const Uint32 typeOfStart = signal->theData[7];
-#ifdef NODEFAIL_DEBUG
- ndbout_c ("SUMA::execSTTOR startphase = %u, typeOfStart = %u",
- startphase, typeOfStart);
-
-#endif
+ DBUG_PRINT("info",("startphase = %u, typeOfStart = %u", startphase, typeOfStart));
if(startphase == 1){
jam();
@@ -155,7 +163,7 @@ Suma::execSTTOR(Signal* signal) {
g_subPtrI = subPtr.i;
// sendSTTORRY(signal);
#endif
- return;
+ DBUG_VOID_RETURN;
}
if(startphase == 5) {
@@ -178,9 +186,7 @@ Suma::execSTTOR(Signal* signal) {
for( int i = 0; i < NO_OF_BUCKETS; i++) {
if (getResponsibleSumaNodeId(i) == refToNode(reference())) {
// I'm running this bucket
-#ifdef EVENT_DEBUG
- ndbout_c("bucket %u set to true", i);
-#endif
+ DBUG_PRINT("info",("bucket %u set to true", i));
c_buckets[i].active = true;
}
}
@@ -190,32 +196,31 @@ Suma::execSTTOR(Signal* signal) {
c_masterNodeId == getOwnNodeId()) {
jam();
createSequence(signal);
- return;
+ DBUG_VOID_RETURN;
}//if
}//if
sendSTTORRY(signal);
- return;
+ DBUG_VOID_RETURN;
}
void
Suma::createSequence(Signal* signal)
{
jam();
+ DBUG_ENTER("Suma::createSequence");
UtilSequenceReq * req = (UtilSequenceReq*)signal->getDataPtrSend();
req->senderData = RNIL;
req->sequenceId = SUMA_SEQUENCE;
req->requestType = UtilSequenceReq::Create;
-#ifdef DEBUG_SUMA_SEQUENCE
- ndbout_c("SUMA: Create sequence");
-#endif
sendSignal(DBUTIL_REF, GSN_UTIL_SEQUENCE_REQ,
signal, UtilSequenceReq::SignalLength, JBB);
// execUTIL_SEQUENCE_CONF will call createSequenceReply()
+ DBUG_VOID_RETURN;
}
void
@@ -338,6 +343,7 @@ SumaParticipant::execCONTINUEB(Signal* signal)
void Suma::execAPI_FAILREQ(Signal* signal)
{
jamEntry();
+ DBUG_ENTER("Suma::execAPI_FAILREQ");
Uint32 failedApiNode = signal->theData[0];
//BlockReference retRef = signal->theData[1];
@@ -348,11 +354,13 @@ void Suma::execAPI_FAILREQ(Signal* signal)
jam();
c_failedApiNodes.clear(failedApiNode);
}
+ DBUG_VOID_RETURN;
}//execAPI_FAILREQ()
bool
SumaParticipant::removeSubscribersOnNode(Signal *signal, Uint32 nodeId)
{
+ DBUG_ENTER("SumaParticipant::removeSubscribersOnNode");
bool found = false;
SubscriberPtr i_subbPtr;
@@ -372,20 +380,15 @@ SumaParticipant::removeSubscribersOnNode(Signal *signal, Uint32 nodeId)
jam();
sendSubStopReq(signal);
}
- return found;
+ DBUG_RETURN(found);
}
void
-SumaParticipant::sendSubStopReq(Signal *signal){
+SumaParticipant::sendSubStopReq(Signal *signal, bool unlock){
+ DBUG_ENTER("SumaParticipant::sendSubStopReq");
static bool remove_lock = false;
jam();
- if(remove_lock) {
- jam();
- return;
- }
- remove_lock = true;
-
SubscriberPtr subbPtr;
c_removeDataSubscribers.first(subbPtr);
if (subbPtr.isNull()){
@@ -398,9 +401,15 @@ SumaParticipant::sendSubStopReq(Signal *signal){
c_failedApiNodes.clear();
remove_lock = false;
- return;
+ DBUG_VOID_RETURN;
}
+ if(remove_lock && !unlock) {
+ jam();
+ DBUG_VOID_RETURN;
+ }
+ remove_lock = true;
+
SubscriptionPtr subPtr;
c_subscriptions.getPtr(subPtr, subbPtr.p->m_subPtrI);
@@ -414,11 +423,13 @@ SumaParticipant::sendSubStopReq(Signal *signal){
req->part = SubscriptionData::TableData;
sendSignal(SUMA_REF, GSN_SUB_STOP_REQ, signal, SubStopReq::SignalLength, JBB);
+ DBUG_VOID_RETURN;
}
void
SumaParticipant::execSUB_STOP_CONF(Signal* signal){
jamEntry();
+ DBUG_ENTER("SumaParticipant::execSUB_STOP_CONF");
SubStopConf * const conf = (SubStopConf*)signal->getDataPtr();
@@ -444,12 +455,15 @@ SumaParticipant::execSUB_STOP_CONF(Signal* signal){
}
}
- sendSubStopReq(signal);
+ sendSubStopReq(signal,true);
+ DBUG_VOID_RETURN;
}
void
SumaParticipant::execSUB_STOP_REF(Signal* signal){
jamEntry();
+ DBUG_ENTER("SumaParticipant::execSUB_STOP_REF");
+
SubStopRef * const ref = (SubStopRef*)signal->getDataPtr();
Uint32 subscriptionId = ref->subscriptionId;
@@ -471,11 +485,14 @@ SumaParticipant::execSUB_STOP_REF(Signal* signal){
req->part = part;
sendSignal(SUMA_REF, GSN_SUB_STOP_REQ, signal, SubStopReq::SignalLength, JBB);
+
+ DBUG_VOID_RETURN;
}
void
Suma::execNODE_FAILREP(Signal* signal){
jamEntry();
+ DBUG_ENTER("Suma::execNODE_FAILREP");
NodeFailRep * const rep = (NodeFailRep*)signal->getDataPtr();
@@ -541,6 +558,7 @@ Suma::execNODE_FAILREP(Signal* signal){
c_aliveNodes.clear(nodePtr.p->nodeId); // this has to be done after the loop above
}
}
+ DBUG_VOID_RETURN;
}
void
@@ -610,6 +628,19 @@ Suma::execSIGNAL_DROPPED_REP(Signal* signal){
*
*/
+static unsigned
+count_subscribers(const DLList<SumaParticipant::Subscriber> &subs)
+{
+ unsigned n= 0;
+ SumaParticipant::SubscriberPtr i_subbPtr;
+ subs.first(i_subbPtr);
+ while(!i_subbPtr.isNull()){
+ n++;
+ subs.next(i_subbPtr);
+ }
+ return n;
+}
+
void
Suma::execDUMP_STATE_ORD(Signal* signal){
jamEntry();
@@ -664,6 +695,15 @@ Suma::execDUMP_STATE_ORD(Signal* signal){
infoEvent("Suma: c_dataBufferPool size: %d free: %d",
c_dataBufferPool.getSize(),
c_dataBufferPool.getNoOfFree());
+
+ infoEvent("Suma: c_metaSubscribers count: %d",
+ count_subscribers(c_metaSubscribers));
+ infoEvent("Suma: c_dataSubscribers count: %d",
+ count_subscribers(c_dataSubscribers));
+ infoEvent("Suma: c_prepDataSubscribers count: %d",
+ count_subscribers(c_prepDataSubscribers));
+ infoEvent("Suma: c_removeDataSubscribers count: %d",
+ count_subscribers(c_removeDataSubscribers));
}
}
@@ -812,16 +852,14 @@ Suma::execUTIL_SEQUENCE_CONF(Signal* signal)
{
jamEntry();
+ DBUG_ENTER("Suma::execUTIL_SEQUENCE_CONF");
CRASH_INSERTION(13002);
UtilSequenceConf * conf = (UtilSequenceConf*)signal->getDataPtr();
-#ifdef DEBUG_SUMA_SEQUENCE
- ndbout_c("SUMA: Create sequence conf");
-#endif
if(conf->requestType == UtilSequenceReq::Create) {
jam();
createSequenceReply(signal, conf, NULL);
- return;
+ DBUG_VOID_RETURN;
}
Uint64 subId;
@@ -841,18 +879,21 @@ Suma::execUTIL_SEQUENCE_CONF(Signal* signal)
CreateSubscriptionIdConf::SignalLength, JBB);
c_subscriberPool.release(subbPtr);
+
+ DBUG_VOID_RETURN;
}
void
Suma::execUTIL_SEQUENCE_REF(Signal* signal)
{
jamEntry();
+ DBUG_ENTER("Suma::execUTIL_SEQUENCE_REF");
UtilSequenceRef * ref = (UtilSequenceRef*)signal->getDataPtr();
if(ref->requestType == UtilSequenceReq::Create) {
jam();
createSequenceReply(signal, NULL, ref);
- return;
+ DBUG_VOID_RETURN;
}
Uint32 subData = ref->senderData;
@@ -861,7 +902,7 @@ Suma::execUTIL_SEQUENCE_REF(Signal* signal)
c_subscriberPool.getPtr(subbPtr,subData);
sendSubIdRef(signal, GrepError::SEQUENCE_ERROR);
c_subscriberPool.release(subbPtr);
- return;
+ DBUG_VOID_RETURN;
}//execUTIL_SEQUENCE_REF()
@@ -1429,7 +1470,7 @@ SumaParticipant::execDIGETPRIMCONF(Signal* signal){
void
SumaParticipant::execCREATE_TRIG_CONF(Signal* signal){
jamEntry();
-
+ DBUG_ENTER("SumaParticipant::execCREATE_TRIG_CONF");
CRASH_INSERTION(13009);
CreateTrigConf * const conf = (CreateTrigConf*)signal->getDataPtr();
@@ -1442,6 +1483,7 @@ SumaParticipant::execCREATE_TRIG_CONF(Signal* signal){
* dodido
* @todo: I (Johan) don't know what to do here. Jonas, what do you mean?
*/
+ DBUG_VOID_RETURN;
}
void
@@ -1453,7 +1495,7 @@ SumaParticipant::execCREATE_TRIG_REF(Signal* signal){
void
SumaParticipant::execDROP_TRIG_CONF(Signal* signal){
jamEntry();
-
+ DBUG_ENTER("SumaParticipant::execDROP_TRIG_CONF");
CRASH_INSERTION(13010);
DropTrigConf * const conf = (DropTrigConf*)signal->getDataPtr();
@@ -1461,17 +1503,19 @@ SumaParticipant::execDROP_TRIG_CONF(Signal* signal){
const Uint32 senderData = conf->getConnectionPtr();
SyncRecord* tmp = c_syncPool.getPtr(senderData);
tmp->runDROP_TRIG_CONF(signal);
+ DBUG_VOID_RETURN;
}
void
SumaParticipant::execDROP_TRIG_REF(Signal* signal){
jamEntry();
-
+ DBUG_ENTER("SumaParticipant::execDROP_TRIG_CONF");
DropTrigRef * const ref = (DropTrigRef*)signal->getDataPtr();
const Uint32 senderData = ref->getConnectionPtr();
SyncRecord* tmp = c_syncPool.getPtr(senderData);
tmp->runDROP_TRIG_CONF(signal);
+ DBUG_VOID_RETURN;
}
/*************************************************************************
@@ -2055,9 +2099,7 @@ SumaParticipant::execSCAN_HBREP(Signal* signal){
void
SumaParticipant::execSUB_START_REQ(Signal* signal){
jamEntry();
-#ifdef NODEFAIL_DEBUG
- ndbout_c("Suma::execSUB_START_REQ");
-#endif
+ DBUG_ENTER("SumaParticipant::execSUB_START_REQ");
CRASH_INSERTION(13013);
@@ -2067,7 +2109,7 @@ SumaParticipant::execSUB_START_REQ(Signal* signal){
if (RtoI(signal->getSendersBlockRef(), false) == RNIL) {
jam();
sendSubStartRef(signal, /** Error Code */ 0, true);
- return;
+ DBUG_VOID_RETURN;
}
// only allow other Suma's in the nodegroup to come through for restart purposes
}
@@ -2088,7 +2130,7 @@ SumaParticipant::execSUB_START_REQ(Signal* signal){
if(!c_subscriptions.find(subPtr, key)){
jam();
sendSubStartRef(signal, /** Error Code */ 0);
- return;
+ DBUG_VOID_RETURN;
}
Ptr<SyncRecord> syncPtr;
@@ -2099,7 +2141,7 @@ SumaParticipant::execSUB_START_REQ(Signal* signal){
ndbout_c("Locked");
#endif
sendSubStartRef(signal, /** Error Code */ 0, true);
- return;
+ DBUG_VOID_RETURN;
}
syncPtr.p->m_locked = true;
@@ -2108,7 +2150,7 @@ SumaParticipant::execSUB_START_REQ(Signal* signal){
jam();
syncPtr.p->m_locked = false;
sendSubStartRef(signal, /** Error Code */ 0);
- return;
+ DBUG_VOID_RETURN;
}
Uint32 type = subPtr.p->m_subscriptionType;
@@ -2176,6 +2218,7 @@ SumaParticipant::execSUB_START_REQ(Signal* signal){
break;
}
ndbrequire(ok);
+ DBUG_VOID_RETURN;
}
void
@@ -2800,7 +2843,7 @@ SumaParticipant::decideWhoToSend(Uint32 nBucket, Uint32 gci){
void
SumaParticipant::execFIRE_TRIG_ORD(Signal* signal){
jamEntry();
-
+ DBUG_ENTER("SumaParticipant::execFIRE_TRIG_ORD");
CRASH_INSERTION(13016);
FireTrigOrd* const trg = (FireTrigOrd*)signal->getDataPtr();
const Uint32 trigId = trg->getTriggerId();
@@ -2928,6 +2971,7 @@ SumaParticipant::execFIRE_TRIG_ORD(Signal* signal){
}
}
#endif
+ DBUG_PRINT("info",("GSN_SUB_TABLE_DATA to node %d", refToNode(ref)));
sendSignal(ref, GSN_SUB_TABLE_DATA, signal,
SubTableData::SignalLength, JBB, ptr, nptr);
data->logType = tmp;
@@ -2961,6 +3005,8 @@ SumaParticipant::execFIRE_TRIG_ORD(Signal* signal){
*/
f_bufferLock = 0;
b_bufferLock = 0;
+
+ DBUG_VOID_RETURN;
}
void
@@ -3209,6 +3255,7 @@ bool SumaParticipant::FailoverBuffer::nodeFailRep()
void
SumaParticipant::execSUB_STOP_REQ(Signal* signal){
jamEntry();
+ DBUG_ENTER("SumaParticipant::execSUB_STOP_REQ");
CRASH_INSERTION(13019);
@@ -3238,7 +3285,7 @@ SumaParticipant::execSUB_STOP_REQ(Signal* signal){
SubStopConf::SignalLength, JBB);
removeSubscribersOnNode(signal, refToNode(subscriberRef));
- return;
+ DBUG_VOID_RETURN;
}
if(!c_subscriptions.find(subPtr, key)){
@@ -3264,7 +3311,7 @@ SumaParticipant::execSUB_STOP_REQ(Signal* signal){
for (;!subbPtr.isNull(); c_dataSubscribers.next(subbPtr)){
jam();
if (subbPtr.p->m_subPtrI == subPtr.i &&
- subbPtr.p->m_subscriberRef == subscriberRef &&
+ refToNode(subbPtr.p->m_subscriberRef) == refToNode(subscriberRef) &&
subbPtr.p->m_subscriberData == subscriberData){
// ndbout_c("STOP_REQ: before c_dataSubscribers.release");
jam();
@@ -3279,7 +3326,7 @@ SumaParticipant::execSUB_STOP_REQ(Signal* signal){
if (!found) {
jam();
sendSubStopRef(signal, GrepError::SUBSCRIBER_NOT_FOUND);
- return;
+ DBUG_VOID_RETURN;
}
}
@@ -3292,11 +3339,12 @@ SumaParticipant::execSUB_STOP_REQ(Signal* signal){
if (syncPtr.p->m_locked) {
jam();
sendSubStopRef(signal, /** Error Code */ 0, true);
- return;
+ DBUG_VOID_RETURN;
}
syncPtr.p->m_locked = true;
syncPtr.p->startDropTrigger(signal);
+ DBUG_VOID_RETURN;
}
void
@@ -3492,6 +3540,8 @@ SumaParticipant::sendSubRemoveRef(Signal* signal, const SubRemoveReq& req,
jam();
SubRemoveRef * ref = (SubRemoveRef *)signal->getDataPtrSend();
ref->senderRef = reference();
+ ref->subscriptionId = req.subscriptionId;
+ ref->subscriptionKey = req.subscriptionKey;
ref->senderData = req.senderData;
ref->err = errCode;
if (temporary)
diff --git a/ndb/src/kernel/blocks/suma/Suma.hpp b/ndb/src/kernel/blocks/suma/Suma.hpp
index 08987fa9420..65869f44423 100644
--- a/ndb/src/kernel/blocks/suma/Suma.hpp
+++ b/ndb/src/kernel/blocks/suma/Suma.hpp
@@ -376,7 +376,7 @@ public:
void sendSubStartComplete(Signal*, SubscriberPtr, Uint32,
SubscriptionData::Part);
void sendSubStopComplete(Signal*, SubscriberPtr);
- void sendSubStopReq(Signal* signal);
+ void sendSubStopReq(Signal* signal, bool unlock= false);
void completeSubRemoveReq(Signal* signal, SubscriptionPtr subPtr);
diff --git a/ndb/src/kernel/main.cpp b/ndb/src/kernel/main.cpp
index d9953b920d2..bec9c8b28f4 100644
--- a/ndb/src/kernel/main.cpp
+++ b/ndb/src/kernel/main.cpp
@@ -19,6 +19,7 @@
#include <ndb_version.h>
#include "Configuration.hpp"
+#include <ConfigRetriever.hpp>
#include <TransporterRegistry.hpp>
#include "vm/SimBlockList.hpp"
@@ -36,6 +37,8 @@
#include <NdbAutoPtr.hpp>
+#include <mgmapi_debug.h>
+
#if defined NDB_SOLARIS // ok
#include <sys/processor.h> // For system information
#endif
@@ -96,7 +99,7 @@ int main(int argc, char** argv)
return 1;
}
}
-
+
#ifndef NDB_WIN32
signal(SIGUSR1, handler_sigusr1);
@@ -106,6 +109,12 @@ int main(int argc, char** argv)
*/
catchsigs(true);
+ /**
+ * We no longer need the mgm connection in this process
+ * (as we are the angel, not ndb)
+ */
+ theConfig->closeConfiguration();
+
int status = 0;
while(waitpid(child, &status, 0) != child);
if(WIFEXITED(status)){
@@ -214,6 +223,13 @@ int main(int argc, char** argv)
exit(-1);
}
+ // Re-use the mgm handle as a transporter
+ if(!globalTransporterRegistry.connect_client(
+ theConfig->get_config_retriever()->get_mgmHandlePtr()))
+ ERROR_SET(fatal, ERR_INVALID_CONFIG,
+ "Connection to mgmd terminated before setup was complete",
+ "StopOnError missing");
+
if (!globalTransporterRegistry.start_clients()){
ndbout_c("globalTransporterRegistry.start_clients() failed");
exit(-1);
diff --git a/ndb/src/kernel/vm/Configuration.cpp b/ndb/src/kernel/vm/Configuration.cpp
index de78a4e927c..650d914035f 100644
--- a/ndb/src/kernel/vm/Configuration.cpp
+++ b/ndb/src/kernel/vm/Configuration.cpp
@@ -88,13 +88,6 @@ static void usage()
my_print_help(my_long_options);
my_print_variables(my_long_options);
}
-static my_bool
-get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
- char *argument)
-{
- return ndb_std_get_one_option(optid, opt,
- argument ? argument : "d:t:O,/tmp/ndbd.trace");
-}
bool
Configuration::init(int argc, char** argv)
@@ -103,7 +96,11 @@ Configuration::init(int argc, char** argv)
load_defaults("my",load_default_groups,&argc,&argv);
int ho_error;
- if ((ho_error=handle_options(&argc, &argv, my_long_options, get_one_option)))
+#ifndef DBUG_OFF
+ opt_debug= "d:t:O,/tmp/ndbd.trace";
+#endif
+ if ((ho_error=handle_options(&argc, &argv, my_long_options,
+ ndb_std_get_one_option)))
exit(ho_error);
if (_no_daemon) {
@@ -156,6 +153,7 @@ Configuration::Configuration()
_daemonMode = false;
m_config_retriever= 0;
m_clusterConfig= 0;
+ m_clusterConfigIter= 0;
}
Configuration::~Configuration(){
@@ -191,7 +189,6 @@ Configuration::fetch_configuration(){
}
m_mgmd_port= 0;
- m_mgmd_host= 0;
m_config_retriever= new ConfigRetriever(getConnectString(),
NDB_VERSION, NODE_TYPE_DB);
@@ -213,7 +210,7 @@ Configuration::fetch_configuration(){
}
m_mgmd_port= m_config_retriever->get_mgmd_port();
- m_mgmd_host= m_config_retriever->get_mgmd_host();
+ m_mgmd_host.assign(m_config_retriever->get_mgmd_host());
ConfigRetriever &cr= *m_config_retriever;
@@ -372,6 +369,8 @@ Configuration::setupConfiguration(){
ConfigValues* cf = ConfigValuesFactory::extractCurrentSection(iter.m_config);
+ if(m_clusterConfigIter)
+ ndb_mgm_destroy_iterator(m_clusterConfigIter);
m_clusterConfigIter = ndb_mgm_create_configuration_iterator
(p, CFG_SECTION_NODE);
diff --git a/ndb/src/kernel/vm/Configuration.hpp b/ndb/src/kernel/vm/Configuration.hpp
index acf0e163a84..6ca6d9a1f17 100644
--- a/ndb/src/kernel/vm/Configuration.hpp
+++ b/ndb/src/kernel/vm/Configuration.hpp
@@ -17,6 +17,7 @@
#ifndef Configuration_H
#define Configuration_H
+#include <util/BaseString.hpp>
#include <mgmapi.h>
#include <ndb_types.h>
@@ -67,7 +68,8 @@ public:
const ndb_mgm_configuration_iterator * getOwnConfigIterator() const;
Uint32 get_mgmd_port() const {return m_mgmd_port;};
- const char *get_mgmd_host() const {return m_mgmd_host;};
+ const char *get_mgmd_host() const {return m_mgmd_host.c_str();};
+ ConfigRetriever* get_config_retriever() { return m_config_retriever; };
class LogLevel * m_logLevel;
private:
@@ -98,7 +100,7 @@ private:
bool _initialStart;
char * _connectString;
Uint32 m_mgmd_port;
- const char *m_mgmd_host;
+ BaseString m_mgmd_host;
bool _daemonMode;
void calcSizeAlt(class ConfigValues * );
diff --git a/ndb/src/kernel/vm/FastScheduler.cpp b/ndb/src/kernel/vm/FastScheduler.cpp
index d0b7af27463..a2d806571fe 100644
--- a/ndb/src/kernel/vm/FastScheduler.cpp
+++ b/ndb/src/kernel/vm/FastScheduler.cpp
@@ -487,7 +487,7 @@ FastScheduler::reportDoJobStatistics(Uint32 tMeanLoopCount) {
Signal signal;
memset(&signal.header, 0, sizeof(signal.header));
- signal.theData[0] = EventReport::JobStatistic;
+ signal.theData[0] = NDB_LE_JobStatistic;
signal.theData[1] = tMeanLoopCount;
memset(&signal.header, 0, sizeof(SignalHeader));
diff --git a/ndb/include/ndb_types.h b/ndb/src/kernel/vm/KeyDescriptor.hpp
index 6cf9bb40d7f..456d64ce1d8 100644
--- a/ndb/include/ndb_types.h
+++ b/ndb/src/kernel/vm/KeyDescriptor.hpp
@@ -14,13 +14,28 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-/**
- * @file ndb_types.h
- */
+#ifndef KEY_DESCRIPTOR_HPP
+#define KEY_DESCRIPTOR_HPP
-#ifndef NDB_TYPES_H
-#define NDB_TYPES_H
+#include <ndb_types.h>
+#include <ndb_limits.h>
+#include "CArray.hpp"
-#include "ndb_global.h"
+struct KeyDescriptor
+{
+ KeyDescriptor () { noOfKeyAttr = hasCharAttr = noOfDistrKeys = 0; }
+
+ Uint8 noOfKeyAttr;
+ Uint8 hasCharAttr;
+ Uint8 noOfDistrKeys;
+ Uint8 unused;
+ struct KeyAttr
+ {
+ Uint32 attributeDescriptor;
+ CHARSET_INFO* charsetInfo;
+ } keyAttr[MAX_ATTRIBUTES_IN_INDEX];
+};
+
+extern CArray<KeyDescriptor> g_key_descriptor_pool;
#endif
diff --git a/ndb/src/kernel/vm/MetaData.hpp b/ndb/src/kernel/vm/MetaData.hpp
index 11e262664c1..1000114a421 100644
--- a/ndb/src/kernel/vm/MetaData.hpp
+++ b/ndb/src/kernel/vm/MetaData.hpp
@@ -86,15 +86,9 @@ public:
/* Primary table of index otherwise RNIL */
Uint32 primaryTableId;
- /* Type of storage (memory/disk, not used) */
- DictTabInfo::StorageType storageType;
-
/* Type of fragmentation (small/medium/large) */
DictTabInfo::FragmentType fragmentType;
- /* Key type of fragmentation (pk/dist key/dist group) */
- DictTabInfo::FragmentKeyType fragmentKeyType;
-
/* Global checkpoint identity when table created */
Uint32 gciTableCreated;
@@ -166,7 +160,6 @@ public:
Uint32 attributeDescriptor;
/* Extended attributes */
- Uint32 extType;
Uint32 extPrecision;
Uint32 extScale;
Uint32 extLength;
diff --git a/ndb/src/kernel/vm/SimulatedBlock.cpp b/ndb/src/kernel/vm/SimulatedBlock.cpp
index 9b52ac65331..57a4032e40b 100644
--- a/ndb/src/kernel/vm/SimulatedBlock.cpp
+++ b/ndb/src/kernel/vm/SimulatedBlock.cpp
@@ -743,7 +743,7 @@ SimulatedBlock::infoEvent(const char * msg, ...) const {
return;
Uint32 theData[25];
- theData[0] = EventReport::InfoEvent;
+ theData[0] = NDB_LE_InfoEvent;
char * buf = (char *)&(theData[1]);
va_list ap;
@@ -784,7 +784,7 @@ SimulatedBlock::warningEvent(const char * msg, ...) const {
return;
Uint32 theData[25];
- theData[0] = EventReport::WarningEvent;
+ theData[0] = NDB_LE_WarningEvent;
char * buf = (char *)&(theData[1]);
va_list ap;
@@ -1865,3 +1865,118 @@ SimulatedBlock::init_globals_list(void ** tmp, size_t cnt){
}
#endif
+
+#include "KeyDescriptor.hpp"
+
+Uint32
+SimulatedBlock::xfrm_key(Uint32 tab, const Uint32* src,
+ Uint32 *dst, Uint32 dstSize,
+ Uint32 keyPartLen[MAX_ATTRIBUTES_IN_INDEX]) const
+{
+ const KeyDescriptor * desc = g_key_descriptor_pool.getPtr(tab);
+ const Uint32 noOfKeyAttr = desc->noOfKeyAttr;
+
+ Uint32 i = 0;
+ Uint32 srcPos = 0;
+ Uint32 dstPos = 0;
+ while (i < noOfKeyAttr)
+ {
+ const KeyDescriptor::KeyAttr& keyAttr = desc->keyAttr[i];
+
+ Uint32 srcBytes =
+ AttributeDescriptor::getSizeInBytes(keyAttr.attributeDescriptor);
+ Uint32 srcWords = (srcBytes + 3) / 4;
+ Uint32 dstWords = ~0;
+ uchar* dstPtr = (uchar*)&dst[dstPos];
+ const uchar* srcPtr = (const uchar*)&src[srcPos];
+ CHARSET_INFO* cs = keyAttr.charsetInfo;
+
+ if (cs == NULL)
+ {
+ jam();
+ memcpy(dstPtr, srcPtr, srcWords << 2);
+ dstWords = srcWords;
+ }
+ else
+ {
+ jam();
+ Uint32 typeId =
+ AttributeDescriptor::getType(keyAttr.attributeDescriptor);
+ Uint32 lb, len;
+ bool ok = NdbSqlUtil::get_var_length(typeId, srcPtr, srcBytes, lb, len);
+ ndbrequire(ok);
+ Uint32 xmul = cs->strxfrm_multiply;
+ if (xmul == 0)
+ xmul = 1;
+ /*
+ * Varchar is really Char. End spaces do not matter. To get
+ * same hash we blank-pad to maximum length via strnxfrm.
+ * TODO use MySQL charset-aware hash function instead
+ */
+ Uint32 dstLen = xmul * (srcBytes - lb);
+ ndbrequire(dstLen <= ((dstSize - dstPos) << 2));
+ int n = NdbSqlUtil::strnxfrm_bug7284(cs, dstPtr, dstLen, srcPtr + lb, len);
+ ndbrequire(n != -1);
+ while ((n & 3) != 0)
+ {
+ dstPtr[n++] = 0;
+ }
+ dstWords = (n >> 2);
+ }
+ dstPos += dstWords;
+ srcPos += srcWords;
+ keyPartLen[i++] = dstWords;
+ }
+
+ return dstPos;
+}
+
+Uint32
+SimulatedBlock::create_distr_key(Uint32 tableId,
+ Uint32 *data,
+ const Uint32
+ keyPartLen[MAX_ATTRIBUTES_IN_INDEX]) const
+{
+ const KeyDescriptor* desc = g_key_descriptor_pool.getPtr(tableId);
+ const Uint32 noOfKeyAttr = desc->noOfKeyAttr;
+ Uint32 noOfDistrKeys = desc->noOfDistrKeys;
+
+ Uint32 *src = data;
+ Uint32 *dst = data;
+ Uint32 i = 0;
+ Uint32 dstPos = 0;
+
+ if(keyPartLen)
+ {
+ while (i < noOfKeyAttr && noOfDistrKeys)
+ {
+ Uint32 attr = desc->keyAttr[i].attributeDescriptor;
+ Uint32 len = keyPartLen[i];
+ if(AttributeDescriptor::getDKey(attr))
+ {
+ noOfDistrKeys--;
+ memmove(dst+dstPos, src, len << 2);
+ dstPos += len;
+ }
+ src += len;
+ i++;
+ }
+ }
+ else
+ {
+ while (i < noOfKeyAttr && noOfDistrKeys)
+ {
+ Uint32 attr = desc->keyAttr[i].attributeDescriptor;
+ Uint32 len = AttributeDescriptor::getSizeInWords(attr);
+ if(AttributeDescriptor::getDKey(attr))
+ {
+ noOfDistrKeys--;
+ memmove(dst+dstPos, src, len << 2);
+ dstPos += len;
+ }
+ src += len;
+ i++;
+ }
+ }
+ return dstPos;
+}
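
For readers unfamiliar with the key handling above, the in-place packing done by create_distr_key() can be pictured with a small standalone sketch. The KeyPart struct and pack_distr_key name below are illustrative only; the real routine reads the part lengths and the distribution-key flag from AttributeDescriptor and KeyDescriptor.

    #include <cstring>
    #include <cstdint>

    // Illustrative stand-in for the per-attribute info kept in KeyDescriptor.
    struct KeyPart { uint32_t lenWords; bool isDistrKey; };

    // Compact the distribution-key parts to the front of 'data', preserving
    // their order, in the same way as the memmove loop in create_distr_key().
    static uint32_t pack_distr_key(uint32_t* data, const KeyPart* parts, uint32_t nParts)
    {
      const uint32_t* src = data;
      uint32_t dstPos = 0;
      for (uint32_t i = 0; i < nParts; i++) {
        if (parts[i].isDistrKey) {
          std::memmove(data + dstPos, src, parts[i].lenWords * 4);
          dstPos += parts[i].lenWords;
        }
        src += parts[i].lenWords;
      }
      return dstPos;   // length of the packed distribution key in words
    }
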
diff --git a/ndb/src/kernel/vm/SimulatedBlock.hpp b/ndb/src/kernel/vm/SimulatedBlock.hpp
index 81b4fe7413e..bba92ca7c31 100644
--- a/ndb/src/kernel/vm/SimulatedBlock.hpp
+++ b/ndb/src/kernel/vm/SimulatedBlock.hpp
@@ -20,11 +20,13 @@
#include <NdbTick.h>
#include <kernel_types.h>
#include <ndb_version.h>
+#include <ndb_limits.h>
#include "VMSignal.hpp"
#include <RefConvert.hpp>
#include <BlockNumbers.h>
#include <GlobalSignalNumbers.h>
+
#include "pc.hpp"
#include <NodeInfo.hpp>
#include <NodeState.hpp>
@@ -385,6 +387,24 @@ protected:
*/
const NodeInfo & getNodeInfo(NodeId nodeId) const;
NodeInfo & setNodeInfo(NodeId);
+
+ /**********************
+ * Xfrm stuff
+ */
+
+ /**
+ * @return length
+ */
+ Uint32 xfrm_key(Uint32 tab, const Uint32* src,
+ Uint32 *dst, Uint32 dstLen,
+ Uint32 keyPartLen[MAX_ATTRIBUTES_IN_INDEX]) const;
+
+ /**
+ *
+ */
+ Uint32 create_distr_key(Uint32 tableId,
+ Uint32 *data,
+ const Uint32 keyPaLen[MAX_ATTRIBUTES_IN_INDEX])const;
private:
NewVARIABLE* NewVarRef; /* New Base Address Table for block */
diff --git a/ndb/src/kernel/vm/SuperPool.cpp b/ndb/src/kernel/vm/SuperPool.cpp
new file mode 100644
index 00000000000..65e5dd99629
--- /dev/null
+++ b/ndb/src/kernel/vm/SuperPool.cpp
@@ -0,0 +1,442 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#include <ndb_global.h>
+#include "SuperPool.hpp"
+
+// SuperPool
+
+SuperPool::SuperPool(Uint32 pageSize, Uint32 pageBits) :
+ m_pageSize(SP_ALIGN_SIZE(pageSize, SP_ALIGN)),
+ m_pageBits(pageBits),
+ m_memRoot(0),
+ m_pageEnt(0),
+ m_typeCheck(0),
+ m_typeSeq(0),
+ m_pageList(),
+ m_totalSize(0),
+ m_initSize(0),
+ m_incrSize(0),
+ m_maxSize(0)
+{
+ assert(5 <= pageBits && pageBits <= 30);
+}
+
+bool
+SuperPool::init()
+{
+ return true;
+}
+
+SuperPool::~SuperPool()
+{
+}
+
+SuperPool::PageEnt::PageEnt() :
+ m_pageType(0),
+ m_freeRecI(RNIL),
+ m_useCount(0),
+ m_nextPageI(RNIL),
+ m_prevPageI(RNIL)
+{
+}
+
+SuperPool::PageList::PageList() :
+ m_headPageI(RNIL),
+ m_tailPageI(RNIL),
+ m_pageCount(0)
+{
+}
+
+SuperPool::PageList::PageList(PtrI pageI) :
+ m_headPageI(pageI),
+ m_tailPageI(pageI),
+ m_pageCount(1)
+{
+}
+
+SuperPool::RecInfo::RecInfo(Uint32 recType, Uint32 recSize) :
+ m_recType(recType),
+ m_recSize(recSize),
+ m_maxUseCount(0),
+ m_currPageI(RNIL),
+ m_currFreeRecI(RNIL),
+ m_currUseCount(0),
+ m_totalUseCount(0),
+ m_totalRecCount(0),
+ m_freeList(),
+ m_activeList(),
+ m_fullList()
+{
+}
+
+SuperPool::PtrI
+SuperPool::getPageI(void* pageP)
+{
+ const Uint32 pageSize = m_pageSize;
+ const Uint32 pageBits = m_pageBits;
+ const Uint32 recBits = 32 - pageBits;
+ void* const memRoot = m_memRoot;
+ assert(pageP == SP_ALIGN_PTR(pageP, memRoot, pageSize));
+ my_ptrdiff_t ipL = ((Uint8*)pageP - (Uint8*)memRoot) / pageSize;
+ Int32 ip = (Int32)ipL;
+ Int32 lim = 1 << (pageBits - 1);
+ assert(ip == ipL && -lim <= ip && ip < lim && ip != -1);
+ PtrI pageI = ip << recBits;
+ assert(pageP == getPageP(pageI));
+ return pageI;
+}
+
+void
+SuperPool::movePages(PageList& pl1, PageList& pl2)
+{
+ const Uint32 recBits = 32 - m_pageBits;
+ if (pl1.m_pageCount != 0) {
+ if (pl2.m_pageCount != 0) {
+ PtrI pageI1 = pl1.m_tailPageI;
+ PtrI pageI2 = pl2.m_headPageI;
+ PageEnt& pe1 = getPageEnt(pageI1);
+ PageEnt& pe2 = getPageEnt(pageI2);
+ pe1.m_nextPageI = pageI2;
+ pe2.m_prevPageI = pageI1;
+ pl1.m_tailPageI = pl2.m_tailPageI;
+ pl1.m_pageCount += pl2.m_pageCount;
+ }
+ } else {
+ pl1 = pl2;
+ }
+ pl2.m_headPageI = pl2.m_tailPageI = RNIL;
+ pl2.m_pageCount = 0;
+}
+
+void
+SuperPool::addHeadPage(PageList& pl, PtrI pageI)
+{
+ PageList pl2(pageI);
+ movePages(pl2, pl);
+ pl = pl2;
+}
+
+void
+SuperPool::addTailPage(PageList& pl, PtrI pageI)
+{
+ PageList pl2(pageI);
+ movePages(pl, pl2);
+}
+
+void
+SuperPool::removePage(PageList& pl, PtrI pageI)
+{
+ PageEnt& pe = getPageEnt(pageI);
+ PtrI pageI1 = pe.m_prevPageI;
+ PtrI pageI2 = pe.m_nextPageI;
+ if (pageI1 != RNIL) {
+ PageEnt& pe1 = getPageEnt(pageI1);
+ pe1.m_nextPageI = pageI2;
+ if (pageI2 != RNIL) {
+ PageEnt& pe2 = getPageEnt(pageI2);
+ pe2.m_prevPageI = pageI1;
+ } else {
+ pl.m_tailPageI = pageI1;
+ }
+ } else {
+ if (pageI2 != RNIL) {
+ PageEnt& pe2 = getPageEnt(pageI2);
+ pe2.m_prevPageI = pageI1;
+ pl.m_headPageI = pageI2;
+ } else {
+ pl.m_headPageI = pl.m_tailPageI = RNIL;
+ }
+ }
+ pe.m_prevPageI = pe.m_nextPageI = RNIL;
+ assert(pl.m_pageCount != 0);
+ pl.m_pageCount--;
+}
+
+void
+SuperPool::setCurrPage(RecInfo& ri, PtrI newPageI)
+{
+ PtrI oldPageI = ri.m_currPageI;
+ if (oldPageI != RNIL) {
+ // copy from cache
+ PageEnt& pe = getPageEnt(oldPageI);
+ pe.m_freeRecI = ri.m_currFreeRecI;
+ pe.m_useCount = ri.m_currUseCount;
+ // add to right list according to "pp2" policy
+ if (pe.m_useCount == 0) {
+ pe.m_pageType = 0;
+ addHeadPage(m_pageList, oldPageI);
+ ri.m_totalRecCount -= ri.m_maxUseCount;
+ } else if (pe.m_useCount < ri.m_maxUseCount) {
+ addHeadPage(ri.m_activeList, oldPageI);
+ } else {
+ addHeadPage(ri.m_fullList, oldPageI);
+ }
+ }
+ if (newPageI != RNIL) {
+ PageEnt& pe = getPageEnt(newPageI);
+ // copy to cache
+ ri.m_currPageI = newPageI;
+ ri.m_currFreeRecI = pe.m_freeRecI;
+ ri.m_currUseCount = pe.m_useCount;
+ // remove from right list
+ if (pe.m_useCount == 0) {
+ removePage(ri.m_freeList, newPageI);
+ } else if (pe.m_useCount < ri.m_maxUseCount) {
+ removePage(ri.m_activeList, newPageI);
+ } else {
+ removePage(ri.m_fullList, newPageI);
+ }
+ } else {
+ ri.m_currPageI = RNIL;
+ ri.m_currFreeRecI = RNIL;
+ ri.m_currUseCount = 0;
+ }
+}
+
+bool
+SuperPool::getAvailPage(RecInfo& ri)
+{
+ PtrI pageI;
+ if ((pageI = ri.m_activeList.m_headPageI) != RNIL ||
+ (pageI = ri.m_freeList.m_headPageI) != RNIL ||
+ (pageI = getFreePage(ri)) != RNIL) {
+ setCurrPage(ri, pageI);
+ return true;
+ }
+ return false;
+}
+
+SuperPool::PtrI
+SuperPool::getFreePage(RecInfo& ri)
+{
+ PtrI pageI;
+ if (m_pageList.m_pageCount != 0) {
+ pageI = m_pageList.m_headPageI;
+ removePage(m_pageList, pageI);
+ } else {
+ pageI = getNewPage();
+ if (pageI == RNIL)
+ return RNIL;
+ }
+ void* pageP = getPageP(pageI);
+ // set up free record list
+ Uint32 maxUseCount = ri.m_maxUseCount;
+ Uint32 recSize = ri.m_recSize;
+ void* recP = (Uint8*)pageP;
+ Uint32 irNext = 1;
+ while (irNext < maxUseCount) {
+ *(Uint32*)recP = pageI | irNext;
+ recP = (Uint8*)recP + recSize;
+ irNext++;
+ }
+ *(Uint32*)recP = RNIL;
+ // add to total record count
+ ri.m_totalRecCount += maxUseCount;
+ // set up new page entry
+ PageEnt& pe = getPageEnt(pageI);
+ new (&pe) PageEnt();
+ pe.m_pageType = ri.m_recType;
+ pe.m_freeRecI = pageI | 0;
+ pe.m_useCount = 0;
+ // set type check bits
+ setCheckBits(pageI, ri.m_recType);
+ // add to record pool free list
+ addHeadPage(ri.m_freeList, pageI);
+ return pageI;
+}
+
+void
+SuperPool::setSizes(size_t initSize, size_t incrSize, size_t maxSize)
+{
+ const Uint32 pageSize = m_pageSize;
+ m_initSize = SP_ALIGN_SIZE(initSize, pageSize);
+ m_incrSize = SP_ALIGN_SIZE(incrSize, pageSize);
+ m_maxSize = SP_ALIGN_SIZE(maxSize, pageSize);
+}
+
+void
+SuperPool::verify(RecInfo& ri)
+{
+ PageList* plList[3] = { &ri.m_freeList, &ri.m_activeList, &ri.m_fullList };
+ for (int i = 0; i < 3; i++) {
+ PageList& pl = *plList[i];
+ unsigned count = 0;
+ PtrI pageI = pl.m_headPageI;
+ while (pageI != RNIL) {
+ PageEnt& pe = getPageEnt(pageI);
+ PtrI pageI1 = pe.m_prevPageI;
+ PtrI pageI2 = pe.m_nextPageI;
+ if (count == 0) {
+ assert(pageI1 == RNIL);
+ } else {
+ assert(pageI1 != RNIL);
+ PageEnt& pe1 = getPageEnt(pageI1);
+ assert(pe1.m_nextPageI == pageI);
+ if (pageI2 != RNIL) {
+ PageEnt& pe2 = getPageEnt(pageI2);
+ assert(pe2.m_prevPageI == pageI);
+ }
+ }
+ pageI = pageI2;
+ count++;
+ }
+ assert(pl.m_pageCount == count);
+ }
+}
+
+// HeapPool
+
+HeapPool::HeapPool(Uint32 pageSize, Uint32 pageBits) :
+ SuperPool(pageSize, pageBits),
+ m_areaHead(),
+ m_currArea(&m_areaHead),
+ m_lastArea(&m_areaHead),
+ m_mallocPart(4)
+{
+}
+
+bool
+HeapPool::init()
+{
+ const Uint32 pageBits = m_pageBits;
+ if (! SuperPool::init())
+ return false;
+ // allocate page entry array
+ Uint32 peBytes = (1 << pageBits) * sizeof(PageEnt);
+ m_pageEnt = static_cast<PageEnt*>(malloc(peBytes));
+ if (m_pageEnt == 0)
+ return false;
+ memset(m_pageEnt, 0, peBytes);
+ // allocate type check array
+ Uint32 tcWords = 1 << (pageBits - (5 - SP_CHECK_LOG2));
+ m_typeCheck = static_cast<Uint32*>(malloc(tcWords << 2));
+ if (m_typeCheck == 0)
+ return false;
+ memset(m_typeCheck, 0, tcWords << 2);
+ // allocate initial data
+ assert(m_totalSize == 0);
+ if (! allocMoreData(m_initSize))
+ return false;
+ return true;
+}
+
+HeapPool::~HeapPool()
+{
+ free(m_pageEnt);
+ free(m_typeCheck);
+ Area* ap;
+ while ((ap = m_areaHead.m_nextArea) != 0) {
+ m_areaHead.m_nextArea = ap->m_nextArea;
+ free(ap->m_memory);
+ free(ap);
+ }
+}
+
+HeapPool::Area::Area() :
+ m_nextArea(0),
+ m_firstPageI(RNIL),
+ m_currPage(0),
+ m_numPages(0),
+ m_memory(0)
+{
+}
+
+SuperPool::PtrI
+HeapPool::getNewPage()
+{
+ const Uint32 pageSize = m_pageSize;
+ const Uint32 pageBits = m_pageBits;
+ const Uint32 recBits= 32 - pageBits;
+ Area* ap = m_currArea;
+ if (ap->m_currPage == ap->m_numPages) {
+ // area is used up
+ if (ap->m_nextArea == 0) {
+ // todo dynamic increase
+ assert(m_incrSize == 0);
+ return RNIL;
+ }
+ ap = m_currArea = ap->m_nextArea;
+ }
+ assert(ap->m_currPage < ap->m_numPages);
+ PtrI pageI = ap->m_firstPageI;
+ Int32 ip = (Int32)pageI >> recBits;
+ ip += ap->m_currPage;
+ pageI = ip << recBits;
+ ap->m_currPage++;
+ return pageI;
+}
+
+bool
+HeapPool::allocMoreData(size_t size)
+{
+ const Uint32 pageSize = m_pageSize;
+ const Uint32 pageBits = m_pageBits;
+ const Uint32 recBits = 32 - pageBits;
+ const Uint32 incrSize = m_incrSize;
+ const Uint32 incrPages = incrSize / pageSize;
+ const Uint32 mallocPart = m_mallocPart;
+ size = SP_ALIGN_SIZE(size, pageSize);
+ if (incrSize != 0)
+ size = SP_ALIGN_SIZE(size, incrSize);
+ Uint32 needPages = size / pageSize;
+ while (needPages != 0) {
+ Uint32 wantPages = needPages;
+ if (incrPages != 0 && wantPages > incrPages)
+ wantPages = incrPages;
+ Uint32 tryPages = 0;
+ void* p1 = 0;
+ for (Uint32 i = mallocPart; i > 0 && p1 == 0; i--) {
+ // one page is usually wasted due to alignment to memory root
+ tryPages = ((wantPages + 1) * i) / mallocPart;
+ if (tryPages < 2)
+ break;
+ p1 = malloc(pageSize * tryPages);
+ }
+ if (p1 == 0)
+ return false;
+ if (m_memRoot == 0) {
+ // set memory root at first "big" alloc
+ // assume malloc header makes later ip = -1 impossible
+ m_memRoot = p1;
+ }
+ void* p2 = SP_ALIGN_PTR(p1, m_memRoot, pageSize);
+ Uint32 numPages = tryPages - (p1 != p2);
+ my_ptrdiff_t ipL = ((Uint8*)p2 - (Uint8*)m_memRoot) / pageSize;
+ Int32 ip = (Int32)ipL;
+ Int32 lim = 1 << (pageBits - 1);
+ if (! (ip == ipL && -lim <= ip && ip + numPages < lim)) {
+ free(p1);
+ return false;
+ }
+ assert(ip != -1);
+ PtrI pageI = ip << recBits;
+ needPages = (needPages >= numPages ? needPages - numPages : 0);
+ m_totalSize += numPages * pageSize;
+ // allocate new area
+ Area* ap = static_cast<Area*>(malloc(sizeof(Area)));
+ if (ap == 0) {
+ free(p1);
+ return false;
+ }
+ new (ap) Area();
+ ap->m_firstPageI = pageI;
+ ap->m_numPages = numPages;
+ ap->m_memory = p1;
+ m_lastArea->m_nextArea = ap;
+ m_lastArea = ap;
+ }
+ return true;
+}
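
The page-index arithmetic used by getPageI() and allocMoreData() above splits a 32-bit i-value into a signed page index "ip" and a record index "ir". A minimal sketch of that split (pageBits = 17 is just an illustrative choice):

    #include <cstdint>
    #include <cassert>

    int main()
    {
      const uint32_t pageBits = 17;              // high bits: page index "ip"
      const uint32_t recBits  = 32 - pageBits;   // low bits: record index "ir"

      uint32_t i  = (3u << recBits) | 7u;        // record 7 on page 3
      int32_t  ip = (int32_t)i >> recBits;       // arithmetic shift keeps the sign
      uint32_t ir = i & ((1u << recBits) - 1);

      assert(ip == 3 && ir == 7);
      // page address   = memRoot + (ptrdiff_t)ip * pageSize
      // record address = page address + ir * recSize
      return 0;
    }
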
diff --git a/ndb/src/kernel/vm/SuperPool.hpp b/ndb/src/kernel/vm/SuperPool.hpp
new file mode 100644
index 00000000000..157c75aa0d5
--- /dev/null
+++ b/ndb/src/kernel/vm/SuperPool.hpp
@@ -0,0 +1,561 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#ifndef SUPER_POOL_HPP
+#define SUPER_POOL_HPP
+
+#include <ndb_global.h>
+
+#include <pc.hpp>
+#include <ErrorReporter.hpp>
+
+#define NDB_SP_VERIFY_LEVEL 1
+
+/*
+ * SuperPool - super pool for record pools (abstract class)
+ *
+ * Documents SuperPool and RecordPool<T>.
+ *
+ * GENERAL
+ *
+ * A "super pool" is a shared pool of pages of fixed size. A "record
+ * pool" is a pool of records of fixed size. One super pool instance is
+ * used by any number of record pools to allocate their memory.
+ * A special case is a "page pool" where a record is a simple page,
+ * possibly smaller than super pool page.
+ *
+ * A record pool allocates memory in pages. Thus each used page is
+ * associated with one record pool and one record type. The records on
+ * a page form an array starting at start of page. Thus each record has
+ * an index within the page. Any last partial record which does not fit
+ * on the page is disregarded.
+ *
+ * I-VALUE
+ *
+ * The old "i-p" principle is kept. A reference to a super pool page or
+ * record is stored as an "i-value" from which the record pointer "p" is
+ * computed. In super pool the i-value is a Uint32 with two parts:
+ *
+ * - "ip" index of page within super pool (high pageBits)
+ * - "ir" index of record within page (low recBits)
+ *
+ * The translation between "ip" and page address is described in next
+ * section. Once page address is known, the record address is found
+ * from "ir" in the obvious way.
+ *
+ * The main advantage with i-value is that it can be verified. The
+ * level of verification depends on compile type (release, debug).
+ *
+ * - "v0" minimal sanity check
+ * - "v1" check record type matches page type, see below
+ * - "v2" check record is in use (not yet implemented)
+ *
+ * Another advantage of a 32-bit i-value is that it extends the space of
+ * 32-bit addressable records on a 64-bit platform.
+ *
+ * RNIL is 0xffffff00 and indicates NULL i-value. To avoid hitting RNIL
+ * it is required that pageBits <= 30 and that the maximum value of the
+ * range (2^pageBits-1) is not used.
+ *
+ * MEMORY ROOT
+ *
+ * This super pool requires a "memory root" i.e. a memory address such
+ * that the index of a page "ip" satisfies
+ *
+ * page address = memory root + (signed)ip * page size
+ *
+ * This is possible on most platforms, provided that the memory root and
+ * all pages are either on the heap or on the stack, in order to keep
+ * the size of "ip" reasonably small.
+ *
+ * The cast (signed)ip is done as integer of pageBits bits. "ip" has
+ * same sign bit as i-value "i" so (signed)ip = (Int32)i >> recBits.
+ * The RNIL restriction can be expressed as (signed)ip != -1.
+ *
+ * PAGE ENTRIES
+ *
+ * Each super pool page has a "page entry". It contains:
+ *
+ * - page type
+ * - i-value of first free record on page
+ * - page use count, to see if page can be freed
+ * - pointers (as i-values) to next and previous page in list
+ *
+ * Page entry cannot be stored on the page itself since this prevents
+ * aligning pages to OS block size and the use of BATs (don't ask) for
+ * page pools in NDB. For now the implementation provides an array of
+ * page entries with place for all (2^pageBits) entries.
+ *
+ * PAGE TYPE
+ *
+ * Page type is (in principle) unique to the record pool using the super
+ * pool. It is assigned in record pool constructor. Page type zero
+ * means that the page is free i.e. not allocated to a record pool.
+ *
+ * Each "i-p" conversion checks ("v1") that the record belongs to same
+ * pool as the page. This check is much more common than page or record
+ * allocation. To make it cache effective, there is a separate array of
+ * reduced "type bits" (computed from real type).
+ *
+ * FREE LISTS
+ *
+ * A record is either used or on the free list of the record pool.
+ * A page has a use count i.e. number of used records. When use count
+ * drops to zero the page can be returned to the super pool. This is
+ * not necessarily done at once, or ever.
+ *
+ * To make freeing pages feasible, the record pool free list has two
+ * levels. There are available pages (some free) and a singly linked
+ * free list within the page. A page allocated to record pool is on one
+ * of 4 lists:
+ *
+ * - free page list (all free, available)
+ * - active page list (some free, some used, available)
+ * - full page list (none free)
+ * - current page (list of 1), see below
+ *
+ * Some usage types (temporary pools) may never free records. They pay
+ * a small penalty for the extra overhead.
+ *
+ * RECORD POOL
+ *
+ * A pool of records which allocates its memory from a super pool
+ * instance specified in the constructor. There are 3 basic operations:
+ *
+ * - getPtr - translate i-value to pointer-to-record p
+ * - seize - allocate record
+ * - release - free record
+ *
+ * CURRENT PAGE
+ *
+ * getPtr is a fast computation which does not touch the page. For
+ * seize and release there is an optimization:
+ *
+ * Define "current page" as page of latest seize or release. Its page
+ * entry is cached under record pool instance. The page is removed from
+ * its normal list. Seize and release on current page are fast and
+ * avoid touching the page. The current page is used until
+ *
+ * - seize and current page is full
+ * - release and the page is not current page
+ *
+ * Then the real page entry is updated and the page is added to the
+ * appropriate list, and a new page is made current.
+ *
+ * PAGE POLICY
+ *
+ * Allocating new page to record pool is expensive. Therefore record
+ * pool should not always return empty pages to super pool. There are
+ * two trivial policies, each with problems:
+ *
+ * - "pp1" never return empty page to super pool
+ * - "pp2" always return empty page to super pool
+ *
+ * This implementation uses "pp2" for now. A real policy is implemented
+ * in next version.
+ *
+ * OPEN ISSUES AND LIMITATIONS
+ *
+ * - smarter (virtual) placement of check bits & page entries
+ * - should getPtr etc be inlined? (too much code)
+ * - real page policy
+ * - other implementations (only HeapPool is done)
+ * - super pool list of all record pools, for statistics etc
+ * - access by multiple threads is not supported
+ */
+
+// align size
+#define SP_ALIGN_SIZE(sz, al) \
+ (((sz) + (al) - 1) & ~((al) - 1))
+
+// align pointer relative to base
+#define SP_ALIGN_PTR(p, base, al) \
+ (void*)((Uint8*)(base) + SP_ALIGN_SIZE((Uint8*)(p) - (Uint8*)(base), (al)))
+
+class SuperPool {
+public:
+ // Type of i-value, used to reference both pages and records. Page
+ // index "ip" occupies the high bits. The i-value of a page is same
+ // as i-value of record 0 on the page.
+ typedef Uint32 PtrI;
+
+ // Size and address alignment given as number of bytes (power of 2).
+ STATIC_CONST( SP_ALIGN = 8 );
+
+ // Page entry. Currently allocated as array of (2^pageBits).
+ struct PageEnt {
+ PageEnt();
+ Uint32 m_pageType;
+ Uint32 m_freeRecI;
+ Uint32 m_useCount;
+ PtrI m_nextPageI;
+ PtrI m_prevPageI;
+ };
+
+ // Number of bits for cache effective type check given as log of 2.
+ // Example: 2 means 4 bits and uses 32k for 2g of 32k pages.
+ STATIC_CONST( SP_CHECK_LOG2 = 2 );
+
+ // Doubly-linked list of pages. There is one free list in super pool
+ // and free, active, full list in each record pool.
+ struct PageList {
+ PageList();
+ PageList(PtrI pageI);
+ PtrI m_headPageI;
+ PtrI m_tailPageI;
+ Uint32 m_pageCount;
+ };
+
+ // Record pool information. Each record pool instance contains one.
+ struct RecInfo {
+ RecInfo(Uint32 recType, Uint32 recSize);
+ const Uint32 m_recType;
+ const Uint32 m_recSize;
+ Uint32 m_maxUseCount; // could be computed
+ Uint32 m_currPageI; // current page
+ Uint32 m_currFreeRecI;
+ Uint32 m_currUseCount;
+ Uint32 m_totalUseCount; // total per pool
+ Uint32 m_totalRecCount;
+ PageList m_freeList;
+ PageList m_activeList;
+ PageList m_fullList;
+ };
+
+ // Constructor. Gives page size in bytes (excluding page header) and
+ // number of bits to use for page index "ip" in i-value.
+ SuperPool(Uint32 pageSize, Uint32 pageBits);
+
+ // Initialize. Must be called after setting sizes or other parameters
+ // and before the pool is used.
+ virtual bool init();
+
+ // Destructor.
+ virtual ~SuperPool() = 0;
+
+ // Translate i-value to page entry.
+ PageEnt& getPageEnt(PtrI pageI);
+
+ // Translate i-value to page address.
+ void* getPageP(PtrI pageI);
+
+ // Translate page address to i-value (unused).
+ PtrI getPageI(void* pageP);
+
+ // Given type, return non-zero reduced type check bits.
+ Uint32 makeCheckBits(Uint32 type);
+
+ // Get type check bits from type check array.
+ Uint32 getCheckBits(PtrI pageI);
+
+ // Set type check bits in type check array.
+ void setCheckBits(PtrI pageI, Uint32 type);
+
+ // Translate i-value to record address.
+ void* getRecP(PtrI recI, RecInfo& ri);
+
+ // Move all pages from second list to end of first list.
+ void movePages(PageList& pl1, PageList& pl2);
+
+ // Add page to beginning of page list.
+ void addHeadPage(PageList& pl, PtrI pageI);
+
+ // Add page to end of page list.
+ void addTailPage(PageList& pl, PtrI pageI);
+
+ // Remove any page from page list.
+ void removePage(PageList& pl, PtrI pageI);
+
+ // Set current page. Previous current page is updated and added to
+ // appropriate list.
+ void setCurrPage(RecInfo& ri, PtrI pageI);
+
+ // Get page with some free records and make it current. Takes head of
+ // active or free list, or else gets free page from super pool.
+ bool getAvailPage(RecInfo& ri);
+
+ // Get free page from super pool and add it to record pool free list.
+ // This is an expensive subroutine of getAvailPage().
+ PtrI getFreePage(RecInfo& ri);
+
+ // Get new free page from the implementation.
+ virtual PtrI getNewPage() = 0;
+
+ // Set 3 size parameters, rounded to page size. If called before
+ // init() then init() allocates the initial size.
+ void setSizes(size_t initSize = 0, size_t incrSize = 0, size_t maxSize = 0);
+
+ const Uint32 m_pageSize;
+ const Uint32 m_pageBits;
+ // implementation must set up these pointers
+ void* m_memRoot;
+ PageEnt* m_pageEnt;
+ Uint32* m_typeCheck;
+ Uint32 m_typeSeq;
+ PageList m_pageList;
+ size_t m_totalSize;
+ size_t m_initSize;
+ size_t m_incrSize;
+ size_t m_maxSize;
+
+ // Debugging.
+ void verify(RecInfo& ri);
+};
+
+inline SuperPool::PageEnt&
+SuperPool::getPageEnt(PtrI pageI)
+{
+ Uint32 ip = pageI >> (32 - m_pageBits);
+ return m_pageEnt[ip];
+}
+
+inline void*
+SuperPool::getPageP(PtrI ptrI)
+{
+ Int32 ip = (Int32)ptrI >> (32 - m_pageBits);
+ my_ptrdiff_t sz = m_pageSize;
+ void* pageP = (Uint8*)m_memRoot + ip * sz;
+ return pageP;
+}
+
+inline Uint32
+SuperPool::makeCheckBits(Uint32 type)
+{
+ Uint32 shift = 1 << SP_CHECK_LOG2;
+ Uint32 mask = (1 << shift) - 1;
+ return 1 + type % mask;
+}
+
+inline Uint32
+SuperPool::getCheckBits(PtrI pageI)
+{
+ Uint32 ip = pageI >> (32 - m_pageBits);
+ Uint32 xp = ip >> (5 - SP_CHECK_LOG2);
+ Uint32 yp = ip & (1 << (5 - SP_CHECK_LOG2)) - 1;
+ Uint32& w = m_typeCheck[xp];
+ Uint32 shift = 1 << SP_CHECK_LOG2;
+ Uint32 mask = (1 << shift) - 1;
+ // get
+ Uint32 bits = (w >> yp * shift) & mask;
+ return bits;
+}
+
+inline void
+SuperPool::setCheckBits(PtrI pageI, Uint32 type)
+{
+ Uint32 ip = pageI >> (32 - m_pageBits);
+ Uint32 xp = ip >> (5 - SP_CHECK_LOG2);
+ Uint32 yp = ip & (1 << (5 - SP_CHECK_LOG2)) - 1;
+ Uint32& w = m_typeCheck[xp];
+ Uint32 shift = 1 << SP_CHECK_LOG2;
+ Uint32 mask = (1 << shift) - 1;
+ // set
+ Uint32 bits = makeCheckBits(type);
+ w &= ~(mask << yp * shift);
+ w |= (bits << yp * shift);
+}
+
+inline void*
+SuperPool::getRecP(PtrI ptrI, RecInfo& ri)
+{
+ const Uint32 recMask = (1 << (32 - m_pageBits)) - 1;
+ PtrI pageI = ptrI & ~recMask;
+#if NDB_SP_VERIFY_LEVEL >= 1
+ Uint32 bits1 = getCheckBits(pageI);
+ Uint32 bits2 = makeCheckBits(ri.m_recType);
+ assert(bits1 == bits2);
+#endif
+ void* pageP = getPageP(pageI);
+ Uint32 ir = ptrI & recMask;
+ void* recP = (Uint8*)pageP + ir * ri.m_recSize;
+ return recP;
+}
+
+/*
+ * HeapPool - SuperPool on heap (concrete class)
+ *
+ * A super pool based on malloc with memory root on the heap. This
+ * pool type has 2 realistic uses:
+ *
+ * - a small pool with only initial malloc and pageBits set to match
+ * - the big pool from which all heap allocations are done
+ *
+ * A "smart" malloc may break "ip" limit by using different VM areas for
+ * different sized requests. For this reason malloc is done in units of
+ * increment size if possible. Memory root is set to start of first
+ * malloc.
+ */
+
+class HeapPool : public SuperPool {
+public:
+ // Describes malloc area. The areas are kept in singly linked list.
+ // There is a list head and pointers to current and last area.
+ struct Area {
+ Area();
+ Area* m_nextArea;
+ PtrI m_firstPageI;
+ Uint32 m_currPage;
+ Uint32 m_numPages;
+ void* m_memory;
+ };
+
+ // Constructor.
+ HeapPool(Uint32 pageSize, Uint32 pageBits);
+
+ // Initialize.
+ virtual bool init();
+
+ // Destructor.
+ virtual ~HeapPool();
+
+ // Use malloc to allocate more.
+ bool allocMoreData(size_t size);
+
+ // Get new page from current area.
+ virtual PtrI getNewPage();
+
+ // List of malloc areas.
+ Area m_areaHead;
+ Area* m_currArea;
+ Area* m_lastArea;
+
+ // Fraction of malloc size to try if cannot get all in one.
+ Uint32 m_mallocPart;
+};
+
+/*
+ * RecordPool - record pool using one super pool instance (template)
+ *
+ * Documented under SuperPool. Satisfies ArrayPool interface.
+ */
+
+template <class T>
+class RecordPool {
+public:
+ // Constructor.
+ RecordPool(SuperPool& superPool);
+
+ // Destructor.
+ ~RecordPool();
+
+ // Update pointer ptr.p according to i-value ptr.i.
+ void getPtr(Ptr<T>& ptr);
+
+ // Allocate record from the pool.
+ bool seize(Ptr<T>& ptr);
+
+ // Return record to the pool.
+ void release(Ptr<T>& ptr);
+
+ // todo variants of basic methods
+
+ // Return all pages to super pool. The force flag is required if
+ // there are any used records.
+ void free(bool force);
+
+ SuperPool& m_superPool;
+ SuperPool::RecInfo m_recInfo;
+};
+
+template <class T>
+inline
+RecordPool<T>::RecordPool(SuperPool& superPool) :
+ m_superPool(superPool),
+ m_recInfo(1 + superPool.m_typeSeq++, sizeof(T))
+{
+ SuperPool::RecInfo& ri = m_recInfo;
+ assert(sizeof(T) == SP_ALIGN_SIZE(sizeof(T), sizeof(Uint32)));
+ Uint32 maxUseCount = superPool.m_pageSize / sizeof(T);
+ Uint32 sizeLimit = 1 << (32 - superPool.m_pageBits);
+ if (maxUseCount >= sizeLimit)
+ maxUseCount = sizeLimit;
+ ri.m_maxUseCount = maxUseCount;
+}
+
+template <class T>
+inline
+RecordPool<T>::~RecordPool()
+{
+ free(true);
+}
+
+template <class T>
+inline void
+RecordPool<T>::getPtr(Ptr<T>& ptr)
+{
+ void* recP = m_superPool.getRecP(ptr.i, m_recInfo);
+ ptr.p = static_cast<T*>(recP);
+}
+
+template <class T>
+inline bool
+RecordPool<T>::seize(Ptr<T>& ptr)
+{
+ SuperPool& sp = m_superPool;
+ SuperPool::RecInfo& ri = m_recInfo;
+ if (ri.m_currFreeRecI != RNIL || sp.getAvailPage(ri)) {
+ SuperPool::PtrI recI = ri.m_currFreeRecI;
+ void* recP = sp.getRecP(recI, ri);
+ ri.m_currFreeRecI = *(Uint32*)recP;
+ Uint32 useCount = ri.m_currUseCount;
+ assert(useCount < ri.m_maxUseCount);
+ ri.m_currUseCount = useCount + 1;
+ ri.m_totalUseCount++;
+ ptr.i = recI;
+ ptr.p = static_cast<T*>(recP);
+ return true;
+ }
+ return false;
+}
+
+template <class T>
+inline void
+RecordPool<T>::release(Ptr<T>& ptr)
+{
+ SuperPool& sp = m_superPool;
+ SuperPool::RecInfo& ri = m_recInfo;
+ const Uint32 recMask = (1 << (32 - sp.m_pageBits)) - 1;
+ SuperPool::PtrI recI = ptr.i;
+ SuperPool::PtrI pageI = recI & ~recMask;
+ if (pageI != ri.m_currPageI) {
+ sp.setCurrPage(ri, pageI);
+ }
+ void* recP = sp.getRecP(recI, ri);
+ *(Uint32*)recP = ri.m_currFreeRecI;
+ ri.m_currFreeRecI = recI;
+ Uint32 useCount = ri.m_currUseCount;
+ assert(useCount != 0);
+ ri.m_currUseCount = useCount - 1;
+ ri.m_totalUseCount--;
+ ptr.i = RNIL;
+ ptr.p = 0;
+}
+
+template <class T>
+inline void
+RecordPool<T>::free(bool force)
+{
+ SuperPool& sp = m_superPool;
+ SuperPool::RecInfo& ri = m_recInfo;
+ sp.setCurrPage(ri, RNIL);
+ assert(force || ri.m_totalUseCount == 0);
+ sp.movePages(sp.m_pageList, ri.m_freeList);
+ sp.movePages(sp.m_pageList, ri.m_activeList);
+ sp.movePages(sp.m_pageList, ri.m_fullList);
+ ri.m_totalRecCount = 0;
+}
+
+#endif
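
A typical use of the interface declared above would look roughly like the following sketch. MyRec, the chosen sizes and the surrounding function are hypothetical; Ptr<T> comes from the existing kernel headers pulled in via pc.hpp, and only calls declared in this header are used.

    #include "SuperPool.hpp"

    struct MyRec { Uint32 key; Uint32 data[7]; };   // hypothetical 32-byte record

    void example()
    {
      HeapPool pool(32768, 15);            // 32k pages, 15 page-index bits
      pool.setSizes(64 * 32768);           // initial size, rounded to page size
      if (!pool.init())
        return;                            // malloc failed

      RecordPool<MyRec> recPool(pool);     // gets its page type from m_typeSeq

      Ptr<MyRec> ptr;
      if (recPool.seize(ptr)) {            // allocate one record
        ptr.p->key = 1;
        recPool.getPtr(ptr);               // i-value -> pointer lookup
        recPool.release(ptr);              // return it to the pool
      }
    }
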
diff --git a/ndb/src/kernel/vm/TransporterCallback.cpp b/ndb/src/kernel/vm/TransporterCallback.cpp
index ba929b7ea7a..e5322edaecc 100644
--- a/ndb/src/kernel/vm/TransporterCallback.cpp
+++ b/ndb/src/kernel/vm/TransporterCallback.cpp
@@ -33,6 +33,7 @@
#include <NdbOut.hpp>
#include "DataBuffer.hpp"
+
/**
* The instance
*/
@@ -337,9 +338,9 @@ reportError(void * callbackObj, NodeId nodeId, TransporterError errorCode){
if(errorCode & 0x8000)
- signal.theData[0] = EventReport::TransporterError;
+ signal.theData[0] = NDB_LE_TransporterError;
else
- signal.theData[0] = EventReport::TransporterWarning;
+ signal.theData[0] = NDB_LE_TransporterWarning;
signal.theData[1] = nodeId;
signal.theData[2] = errorCode;
@@ -363,7 +364,7 @@ reportSendLen(void * callbackObj,
signal.header.theLength = 3;
signal.header.theSendersSignalId = 0;
signal.header.theSendersBlockRef = numberToRef(0, globalData.ownId);
- signal.theData[0] = EventReport::SendBytesStatistic;
+ signal.theData[0] = NDB_LE_SendBytesStatistic;
signal.theData[1] = nodeId;
signal.theData[2] = (bytes/count);
globalScheduler.execute(&signal, JBA, CMVMI, GSN_EVENT_REP);
@@ -382,7 +383,7 @@ reportReceiveLen(void * callbackObj,
signal.header.theLength = 3;
signal.header.theSendersSignalId = 0;
signal.header.theSendersBlockRef = numberToRef(0, globalData.ownId);
- signal.theData[0] = EventReport::ReceiveBytesStatistic;
+ signal.theData[0] = NDB_LE_ReceiveBytesStatistic;
signal.theData[1] = nodeId;
signal.theData[2] = (bytes/count);
globalScheduler.execute(&signal, JBA, CMVMI, GSN_EVENT_REP);
@@ -452,3 +453,8 @@ SignalLoggerManager::printSegmentedSection(FILE * output,
putc('\n', output);
}
+void
+transporter_recv_from(void * callbackObj, NodeId nodeId){
+ globalData.m_nodeInfo[nodeId].m_heartbeat_cnt= 0;
+ return;
+}
diff --git a/ndb/src/kernel/vm/VMSignal.hpp b/ndb/src/kernel/vm/VMSignal.hpp
index 9111ee7949c..45543c5d174 100644
--- a/ndb/src/kernel/vm/VMSignal.hpp
+++ b/ndb/src/kernel/vm/VMSignal.hpp
@@ -78,10 +78,16 @@ public:
#define VMS_DATA_SIZE \
(MAX_ATTRIBUTES_IN_TABLE + MAX_TUPLE_SIZE_IN_WORDS + MAX_KEY_SIZE_IN_WORDS)
+#if VMS_DATA_SIZE > 8192
+#error "VMSignal buffer is too small"
+#endif
+
SignalHeader header; // 28 bytes
SegmentedSectionPtr m_sectionPtr[3];
- Uint32 theData[25+VMS_DATA_SIZE]; // 2048 32-bit words -> 8K Bytes
-
+ union {
+ Uint32 theData[8192]; // 8192 32-bit words -> 32K Bytes
+ Uint64 dummyAlign;
+ };
void garbage_register();
};
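
Two small idioms in the VMSignal.hpp hunk above are worth spelling out: the preprocessor guard turns an oversized VMS_DATA_SIZE into a build failure instead of a silent buffer overflow, and the anonymous union with a Uint64 member forces 8-byte alignment of theData. A standalone sketch of the same pattern (all names and sizes here are illustrative, not NDB code):

    /* Illustrative only. */
    typedef unsigned int       Uint32;
    typedef unsigned long long Uint64;

    #define MAX_PAYLOAD_WORDS (2048 + 4096 + 1024)  /* made-up component sizes */
    #define BUFFER_WORDS      8192

    #if MAX_PAYLOAD_WORDS > BUFFER_WORDS
    #error "signal buffer is too small for the maximum payload"
    #endif

    struct SignalBuffer {
      union {
        Uint32 theData[BUFFER_WORDS];  /* 8192 32-bit words -> 32 KB */
        Uint64 dummyAlign;             /* forces 8-byte alignment of theData */
      };
    };
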
diff --git a/ndb/src/kernel/vm/testSuperPool.cpp b/ndb/src/kernel/vm/testSuperPool.cpp
new file mode 100644
index 00000000000..194b3a43fa0
--- /dev/null
+++ b/ndb/src/kernel/vm/testSuperPool.cpp
@@ -0,0 +1,220 @@
+#if 0
+make -f Makefile -f - testSuperPool <<'_eof_'
+testSuperPool: testSuperPool.cpp libkernel.a
+ $(CXXCOMPILE) -o $@ $@.cpp libkernel.a -L../../common/util/.libs -lgeneral
+_eof_
+exit $?
+#endif
+
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#include "SuperPool.hpp"
+#include <NdbOut.hpp>
+
+template <Uint32 sz>
+struct A {
+ Uint32 a[sz];
+ void fill() {
+ Uint32 c = 0;
+ for (Uint32 i = 0; i + 1 < sz; i++) {
+ a[i] = random();
+ c = (c << 1) ^ a[i];
+ }
+ a[sz - 1] = c;
+ }
+ void check() {
+ Uint32 c = 0;
+ for (Uint32 i = 0; i + 1 < sz; i++) {
+ c = (c << 1) ^ a[i];
+ }
+ assert(a[sz - 1] == c);
+ }
+};
+
+static Uint32
+urandom(Uint32 n)
+{
+ return (Uint32)random() % n;
+}
+
+static Uint32
+random_coprime(Uint32 n)
+{
+ Uint32 prime[] = { 101, 211, 307, 401, 503, 601, 701, 809, 907 };
+ Uint32 count = sizeof(prime) / sizeof(prime[0]);
+ while (1) {
+ Uint32 i = urandom(count);
+ if (n % prime[i] != 0)
+ return prime[i];
+ }
+}
+
+static int
+cmpPtrI(const void* a, const void* b)
+{
+ Ptr<const void> u = *(Ptr<const void>*)a;
+ Ptr<const void> v = *(Ptr<const void>*)b;
+ return u.i < v.i ? -1 : u.i > v.i ? +1 : 0;
+}
+
+static int
+cmpPtrP(const void* a, const void* b)
+{
+ Ptr<const void> u = *(Ptr<const void>*)a;
+ Ptr<const void> v = *(Ptr<const void>*)b;
+ return u.p < v.p ? -1 : u.p > v.p ? +1 : 0;
+}
+
+static Uint32 loopcount = 3;
+
+template <Uint32 sz>
+void
+sp_test(SuperPool& sp)
+{
+ typedef A<sz> T;
+ RecordPool<T> rp(sp);
+ SuperPool::RecInfo& ri = rp.m_recInfo;
+ Uint32 pageCount = sp.m_totalSize / sp.m_pageSize;
+ Uint32 perPage = rp.m_recInfo.m_maxUseCount;
+ Uint32 perPool = perPage * pageCount;
+ ndbout << "pages=" << pageCount << " perpage=" << perPage << " perpool=" << perPool << endl;
+ Ptr<T>* ptrList = new Ptr<T> [perPool];
+ memset(ptrList, 0x1f, perPool * sizeof(Ptr<T>));
+ Uint32 loop;
+ for (loop = 0; loop < loopcount; loop++) {
+ ndbout << "loop " << loop << endl;
+ Uint32 i, j;
+ // seize all
+ ndbout << "seize all" << endl;
+ for (i = 0; i < perPool + 1; i++) {
+ j = i;
+ sp.verify(ri);
+ Ptr<T> ptr1 = { 0, RNIL };
+ if (! rp.seize(ptr1))
+ break;
+ // write value
+ ptr1.p->fill();
+ ptr1.p->check();
+ // verify getPtr
+ Ptr<T> ptr2 = { 0, ptr1.i };
+ rp.getPtr(ptr2);
+ assert(ptr1.i == ptr2.i && ptr1.p == ptr2.p);
+ // save
+ ptrList[j] = ptr1;
+ }
+ assert(i == perPool);
+ assert(ri.m_totalUseCount == perPool && ri.m_totalRecCount == perPool);
+ sp.verify(ri);
+ // check duplicates
+ {
+ Ptr<T>* ptrList2 = new Ptr<T> [perPool];
+ memcpy(ptrList2, ptrList, perPool * sizeof(Ptr<T>));
+ qsort(ptrList2, perPool, sizeof(Ptr<T>), cmpPtrI);
+ for (i = 1; i < perPool; i++)
+ assert(ptrList2[i - 1].i != ptrList2[i].i);
+ qsort(ptrList2, perPool, sizeof(Ptr<T>), cmpPtrP);
+ for (i = 1; i < perPool; i++)
+ assert(ptrList2[i - 1].p != ptrList2[i].p);
+ delete [] ptrList2;
+ }
+ // release all in various orders
+ ndbout << "release all" << endl;
+ Uint32 coprime = random_coprime(perPool);
+ for (i = 0; i < perPool; i++) {
+ sp.verify(ri);
+ switch (loop % 3) {
+ case 0: // ascending
+ j = i;
+ break;
+ case 1: // descending
+ j = perPool - 1 - i;
+ break;
+ case 2: // pseudo-random
+ j = (coprime * i) % perPool;
+ break;
+ }
+ Ptr<T>& ptr = ptrList[j];
+ assert(ptr.i != RNIL && ptr.p != 0);
+ ptr.p->check();
+ rp.release(ptr);
+ assert(ptr.i == RNIL && ptr.p == 0);
+ }
+ sp.setCurrPage(ri, RNIL);
+ assert(ri.m_totalUseCount == 0 && ri.m_totalRecCount == 0);
+ sp.verify(ri);
+ // seize/release at random
+ ndbout << "seize/release at random" << endl;
+ for (i = 0; i < loopcount * perPool; i++) {
+ j = urandom(perPool);
+ Ptr<T>& ptr = ptrList[j];
+ if (ptr.i == RNIL) {
+ rp.seize(ptr);
+ ptr.p->fill();
+ } else {
+ ptr.p->check();
+ rp.release(ptr);
+ }
+ }
+ ndbout << "used " << ri.m_totalUseCount << endl;
+ sp.verify(ri);
+ // release all
+ ndbout << "release all" << endl;
+ for (i = 0; i < perPool; i++) {
+ j = i;
+ Ptr<T>& ptr = ptrList[j];
+ if (ptr.i != RNIL) {
+ ptr.p->check();
+ rp.release(ptr);
+ }
+ }
+ sp.setCurrPage(ri, RNIL);
+ assert(ri.m_totalUseCount == 0 && ri.m_totalRecCount == 0);
+ sp.verify(ri);
+ }
+ // done
+ delete [] ptrList;
+}
+
+static Uint32 pageCount = 99;
+static Uint32 pageSize = 32768;
+static Uint32 pageBits = 15;
+
+const Uint32 sz1 = 3, sz2 = 4, sz3 = 53, sz4 = 424, sz5 = 5353;
+
+template void sp_test<sz1>(SuperPool& sp);
+template void sp_test<sz2>(SuperPool& sp);
+template void sp_test<sz3>(SuperPool& sp);
+template void sp_test<sz4>(SuperPool& sp);
+template void sp_test<sz5>(SuperPool& sp);
+
+int
+main()
+{
+ HeapPool sp(pageSize, pageBits);
+ sp.setSizes(pageCount * pageSize);
+ if (! sp.init())
+ assert(false);
+ Uint16 s = (Uint16)getpid();
+ srandom(s);
+ ndbout << "rand " << s << endl;
+ sp_test<sz1>(sp);
+ sp_test<sz2>(sp);
+ sp_test<sz3>(sp);
+ sp_test<sz4>(sp);
+ sp_test<sz5>(sp);
+ return 0;
+}
diff --git a/ndb/src/mgmapi/LocalConfig.cpp b/ndb/src/mgmapi/LocalConfig.cpp
index 0265f982df3..75ad8b40a1f 100644
--- a/ndb/src/mgmapi/LocalConfig.cpp
+++ b/ndb/src/mgmapi/LocalConfig.cpp
@@ -27,7 +27,9 @@ LocalConfig::LocalConfig(){
bool
LocalConfig::init(const char *connectString,
- const char *fileName) {
+ const char *fileName)
+{
+ DBUG_ENTER("LocalConfig::init");
/**
* Escalation:
* 1. Check connectString
@@ -38,21 +40,25 @@ LocalConfig::init(const char *connectString,
* 6. Check defaultConnectString
*/
+ _ownNodeId= 0;
+
//1. Check connectString
if(connectString != 0 && connectString[0] != 0){
if(readConnectString(connectString, "connect string")){
- return true;
- }
- return false;
+ if (ids.size())
+ DBUG_RETURN(true);
+ // only nodeid given, continue to find hosts
+ } else
+ DBUG_RETURN(false);
}
//2. Check given filename
if (fileName && strlen(fileName) > 0) {
bool fopenError;
if(readFile(fileName, fopenError)){
- return true;
+ DBUG_RETURN(true);
}
- return false;
+ DBUG_RETURN(false);
}
//3. Check environment variable
@@ -60,9 +66,9 @@ LocalConfig::init(const char *connectString,
if(NdbEnv_GetEnv("NDB_CONNECTSTRING", buf, sizeof(buf)) &&
strlen(buf) != 0){
if(readConnectString(buf, "NDB_CONNECTSTRING")){
- return true;
+ DBUG_RETURN(true);
}
- return false;
+ DBUG_RETURN(false);
}
//4. Check Ndb.cfg in NDB_HOME
@@ -71,9 +77,9 @@ LocalConfig::init(const char *connectString,
char *buf= NdbConfig_NdbCfgName(1 /*true*/);
NdbAutoPtr<char> tmp_aptr(buf);
if(readFile(buf, fopenError))
- return true;
+ DBUG_RETURN(true);
if (!fopenError)
- return false;
+ DBUG_RETURN(false);
}
//5. Check Ndb.cfg in cwd
@@ -82,9 +88,9 @@ LocalConfig::init(const char *connectString,
char *buf= NdbConfig_NdbCfgName(0 /*false*/);
NdbAutoPtr<char> tmp_aptr(buf);
if(readFile(buf, fopenError))
- return true;
+ DBUG_RETURN(true);
if (!fopenError)
- return false;
+ DBUG_RETURN(false);
}
//7. Check
@@ -92,12 +98,12 @@ LocalConfig::init(const char *connectString,
char buf[256];
BaseString::snprintf(buf, sizeof(buf), "host=localhost:%s", NDB_PORT);
if(readConnectString(buf, "default connect string"))
- return true;
+ DBUG_RETURN(true);
}
setError(0, "");
- return false;
+ DBUG_RETURN(false);
}
LocalConfig::~LocalConfig(){
@@ -142,6 +148,7 @@ const char *nodeIdTokens[] = {
const char *hostNameTokens[] = {
"host://%[^:]:%i",
"host=%[^:]:%i",
+ "mgmd=%[^:]:%i",
"%[^:^=^ ]:%i",
"%s %i",
0
@@ -207,36 +214,22 @@ LocalConfig::parseString(const char * connectString, BaseString &err){
char * copy = strdup(connectString);
NdbAutoPtr<char> tmp_aptr(copy);
- bool b_nodeId = false;
- bool found_other = false;
-
for (char *tok = strtok_r(copy,";,",&for_strtok); tok != 0;
tok = strtok_r(NULL, ";,", &for_strtok)) {
if (tok[0] == '#') continue;
- if (!b_nodeId) // only one nodeid definition allowed
- if (b_nodeId = parseNodeId(tok))
+ if (!_ownNodeId) // only one nodeid definition allowed
+ if (parseNodeId(tok))
continue;
- if (found_other = parseHostName(tok))
+ if (parseHostName(tok))
continue;
- if (found_other = parseFileName(tok))
+ if (parseFileName(tok))
continue;
err.assfmt("Unexpected entry: \"%s\"", tok);
return false;
}
- if (b_nodeId && !found_other)
- {
- BaseString tmp;
- tmp.assfmt("host=localhost:%s", NDB_PORT);
- if(parseHostName(tmp.c_str()))
- return true;
-
- err.appfmt("Missing host/file name extry in \"%s\"", connectString);
- return false;
- }
-
return true;
}
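
With the parseString() changes above, a connect string may now carry a bare nodeid entry (host resolution then continues through the later escalation steps) and accepts "mgmd=" as a synonym for "host=". A sketch of how this surfaces through the public API (host name and port are examples only):

    /* Sketch only: connect-string forms accepted after this patch. */
    #include <mgmapi.h>

    int connectExample()
    {
      NdbMgmHandle h = ndb_mgm_create_handle();
      if (h == 0)
        return -1;

      /* "mgmd=host:port" is now recognized alongside "host=host:port";
         a lone "nodeid=4" no longer makes the whole string invalid. */
      if (ndb_mgm_set_connectstring(h, "nodeid=4;mgmd=mgmhost:1186") != 0 ||
          ndb_mgm_connect(h, 3 /*retries*/, 5 /*delay s*/, 1 /*verbose*/) != 0) {
        ndb_mgm_destroy_handle(&h);
        return -1;
      }

      ndb_mgm_destroy_handle(&h);
      return 0;
    }
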
diff --git a/ndb/src/mgmapi/Makefile.am b/ndb/src/mgmapi/Makefile.am
index 2f2fb407e46..db730bf8c89 100644
--- a/ndb/src/mgmapi/Makefile.am
+++ b/ndb/src/mgmapi/Makefile.am
@@ -1,7 +1,7 @@
noinst_LTLIBRARIES = libmgmapi.la
-libmgmapi_la_SOURCES = mgmapi.cpp mgmapi_configuration.cpp LocalConfig.cpp
+libmgmapi_la_SOURCES = mgmapi.cpp ndb_logevent.cpp mgmapi_configuration.cpp LocalConfig.cpp
INCLUDES_LOC = -I$(top_srcdir)/ndb/include/mgmapi
diff --git a/ndb/src/mgmapi/mgmapi.cpp b/ndb/src/mgmapi/mgmapi.cpp
index 06b534ac0ca..8263e8cbc93 100644
--- a/ndb/src/mgmapi/mgmapi.cpp
+++ b/ndb/src/mgmapi/mgmapi.cpp
@@ -22,8 +22,9 @@
#include <NdbSleep.h>
#include <NdbTCP.h>
-#include "mgmapi.h"
-#include "mgmapi_debug.h"
+#include <mgmapi.h>
+#include <mgmapi_internal.h>
+#include <mgmapi_debug.h>
#include "mgmapi_configuration.hpp"
#include <socket_io.h>
@@ -153,7 +154,7 @@ ndb_mgm_create_handle()
h->socket = NDB_INVALID_SOCKET;
h->read_timeout = 50000;
h->write_timeout = 100;
- h->cfg_i = 0;
+ h->cfg_i = -1;
h->errstream = stdout;
strncpy(h->last_error_desc, "No error", NDB_MGM_MAX_ERR_DESC_SIZE);
@@ -165,7 +166,7 @@ ndb_mgm_create_handle()
h->logfile = 0;
#endif
- DBUG_PRINT("exit",("ret: %lx", h));
+ DBUG_PRINT("info", ("handle=0x%x", (UintPtr)h));
DBUG_RETURN(h);
}
@@ -173,6 +174,8 @@ extern "C"
int
ndb_mgm_set_connectstring(NdbMgmHandle handle, const char * mgmsrv)
{
+ DBUG_ENTER("ndb_mgm_set_connectstring");
+ DBUG_PRINT("info", ("handle=0x%x", (UintPtr)handle));
handle->cfg.~LocalConfig();
new (&(handle->cfg)) LocalConfig;
if (!handle->cfg.init(mgmsrv, 0) ||
@@ -180,12 +183,12 @@ ndb_mgm_set_connectstring(NdbMgmHandle handle, const char * mgmsrv)
{
handle->cfg.~LocalConfig();
new (&(handle->cfg)) LocalConfig;
- handle->cfg.init(0, 0); /* reset the LocalCongig */
+ handle->cfg.init(0, 0); /* reset the LocalConfig */
SET_ERROR(handle, NDB_MGM_ILLEGAL_CONNECT_STRING, "");
- return -1;
+ DBUG_RETURN(-1);
}
- handle->cfg_i= 0;
- return 0;
+ handle->cfg_i= -1;
+ DBUG_RETURN(0);
}
/**
@@ -197,9 +200,12 @@ ndb_mgm_destroy_handle(NdbMgmHandle * handle)
{
DBUG_ENTER("ndb_mgm_destroy_handle");
if(!handle)
- return;
- DBUG_PRINT("enter",("*handle: %lx", *handle));
-
+ DBUG_VOID_RETURN;
+ DBUG_PRINT("info", ("handle=0x%x", (UintPtr)(* handle)));
+ /**
+ * Important: only disconnect if connected;
+ * other code relies on this.
+ */
if((* handle)->connected){
ndb_mgm_disconnect(* handle);
}
@@ -307,8 +313,11 @@ ndb_mgm_call(NdbMgmHandle handle, const ParserRow<ParserDummy> *command_reply,
out.println("%s: %s", name, val_s.c_str());
break;
case PropertiesType_Properties:
+ DBUG_PRINT("info",("Ignoring PropertiesType_Properties."));
/* Ignore */
break;
+ default:
+ DBUG_PRINT("info",("Ignoring PropertiesType: %d.",t));
}
}
#ifdef MGMAPI_LOG
@@ -324,17 +333,23 @@ ndb_mgm_call(NdbMgmHandle handle, const ParserRow<ParserDummy> *command_reply,
ParserDummy session(handle->socket);
Parser_t parser(command_reply, in, true, true, true);
-#if 1
const Properties* p = parser.parse(ctx, session);
if (p == NULL){
- /**
- * Print some info about why the parser returns NULL
- */
- DBUG_PRINT("info",("ctx.status: %d, ctx.m_currentToken: %s",
- ctx.m_status, ctx.m_currentToken));
- //ndbout << " status=" << ctx.m_status << ", curr="
- //<< ctx.m_currentToken << endl;
- }
+ if(!ndb_mgm_is_connected(handle)) {
+ return NULL;
+ }
+ else
+ {
+ /**
+ * Print some info about why the parser returns NULL
+ */
+ fprintf(handle->errstream,
+ "Error in mgm protocol parser. cmd: >%s< status: %d curr: %d\n",
+ cmd, (Uint32)ctx.m_status, ctx.m_currentToken);
+ DBUG_PRINT("info",("ctx.status: %d, ctx.m_currentToken: %s",
+ ctx.m_status, ctx.m_currentToken));
+ }
+ }
#ifdef MGMAPI_LOG
else {
/**
@@ -344,9 +359,26 @@ ndb_mgm_call(NdbMgmHandle handle, const ParserRow<ParserDummy> *command_reply,
}
#endif
DBUG_RETURN(p);
-#else
- DBUG_RETURN(parser.parse(ctx, session));
-#endif
+}
+
+/**
+ * Returns true if connected
+ */
+extern "C"
+int ndb_mgm_is_connected(NdbMgmHandle handle)
+{
+ if(!handle)
+ return 0;
+
+ if(handle->connected)
+ {
+ if(Ndb_check_socket_hup(handle->socket))
+ {
+ handle->connected= 0;
+ NDB_CLOSE_SOCKET(handle->socket);
+ }
+ }
+ return handle->connected;
}
/**
@@ -360,6 +392,7 @@ ndb_mgm_connect(NdbMgmHandle handle, int no_retries,
SET_ERROR(handle, NDB_MGM_NO_ERROR, "Executing: ndb_mgm_connect");
CHECK_HANDLE(handle, -1);
+ DBUG_ENTER("ndb_mgm_connect");
#ifdef MGMAPI_LOG
/**
* Open the log file
@@ -389,6 +422,13 @@ ndb_mgm_connect(NdbMgmHandle handle, int no_retries,
}
if (sockfd != NDB_INVALID_SOCKET)
break;
+#ifndef DBUG_OFF
+ {
+ char buf[1024];
+ DBUG_PRINT("info",("Unable to connect with connect string: %s",
+ cfg.makeConnectString(buf,sizeof(buf))));
+ }
+#endif
if (verbose > 0) {
char buf[1024];
fprintf(handle->errstream, "Unable to connect with connect string: %s\n",
@@ -402,7 +442,7 @@ ndb_mgm_connect(NdbMgmHandle handle, int no_retries,
cfg.makeConnectString(buf,sizeof(buf)));
if (verbose == -2)
fprintf(handle->errstream, ", failed.\n");
- return -1;
+ DBUG_RETURN(-1);
}
if (verbose == -1) {
fprintf(handle->errstream, "Retrying every %d seconds",
@@ -433,7 +473,7 @@ ndb_mgm_connect(NdbMgmHandle handle, int no_retries,
handle->socket = sockfd;
handle->connected = 1;
- return 0;
+ DBUG_RETURN(0);
}
/**
@@ -651,7 +691,6 @@ ndb_mgm_get_status(NdbMgmHandle handle)
malloc(sizeof(ndb_mgm_cluster_state)+
noOfNodes*(sizeof(ndb_mgm_node_state)+sizeof("000.000.000.000#")));
- state->hostname= 0;
state->no_of_nodes= noOfNodes;
ndb_mgm_node_state * ptr = &state->node_states[0];
int nodeId = 0;
@@ -944,67 +983,68 @@ ndb_mgm_restart(NdbMgmHandle handle, int no_of_nodes, const int *node_list)
return ndb_mgm_restart2(handle, no_of_nodes, node_list, 0, 0, 0);
}
-static const char *clusterlog_level_names[]=
+static const char *clusterlog_severity_names[]=
{ "enabled", "debug", "info", "warning", "error", "critical", "alert" };
-struct ndb_mgm_clusterlog_levels
+struct ndb_mgm_event_severities
{
const char* name;
- enum ndb_mgm_clusterlog_level level;
-} clusterlog_levels[] = {
- { clusterlog_level_names[0], NDB_MGM_CLUSTERLOG_ON },
- { clusterlog_level_names[1], NDB_MGM_CLUSTERLOG_DEBUG },
- { clusterlog_level_names[2], NDB_MGM_CLUSTERLOG_INFO },
- { clusterlog_level_names[3], NDB_MGM_CLUSTERLOG_WARNING },
- { clusterlog_level_names[4], NDB_MGM_CLUSTERLOG_ERROR },
- { clusterlog_level_names[5], NDB_MGM_CLUSTERLOG_CRITICAL },
- { clusterlog_level_names[6], NDB_MGM_CLUSTERLOG_ALERT },
- { "all", NDB_MGM_CLUSTERLOG_ALL },
- { 0, NDB_MGM_ILLEGAL_CLUSTERLOG_LEVEL },
+ enum ndb_mgm_event_severity severity;
+} clusterlog_severities[] = {
+ { clusterlog_severity_names[0], NDB_MGM_EVENT_SEVERITY_ON },
+ { clusterlog_severity_names[1], NDB_MGM_EVENT_SEVERITY_DEBUG },
+ { clusterlog_severity_names[2], NDB_MGM_EVENT_SEVERITY_INFO },
+ { clusterlog_severity_names[3], NDB_MGM_EVENT_SEVERITY_WARNING },
+ { clusterlog_severity_names[4], NDB_MGM_EVENT_SEVERITY_ERROR },
+ { clusterlog_severity_names[5], NDB_MGM_EVENT_SEVERITY_CRITICAL },
+ { clusterlog_severity_names[6], NDB_MGM_EVENT_SEVERITY_ALERT },
+ { "all", NDB_MGM_EVENT_SEVERITY_ALL },
+ { 0, NDB_MGM_ILLEGAL_EVENT_SEVERITY },
};
extern "C"
-ndb_mgm_clusterlog_level
-ndb_mgm_match_clusterlog_level(const char * name)
+ndb_mgm_event_severity
+ndb_mgm_match_event_severity(const char * name)
{
if(name == 0)
- return NDB_MGM_ILLEGAL_CLUSTERLOG_LEVEL;
+ return NDB_MGM_ILLEGAL_EVENT_SEVERITY;
- for(int i = 0; clusterlog_levels[i].name !=0 ; i++)
- if(strcasecmp(name, clusterlog_levels[i].name) == 0)
- return clusterlog_levels[i].level;
+ for(int i = 0; clusterlog_severities[i].name !=0 ; i++)
+ if(strcasecmp(name, clusterlog_severities[i].name) == 0)
+ return clusterlog_severities[i].severity;
- return NDB_MGM_ILLEGAL_CLUSTERLOG_LEVEL;
+ return NDB_MGM_ILLEGAL_EVENT_SEVERITY;
}
extern "C"
const char *
-ndb_mgm_get_clusterlog_level_string(enum ndb_mgm_clusterlog_level level)
+ndb_mgm_get_event_severity_string(enum ndb_mgm_event_severity severity)
{
- int i= (int)level;
- if (i >= 0 && i < (int)NDB_MGM_CLUSTERLOG_ALL)
- return clusterlog_level_names[i];
- for(i = (int)NDB_MGM_CLUSTERLOG_ALL; clusterlog_levels[i].name != 0; i++)
- if(clusterlog_levels[i].level == level)
- return clusterlog_levels[i].name;
+ int i= (int)severity;
+ if (i >= 0 && i < (int)NDB_MGM_EVENT_SEVERITY_ALL)
+ return clusterlog_severity_names[i];
+ for(i = (int)NDB_MGM_EVENT_SEVERITY_ALL; clusterlog_severities[i].name != 0; i++)
+ if(clusterlog_severities[i].severity == severity)
+ return clusterlog_severities[i].name;
return 0;
}
extern "C"
-unsigned int *
-ndb_mgm_get_logfilter(NdbMgmHandle handle)
+const unsigned int *
+ndb_mgm_get_clusterlog_severity_filter(NdbMgmHandle handle)
{
- SET_ERROR(handle, NDB_MGM_NO_ERROR, "Executing: ndb_mgm_get_logfilter");
- static Uint32 enabled[(int)NDB_MGM_CLUSTERLOG_ALL] = {0,0,0,0,0,0,0};
+ SET_ERROR(handle, NDB_MGM_NO_ERROR, "Executing: ndb_mgm_get_clusterlog_severity_filter");
+ static unsigned int enabled[(int)NDB_MGM_EVENT_SEVERITY_ALL]=
+ {0,0,0,0,0,0,0};
const ParserRow<ParserDummy> getinfo_reply[] = {
MGM_CMD("clusterlog", NULL, ""),
- MGM_ARG(clusterlog_level_names[0], Int, Mandatory, ""),
- MGM_ARG(clusterlog_level_names[1], Int, Mandatory, ""),
- MGM_ARG(clusterlog_level_names[2], Int, Mandatory, ""),
- MGM_ARG(clusterlog_level_names[3], Int, Mandatory, ""),
- MGM_ARG(clusterlog_level_names[4], Int, Mandatory, ""),
- MGM_ARG(clusterlog_level_names[5], Int, Mandatory, ""),
- MGM_ARG(clusterlog_level_names[6], Int, Mandatory, ""),
+ MGM_ARG(clusterlog_severity_names[0], Int, Mandatory, ""),
+ MGM_ARG(clusterlog_severity_names[1], Int, Mandatory, ""),
+ MGM_ARG(clusterlog_severity_names[2], Int, Mandatory, ""),
+ MGM_ARG(clusterlog_severity_names[3], Int, Mandatory, ""),
+ MGM_ARG(clusterlog_severity_names[4], Int, Mandatory, ""),
+ MGM_ARG(clusterlog_severity_names[5], Int, Mandatory, ""),
+ MGM_ARG(clusterlog_severity_names[6], Int, Mandatory, ""),
};
CHECK_HANDLE(handle, NULL);
CHECK_CONNECTED(handle, NULL);
@@ -1014,20 +1054,21 @@ ndb_mgm_get_logfilter(NdbMgmHandle handle)
reply = ndb_mgm_call(handle, getinfo_reply, "get info clusterlog", &args);
CHECK_REPLY(reply, NULL);
- for(int i=0; i < (int)NDB_MGM_CLUSTERLOG_ALL; i++) {
- reply->get(clusterlog_level_names[i], &enabled[i]);
+ for(int i=0; i < (int)NDB_MGM_EVENT_SEVERITY_ALL; i++) {
+ reply->get(clusterlog_severity_names[i], &enabled[i]);
}
return enabled;
}
extern "C"
int
-ndb_mgm_filter_clusterlog(NdbMgmHandle handle,
- enum ndb_mgm_clusterlog_level level,
- int enable,
- struct ndb_mgm_reply* /*reply*/)
+ndb_mgm_set_clusterlog_severity_filter(NdbMgmHandle handle,
+ enum ndb_mgm_event_severity severity,
+ int enable,
+ struct ndb_mgm_reply* /*reply*/)
{
- SET_ERROR(handle, NDB_MGM_NO_ERROR, "Executing: ndb_mgm_filter_clusterlog");
+ SET_ERROR(handle, NDB_MGM_NO_ERROR,
+ "Executing: ndb_mgm_set_clusterlog_severity_filter");
const ParserRow<ParserDummy> filter_reply[] = {
MGM_CMD("set logfilter reply", NULL, ""),
MGM_ARG("result", String, Mandatory, "Error message"),
@@ -1038,7 +1079,7 @@ ndb_mgm_filter_clusterlog(NdbMgmHandle handle,
CHECK_CONNECTED(handle, -1);
Properties args;
- args.put("level", level);
+ args.put("level", severity);
args.put("enable", enable);
const Properties *reply;
@@ -1074,8 +1115,8 @@ struct ndb_mgm_event_categories
{ "DEBUG", NDB_MGM_EVENT_CATEGORY_DEBUG },
{ "INFO", NDB_MGM_EVENT_CATEGORY_INFO },
{ "ERROR", NDB_MGM_EVENT_CATEGORY_ERROR },
- { "GREP", NDB_MGM_EVENT_CATEGORY_GREP },
{ "BACKUP", NDB_MGM_EVENT_CATEGORY_BACKUP },
+ { "CONGESTION", NDB_MGM_EVENT_CATEGORY_CONGESTION },
{ 0, NDB_MGM_ILLEGAL_EVENT_CATEGORY }
};
@@ -1107,13 +1148,13 @@ ndb_mgm_get_event_category_string(enum ndb_mgm_event_category status)
extern "C"
int
-ndb_mgm_set_loglevel_clusterlog(NdbMgmHandle handle, int nodeId,
+ndb_mgm_set_clusterlog_loglevel(NdbMgmHandle handle, int nodeId,
enum ndb_mgm_event_category cat,
int level,
struct ndb_mgm_reply* /*reply*/)
{
SET_ERROR(handle, NDB_MGM_NO_ERROR,
- "Executing: ndb_mgm_set_loglevel_clusterlog");
+ "Executing: ndb_mgm_set_clusterlog_loglevel");
const ParserRow<ParserDummy> clusterlog_reply[] = {
MGM_CMD("set cluster loglevel reply", NULL, ""),
MGM_ARG("result", String, Mandatory, "Error message"),
@@ -1132,7 +1173,7 @@ ndb_mgm_set_loglevel_clusterlog(NdbMgmHandle handle, int nodeId,
"set cluster loglevel", &args);
CHECK_REPLY(reply, -1);
- DBUG_ENTER("ndb_mgm_set_loglevel_clusterlog");
+ DBUG_ENTER("ndb_mgm_set_clusterlog_loglevel");
DBUG_PRINT("enter",("node=%d, category=%d, level=%d", nodeId, cat, level));
BaseString result;
@@ -1182,9 +1223,9 @@ ndb_mgm_set_loglevel_node(NdbMgmHandle handle, int nodeId,
return 0;
}
-extern "C"
int
-ndb_mgm_listen_event(NdbMgmHandle handle, int filter[])
+ndb_mgm_listen_event_internal(NdbMgmHandle handle, const int filter[],
+ int parsable)
{
SET_ERROR(handle, NDB_MGM_NO_ERROR, "Executing: ndb_mgm_listen_event");
const ParserRow<ParserDummy> stat_reply[] = {
@@ -1206,6 +1247,9 @@ ndb_mgm_listen_event(NdbMgmHandle handle, int filter[])
}
Properties args;
+
+ if (parsable)
+ args.put("parsable", parsable);
{
BaseString tmp;
for(int i = 0; filter[i] != 0; i += 2){
@@ -1222,11 +1266,21 @@ ndb_mgm_listen_event(NdbMgmHandle handle, int filter[])
handle->socket = tmp;
- CHECK_REPLY(reply, -1);
+ if(reply == NULL) {
+ close(sockfd);
+ CHECK_REPLY(reply, -1);
+ }
return sockfd;
}
extern "C"
+int
+ndb_mgm_listen_event(NdbMgmHandle handle, const int filter[])
+{
+ return ndb_mgm_listen_event_internal(handle,filter,0);
+}
+
+extern "C"
int
ndb_mgm_get_stat_port(NdbMgmHandle handle, struct ndb_mgm_reply* /*reply*/)
{
@@ -1770,13 +1824,19 @@ ndb_mgm_get_configuration_nodeid(NdbMgmHandle handle)
extern "C"
int ndb_mgm_get_connected_port(NdbMgmHandle handle)
{
- return handle->cfg.ids[handle->cfg_i].port;
+ if (handle->cfg_i >= 0)
+ return handle->cfg.ids[handle->cfg_i].port;
+ else
+ return 0;
}
extern "C"
const char *ndb_mgm_get_connected_host(NdbMgmHandle handle)
{
- return handle->cfg.ids[handle->cfg_i].name.c_str();
+ if (handle->cfg_i >= 0)
+ return handle->cfg.ids[handle->cfg_i].name.c_str();
+ else
+ return 0;
}
extern "C"
@@ -1890,9 +1950,9 @@ ndb_mgm_set_int_parameter(NdbMgmHandle handle,
CHECK_CONNECTED(handle, 0);
Properties args;
- args.put("node: ", node);
- args.put("param: ", param);
- args.put("value: ", value);
+ args.put("node", node);
+ args.put("param", param);
+ args.put("value", value);
const ParserRow<ParserDummy> reply[]= {
MGM_CMD("set parameter reply", NULL, ""),
@@ -1929,9 +1989,9 @@ ndb_mgm_set_int64_parameter(NdbMgmHandle handle,
CHECK_CONNECTED(handle, 0);
Properties args;
- args.put("node: ", node);
- args.put("param: ", param);
- args.put("value: ", value);
+ args.put("node", node);
+ args.put("param", param);
+ args.put("value", value);
const ParserRow<ParserDummy> reply[]= {
MGM_CMD("set parameter reply", NULL, ""),
@@ -1972,9 +2032,9 @@ ndb_mgm_set_string_parameter(NdbMgmHandle handle,
CHECK_CONNECTED(handle, 0);
Properties args;
- args.put("node: ", node);
- args.put("parameter: ", param);
- args.put("value: ", value);
+ args.put("node", node);
+ args.put("parameter", param);
+ args.put("value", value);
const ParserRow<ParserDummy> reply[]= {
MGM_CMD("set parameter reply", NULL, ""),
@@ -2080,4 +2140,146 @@ ndb_mgm_check_connection_error:
return -1;
}
+extern "C"
+int
+ndb_mgm_set_connection_int_parameter(NdbMgmHandle handle,
+ int node1,
+ int node2,
+ int param,
+ int value,
+ struct ndb_mgm_reply* mgmreply){
+ DBUG_ENTER("ndb_mgm_set_connection_int_parameter");
+ CHECK_HANDLE(handle, 0);
+ CHECK_CONNECTED(handle, 0);
+
+ Properties args;
+ args.put("node1", node1);
+ args.put("node2", node2);
+ args.put("param", param);
+ args.put("value", (Uint32)value);
+
+ const ParserRow<ParserDummy> reply[]= {
+ MGM_CMD("set connection parameter reply", NULL, ""),
+ MGM_ARG("message", String, Mandatory, "Error Message"),
+ MGM_ARG("result", String, Mandatory, "Status Result"),
+ MGM_END()
+ };
+
+ const Properties *prop;
+ prop= ndb_mgm_call(handle, reply, "set connection parameter", &args);
+ CHECK_REPLY(prop, -1);
+
+ int res= -1;
+ do {
+ const char * buf;
+ if(!prop->get("result", &buf) || strcmp(buf, "Ok") != 0){
+ fprintf(handle->errstream, "ERROR Message: %s\n", buf);
+ break;
+ }
+ res= 0;
+ } while(0);
+
+ delete prop;
+ DBUG_RETURN(res);
+}
+
+extern "C"
+int
+ndb_mgm_get_connection_int_parameter(NdbMgmHandle handle,
+ int node1,
+ int node2,
+ int param,
+ int *value,
+ struct ndb_mgm_reply* mgmreply){
+ DBUG_ENTER("ndb_mgm_get_connection_int_parameter");
+ CHECK_HANDLE(handle, -1);
+ CHECK_CONNECTED(handle, -2);
+
+ Properties args;
+ args.put("node1", node1);
+ args.put("node2", node2);
+ args.put("param", param);
+
+ const ParserRow<ParserDummy> reply[]= {
+ MGM_CMD("get connection parameter reply", NULL, ""),
+ MGM_ARG("value", Int, Mandatory, "Current Value"),
+ MGM_ARG("result", String, Mandatory, "Result"),
+ MGM_END()
+ };
+
+ const Properties *prop;
+ prop = ndb_mgm_call(handle, reply, "get connection parameter", &args);
+ CHECK_REPLY(prop, -3);
+
+ int res= -1;
+ do {
+ const char * buf;
+ if(!prop->get("result", &buf) || strcmp(buf, "Ok") != 0){
+ fprintf(handle->errstream, "ERROR Message: %s\n", buf);
+ break;
+ }
+ res= 0;
+ } while(0);
+
+ if(!prop->get("value",(Uint32*)value)){
+ fprintf(handle->errstream, "Unable to get value\n");
+ res = -4;
+ }
+
+ delete prop;
+ DBUG_RETURN(res);
+}
+
+extern "C"
+NDB_SOCKET_TYPE
+ndb_mgm_convert_to_transporter(NdbMgmHandle *handle)
+{
+ NDB_SOCKET_TYPE s;
+
+ CHECK_HANDLE((*handle), NDB_INVALID_SOCKET);
+ CHECK_CONNECTED((*handle), NDB_INVALID_SOCKET);
+
+ (*handle)->connected= 0; // we pretend we're disconnected
+ s= (*handle)->socket;
+
+ SocketOutputStream s_output(s);
+ s_output.println("transporter connect");
+ s_output.println("");
+
+ ndb_mgm_destroy_handle(handle); // set connected=0, so won't disconnect
+
+ return s;
+}
+
+extern "C"
+Uint32
+ndb_mgm_get_mgmd_nodeid(NdbMgmHandle handle)
+{
+ Uint32 nodeid=0;
+
+ DBUG_ENTER("ndb_mgm_get_mgmd_nodeid");
+ CHECK_HANDLE(handle, 0);
+ CHECK_CONNECTED(handle, 0);
+
+ Properties args;
+
+ const ParserRow<ParserDummy> reply[]= {
+ MGM_CMD("get mgmd nodeid reply", NULL, ""),
+ MGM_ARG("nodeid", Int, Mandatory, "Node ID"),
+ MGM_END()
+ };
+
+ const Properties *prop;
+ prop = ndb_mgm_call(handle, reply, "get mgmd nodeid", &args);
+ CHECK_REPLY(prop, 0);
+
+ if(!prop->get("nodeid",&nodeid)){
+ fprintf(handle->errstream, "Unable to get value\n");
+ return 0;
+ }
+
+ delete prop;
+ DBUG_RETURN(nodeid);
+}
+
template class Vector<const ParserRow<ParserDummy>*>;
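
A short sketch of the calls added to mgmapi.cpp in this hunk, ndb_mgm_is_connected() and ndb_mgm_get_mgmd_nodeid(), together with the now guarded ndb_mgm_get_connected_host()/port() accessors (error handling trimmed; connection parameters are examples):

    /* Sketch only. */
    #include <mgmapi.h>
    #include <stdio.h>

    int statusExample(NdbMgmHandle h)
    {
      /* ndb_mgm_is_connected() also notices a dropped socket and
         marks the handle as disconnected. */
      if (! ndb_mgm_is_connected(h)) {
        if (ndb_mgm_connect(h, 3, 5, 1) != 0)
          return -1;
      }
      printf("connected to %s:%d (mgmd node id %u)\n",
             ndb_mgm_get_connected_host(h),
             ndb_mgm_get_connected_port(h),
             (unsigned) ndb_mgm_get_mgmd_nodeid(h));
      return 0;
    }
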
diff --git a/ndb/src/mgmapi/mgmapi_configuration.hpp b/ndb/src/mgmapi/mgmapi_configuration.hpp
index 9e94b3311bf..7d60a4842a1 100644
--- a/ndb/src/mgmapi/mgmapi_configuration.hpp
+++ b/ndb/src/mgmapi/mgmapi_configuration.hpp
@@ -1,3 +1,19 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
#ifndef MGMAPI_CONFIGURATION_HPP
#define MGMAPI_CONFIGURATION_HPP
diff --git a/ndb/src/mgmapi/mgmapi_internal.h b/ndb/src/mgmapi/mgmapi_internal.h
new file mode 100644
index 00000000000..90f93129f2a
--- /dev/null
+++ b/ndb/src/mgmapi/mgmapi_internal.h
@@ -0,0 +1,77 @@
+/* Copyright (C) 2005 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#ifndef MGMAPI_INTERNAL_H
+#define MGMAPI_INTERNAL_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <NdbTCP.h>
+
+ /**
+ * Set an integer parameter for a connection
+ *
+ * @param handle the NDB management handle.
+ * @param node1 the node1 id
+ * @param node2 the node2 id
+ * @param param the parameter (e.g. CFG_CONNECTION_SERVER_PORT)
+ * @param value what to set it to
+ * @param reply from ndb_mgmd
+ */
+ int ndb_mgm_set_connection_int_parameter(NdbMgmHandle handle,
+ int node1,
+ int node2,
+ int param,
+ int value,
+ struct ndb_mgm_reply* reply);
+
+ /**
+ * Get an integer parameter for a connection
+ *
+ * @param handle the NDB management handle.
+ * @param node1 the node1 id
+ * @param node2 the node2 id
+ * @param param the parameter (e.g. CFG_CONNECTION_SERVER_PORT)
+ * @param value where to store the retrieved value. In the case of
+ * error, value is not changed.
+ * @param reply from ndb_mgmd
+ * @return 0 on success. < 0 on error.
+ */
+ int ndb_mgm_get_connection_int_parameter(NdbMgmHandle handle,
+ int node1,
+ int node2,
+ int param,
+ int *value,
+ struct ndb_mgm_reply* reply);
+
+ /**
+ * Convert connection to transporter
+ * @param handle NDB management handle.
+ *
+ * @return socket
+ *
+ * @note the socket can then be used as a transporter connection
+ */
+ NDB_SOCKET_TYPE ndb_mgm_convert_to_transporter(NdbMgmHandle *handle);
+
+#ifdef __cplusplus
+}
+#endif
+
+
+#endif
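
A sketch of how the two connection-parameter calls declared in mgmapi_internal.h above are meant to be used; CFG_CONNECTION_SERVER_PORT is the parameter named in the comments, and the node ids are examples:

    /* Sketch only: internal API, not exposed through mgmapi.h. */
    #include <mgmapi.h>
    #include <mgmapi_config_parameters.h>  /* for CFG_CONNECTION_SERVER_PORT */
    #include "mgmapi_internal.h"

    int portExample(NdbMgmHandle h)
    {
      struct ndb_mgm_reply reply;
      int port = 0;

      /* On error the output value is left unchanged. */
      if (ndb_mgm_get_connection_int_parameter(h, 1, 3,
                                               CFG_CONNECTION_SERVER_PORT,
                                               &port, &reply) < 0)
        return -1;

      /* Returns 0 on success, negative on error. */
      return ndb_mgm_set_connection_int_parameter(h, 1, 3,
                                                  CFG_CONNECTION_SERVER_PORT,
                                                  port, &reply);
    }
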
diff --git a/ndb/src/mgmapi/ndb_logevent.cpp b/ndb/src/mgmapi/ndb_logevent.cpp
new file mode 100644
index 00000000000..918ec5d6705
--- /dev/null
+++ b/ndb/src/mgmapi/ndb_logevent.cpp
@@ -0,0 +1,503 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#include <ndb_global.h>
+#include <my_sys.h>
+#include <mgmapi.h>
+
+#include <NdbOut.hpp>
+#include <Properties.hpp>
+#include <socket_io.h>
+#include <InputStream.hpp>
+
+#include <debugger/EventLogger.hpp>
+
+#include "ndb_logevent.hpp"
+
+extern
+int ndb_mgm_listen_event_internal(NdbMgmHandle, const int filter[], int);
+
+struct ndb_logevent_error_msg {
+ enum ndb_logevent_handle_error code;
+ const char *msg;
+};
+
+struct ndb_logevent_error_msg ndb_logevent_error_messages[]= {
+ { NDB_LEH_READ_ERROR, "Read error" },
+ { NDB_LEH_MISSING_EVENT_SPECIFIER, "Missing event specifier" },
+ { NDB_LEH_UNKNOWN_EVENT_VARIABLE, "Unknown event variable" },
+ { NDB_LEH_UNKNOWN_EVENT_TYPE, "Unknown event type" },
+ { NDB_LEH_INTERNAL_ERROR, "Unknown internal error" },
+ { NDB_LEH_NO_ERROR,0}
+};
+
+struct ndb_logevent_handle {
+ NDB_SOCKET_TYPE socket;
+ enum ndb_logevent_handle_error m_error;
+};
+
+extern "C"
+NdbLogEventHandle
+ndb_mgm_create_logevent_handle(NdbMgmHandle mh,
+ const int filter[])
+{
+ int fd= ndb_mgm_listen_event_internal(mh, filter, 1);
+
+ if (fd == -1)
+ return 0;
+
+ NdbLogEventHandle h=
+ (NdbLogEventHandle)my_malloc(sizeof(ndb_logevent_handle),MYF(MY_WME));
+
+ h->socket= fd;
+
+ return h;
+}
+
+extern "C"
+void ndb_mgm_destroy_logevent_handle(NdbLogEventHandle * h)
+{
+ if( !h )
+ return;
+
+ if ( *h )
+ close((*h)->socket);
+
+ my_free((char*)* h,MYF(MY_ALLOW_ZERO_PTR));
+ * h = 0;
+}
+
+#define ROW(a,b,c,d) \
+{ NDB_LE_ ## a, b, c, 0, offsetof(struct ndb_logevent, a.d), \
+ sizeof(((struct ndb_logevent *)0)->a.d) }
+
+#define ROW_FN(a,b,c,d,e) \
+{ NDB_LE_ ## a, b, c, e, offsetof(struct ndb_logevent, a.d), \
+ sizeof(((struct ndb_logevent *)0)->a.d) }
+
+static int ref_to_node(int ref){
+ return ref & 0xFFFF;
+}
+
+struct Ndb_logevent_body_row ndb_logevent_body[]= {
+
+ // Connection
+ ROW( Connected, "node", 1, node),
+
+ ROW( Disconnected, "node", 1, node),
+
+ ROW( CommunicationClosed, "node", 1, node),
+
+ ROW( CommunicationOpened, "node", 1, node),
+
+ ROW( ConnectedApiVersion, "node", 1, node),
+ ROW( ConnectedApiVersion, "version", 2, version),
+
+ /* CHECKPOINT */
+
+ ROW( GlobalCheckpointStarted, "gci", 1, gci),
+
+ ROW( GlobalCheckpointCompleted, "gci", 1, gci),
+
+ ROW( LocalCheckpointStarted, "lci", 1, lci),
+ ROW( LocalCheckpointStarted, "keep_gci", 2, keep_gci),
+ ROW( LocalCheckpointStarted, "restore_gci", 3, restore_gci),
+
+ ROW( LocalCheckpointCompleted, "lci", 1, lci),
+
+ ROW( LCPStoppedInCalcKeepGci, "data", 1, data),
+
+ ROW( LCPFragmentCompleted, "node", 1, node),
+ ROW( LCPFragmentCompleted, "table_id", 2, table_id),
+ ROW( LCPFragmentCompleted, "fragment_id", 3, fragment_id),
+
+ ROW( UndoLogBlocked, "acc_count", 1, acc_count),
+ ROW( UndoLogBlocked, "tup_count", 2, tup_count),
+
+ /* STARTUP */
+ ROW( NDBStartStarted, "version", 1, version),
+
+ ROW( NDBStartCompleted, "version", 1, version),
+
+// ROW( STTORRYRecieved),
+
+ ROW( StartPhaseCompleted, "phase", 1, phase),
+ ROW( StartPhaseCompleted, "starttype", 2, starttype),
+
+ ROW( CM_REGCONF, "own_id", 1, own_id),
+ ROW( CM_REGCONF, "president_id", 2, president_id),
+ ROW( CM_REGCONF, "dynamic_id", 3, dynamic_id),
+
+ ROW( CM_REGREF, "own_id", 1, own_id),
+ ROW( CM_REGREF, "other_id", 2, other_id),
+ ROW( CM_REGREF, "cause", 3, cause),
+
+ ROW( FIND_NEIGHBOURS, "own_id", 1, own_id),
+ ROW( FIND_NEIGHBOURS, "left_id", 3, left_id),
+ ROW( FIND_NEIGHBOURS, "right_id", 3, right_id),
+ ROW( FIND_NEIGHBOURS, "dynamic_id", 4, dynamic_id),
+
+ ROW( NDBStopStarted, "stoptype", 1, stoptype),
+
+// ROW( NDBStopAborted),
+
+ ROW( StartREDOLog, "node", 1, node),
+ ROW( StartREDOLog, "keep_gci", 2, keep_gci),
+ ROW( StartREDOLog, "completed_gci", 3, completed_gci),
+ ROW( StartREDOLog, "restorable_gci", 4, restorable_gci),
+
+ ROW( StartLog, "log_part", 1, log_part),
+ ROW( StartLog, "start_mb", 2, start_mb),
+ ROW( StartLog, "stop_mb", 3, stop_mb),
+ ROW( StartLog, "gci", 4, gci),
+
+ ROW( UNDORecordsExecuted, "block", 1, block),
+ ROW( UNDORecordsExecuted, "data1", 2, data1),
+ ROW( UNDORecordsExecuted, "data2", 3, data2),
+ ROW( UNDORecordsExecuted, "data3", 4, data3),
+ ROW( UNDORecordsExecuted, "data4", 5, data4),
+ ROW( UNDORecordsExecuted, "data5", 6, data5),
+ ROW( UNDORecordsExecuted, "data6", 7, data6),
+ ROW( UNDORecordsExecuted, "data7", 8, data7),
+ ROW( UNDORecordsExecuted, "data8", 9, data8),
+ ROW( UNDORecordsExecuted, "data9", 10, data9),
+ ROW( UNDORecordsExecuted, "data10", 11, data10),
+
+ /* NODERESTART */
+// ROW( NR_CopyDict),
+
+// ROW( NR_CopyDistr),
+
+ ROW( NR_CopyFragsStarted, "dest_node", 1, dest_node),
+
+ ROW( NR_CopyFragDone, "dest_node", 1, dest_node),
+ ROW( NR_CopyFragDone, "table_id", 2, table_id),
+ ROW( NR_CopyFragDone, "fragment_id", 3, fragment_id),
+
+ ROW( NR_CopyFragsCompleted, "dest_node", 1, dest_node),
+
+ ROW( NodeFailCompleted, "block", 1, block), /* 0 = all */
+ ROW( NodeFailCompleted, "failed_node", 2, failed_node),
+ ROW( NodeFailCompleted, "completing_node", 3, completing_node), /* 0 = all */
+
+ ROW( NODE_FAILREP, "failed_node", 1, failed_node),
+ ROW( NODE_FAILREP, "failure_state", 2, failure_state),
+
+ /* TODO */
+ ROW( ArbitState, "code", 1, code),
+ ROW( ArbitState, "arbit_node", 2, arbit_node),
+ ROW( ArbitState, "ticket_0", 3, ticket_0),
+ ROW( ArbitState, "ticket_1", 4, ticket_1),
+
+ /* TODO */
+ ROW( ArbitResult, "code", 1, code),
+ ROW( ArbitResult, "arbit_node", 2, arbit_node),
+ ROW( ArbitResult, "ticket_0", 3, ticket_0),
+ ROW( ArbitResult, "ticket_1", 4, ticket_1),
+
+// ROW( GCP_TakeoverStarted),
+
+// ROW( GCP_TakeoverCompleted),
+
+// ROW( LCP_TakeoverStarted),
+
+ ROW( LCP_TakeoverCompleted, "state", 1, state),
+
+ /* STATISTIC */
+ ROW( TransReportCounters, "trans_count", 1, trans_count),
+ ROW( TransReportCounters, "commit_count", 2, commit_count),
+ ROW( TransReportCounters, "read_count", 3, read_count),
+ ROW( TransReportCounters, "simple_read_count", 4, simple_read_count),
+ ROW( TransReportCounters, "write_count", 5, write_count),
+ ROW( TransReportCounters, "attrinfo_count", 6, attrinfo_count),
+ ROW( TransReportCounters, "conc_op_count", 7, conc_op_count),
+ ROW( TransReportCounters, "abort_count", 8, abort_count),
+ ROW( TransReportCounters, "scan_count", 9, scan_count),
+ ROW( TransReportCounters, "range_scan_count", 10, range_scan_count),
+
+ ROW( OperationReportCounters, "ops", 1, ops),
+
+ ROW( TableCreated, "table_id", 1, table_id),
+
+ ROW( JobStatistic, "mean_loop_count", 1, mean_loop_count),
+
+ ROW( SendBytesStatistic, "to_node", 1, to_node),
+ ROW( SendBytesStatistic, "mean_sent_bytes", 2, mean_sent_bytes),
+
+ ROW( ReceiveBytesStatistic, "from_node", 1, from_node),
+ ROW( ReceiveBytesStatistic, "mean_received_bytes", 2, mean_received_bytes),
+
+ ROW( MemoryUsage, "gth", 1, gth),
+ ROW( MemoryUsage, "page_size_kb", 2, page_size_kb),
+ ROW( MemoryUsage, "pages_used", 3, pages_used),
+ ROW( MemoryUsage, "pages_total", 4, pages_total),
+ ROW( MemoryUsage, "block", 5, block),
+
+ /* ERROR */
+ ROW( TransporterError, "to_node", 1, to_node),
+ ROW( TransporterError, "code", 2, code),
+
+ ROW( TransporterWarning, "to_node", 1, to_node),
+ ROW( TransporterWarning, "code", 2, code),
+
+ ROW( MissedHeartbeat, "node", 1, node),
+ ROW( MissedHeartbeat, "count", 2, count),
+
+ ROW( DeadDueToHeartbeat, "node", 1, node),
+
+ /* TODO */
+// ROW( WarningEvent),
+
+ /* INFO */
+ ROW( SentHeartbeat, "node", 1, node),
+
+ ROW( CreateLogBytes, "node", 1, node),
+
+ /* TODO */
+// ROW( InfoEvent),
+
+ // Backup
+ ROW_FN( BackupStarted, "starting_node", 1, starting_node, ref_to_node),
+ ROW( BackupStarted, "backup_id", 2, backup_id),
+
+ ROW_FN(BackupFailedToStart,"starting_node",1, starting_node, ref_to_node),
+ ROW( BackupFailedToStart, "error", 2, error),
+
+ ROW_FN( BackupCompleted, "starting_node", 1, starting_node, ref_to_node),
+ ROW( BackupCompleted, "backup_id", 2, backup_id),
+ ROW( BackupCompleted, "start_gci", 3, start_gci),
+ ROW( BackupCompleted, "stop_gci", 4, stop_gci),
+ ROW( BackupCompleted, "n_bytes", 5, n_bytes),
+ ROW( BackupCompleted, "n_records", 6, n_records),
+ ROW( BackupCompleted, "n_log_bytes", 7, n_log_bytes),
+ ROW( BackupCompleted, "n_log_records", 8, n_log_records),
+
+ ROW_FN( BackupAborted, "starting_node", 1, starting_node, ref_to_node),
+ ROW( BackupAborted, "backup_id", 2, backup_id),
+ ROW( BackupAborted, "error", 3, error),
+
+ ROW( SingleUser, "type", 1, type),
+ ROW( SingleUser, "node_id", 2, node_id),
+ { NDB_LE_ILLEGAL_TYPE, 0, 0, 0, 0, 0}
+};
+
+struct Ndb_logevent_header_row {
+ const char *token; // token to use for text transfer
+ int offset; // offset into struct ndb_logevent
+ int size;
+};
+
+#define ROW2(a,b) \
+{ a, offsetof(struct ndb_logevent, b), \
+ sizeof(((struct ndb_logevent *)0)->b) }
+
+struct Ndb_logevent_header_row ndb_logevent_header[]= {
+ ROW2( "type", type),
+ ROW2( "time", time),
+ ROW2( "source_nodeid", source_nodeid),
+ { 0, 0, 0 }
+};
+
+static int
+insert_row(const char * pair, Properties & p){
+ BaseString tmp(pair);
+
+ tmp.trim(" \t\n\r");
+ Vector<BaseString> split;
+ tmp.split(split, ":=", 2);
+ if(split.size() != 2)
+ return -1;
+ p.put(split[0].trim().c_str(), split[1].trim().c_str());
+
+ return 0;
+}
+
+static
+int memcpy_atoi(void *dst, const char *str, int sz)
+{
+ switch (sz)
+ {
+ case 1:
+ {
+ Int8 val= atoi(str);
+ memcpy(dst,&val,sz);
+ return 0;
+ }
+ case 2:
+ {
+ Int16 val= atoi(str);
+ memcpy(dst,&val,sz);
+ return 0;
+ }
+ case 4:
+ {
+ Int32 val= atoi(str);
+ memcpy(dst,&val,sz);
+ return 0;
+ }
+ case 8:
+ {
+ Int64 val= atoi(str);
+ memcpy(dst,&val,sz);
+ return 0;
+ }
+ default:
+ {
+ return -1;
+ }
+ }
+}
+
+extern "C"
+int ndb_logevent_get_next(const NdbLogEventHandle h,
+ struct ndb_logevent *dst,
+ unsigned timeout_in_milliseconds)
+{
+ SocketInputStream in(h->socket, timeout_in_milliseconds);
+
+ Properties p;
+ char buf[256];
+
+ struct timeval start_time;
+ gettimeofday(&start_time, 0);
+
+ /* header */
+ while (1) {
+ if (in.gets(buf,sizeof(buf)) == 0)
+ {
+ h->m_error= NDB_LEH_READ_ERROR;
+ return -1;
+ }
+ if ( buf[0] == 0 )
+ {
+ // timed out
+ return 0;
+ }
+ if ( strcmp("log event reply\n", buf) == 0 )
+ break;
+
+ if ( strcmp("<PING>\n", buf) )
+ ndbout_c("skipped: %s", buf);
+
+ struct timeval now;
+ gettimeofday(&now, 0);
+ unsigned elapsed_ms= (now.tv_sec-start_time.tv_sec)*1000 +
+ ((signed int)now.tv_usec-(signed int)start_time.tv_usec)/1000;
+
+ if (elapsed_ms >= timeout_in_milliseconds)
+ {
+ // timed out
+ return 0;
+ }
+
+ new (&in) SocketInputStream(h->socket, timeout_in_milliseconds-elapsed_ms);
+ }
+
+ /* read name-value pairs into properties object */
+ while (1)
+ {
+ if (in.gets(buf,sizeof(buf)) == 0)
+ {
+ h->m_error= NDB_LEH_READ_ERROR;
+ return -1;
+ }
+ if ( buf[0] == 0 )
+ {
+ // timed out
+ return 0;
+ }
+ if ( buf[0] == '\n' )
+ {
+ break;
+ }
+ if (insert_row(buf,p))
+ {
+ h->m_error= NDB_LEH_READ_ERROR;
+ return -1;
+ }
+ }
+
+ int i;
+ const char *val;
+
+ dst->type= (enum Ndb_logevent_type)-1;
+ /* fill in header info from p*/
+ for (i= 0; ndb_logevent_header[i].token; i++)
+ {
+ if ( p.get(ndb_logevent_header[i].token, &val) == 0 )
+ {
+ ndbout_c("missing: %s\n", ndb_logevent_header[i].token);
+ h->m_error= NDB_LEH_MISSING_EVENT_SPECIFIER;
+ return -1;
+ }
+ if ( memcpy_atoi((char *)dst+ndb_logevent_header[i].offset, val,
+ ndb_logevent_header[i].size) )
+ {
+ h->m_error= NDB_LEH_INTERNAL_ERROR;
+ return -1;
+ }
+ }
+
+ Uint32 level;
+ LogLevel::EventCategory category;
+ Logger::LoggerLevel severity;
+ EventLoggerBase::EventTextFunction text_fn;
+
+ /* fill in rest of header info event_lookup */
+ if (EventLoggerBase::event_lookup(dst->type,category,level,severity,text_fn))
+ {
+ ndbout_c("unknown type: %d\n", dst->type);
+ h->m_error= NDB_LEH_UNKNOWN_EVENT_TYPE;
+ return -1;
+ }
+ dst->category= (enum ndb_mgm_event_category)category;
+ dst->severity= (enum ndb_mgm_event_severity)severity;
+ dst->level= level;
+
+ /* fill in header info from p */
+ for (i= 0; ndb_logevent_body[i].token; i++)
+ {
+ if ( ndb_logevent_body[i].type != dst->type )
+ continue;
+ if ( p.get(ndb_logevent_body[i].token, &val) == 0 )
+ {
+ h->m_error= NDB_LEH_UNKNOWN_EVENT_VARIABLE;
+ return -1;
+ }
+ if ( memcpy_atoi((char *)dst+ndb_logevent_body[i].offset, val,
+ ndb_logevent_body[i].size) )
+ {
+ h->m_error= NDB_LEH_INTERNAL_ERROR;
+ return -1;
+ }
+ }
+ return 1;
+}
+
+extern "C"
+int ndb_logevent_get_latest_error(const NdbLogEventHandle h)
+{
+ return h->m_error;
+}
+
+extern "C"
+const char *ndb_logevent_get_latest_error_msg(const NdbLogEventHandle h)
+{
+ for (int i= 0; ndb_logevent_error_messages[i].msg; i++)
+ if (ndb_logevent_error_messages[i].code == h->m_error)
+ return ndb_logevent_error_messages[i].msg;
+ return "<unknown error msg>";
+}
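
The consumer side of the log-event code added above looks roughly like this; the filter pair (severity level 15 for the BACKUP category) and the handled event type are examples:

    /* Sketch only: uses the public ndb_logevent.h interface implemented above. */
    #include <mgmapi.h>
    #include <ndb_logevent.h>
    #include <stdio.h>

    int backupListener(NdbMgmHandle mh)
    {
      int filter[] = { 15, NDB_MGM_EVENT_CATEGORY_BACKUP, 0 };
      NdbLogEventHandle le = ndb_mgm_create_logevent_handle(mh, filter);
      if (le == 0)
        return -1;

      struct ndb_logevent event;
      for (;;) {
        int r = ndb_logevent_get_next(le, &event, 2000 /* ms timeout */);
        if (r < 0) {                                /* read or parse error */
          fprintf(stderr, "%s\n", ndb_logevent_get_latest_error_msg(le));
          break;
        }
        if (r == 0)                                 /* timed out, poll again */
          continue;
        if (event.type == NDB_LE_BackupCompleted)   /* per-type fields live in a union */
          printf("backup %u completed\n", event.BackupCompleted.backup_id);
      }
      ndb_mgm_destroy_logevent_handle(&le);
      return 0;
    }
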
diff --git a/ndb/include/ndbapi/NdbCursorOperation.hpp b/ndb/src/mgmapi/ndb_logevent.hpp
index e7eeb54ba2d..cb1a0e388e5 100644
--- a/ndb/include/ndbapi/NdbCursorOperation.hpp
+++ b/ndb/src/mgmapi/ndb_logevent.hpp
@@ -14,7 +14,21 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-#ifndef NdbCursorOperation_H
-#define NdbCursorOperation_H
+#ifndef NDB_LOGEVENT_HPP
+#define NDB_LOGEVENT_HPP
+
+#include <ndb_logevent.h>
+
+struct Ndb_logevent_body_row {
+ enum Ndb_logevent_type type; // type
+ const char *token; // token to use for text transfer
+ int index; // index into theData array
+ int (*index_fn)(int); // conversion function on the data array[index]
+ int offset; // offset into struct ndb_logevent
+ int size; // size of the field in struct ndb_logevent
+};
+
+extern
+struct Ndb_logevent_body_row ndb_logevent_body[];
#endif
diff --git a/ndb/src/mgmclient/CommandInterpreter.cpp b/ndb/src/mgmclient/CommandInterpreter.cpp
index 72debcc26a9..b5d1f38ba53 100644
--- a/ndb/src/mgmclient/CommandInterpreter.cpp
+++ b/ndb/src/mgmclient/CommandInterpreter.cpp
@@ -389,7 +389,7 @@ CommandInterpreter::CommandInterpreter(const char *_host,int verbose)
}
m_mgmsrv2 = ndb_mgm_create_handle();
if(m_mgmsrv2 == NULL) {
- ndbout_c("Cannot create handle to management server.");
+ ndbout_c("Cannot create 2:nd handle to management server.");
exit(-1);
}
if (ndb_mgm_set_connectstring(m_mgmsrv, _host))
@@ -496,11 +496,13 @@ CommandInterpreter::connect()
{
const char *host= ndb_mgm_get_connected_host(m_mgmsrv);
unsigned port= ndb_mgm_get_connected_port(m_mgmsrv);
- if(!ndb_mgm_set_connectstring(m_mgmsrv2,
- BaseString(host).appfmt(":%d",port).c_str())
- &&
+ BaseString constr;
+ constr.assfmt("%s:%d",host,port);
+ if(!ndb_mgm_set_connectstring(m_mgmsrv2, constr.c_str()) &&
!ndb_mgm_connect(m_mgmsrv2, try_reconnect-1, 5, 1))
{
+ DBUG_PRINT("info",("2:ndb connected to Management Server ok at: %s:%d",
+ host, port));
assert(m_event_thread == 0);
assert(do_event_thread == 0);
do_event_thread= 0;
@@ -511,6 +513,7 @@ CommandInterpreter::connect()
NDB_THREAD_PRIO_LOW);
if (m_event_thread != 0)
{
+ DBUG_PRINT("info",("Thread created ok, waiting for started..."));
int iter= 1000; // try for 30 seconds
while(do_event_thread == 0 &&
iter-- > 0)
@@ -537,11 +540,18 @@ CommandInterpreter::connect()
}
else
{
+ DBUG_PRINT("warning",
+ ("Could not do 2:nd connect to mgmtserver for event listening"));
+ DBUG_PRINT("info", ("code: %d, msg: %s",
+ ndb_mgm_get_latest_error(m_mgmsrv2),
+ ndb_mgm_get_latest_error_msg(m_mgmsrv2)));
printf("Warning, event connect failed, degraded printouts as result\n");
+ printf("code: %d, msg: %s\n",
+ ndb_mgm_get_latest_error(m_mgmsrv2),
+ ndb_mgm_get_latest_error_msg(m_mgmsrv2));
}
m_connected= true;
- DBUG_PRINT("info",("Connected to Management Server at: %s:%d",
- host,port));
+ DBUG_PRINT("info",("Connected to Management Server at: %s:%d", host, port));
if (m_verbose)
{
printf("Connected to Management Server at: %s:%d\n",
@@ -1268,7 +1278,7 @@ CommandInterpreter::executeClusterLog(char* parameters)
DBUG_VOID_RETURN;
}
- enum ndb_mgm_clusterlog_level severity = NDB_MGM_CLUSTERLOG_ALL;
+ enum ndb_mgm_event_severity severity = NDB_MGM_EVENT_SEVERITY_ALL;
char * tmpString = my_strdup(parameters,MYF(MY_WME));
My_auto_ptr<char> ap1(tmpString);
@@ -1276,7 +1286,7 @@ CommandInterpreter::executeClusterLog(char* parameters)
char * item = strtok_r(tmpString, " ", &tmpPtr);
int enable;
- Uint32 *enabled = ndb_mgm_get_logfilter(m_mgmsrv);
+ const unsigned int *enabled= ndb_mgm_get_logfilter(m_mgmsrv);
if(enabled == NULL) {
ndbout << "Couldn't get status" << endl;
printError();
@@ -1298,8 +1308,8 @@ CommandInterpreter::executeClusterLog(char* parameters)
printf("enabled[%d] = %d\n", i, enabled[i]);
#endif
ndbout << "Severities enabled: ";
- for(i = 1; i < (int)NDB_MGM_CLUSTERLOG_ALL; i++) {
- const char *str= ndb_mgm_get_clusterlog_level_string((ndb_mgm_clusterlog_level)i);
+ for(i = 1; i < (int)NDB_MGM_EVENT_SEVERITY_ALL; i++) {
+ const char *str= ndb_mgm_get_event_severity_string((ndb_mgm_event_severity)i);
if (str == 0)
{
DBUG_ASSERT(false);
@@ -1333,8 +1343,10 @@ CommandInterpreter::executeClusterLog(char* parameters)
int res_enable;
item = strtok_r(NULL, " ", &tmpPtr);
if (item == NULL) {
- res_enable= ndb_mgm_filter_clusterlog(m_mgmsrv,
- NDB_MGM_CLUSTERLOG_ON, enable, NULL);
+ res_enable=
+ ndb_mgm_set_clusterlog_severity_filter(m_mgmsrv,
+ NDB_MGM_EVENT_SEVERITY_ON,
+ enable, NULL);
if (res_enable < 0)
{
ndbout << "Couldn't set filter" << endl;
@@ -1346,32 +1358,33 @@ CommandInterpreter::executeClusterLog(char* parameters)
}
do {
- severity= NDB_MGM_ILLEGAL_CLUSTERLOG_LEVEL;
+ severity= NDB_MGM_ILLEGAL_EVENT_SEVERITY;
if (strcasecmp(item, "ALL") == 0) {
- severity = NDB_MGM_CLUSTERLOG_ALL;
+ severity = NDB_MGM_EVENT_SEVERITY_ALL;
} else if (strcasecmp(item, "ALERT") == 0) {
- severity = NDB_MGM_CLUSTERLOG_ALERT;
+ severity = NDB_MGM_EVENT_SEVERITY_ALERT;
} else if (strcasecmp(item, "CRITICAL") == 0) {
- severity = NDB_MGM_CLUSTERLOG_CRITICAL;
+ severity = NDB_MGM_EVENT_SEVERITY_CRITICAL;
} else if (strcasecmp(item, "ERROR") == 0) {
- severity = NDB_MGM_CLUSTERLOG_ERROR;
+ severity = NDB_MGM_EVENT_SEVERITY_ERROR;
} else if (strcasecmp(item, "WARNING") == 0) {
- severity = NDB_MGM_CLUSTERLOG_WARNING;
+ severity = NDB_MGM_EVENT_SEVERITY_WARNING;
} else if (strcasecmp(item, "INFO") == 0) {
- severity = NDB_MGM_CLUSTERLOG_INFO;
+ severity = NDB_MGM_EVENT_SEVERITY_INFO;
} else if (strcasecmp(item, "DEBUG") == 0) {
- severity = NDB_MGM_CLUSTERLOG_DEBUG;
+ severity = NDB_MGM_EVENT_SEVERITY_DEBUG;
} else if (strcasecmp(item, "OFF") == 0 ||
strcasecmp(item, "ON") == 0) {
if (enable < 0) // only makes sense with toggle
- severity = NDB_MGM_CLUSTERLOG_ON;
+ severity = NDB_MGM_EVENT_SEVERITY_ON;
}
- if (severity == NDB_MGM_ILLEGAL_CLUSTERLOG_LEVEL) {
+ if (severity == NDB_MGM_ILLEGAL_EVENT_SEVERITY) {
ndbout << "Invalid severity level: " << item << endl;
DBUG_VOID_RETURN;
}
- res_enable = ndb_mgm_filter_clusterlog(m_mgmsrv, severity, enable, NULL);
+ res_enable= ndb_mgm_set_clusterlog_severity_filter(m_mgmsrv, severity,
+ enable, NULL);
if (res_enable < 0)
{
ndbout << "Couldn't set filter" << endl;
diff --git a/ndb/src/mgmclient/main.cpp b/ndb/src/mgmclient/main.cpp
index 9128df4f978..ba5d0308f1f 100644
--- a/ndb/src/mgmclient/main.cpp
+++ b/ndb/src/mgmclient/main.cpp
@@ -90,13 +90,6 @@ static void usage()
my_print_help(my_long_options);
my_print_variables(my_long_options);
}
-static my_bool
-get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
- char *argument)
-{
- return ndb_std_get_one_option(optid, opt, argument ? argument :
- "d:t:O,/tmp/ndb_mgm.trace");
-}
static int
read_and_execute(int _try_reconnect)
@@ -139,7 +132,11 @@ int main(int argc, char** argv){
load_defaults("my",load_default_groups,&argc,&argv);
int ho_error;
- if ((ho_error=handle_options(&argc, &argv, my_long_options, get_one_option)))
+#ifndef DBUG_OFF
+ opt_debug= "d:t:O,/tmp/ndb_mgm.trace";
+#endif
+ if ((ho_error=handle_options(&argc, &argv, my_long_options,
+ ndb_std_get_one_option)))
exit(ho_error);
char buf[MAXHOSTNAMELEN+10];
diff --git a/ndb/src/mgmsrv/CommandInterpreter.cpp b/ndb/src/mgmsrv/CommandInterpreter.cpp
deleted file mode 100644
index 686155415d5..00000000000
--- a/ndb/src/mgmsrv/CommandInterpreter.cpp
+++ /dev/null
@@ -1,345 +0,0 @@
-/* Copyright (C) 2003 MySQL AB
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-
-#include "CommandInterpreter.hpp"
-
-#include <string.h>
-#include <ctype.h>
-
-#include "MgmtSrvr.hpp"
-#include "MgmtErrorReporter.hpp"
-#include <NdbOut.hpp>
-#include "convertStrToInt.hpp"
-#include <EventLogger.hpp>
-#include <signaldata/SetLogLevelOrd.hpp>
-#include "ConfigInfo.hpp"
-
-#include <version.h>
-#include <m_string.h>
-
-//******************************************************************************
-//******************************************************************************
-CommandInterpreter::CommandInterpreter(MgmtSrvr& mgmtSrvr) :
- _mgmtSrvr(mgmtSrvr) {
-}
-
-
-bool emptyString(const char* s) {
- if (s == NULL) {
- return true;
- }
-
- for (unsigned int i = 0; i < strlen(s); ++i) {
- if (! isspace(s[i])) {
- return false;
- }
- }
-
- return true;
-}
-
-class AutoPtr {
-public:
- AutoPtr(void * ptr) : m_ptr(ptr) {}
- ~AutoPtr() { free(m_ptr);}
-private:
- void * m_ptr;
-};
-
-const char *CommandInterpreter::get_error_text(int err_no)
-{
- return _mgmtSrvr.getErrorText(err_no, m_err_str, sizeof(m_err_str));
-}
-
-//*****************************************************************************
-//*****************************************************************************
-int CommandInterpreter::readAndExecute() {
-
- char* _line = readline_gets();
- char * line;
- if(_line == NULL) {
- ndbout << endl;
- return true;
- }
-
- line = strdup(_line);
-
- AutoPtr ptr(line);
-
- if (emptyString(line)) {
- return true;
- }
-
- for (unsigned int i = 0; i < strlen(line); ++i) {
- line[i] = toupper(line[i]);
- }
-
- // if there is anything in the line proceed
- char* firstToken = strtok(line, " ");
- char* allAfterFirstToken = strtok(NULL, "\0");
-
- if (strcmp(firstToken, "ALL") == 0) {
- analyseAfterFirstToken(-1, allAfterFirstToken);
- }
- else if(strcmp(firstToken, "QUIT") == 0 ||
- strcmp(firstToken, "EXIT") == 0 ||
- strcmp(firstToken, "BYE") == 0){
- return false;
- } else {
- // First token should be a digit, process ID
-
- int processId;
- if (! convert(firstToken, processId)) {
- ndbout << "Invalid command: " << _line << "." << endl;
- return true;
- }
- if (processId < 0) {
- ndbout << "Invalid process ID: " << firstToken << "." << endl;
- return true;
- }
-
- analyseAfterFirstToken(processId, allAfterFirstToken);
-
- } // else
- return true;
-}
-
-
-static const CommandInterpreter::CommandFunctionPair commands[] = {
- { "TRACE", &CommandInterpreter::executeTrace }
- ,{ "LOGIN", &CommandInterpreter::executeLogIn }
- ,{ "LOGOUT", &CommandInterpreter::executeLogOut }
- ,{ "LOGOFF", &CommandInterpreter::executeLogOff }
-};
-
-
-//*****************************************************************************
-//*****************************************************************************
-void
-CommandInterpreter::analyseAfterFirstToken(int processId,
- char* allAfterFirstToken) {
-
- if (emptyString(allAfterFirstToken)) {
- if (processId == -1) {
- ndbout << "Expected a command after ALL." << endl;
- }
- else {
- ndbout << "Expected a command after process ID." << endl;
- }
- return;
- }
-
-
- char* secondToken = strtok(allAfterFirstToken, " ");
- char* allAfterSecondToken = strtok(NULL, "\0");
-
- const int tmpSize = sizeof(commands)/sizeof(CommandFunctionPair);
- ExecuteFunction fun = 0;
- const char * command = 0;
- for(int i = 0; i<tmpSize; i++){
- if(strcmp(secondToken, commands[i].command) == 0){
- fun = commands[i].executeFunction;
- command = commands[i].command;
- break;
- }
- }
-
- if(fun == 0){
- ndbout << "Invalid command: " << secondToken << "." << endl;
- return;
- }
-
- if(processId == -1){
- executeForAll(command, fun, allAfterSecondToken);
- } else {
- ndbout << "Executing " << command << " on node: "
- << processId << endl << endl;
- (this->*fun)(processId, allAfterSecondToken, false);
- ndbout << endl;
- }
-}
-
-void
-CommandInterpreter::executeForAll(const char * cmd, ExecuteFunction fun,
- const char * allAfterSecondToken){
-
- NodeId nodeId = 0;
- while(_mgmtSrvr.getNextNodeId(&nodeId, NDB_MGM_NODE_TYPE_NDB)){
- ndbout << "Executing " << cmd << " on node: "
- << nodeId << endl << endl;
- (this->*fun)(nodeId, allAfterSecondToken, true);
- ndbout << endl;
- }
-}
-
-//*****************************************************************************
-//*****************************************************************************
-bool CommandInterpreter::parseBlockSpecification(const char* allAfterLog,
- Vector<BaseString>& blocks) {
-
- // Parse: [BLOCK = {ALL|<blockName>+}]
-
- if (emptyString(allAfterLog)) {
- return true;
- }
-
- // Copy allAfterLog since strtok will modify it
- char* newAllAfterLog = strdup(allAfterLog);
- char* firstTokenAfterLog = strtok(newAllAfterLog, " ");
- for (unsigned int i = 0; i < strlen(firstTokenAfterLog); ++i) {
- firstTokenAfterLog[i] = toupper(firstTokenAfterLog[i]);
- }
-
- if (strcmp(firstTokenAfterLog, "BLOCK") != 0) {
- ndbout << "Unexpected value: " << firstTokenAfterLog
- << ". Expected BLOCK." << endl;
- free(newAllAfterLog);
- return false;
- }
-
- char* allAfterFirstToken = strtok(NULL, "\0");
- if (emptyString(allAfterFirstToken)) {
- ndbout << "Expected =." << endl;
- free(newAllAfterLog);
- return false;
- }
-
- char* secondTokenAfterLog = strtok(allAfterFirstToken, " ");
- if (strcmp(secondTokenAfterLog, "=") != 0) {
- ndbout << "Unexpected value: " << secondTokenAfterLog
- << ". Expected =." << endl;
- free(newAllAfterLog);
- return false;
- }
-
- char* blockName = strtok(NULL, " ");
- bool all = false;
- if (blockName != NULL && (strcmp(blockName, "ALL") == 0)) {
- all = true;
- }
- while (blockName != NULL) {
- blocks.push_back(BaseString(blockName));
- blockName = strtok(NULL, " ");
- }
-
- if (blocks.size() == 0) {
- ndbout << "No block specified." << endl;
- free(newAllAfterLog);
- return false;
- }
- if (blocks.size() > 1 && all) {
- // More than "ALL" specified
- ndbout << "Nothing expected after ALL." << endl;
- free(newAllAfterLog);
- return false;
- }
-
- free(newAllAfterLog);
- return true;
-}
-
-void CommandInterpreter::executeLogIn(int processId,
- const char* parameters, bool all) {
-
- (void)all; // Don't want compiler warning
-
- Vector<BaseString> blocks;
- if (! parseBlockSpecification(parameters, blocks)) {
- return;
- }
-
- int result = _mgmtSrvr.setSignalLoggingMode(processId, MgmtSrvr::In, blocks);
- if (result != 0) {
- ndbout << get_error_text(result) << endl;
- }
-}
-
-//******************************************************************************
-//******************************************************************************
-void CommandInterpreter::executeLogOut(int processId,
- const char* parameters, bool all) {
-
- (void)all; // Don't want compiler warning
-
- Vector<BaseString> blocks;
- if (! parseBlockSpecification(parameters, blocks)) {
- return;
- }
-
-
- int result = _mgmtSrvr.setSignalLoggingMode(processId, MgmtSrvr::Out, blocks);
- if (result != 0) {
- ndbout << get_error_text(result) << endl;
- }
-
-}
-
-
-//******************************************************************************
-//******************************************************************************
-void CommandInterpreter::executeLogOff(int processId,
- const char* parameters, bool all) {
-
- (void)all; // Don't want compiler warning
-
- Vector<BaseString> blocks;
- if (! parseBlockSpecification(parameters, blocks)) {
- return;
- }
-
-
- int result = _mgmtSrvr.setSignalLoggingMode(processId, MgmtSrvr::Off, blocks);
- if (result != 0) {
- ndbout << get_error_text(result) << endl;
- }
-
-}
-
-void CommandInterpreter::executeTrace(int processId,
- const char* parameters, bool all) {
-
- (void)all; // Don't want compiler warning
-
- if (emptyString(parameters)) {
- ndbout << "Missing trace number." << endl;
- return;
- }
-
- char* newpar = strdup(parameters);
- char* firstParameter = strtok(newpar, " ");
-
-
- int traceNo;
- if (! convert(firstParameter, traceNo)) {
- ndbout << "Expected an integer." << endl;
- free(newpar);
- return;
- }
-
- char* allAfterFirstParameter = strtok(NULL, "\0");
-
- if (! emptyString(allAfterFirstParameter)) {
- ndbout << "Nothing expected after trace number." << endl;
- free(newpar);
- return;
- }
-
- int result = _mgmtSrvr.setTraceNo(processId, traceNo);
- if (result != 0) {
- ndbout << get_error_text(result) << endl;
- }
- free(newpar);
-}
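The deleted interpreter dispatched commands through a static table of name / pointer-to-member pairs and invoked the match with (this->*fun)(...). A self-contained sketch of that idiom, with made-up command handlers:

// Minimal sketch of the dispatch idiom the removed class used.
#include <cstring>
#include <iostream>

class Interp {
public:
  typedef void (Interp::*ExecuteFunction)(int nodeId, const char *args);

  struct CommandFunctionPair {
    const char *command;
    ExecuteFunction executeFunction;
  };

  void executeTrace(int nodeId, const char *args)
  { std::cout << "TRACE on node " << nodeId << " args=" << (args ? args : "") << "\n"; }
  void executeLogIn(int nodeId, const char *args)
  { (void)args; std::cout << "LOGIN on node " << nodeId << "\n"; }

  bool dispatch(const char *cmd, int nodeId, const char *args) {
    static const CommandFunctionPair commands[] = {
      { "TRACE", &Interp::executeTrace },
      { "LOGIN", &Interp::executeLogIn },
    };
    for (const CommandFunctionPair &p : commands)
      if (std::strcmp(cmd, p.command) == 0) {
        (this->*p.executeFunction)(nodeId, args);   // pointer-to-member call
        return true;
      }
    return false;                                   // unknown command
  }
};

int main() {
  Interp i;
  return i.dispatch("TRACE", 2, "1") ? 0 : 1;
}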
diff --git a/ndb/src/mgmsrv/CommandInterpreter.hpp b/ndb/src/mgmsrv/CommandInterpreter.hpp
deleted file mode 100644
index 6b67d1a5a5f..00000000000
--- a/ndb/src/mgmsrv/CommandInterpreter.hpp
+++ /dev/null
@@ -1,92 +0,0 @@
-/* Copyright (C) 2003 MySQL AB
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-
-#ifndef CommandInterpreter_H
-#define CommandInterpreter_H
-
-#include <ndb_global.h>
-#include <Vector.hpp>
-#include <BaseString.hpp>
-
-class MgmtSrvr;
-
-class CommandInterpreter {
-public:
- CommandInterpreter(MgmtSrvr& mgmtSrvr);
- int readAndExecute();
-
-private:
- char m_err_str[1024];
- const char *get_error_text(int err_no);
-
- char *readline_gets ()
- {
- static char linebuffer[254];
- static char *line_read = (char *)NULL;
-
- /* If the buffer has already been allocated, return the memory
- to the free pool. */
- if (line_read)
- {
- free (line_read);
- line_read = (char *)NULL;
- }
-
- /* Get a line from the user. */
- fputs("ndb_mgmd> ", stdout);
- linebuffer[sizeof(linebuffer)-1]=0;
- line_read = fgets(linebuffer, sizeof(linebuffer)-1, stdin);
- if (line_read == linebuffer) {
- char *q=linebuffer;
- while (*q > 31) q++;
- *q=0;
- line_read= strdup(linebuffer);
- }
- return (line_read);
- }
-
- void analyseAfterFirstToken(int processId, char* allAfterFirstTokenCstr);
- bool parseBlockSpecification(const char* allAfterLog,
- Vector<BaseString>& blocks);
-
-public:
- void executeTrace(int processId, const char* parameters, bool all);
- void executeLogIn(int processId, const char* parameters, bool all);
- void executeLogOut(int processId, const char* parameters, bool all);
- void executeLogOff(int processId, const char* parameters, bool all);
-
-public:
- typedef void (CommandInterpreter::* ExecuteFunction)(int processId,
- const char * param,
- bool all);
-
- struct CommandFunctionPair {
- const char * command;
- ExecuteFunction executeFunction;
- };
-private:
- /**
- *
- */
- void executeForAll(const char * cmd, ExecuteFunction fun, const char * param);
-
- /**
- * Management server to use when executing commands
- */
- MgmtSrvr& _mgmtSrvr;
-};
-
-#endif // CommandInterpreter_H
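The readline_gets() helper in the deleted header keeps one heap copy of the most recent line, frees it on the next call, and chops the line at the first control character (which also drops the trailing newline). A compact restatement of that idiom; buffer size and prompt text are illustrative.

/* Sketch of the line-reader idiom from the removed header. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *read_line(const char *prompt)
{
  static char *last = NULL;          /* previous result, freed lazily */
  char buf[256];

  free(last);
  last = NULL;

  fputs(prompt, stdout);
  fflush(stdout);
  if (fgets(buf, sizeof(buf), stdin) == NULL)
    return NULL;                     /* EOF */

  char *q = buf;
  while (*q >= 32)                   /* stop at newline or other control char */
    q++;
  *q = '\0';

  last = strdup(buf);
  return last;
}

int main(void)
{
  const char *line = read_line("ndb_mgmd> ");
  printf("got: %s\n", line ? line : "(eof)");
  return 0;
}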
diff --git a/ndb/src/mgmsrv/ConfigInfo.cpp b/ndb/src/mgmsrv/ConfigInfo.cpp
index 36a72dcb975..4e96047e54d 100644
--- a/ndb/src/mgmsrv/ConfigInfo.cpp
+++ b/ndb/src/mgmsrv/ConfigInfo.cpp
@@ -1097,6 +1097,18 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = {
"15" },
{
+ CFG_LOGLEVEL_CONGESTION,
+ "LogLevelCongestion",
+ DB_TOKEN,
+ "Congestion info printed on stdout",
+ ConfigInfo::CI_USED,
+ false,
+ ConfigInfo::CI_INT,
+ "0",
+ "0",
+ "15" },
+
+ {
CFG_LOGLEVEL_ERROR,
"LogLevelError",
DB_TOKEN,
@@ -3082,8 +3094,8 @@ fixPortNumber(InitConfigFileParser::Context & ctx, const char * data){
const Properties * node;
require(ctx.m_config->get("Node", id1, &node));
+
BaseString hostname(hostName1);
- // require(node->get("HostName", hostname));
if (hostname.c_str()[0] == 0) {
ctx.reportError("Hostname required on nodeid %d since it will "
@@ -3092,29 +3104,51 @@ fixPortNumber(InitConfigFileParser::Context & ctx, const char * data){
}
Uint32 port= 0;
- if (!node->get("ServerPort", &port) &&
- !ctx.m_userProperties.get("ServerPort_", id1, &port)) {
- Uint32 adder= 0;
- {
- BaseString server_port_adder(hostname);
- server_port_adder.append("_ServerPortAdder");
- ctx.m_userProperties.get(server_port_adder.c_str(), &adder);
- ctx.m_userProperties.put(server_port_adder.c_str(), adder+1, true);
- }
+ const char * type1;
+ const char * type2;
+ const Properties * node2;
+
+ node->get("Type", &type1);
+ ctx.m_config->get("Node", id2, &node2);
+ node2->get("Type", &type2);
+
+ if(strcmp(type1, MGM_TOKEN)==0)
+ node->get("PortNumber",&port);
+ else if(strcmp(type2, MGM_TOKEN)==0)
+ node2->get("PortNumber",&port);
+ if (!port &&
+ !node->get("ServerPort", &port) &&
+ !ctx.m_userProperties.get("ServerPort_", id1, &port))
+ {
Uint32 base= 0;
- if (!ctx.m_userProperties.get("ServerPortBase", &base)){
- if(!(ctx.m_userDefaults &&
+ /*
+ * If the connection doesn't involve an mgm server,
+ * and a default port number has been set, behave the old
+ * way of allocating port numbers for transporters.
+ */
+ if(ctx.m_userDefaults && ctx.m_userDefaults->get("PortNumber", &base))
+ {
+ Uint32 adder= 0;
+ {
+ BaseString server_port_adder(hostname);
+ server_port_adder.append("_ServerPortAdder");
+ ctx.m_userProperties.get(server_port_adder.c_str(), &adder);
+ ctx.m_userProperties.put(server_port_adder.c_str(), adder+1, true);
+ }
+
+ if (!ctx.m_userProperties.get("ServerPortBase", &base)){
+ if(!(ctx.m_userDefaults &&
ctx.m_userDefaults->get("PortNumber", &base)) &&
- !ctx.m_systemDefaults->get("PortNumber", &base)) {
- base= strtoll(NDB_TCP_BASE_PORT,0,0);
- // ctx.reportError("Cannot retrieve base port number");
- // return false;
+ !ctx.m_systemDefaults->get("PortNumber", &base)) {
+ base= strtoll(NDB_TCP_BASE_PORT,0,0);
+ }
+ ctx.m_userProperties.put("ServerPortBase", base);
}
- ctx.m_userProperties.put("ServerPortBase", base);
+
+ port= base + adder;
+ ctx.m_userProperties.put("ServerPort_", id1, port);
}
- port= base + adder;
- ctx.m_userProperties.put("ServerPort_", id1, port);
}
if(ctx.m_currentSection->contains("PortNumber")) {
@@ -3127,6 +3161,7 @@ fixPortNumber(InitConfigFileParser::Context & ctx, const char * data){
{
ctx.m_currentSection->put("PortNumber", port);
}
+
DBUG_PRINT("info", ("connection %d-%d port %d host %s",
id1, id2, port, hostname.c_str()));
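The reworked fixPortNumber() prefers the management node's PortNumber when either end of the connection is an mgm server, then an explicit ServerPort or a previously allocated port, and only falls back to base-plus-counter allocation when a default PortNumber is configured; otherwise the port is left at 0, which the rest of this patch fills in dynamically from the transporter registry. A simplified sketch of that ordering; ConnEnd and pick_port() are illustrative names, not NDB code.

#include <map>
#include <string>
#include <iostream>

struct ConnEnd {
  bool is_mgm;
  unsigned port_number;   // PortNumber, 0 if unset
  unsigned server_port;   // ServerPort, 0 if unset
  std::string host;
};

unsigned pick_port(const ConnEnd &a, const ConnEnd &b,
                   unsigned base, std::map<std::string, unsigned> &adder)
{
  if (a.is_mgm && a.port_number) return a.port_number;    // rule 1: mgm end wins
  if (b.is_mgm && b.port_number) return b.port_number;
  if (a.server_port)             return a.server_port;    // rule 2: explicit ServerPort
  return base + adder[a.host]++;                          // rule 3: old base + per-host counter
}

int main() {
  std::map<std::string, unsigned> adder;
  ConnEnd db1{false, 0, 0, "host1"}, db2{false, 0, 0, "host2"};
  ConnEnd mgm{true, 1186, 0, "host3"};
  std::cout << pick_port(db1, mgm, 2202, adder) << "\n";  // 1186 (mgm end)
  std::cout << pick_port(db1, db2, 2202, adder) << "\n";  // 2202 (base + 0)
  std::cout << pick_port(db1, db2, 2202, adder) << "\n";  // 2203 (base + 1)
  return 0;
}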
diff --git a/ndb/src/mgmsrv/Makefile.am b/ndb/src/mgmsrv/Makefile.am
index 50e0b6023ad..7fd3fa66b43 100644
--- a/ndb/src/mgmsrv/Makefile.am
+++ b/ndb/src/mgmsrv/Makefile.am
@@ -16,17 +16,20 @@ ndb_mgmd_SOURCES = \
MgmtSrvrConfig.cpp \
ConfigInfo.cpp \
InitConfigFileParser.cpp \
- Config.cpp \
- CommandInterpreter.cpp
+ Config.cpp
INCLUDES_LOC = -I$(top_srcdir)/ndb/src/ndbapi \
-I$(top_srcdir)/ndb/src/mgmapi \
- -I$(top_srcdir)/ndb/src/common/mgmcommon
+ -I$(top_srcdir)/ndb/src/common/mgmcommon \
+ -I$(top_srcdir)/ndb/src/mgmclient
-LDADD_LOC = $(top_builddir)/ndb/src/libndbclient.la \
+LDADD_LOC = $(top_srcdir)/ndb/src/mgmclient/CommandInterpreter.o \
+ $(top_builddir)/ndb/src/libndbclient.la \
$(top_builddir)/dbug/libdbug.a \
$(top_builddir)/mysys/libmysys.a \
- $(top_builddir)/strings/libmystrings.a @NDB_SCI_LIBS@
+ $(top_builddir)/strings/libmystrings.a \
+ @readline_link@ \
+ @NDB_SCI_LIBS@ \
@TERMCAP_LIB@
DEFS_LOC = -DDEFAULT_MYSQL_HOME="\"$(MYSQLBASEdir)\"" \
diff --git a/ndb/src/mgmsrv/MgmtSrvr.cpp b/ndb/src/mgmsrv/MgmtSrvr.cpp
index 3ee0dc6e750..936d9a0794d 100644
--- a/ndb/src/mgmsrv/MgmtSrvr.cpp
+++ b/ndb/src/mgmsrv/MgmtSrvr.cpp
@@ -166,16 +166,16 @@ MgmtSrvr::logLevelThreadRun()
void
MgmtSrvr::startEventLog()
{
+ NdbMutex_Lock(m_configMutex);
+
g_eventLogger.setCategory("MgmSrvr");
- ndb_mgm_configuration_iterator * iter = ndb_mgm_create_configuration_iterator
- ((ndb_mgm_configuration*)_config->m_configValues, CFG_SECTION_NODE);
- if(iter == 0)
- return ;
-
- if(ndb_mgm_find(iter, CFG_NODE_ID, _ownNodeId) != 0){
- ndb_mgm_destroy_iterator(iter);
- return ;
+ ndb_mgm_configuration_iterator
+ iter(* _config->m_configValues, CFG_SECTION_NODE);
+
+ if(iter.find(CFG_NODE_ID, _ownNodeId) != 0){
+ NdbMutex_Unlock(m_configMutex);
+ return;
}
const char * tmp;
@@ -183,10 +183,10 @@ MgmtSrvr::startEventLog()
char *clusterLog= NdbConfig_ClusterLogFileName(_ownNodeId);
NdbAutoPtr<char> tmp_aptr(clusterLog);
- if(ndb_mgm_get_string_parameter(iter, CFG_LOG_DESTINATION, &tmp) == 0){
+ if(iter.get(CFG_LOG_DESTINATION, &tmp) == 0){
logdest.assign(tmp);
}
- ndb_mgm_destroy_iterator(iter);
+ NdbMutex_Unlock(m_configMutex);
if(logdest.length() == 0 || logdest == "") {
logdest.assfmt("FILE:filename=%s,maxsize=1000000,maxfiles=6",
@@ -320,42 +320,41 @@ MgmtSrvr::getNodeCount(enum ndb_mgm_node_type type) const
}
int
-MgmtSrvr::getPort() const {
- const Properties *mgmProps;
-
- ndb_mgm_configuration_iterator * iter =
- ndb_mgm_create_configuration_iterator(_config->m_configValues,
- CFG_SECTION_NODE);
- if(iter == 0)
+MgmtSrvr::getPort() const
+{
+ if(NdbMutex_Lock(m_configMutex))
return 0;
- if(ndb_mgm_find(iter, CFG_NODE_ID, getOwnNodeId()) != 0){
+ ndb_mgm_configuration_iterator
+ iter(* _config->m_configValues, CFG_SECTION_NODE);
+
+ if(iter.find(CFG_NODE_ID, getOwnNodeId()) != 0){
ndbout << "Could not retrieve configuration for Node "
<< getOwnNodeId() << " in config file." << endl
<< "Have you set correct NodeId for this node?" << endl;
- ndb_mgm_destroy_iterator(iter);
+ NdbMutex_Unlock(m_configMutex);
return 0;
}
unsigned type;
- if(ndb_mgm_get_int_parameter(iter, CFG_TYPE_OF_SECTION, &type) != 0 ||
+ if(iter.get(CFG_TYPE_OF_SECTION, &type) != 0 ||
type != NODE_TYPE_MGM){
ndbout << "Local node id " << getOwnNodeId()
<< " is not defined as management server" << endl
<< "Have you set correct NodeId for this node?" << endl;
- ndb_mgm_destroy_iterator(iter);
+ NdbMutex_Unlock(m_configMutex);
return 0;
}
Uint32 port = 0;
- if(ndb_mgm_get_int_parameter(iter, CFG_MGM_PORT, &port) != 0){
+ if(iter.get(CFG_MGM_PORT, &port) != 0){
ndbout << "Could not find PortNumber in the configuration file." << endl;
- ndb_mgm_destroy_iterator(iter);
+ NdbMutex_Unlock(m_configMutex);
return 0;
}
- ndb_mgm_destroy_iterator(iter);
-
+ NdbMutex_Unlock(m_configMutex);
+
return port;
}
@@ -448,14 +447,14 @@ MgmtSrvr::MgmtSrvr(SocketServer *socket_server,
{
ndb_mgm_configuration_iterator
- *iter = ndb_mgm_create_configuration_iterator(_config->m_configValues,
- CFG_SECTION_NODE);
- for(ndb_mgm_first(iter); ndb_mgm_valid(iter); ndb_mgm_next(iter)){
+ iter(* _config->m_configValues, CFG_SECTION_NODE);
+
+ for(iter.first(); iter.valid(); iter.next()){
unsigned type, id;
- if(ndb_mgm_get_int_parameter(iter, CFG_TYPE_OF_SECTION, &type) != 0)
+ if(iter.get(CFG_TYPE_OF_SECTION, &type) != 0)
continue;
- if(ndb_mgm_get_int_parameter(iter, CFG_NODE_ID, &id) != 0)
+ if(iter.get(CFG_NODE_ID, &id) != 0)
continue;
MGM_REQUIRE(id < MAX_NODES);
@@ -478,7 +477,6 @@ MgmtSrvr::MgmtSrvr(SocketServer *socket_server,
break;
}
}
- ndb_mgm_destroy_iterator(iter);
}
_props = NULL;
@@ -554,7 +552,8 @@ MgmtSrvr::start(BaseString &error_string)
DBUG_RETURN(false);
}
}
- theFacade= TransporterFacade::theFacadeInstance= new TransporterFacade();
+ theFacade= TransporterFacade::theFacadeInstance
+ = new TransporterFacade();
if(theFacade == 0) {
DEBUG("MgmtSrvr.cpp: theFacade is NULL.");
@@ -582,7 +581,26 @@ MgmtSrvr::start(BaseString &error_string)
theFacade = 0;
DBUG_RETURN(false);
}
-
+
+ TransporterRegistry *reg = theFacade->get_registry();
+ for(unsigned int i=0;i<reg->m_transporter_interface.size();i++) {
+ BaseString msg;
+ DBUG_PRINT("info",("Setting dynamic port %d->%d : %d",
+ reg->get_localNodeId(),
+ reg->m_transporter_interface[i].m_remote_nodeId,
+ reg->m_transporter_interface[i].m_s_service_port
+ )
+ );
+ int res = setConnectionDbParameter((int)reg->get_localNodeId(),
+ (int)reg->m_transporter_interface[i]
+ .m_remote_nodeId,
+ (int)CFG_CONNECTION_SERVER_PORT,
+ reg->m_transporter_interface[i]
+ .m_s_service_port,
+ msg);
+ DBUG_PRINT("info",("Set result: %d: %s",res,msg.c_str()));
+ }
+
_ownReference = numberToRef(_blockNumber, _ownNodeId);
startEventLog();
@@ -1570,8 +1588,6 @@ void
MgmtSrvr::handleReceivedSignal(NdbApiSignal* signal)
{
// The way of handling a received signal is taken from the Ndb class.
- int returnCode;
-
int gsn = signal->readSignalNumber();
switch (gsn) {
@@ -1609,9 +1625,9 @@ MgmtSrvr::handleStatus(NodeId nodeId, bool alive, bool nfComplete)
theData[1] = nodeId;
if (alive) {
m_started_nodes.push_back(nodeId);
- theData[0] = EventReport::Connected;
+ theData[0] = NDB_LE_Connected;
} else {
- theData[0] = EventReport::Disconnected;
+ theData[0] = NDB_LE_Disconnected;
if(nfComplete)
{
DBUG_VOID_RETURN;
@@ -1683,10 +1699,7 @@ MgmtSrvr::get_connected_nodes(NodeBitmask &connected_nodes) const
if (getNodeType(i) == NDB_MGM_NODE_TYPE_NDB)
{
const ClusterMgr::Node &node= theFacade->theClusterMgr->getNodeInfo(i);
- if (node.connected)
- {
- connected_nodes.bitOR(node.m_state.m_connected_nodes);
- }
+ connected_nodes.bitOR(node.m_state.m_connected_nodes);
}
}
}
@@ -1728,8 +1741,13 @@ MgmtSrvr::alloc_node_id(NodeId * nodeId,
int r_config_addr= -1;
unsigned type_c= 0;
+ if(NdbMutex_Lock(m_configMutex))
+ {
+ error_string.appfmt("unable to lock configuration mutex");
+ return false;
+ }
ndb_mgm_configuration_iterator
- iter(*(ndb_mgm_configuration *)_config->m_configValues, CFG_SECTION_NODE);
+ iter(* _config->m_configValues, CFG_SECTION_NODE);
for(iter.first(); iter.valid(); iter.next()) {
unsigned tmp= 0;
if(iter.get(CFG_NODE_ID, &tmp)) require(false);
@@ -1796,6 +1814,7 @@ MgmtSrvr::alloc_node_id(NodeId * nodeId,
"Suggest specifying node id in connectstring,\n"
"or specifying unique host names in config file.",
id_found, tmp);
+ NdbMutex_Unlock(m_configMutex);
DBUG_RETURN(false);
}
if (config_hostname == 0) {
@@ -1808,6 +1827,7 @@ MgmtSrvr::alloc_node_id(NodeId * nodeId,
}
id_found= tmp; // mgmt server matched, check for more matches
}
+ NdbMutex_Unlock(m_configMutex);
if (id_found)
{
@@ -1950,7 +1970,7 @@ MgmtSrvr::eventReport(NodeId nodeId, const Uint32 * theData)
{
const EventReport * const eventReport = (EventReport *)&theData[0];
- EventReport::EventType type = eventReport->getEventType();
+ Ndb_logevent_type type = eventReport->getEventType();
// Log event
g_eventLogger.log(type, theData, nodeId,
&m_event_listner[0].m_logLevel);
@@ -2172,13 +2192,18 @@ MgmtSrvr::Allocated_resources::get_nodeid() const
int
MgmtSrvr::setDbParameter(int node, int param, const char * value,
BaseString& msg){
+
+ if(NdbMutex_Lock(m_configMutex))
+ return -1;
+
/**
* Check parameter
*/
- ndb_mgm_configuration_iterator iter(* _config->m_configValues,
- CFG_SECTION_NODE);
+ ndb_mgm_configuration_iterator
+ iter(* _config->m_configValues, CFG_SECTION_NODE);
if(iter.first() != 0){
msg.assign("Unable to find node section (iter.first())");
+ NdbMutex_Unlock(m_configMutex);
return -1;
}
@@ -2186,16 +2211,19 @@ MgmtSrvr::setDbParameter(int node, int param, const char * value,
if(node != 0){
if(iter.find(CFG_NODE_ID, node) != 0){
msg.assign("Unable to find node (iter.find())");
+ NdbMutex_Unlock(m_configMutex);
return -1;
}
if(iter.get(CFG_TYPE_OF_SECTION, &type) != 0){
msg.assign("Unable to get node type(iter.get(CFG_TYPE_OF_SECTION))");
+ NdbMutex_Unlock(m_configMutex);
return -1;
}
} else {
do {
if(iter.get(CFG_TYPE_OF_SECTION, &type) != 0){
msg.assign("Unable to get node type(iter.get(CFG_TYPE_OF_SECTION))");
+ NdbMutex_Unlock(m_configMutex);
return -1;
}
if(type == NODE_TYPE_DB)
@@ -2206,6 +2234,7 @@ MgmtSrvr::setDbParameter(int node, int param, const char * value,
if(type != NODE_TYPE_DB){
msg.assfmt("Invalid node type or no such node (%d %d)",
type, NODE_TYPE_DB);
+ NdbMutex_Unlock(m_configMutex);
return -1;
}
@@ -2231,6 +2260,7 @@ MgmtSrvr::setDbParameter(int node, int param, const char * value,
break;
}
msg.assign("Could not get parameter");
+ NdbMutex_Unlock(m_configMutex);
return -1;
} while(0);
@@ -2268,8 +2298,143 @@ MgmtSrvr::setDbParameter(int node, int param, const char * value,
} while(node == 0 && iter.next() == 0);
msg.assign("Success");
+ NdbMutex_Unlock(m_configMutex);
return 0;
}
+int
+MgmtSrvr::setConnectionDbParameter(int node1,
+ int node2,
+ int param,
+ int value,
+ BaseString& msg){
+ Uint32 current_value,new_value;
+
+ DBUG_ENTER("MgmtSrvr::setConnectionDbParameter");
+
+ if(NdbMutex_Lock(m_configMutex))
+ {
+ DBUG_RETURN(-1);
+ }
+
+ ndb_mgm_configuration_iterator
+ iter(* _config->m_configValues, CFG_SECTION_CONNECTION);
+
+ if(iter.first() != 0){
+ msg.assign("Unable to find connection section (iter.first())");
+ NdbMutex_Unlock(m_configMutex);
+ DBUG_RETURN(-1);
+ }
+
+ for(;iter.valid();iter.next()) {
+ Uint32 n1,n2;
+ iter.get(CFG_CONNECTION_NODE_1, &n1);
+ iter.get(CFG_CONNECTION_NODE_2, &n2);
+ if((n1 == (unsigned)node1 && n2 == (unsigned)node2)
+ || (n1 == (unsigned)node2 && n2 == (unsigned)node1))
+ break;
+ }
+ if(!iter.valid()) {
+ msg.assign("Unable to find connection between nodes");
+ NdbMutex_Unlock(m_configMutex);
+ DBUG_RETURN(-2);
+ }
+
+ if(iter.get(param, &current_value) != 0) {
+ msg.assign("Unable to get current value of parameter");
+ NdbMutex_Unlock(m_configMutex);
+ DBUG_RETURN(-3);
+ }
+
+ ConfigValues::Iterator i2(_config->m_configValues->m_config,
+ iter.m_config);
+
+ if(i2.set(param, (unsigned)value) == false) {
+ msg.assign("Unable to set new value of parameter");
+ NdbMutex_Unlock(m_configMutex);
+ DBUG_RETURN(-4);
+ }
+
+ if(iter.get(param, &new_value) != 0) {
+ msg.assign("Unable to get parameter after setting it.");
+ NdbMutex_Unlock(m_configMutex);
+ DBUG_RETURN(-5);
+ }
+
+ msg.assfmt("%u -> %u",current_value,new_value);
+ NdbMutex_Unlock(m_configMutex);
+ DBUG_RETURN(1);
+}
+
+
+int
+MgmtSrvr::getConnectionDbParameter(int node1,
+ int node2,
+ int param,
+ int *value,
+ BaseString& msg){
+ DBUG_ENTER("MgmtSrvr::getConnectionDbParameter");
+
+ if(NdbMutex_Lock(m_configMutex))
+ {
+ DBUG_RETURN(-1);
+ }
+
+ ndb_mgm_configuration_iterator
+ iter(* _config->m_configValues, CFG_SECTION_CONNECTION);
+
+ if(iter.first() != 0){
+ msg.assign("Unable to find connection section (iter.first())");
+ NdbMutex_Unlock(m_configMutex);
+ DBUG_RETURN(-1);
+ }
+
+ for(;iter.valid();iter.next()) {
+ Uint32 n1=0,n2=0;
+ iter.get(CFG_CONNECTION_NODE_1, &n1);
+ iter.get(CFG_CONNECTION_NODE_2, &n2);
+ if((n1 == (unsigned)node1 && n2 == (unsigned)node2)
+ || (n1 == (unsigned)node2 && n2 == (unsigned)node1))
+ break;
+ }
+ if(!iter.valid()) {
+ msg.assign("Unable to find connection between nodes");
+ NdbMutex_Unlock(m_configMutex);
+ DBUG_RETURN(-1);
+ }
+
+ if(iter.get(param, (Uint32*)value) != 0) {
+ msg.assign("Unable to get current value of parameter");
+ NdbMutex_Unlock(m_configMutex);
+ DBUG_RETURN(-1);
+ }
+
+ msg.assfmt("%d",*value);
+ NdbMutex_Unlock(m_configMutex);
+ DBUG_RETURN(1);
+}
+
+void MgmtSrvr::transporter_connect(NDB_SOCKET_TYPE sockfd)
+{
+ if (theFacade->get_registry()->connect_server(sockfd))
+ {
+ /**
+ * Force an update_connections() so that the
+ * ClusterMgr and TransporterFacade is up to date
+ * with the new connection.
+ * Important for correct node id reservation handling
+ */
+ NdbMutex_Lock(theFacade->theMutexPtr);
+ theFacade->get_registry()->update_connections();
+ NdbMutex_Unlock(theFacade->theMutexPtr);
+ }
+}
+
+int MgmtSrvr::set_connect_string(const char *str)
+{
+ return ndb_mgm_set_connectstring(m_config_retriever->get_mgmHandle(),str);
+}
+
+
template class MutexVector<unsigned short>;
template class MutexVector<Ndb_mgmd_event_service::Event_listener>;
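Most of the MgmtSrvr hunks above wrap the configuration iterator in explicit NdbMutex_Lock/NdbMutex_Unlock pairs, with an unlock on every early return. An RAII guard, similar in spirit to the Guard class used elsewhere in this tree, keeps those exit paths from leaking the lock. A pthread-based sketch only to stay self-contained; the real code uses NdbMutex_*.

#include <pthread.h>
#include <cstdio>

class MutexGuard {
public:
  explicit MutexGuard(pthread_mutex_t *m) : m_mutex(m) { pthread_mutex_lock(m_mutex); }
  ~MutexGuard() { pthread_mutex_unlock(m_mutex); }
private:
  pthread_mutex_t *m_mutex;
};

static pthread_mutex_t config_mutex = PTHREAD_MUTEX_INITIALIZER;

static int get_port(bool node_found)
{
  MutexGuard guard(&config_mutex);     // unlocked automatically on any return
  if (!node_found)
    return 0;                          // early return: no manual unlock needed
  return 1186;
}

int main()
{
  std::printf("port: %d\n", get_port(true));
  return 0;
}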
diff --git a/ndb/src/mgmsrv/MgmtSrvr.hpp b/ndb/src/mgmsrv/MgmtSrvr.hpp
index 927b610e75c..3b14fa60e6b 100644
--- a/ndb/src/mgmsrv/MgmtSrvr.hpp
+++ b/ndb/src/mgmsrv/MgmtSrvr.hpp
@@ -51,6 +51,7 @@ class Ndb_mgmd_event_service : public EventLoggerBase
public:
struct Event_listener : public EventLoggerBase {
NDB_SOCKET_TYPE m_socket;
+ Uint32 m_parsable;
};
private:
@@ -463,7 +464,17 @@ public:
int getPort() const;
int setDbParameter(int node, int parameter, const char * value, BaseString&);
-
+ int setConnectionDbParameter(int node1, int node2, int param, int value,
+ BaseString& msg);
+ int getConnectionDbParameter(int node1, int node2, int param,
+ int *value, BaseString& msg);
+
+ int set_connect_string(const char *str);
+
+ void transporter_connect(NDB_SOCKET_TYPE sockfd);
+
+ ConfigRetriever *get_config_retriever() { return m_config_retriever; };
+
const char *get_connect_address(Uint32 node_id);
void get_connected_nodes(NodeBitmask &connected_nodes) const;
SocketServer *get_socket_server() { return m_socket_server; }
diff --git a/ndb/src/mgmsrv/Services.cpp b/ndb/src/mgmsrv/Services.cpp
index bcc18d75b09..8c087c2a3ca 100644
--- a/ndb/src/mgmsrv/Services.cpp
+++ b/ndb/src/mgmsrv/Services.cpp
@@ -31,6 +31,7 @@
#include <mgmapi_configuration.hpp>
#include <Vector.hpp>
#include "Services.hpp"
+#include "../mgmapi/ndb_logevent.hpp"
extern bool g_StopServer;
@@ -226,14 +227,32 @@ ParserRow<MgmApiSession> commands[] = {
MGM_ARG("parameter", String, Mandatory, "Parameter"),
MGM_ARG("value", String, Mandatory, "Value"),
+ MGM_CMD("set connection parameter",
+ &MgmApiSession::setConnectionParameter, ""),
+ MGM_ARG("node1", String, Mandatory, "Node1 ID"),
+ MGM_ARG("node2", String, Mandatory, "Node2 ID"),
+ MGM_ARG("param", String, Mandatory, "Parameter"),
+ MGM_ARG("value", String, Mandatory, "Value"),
+
+ MGM_CMD("get connection parameter",
+ &MgmApiSession::getConnectionParameter, ""),
+ MGM_ARG("node1", String, Mandatory, "Node1 ID"),
+ MGM_ARG("node2", String, Mandatory, "Node2 ID"),
+ MGM_ARG("param", String, Mandatory, "Parameter"),
+
MGM_CMD("listen event", &MgmApiSession::listen_event, ""),
MGM_ARG("node", Int, Optional, "Node"),
+ MGM_ARG("parsable", Int, Optional, "Parsable"),
MGM_ARG("filter", String, Mandatory, "Event category"),
MGM_CMD("purge stale sessions", &MgmApiSession::purge_stale_sessions, ""),
MGM_CMD("check connection", &MgmApiSession::check_connection, ""),
+ MGM_CMD("transporter connect", &MgmApiSession::transporter_connect, ""),
+
+ MGM_CMD("get mgmd nodeid", &MgmApiSession::get_mgmd_nodeid, ""),
+
MGM_END()
};
@@ -556,11 +575,13 @@ MgmApiSession::getConfig_common(Parser_t::Context &,
}
}
+ NdbMutex_Lock(m_mgmsrv.m_configMutex);
const ConfigValues * cfg = &conf->m_configValues->m_config;
const Uint32 size = cfg->getPackedSize();
UtilBuffer src;
cfg->pack(src);
+ NdbMutex_Unlock(m_mgmsrv.m_configMutex);
BaseString str;
int res = base64_encode(src, str);
@@ -651,7 +672,8 @@ MgmApiSession::startBackup(Parser<MgmApiSession>::Context &,
}
else{
m_output->println("result: Ok");
- m_output->println("id: %d", backupId);
+ if (completed)
+ m_output->println("id: %d", backupId);
}
m_output->println("");
DBUG_VOID_RETURN;
@@ -1174,13 +1196,13 @@ MgmApiSession::startAll(Parser<MgmApiSession>::Context &,
void
MgmApiSession::setLogFilter(Parser_t::Context &ctx,
const class Properties &args) {
- Uint32 level;
+ Uint32 severity;
Uint32 enable;
- args.get("level", &level);
+ args.get("level", &severity);
args.get("enable", &enable);
- int result = m_mgmsrv.setEventLogFilter(level, enable);
+ int result = m_mgmsrv.setEventLogFilter(severity, enable);
m_output->println("set logfilter reply");
m_output->println("result: %d", result);
@@ -1203,28 +1225,52 @@ Ndb_mgmd_event_service::log(int eventType, const Uint32* theData, NodeId nodeId)
Uint32 threshold;
LogLevel::EventCategory cat;
Logger::LoggerLevel severity;
+ EventLoggerBase::EventTextFunction textF;
int i, n;
DBUG_ENTER("Ndb_mgmd_event_service::log");
DBUG_PRINT("enter",("eventType=%d, nodeid=%d", eventType, nodeId));
- if (EventLoggerBase::event_lookup(eventType,cat,threshold,severity))
+ if (EventLoggerBase::event_lookup(eventType,cat,threshold,severity,textF))
DBUG_VOID_RETURN;
char m_text[256];
- EventLogger::getText(m_text, sizeof(m_text), eventType, theData, nodeId);
+ EventLogger::getText(m_text, sizeof(m_text),
+ textF, theData, nodeId);
+
+ BaseString str("log event reply\n");
+ str.appfmt("type=%d\n", eventType);
+ str.appfmt("time=%d\n", 0);
+ str.appfmt("source_nodeid=%d\n", nodeId);
+ for (i= 0; ndb_logevent_body[i].token; i++)
+ {
+ if ( ndb_logevent_body[i].type != eventType)
+ continue;
+ int val= theData[ndb_logevent_body[i].index];
+ if (ndb_logevent_body[i].index_fn)
+ val= (*(ndb_logevent_body[i].index_fn))(val);
+ str.appfmt("%s=%d\n",ndb_logevent_body[i].token, val);
+ }
- Vector<NDB_SOCKET_TYPE> copy;
+ Vector<NDB_SOCKET_TYPE> copy;
m_clients.lock();
for(i = m_clients.size() - 1; i >= 0; i--)
{
if(threshold <= m_clients[i].m_logLevel.getLogLevel(cat))
{
- int fd= m_clients[i].m_socket;
- if(fd != NDB_INVALID_SOCKET &&
- println_socket(fd, MAX_WRITE_TIMEOUT, m_text) == -1)
+ NDB_SOCKET_TYPE fd= m_clients[i].m_socket;
+ if(fd != NDB_INVALID_SOCKET)
{
- copy.push_back(fd);
- m_clients.erase(i, false);
+ int r;
+ if (m_clients[i].m_parsable)
+ r= println_socket(fd,
+ MAX_WRITE_TIMEOUT, str.c_str());
+ else
+ r= println_socket(fd,
+ MAX_WRITE_TIMEOUT, m_text);
+ if (r == -1) {
+ copy.push_back(fd);
+ m_clients.erase(i, false);
+ }
}
}
}
@@ -1343,17 +1389,64 @@ MgmApiSession::setParameter(Parser_t::Context &,
}
void
+MgmApiSession::setConnectionParameter(Parser_t::Context &ctx,
+ Properties const &args) {
+ BaseString node1, node2, param, value;
+ args.get("node1", node1);
+ args.get("node2", node2);
+ args.get("param", param);
+ args.get("value", value);
+
+ BaseString result;
+ int ret = m_mgmsrv.setConnectionDbParameter(atoi(node1.c_str()),
+ atoi(node2.c_str()),
+ atoi(param.c_str()),
+ atoi(value.c_str()),
+ result);
+
+ m_output->println("set connection parameter reply");
+ m_output->println("message: %s", result.c_str());
+ m_output->println("result: %s", (ret>0)?"Ok":"Failed");
+ m_output->println("");
+}
+
+void
+MgmApiSession::getConnectionParameter(Parser_t::Context &ctx,
+ Properties const &args) {
+ BaseString node1, node2, param;
+ int value = 0;
+
+ args.get("node1", node1);
+ args.get("node2", node2);
+ args.get("param", param);
+
+ BaseString result;
+ int ret = m_mgmsrv.getConnectionDbParameter(atoi(node1.c_str()),
+ atoi(node2.c_str()),
+ atoi(param.c_str()),
+ &value,
+ result);
+
+ m_output->println("get connection parameter reply");
+ m_output->println("value: %d", value);
+ m_output->println("result: %s", (ret>0)?"Ok":result.c_str());
+ m_output->println("");
+}
+
+void
MgmApiSession::listen_event(Parser<MgmApiSession>::Context & ctx,
Properties const & args) {
-
+ Uint32 parsable= 0;
BaseString node, param, value;
args.get("node", node);
args.get("filter", param);
+ args.get("parsable", &parsable);
int result = 0;
BaseString msg;
Ndb_mgmd_event_service::Event_listener le;
+ le.m_parsable = parsable;
le.m_socket = m_socket;
Vector<BaseString> list;
@@ -1460,6 +1553,25 @@ MgmApiSession::check_connection(Parser_t::Context &ctx,
m_output->println("");
}
+void
+MgmApiSession::transporter_connect(Parser_t::Context &ctx,
+ Properties const &args)
+{
+ m_mgmsrv.transporter_connect(m_socket);
+
+ m_stop= true;
+ m_stopped= true; // force a stop (no closing socket)
+ m_socket= NDB_INVALID_SOCKET; // so nobody closes it
+}
+
+void
+MgmApiSession::get_mgmd_nodeid(Parser_t::Context &ctx,
+ Properties const &args)
+{
+ m_output->println("get mgmd nodeid reply");
+ m_output->println("nodeid:%u",m_mgmsrv.getOwnNodeId());
+ m_output->println("");
+}
+
template class MutexVector<int>;
template class Vector<ParserRow<MgmApiSession> const*>;
-template class Vector<unsigned short>;
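The new parsable event path sends listeners that pass parsable=1 a "log event reply" header followed by key=value lines (type, time, source_nodeid, then the event-specific fields taken from ndb_logevent_body[]). A stand-alone sketch of how such a block can be parsed; this is illustrative, not the mgmapi client code, and the field values are made up.

#include <cstdlib>
#include <iostream>
#include <map>
#include <sstream>
#include <string>

std::map<std::string, int> parse_event(const std::string &block)
{
  std::map<std::string, int> fields;
  std::istringstream in(block);
  std::string line;
  while (std::getline(in, line)) {
    std::string::size_type eq = line.find('=');
    if (eq == std::string::npos)
      continue;                                   // skip the header line
    fields[line.substr(0, eq)] = std::atoi(line.c_str() + eq + 1);
  }
  return fields;
}

int main()
{
  const std::string block =
      "log event reply\n"
      "type=16\n"
      "time=0\n"
      "source_nodeid=2\n"
      "node=3\n";
  std::map<std::string, int> f = parse_event(block);
  std::cout << "event type " << f["type"]
            << " from node " << f["source_nodeid"] << "\n";
  return 0;
}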
diff --git a/ndb/src/mgmsrv/Services.hpp b/ndb/src/mgmsrv/Services.hpp
index f5621a319a6..431126a1f35 100644
--- a/ndb/src/mgmsrv/Services.hpp
+++ b/ndb/src/mgmsrv/Services.hpp
@@ -85,10 +85,19 @@ public:
void setLogFilter(Parser_t::Context &ctx, const class Properties &args);
void setParameter(Parser_t::Context &ctx, const class Properties &args);
+ void setConnectionParameter(Parser_t::Context &ctx,
+ const class Properties &args);
+ void getConnectionParameter(Parser_t::Context &ctx,
+ Properties const &args);
+
void listen_event(Parser_t::Context &ctx, const class Properties &args);
void purge_stale_sessions(Parser_t::Context &ctx, const class Properties &args);
void check_connection(Parser_t::Context &ctx, const class Properties &args);
+
+ void transporter_connect(Parser_t::Context &ctx, Properties const &args);
+
+ void get_mgmd_nodeid(Parser_t::Context &ctx, Properties const &args);
void repCommand(Parser_t::Context &ctx, const class Properties &args);
};
diff --git a/ndb/src/mgmsrv/main.cpp b/ndb/src/mgmsrv/main.cpp
index 01845687ce1..ec20101493e 100644
--- a/ndb/src/mgmsrv/main.cpp
+++ b/ndb/src/mgmsrv/main.cpp
@@ -40,7 +40,7 @@
#if defined NDB_OSE || defined NDB_SOFTOSE
#include <efs.h>
#else
-#include "CommandInterpreter.hpp"
+#include <ndb_mgmclient.hpp>
#endif
#undef DEBUG
@@ -48,11 +48,55 @@
const char progname[] = "mgmtsrvr";
+// copied from mysql.cc to get readline
+extern "C" {
+#if defined( __WIN__) || defined(OS2)
+#include <conio.h>
+#elif !defined(__NETWARE__)
+#include <readline/readline.h>
+extern "C" int add_history(const char *command); /* From readline directory */
+#define HAVE_READLINE
+#endif
+}
+
+static int
+read_and_execute(Ndb_mgmclient* com, const char * prompt, int _try_reconnect)
+{
+ static char *line_read = (char *)NULL;
+
+ /* If the buffer has already been allocated, return the memory
+ to the free pool. */
+ if (line_read)
+ {
+ free (line_read);
+ line_read = (char *)NULL;
+ }
+#ifdef HAVE_READLINE
+ /* Get a line from the user. */
+ line_read = readline (prompt);
+ /* If the line has any text in it, save it on the history. */
+ if (line_read && *line_read)
+ add_history (line_read);
+#else
+ static char linebuffer[254];
+ fputs(prompt, stdout);
+ linebuffer[sizeof(linebuffer)-1]=0;
+ line_read = fgets(linebuffer, sizeof(linebuffer)-1, stdin);
+ if (line_read == linebuffer) {
+ char *q=linebuffer;
+ while (*q > 31) q++;
+ *q=0;
+ line_read= strdup(linebuffer);
+ }
+#endif
+ return com->execute(line_read,_try_reconnect);
+}
/**
* @struct MgmGlobals
* @brief Global Variables used in the management server
- ******************************************************************************/
+ *****************************************************************************/
+
/** Command line arguments */
static int opt_daemon; // NOT bool, bool need not be int
static int opt_non_interactive;
@@ -67,7 +111,7 @@ struct MgmGlobals {
NodeId localNodeId;
bool use_specific_ip;
char * interface_name;
- int port;
+ short unsigned int port;
/** The Mgmt Server */
MgmtSrvr * mgmObject;
@@ -98,13 +142,6 @@ enum ndb_mgmd_options {
};
NDB_STD_OPTS_VARS;
-#if NDB_VERSION_MAJOR <= 4
-#undef OPT_NDB_CONNECTSTRING
-#define OPT_NDB_CONNECTSTRING 1023
-#else
-
-#endif
-
static struct my_option my_long_options[] =
{
NDB_STD_OPTS("ndb_mgmd"),
@@ -129,15 +166,9 @@ static struct my_option my_long_options[] =
"Don't run as daemon, but don't read from stdin",
(gptr*) &opt_non_interactive, (gptr*) &opt_non_interactive, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
-#if NDB_VERSION_MAJOR <= 4
- { "config-file", 'c',
- "-c provided for backwards compatability, will be removed in 5.0."
- " Use -f instead",
- (gptr*) &opt_config_filename, (gptr*) &opt_config_filename, 0,
- GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },
-#endif
{ 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}
};
+
static void short_usage_sub(void)
{
printf("Usage: %s [OPTIONS]\n", my_progname);
@@ -149,21 +180,6 @@ static void usage()
my_print_help(my_long_options);
my_print_variables(my_long_options);
}
-static my_bool
-get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
- char *argument)
-{
- ndb_std_get_one_option(optid, opt, argument ? argument :
- "d:t:O,/tmp/ndb_mgmd.trace");
-#if NDB_VERSION_MAJOR <= 4
- switch (optid) {
- case 'c':
- printf("Warning: -c will be removed in 5.0, use -f instead\n");
- break;
- }
-#endif
- return 0;
-}
/*
* MAIN
@@ -188,7 +204,11 @@ int main(int argc, char** argv)
load_defaults("my",load_default_groups,&argc,&argv);
int ho_error;
- if ((ho_error=handle_options(&argc, &argv, my_long_options, get_one_option)))
+#ifndef DBUG_OFF
+ opt_debug= "d:t:O,/tmp/ndb_mgmd.trace";
+#endif
+ if ((ho_error=handle_options(&argc, &argv, my_long_options,
+ ndb_std_get_one_option)))
exit(ho_error);
if (opt_interactive ||
@@ -244,7 +264,8 @@ int main(int argc, char** argv)
glob->interface_name = 0;
}
- if(!glob->socketServer->setup(mapi, glob->port, glob->interface_name)){
+ if(!glob->socketServer->setup(mapi, &glob->port, glob->interface_name))
+ {
ndbout_c("Unable to setup management port: %d!\n"
"Please check if the port is already used,\n"
"(perhaps a ndb_mgmd is already running),\n"
@@ -253,13 +274,35 @@ int main(int argc, char** argv)
delete mapi;
goto error_end;
}
-
+
+ /* Construct a fake connectstring to connect back to ourselves */
+ char connect_str[20];
+ if(!opt_connect_str) {
+ snprintf(connect_str,20,"localhost:%u",glob->mgmObject->getPort());
+ opt_connect_str= connect_str;
+ }
+ glob->mgmObject->set_connect_string(opt_connect_str);
+
if(!glob->mgmObject->check_start()){
ndbout_c("Unable to check start management server.");
ndbout_c("Probably caused by illegal initial configuration file.");
goto error_end;
}
+ /*
+ * Connect back to ourselves so we can use mgmapi to fetch
+ * config info
+ */
+ int mgm_connect_result;
+ mgm_connect_result = glob->mgmObject->get_config_retriever()->
+ do_connect(0,0,0);
+
+ if(mgm_connect_result<0) {
+ ndbout_c("Unable to connect to our own ndb_mgmd (Error %d)",
+ mgm_connect_result);
+ ndbout_c("This is probably a bug.");
+ }
+
if (opt_daemon) {
// Become a daemon
char *lockfile= NdbConfig_PidFileName(glob->localNodeId);
@@ -304,17 +347,23 @@ int main(int argc, char** argv)
#if ! defined NDB_OSE && ! defined NDB_SOFTOSE
if(opt_interactive) {
- CommandInterpreter com(* glob->mgmObject);
- while(com.readAndExecute());
+ BaseString con_str;
+ if(glob->interface_name)
+ con_str.appfmt("host=%s:%d", glob->interface_name, glob->port);
+ else
+ con_str.appfmt("localhost:%d", glob->port);
+ Ndb_mgmclient com(con_str.c_str(), 1);
+ while(g_StopServer != true && read_and_execute(&com, "ndb_mgm> ", 1));
} else
#endif
- {
- while(g_StopServer != true)
- NdbSleep_MilliSleep(500);
- }
+ {
+ while(g_StopServer != true)
+ NdbSleep_MilliSleep(500);
+ }
g_eventLogger.info("Shutting down server...");
glob->socketServer->stopServer();
+ glob->mgmObject->get_config_retriever()->disconnect();
glob->socketServer->stopSessions(true);
g_eventLogger.info("Shutdown complete");
the_end:
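With the embedded Ndb_mgmclient, an interactive ndb_mgmd now builds a connectstring that points back at its own port and drives the client in a read-eval loop. A minimal sketch of that flow; Client is a stand-in for Ndb_mgmclient, and the fgets loop stands in for the readline handling above.

#include <cstdio>
#include <cstring>
#include <string>

struct Client {                        // illustrative stand-in for Ndb_mgmclient
  explicit Client(const std::string &cs) { std::printf("connect to %s\n", cs.c_str()); }
  bool execute(const char *line) { return line && std::string(line) != "QUIT"; }
};

int main()
{
  unsigned port = 1186;                // would come from MgmtSrvr::getPort()
  char connect_str[20];
  std::snprintf(connect_str, sizeof(connect_str), "localhost:%u", port);

  Client com(connect_str);
  char buf[256];
  while (std::fgets(buf, sizeof(buf), stdin) != NULL) {
    buf[std::strcspn(buf, "\r\n")] = '\0';   // strip the newline
    if (!com.execute(buf))
      break;                                 // QUIT ends the loop
  }
  return 0;
}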
diff --git a/ndb/src/ndbapi/ClusterMgr.cpp b/ndb/src/ndbapi/ClusterMgr.cpp
index 543bdf1155e..42b7c5b115d 100644
--- a/ndb/src/ndbapi/ClusterMgr.cpp
+++ b/ndb/src/ndbapi/ClusterMgr.cpp
@@ -214,7 +214,7 @@ ClusterMgr::threadMain( ){
* It is now time to send a new Heartbeat
*/
if (theNode.hbCounter >= theNode.hbFrequency) {
- theNode.hbSent++;
+ theNode.m_info.m_heartbeat_cnt++;
theNode.hbCounter = 0;
}
@@ -231,7 +231,7 @@ ClusterMgr::threadMain( ){
theFacade.sendSignalUnCond(&signal, nodeId);
}//if
- if (theNode.hbSent == 4 && theNode.hbFrequency > 0){
+ if (theNode.m_info.m_heartbeat_cnt == 4 && theNode.hbFrequency > 0){
reportNodeFailed(i);
}//if
}
@@ -265,6 +265,7 @@ ClusterMgr::Node::Node()
: m_state(NodeState::SL_NOTHING) {
compatible = nfCompleteRep = true;
connected = defined = m_alive = false;
+ m_state.m_connected_nodes.clear();
}
/******************************************************************************
@@ -336,7 +337,7 @@ ClusterMgr::execAPI_REGCONF(const Uint32 * theData){
node.compatible = ndbCompatible_api_ndb(NDB_VERSION,
node.m_info.m_version);
}
-
+
node.m_state = apiRegConf->nodeState;
if (node.compatible && (node.m_state.startLevel == NodeState::SL_STARTED ||
node.m_state.startLevel == NodeState::SL_SINGLEUSER)){
@@ -344,7 +345,7 @@ ClusterMgr::execAPI_REGCONF(const Uint32 * theData){
} else {
set_node_alive(node, false);
}//if
- node.hbSent = 0;
+ node.m_info.m_heartbeat_cnt = 0;
node.hbCounter = 0;
if (node.m_info.m_type != NodeInfo::REP) {
node.hbFrequency = (apiRegConf->apiHeartbeatFrequency * 10) - 50;
@@ -413,9 +414,15 @@ ClusterMgr::reportConnected(NodeId nodeId){
Node & theNode = theNodes[nodeId];
theNode.connected = true;
- theNode.hbSent = 0;
+ theNode.m_info.m_heartbeat_cnt = 0;
theNode.hbCounter = 0;
-
+
+ /**
+ * make sure the node itself is marked connected even
+ * if first API_REGCONF has not arrived
+ */
+ theNode.m_state.m_connected_nodes.set(nodeId);
+
if (theNode.m_info.m_type != NodeInfo::REP) {
theNode.hbFrequency = 0;
}
@@ -433,7 +440,9 @@ ClusterMgr::reportDisconnected(NodeId nodeId){
noOfConnectedNodes--;
theNodes[nodeId].connected = false;
- theNodes[nodeId].m_info.m_connectCount ++;
+
+ theNodes[nodeId].m_state.m_connected_nodes.clear();
+
reportNodeFailed(nodeId);
}
@@ -443,19 +452,23 @@ ClusterMgr::reportNodeFailed(NodeId nodeId){
Node & theNode = theNodes[nodeId];
set_node_alive(theNode, false);
+ theNode.m_info.m_connectCount ++;
+
if(theNode.connected)
+ {
theFacade.doDisconnect(nodeId);
-
+ }
const bool report = (theNode.m_state.startLevel != NodeState::SL_NOTHING);
theNode.m_state.startLevel = NodeState::SL_NOTHING;
- if(report){
+ if(report)
+ {
theFacade.ReportNodeDead(nodeId);
- }
-
- theNode.nfCompleteRep = false;
+ }
- if(noOfAliveNodes == 0){
+ theNode.nfCompleteRep = false;
+ if(noOfAliveNodes == 0)
+ {
NFCompleteRep rep;
for(Uint32 i = 1; i<MAX_NODES; i++){
if(theNodes[i].defined && theNodes[i].nfCompleteRep == false){
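The ClusterMgr changes move the unanswered-heartbeat counter into NodeInfo (m_info.m_heartbeat_cnt): it is bumped each time a heartbeat is sent without an answer, cleared by hb_received()/API_REGCONF, and four misses mark the node as failed. A toy sketch of that bookkeeping; NodeProbe is illustrative.

#include <iostream>

struct NodeProbe {
  unsigned heartbeat_cnt = 0;   // heartbeats sent without an answer
  bool alive = true;

  void heartbeat_sent() {
    if (++heartbeat_cnt == 4)   // same threshold as ClusterMgr::threadMain
      alive = false;            // reportNodeFailed() in the real code
  }
  void hb_received() { heartbeat_cnt = 0; }   // reply arrived, reset
};

int main()
{
  NodeProbe n;
  n.heartbeat_sent(); n.hb_received();        // answered: still alive
  for (int i = 0; i < 4; i++) n.heartbeat_sent();
  std::cout << (n.alive ? "alive" : "failed") << "\n";   // prints "failed"
  return 0;
}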
diff --git a/ndb/src/ndbapi/ClusterMgr.hpp b/ndb/src/ndbapi/ClusterMgr.hpp
index d75b820e9cb..da8f16d6789 100644
--- a/ndb/src/ndbapi/ClusterMgr.hpp
+++ b/ndb/src/ndbapi/ClusterMgr.hpp
@@ -73,12 +73,12 @@ public:
*/
Uint32 hbFrequency; // Heartbeat frequence
Uint32 hbCounter; // # milliseconds passed since last hb sent
- Uint32 hbSent; // # heartbeats sent (without answer)
};
const Node & getNodeInfo(NodeId) const;
Uint32 getNoOfConnectedNodes() const;
-
+ void hb_received(NodeId);
+
private:
Uint32 noOfAliveNodes;
Uint32 noOfConnectedNodes;
@@ -128,6 +128,12 @@ ClusterMgr::getNoOfConnectedNodes() const {
return noOfConnectedNodes;
}
+inline
+void
+ClusterMgr::hb_received(NodeId nodeId) {
+ theNodes[nodeId].m_info.m_heartbeat_cnt= 0;
+}
+
/*****************************************************************************/
/**
diff --git a/ndb/src/ndbapi/DictCache.cpp b/ndb/src/ndbapi/DictCache.cpp
index ca361e900b1..57d9b361522 100644
--- a/ndb/src/ndbapi/DictCache.cpp
+++ b/ndb/src/ndbapi/DictCache.cpp
@@ -83,11 +83,14 @@ LocalDictCache::drop(const char * name){
* Global cache
*/
GlobalDictCache::GlobalDictCache(){
+ DBUG_ENTER("GlobalDictCache::GlobalDictCache");
m_tableHash.createHashTable();
m_waitForTableCondition = NdbCondition_Create();
+ DBUG_VOID_RETURN;
}
GlobalDictCache::~GlobalDictCache(){
+ DBUG_ENTER("GlobalDictCache::~GlobalDictCache");
NdbElement_t<Vector<TableVersion> > * curr = m_tableHash.getNext(0);
while(curr != 0){
Vector<TableVersion> * vers = curr->theData;
@@ -97,20 +100,52 @@ GlobalDictCache::~GlobalDictCache(){
delete (* vers)[i].m_impl;
}
delete curr->theData;
+ curr->theData= NULL;
curr = m_tableHash.getNext(curr);
}
-
m_tableHash.releaseHashTable();
NdbCondition_Destroy(m_waitForTableCondition);
+ DBUG_VOID_RETURN;
}
-#include <NdbOut.hpp>
+void GlobalDictCache::printCache()
+{
+ DBUG_ENTER("GlobalDictCache::printCache");
+ NdbElement_t<Vector<TableVersion> > * curr = m_tableHash.getNext(0);
+ while(curr != 0){
+ DBUG_PRINT("curr", ("len: %d, hash: %d, lk: %d, str: %s",
+ curr->len, curr->hash, curr->localkey1, curr->str));
+ if (curr->theData){
+ Vector<TableVersion> * vers = curr->theData;
+ const unsigned sz = vers->size();
+ for(unsigned i = 0; i<sz ; i++){
+ TableVersion tv= (*vers)[i];
+ DBUG_PRINT(" ", ("vers[%d]: ver: %d, refCount: %d, status: %d",
+ sz, tv.m_version, tv.m_refCount, tv.m_status));
+ if(tv.m_impl != 0)
+ {
+ DBUG_PRINT(" ", ("m_impl: internalname: %s",
+ tv.m_impl->m_internalName.c_str()));
+ }
+ }
+ }
+ else
+ {
+ DBUG_PRINT(" ", ("NULL"));
+ }
+ curr = m_tableHash.getNext(curr);
+ }
+ DBUG_VOID_RETURN;
+}
-NdbTableImpl *
+NdbTableImpl *
GlobalDictCache::get(const char * name)
{
+ DBUG_ENTER("GlobalDictCache::get");
+ DBUG_PRINT("enter", ("name: %s", name));
+
const Uint32 len = strlen(name);
- Vector<TableVersion> * versions = 0;
+ Vector<TableVersion> * versions = 0;
versions = m_tableHash.getData(name, len);
if(versions == 0){
versions = new Vector<TableVersion>(2);
@@ -125,7 +160,7 @@ GlobalDictCache::get(const char * name)
switch(ver->m_status){
case OK:
ver->m_refCount++;
- return ver->m_impl;
+ DBUG_RETURN(ver->m_impl);
case DROPPED:
retreive = true; // Break loop
break;
@@ -144,24 +179,28 @@ GlobalDictCache::get(const char * name)
tmp.m_status = RETREIVING;
tmp.m_refCount = 1; // The one retreiving it
versions->push_back(tmp);
- return 0;
+ DBUG_RETURN(0);
}
NdbTableImpl *
GlobalDictCache::put(const char * name, NdbTableImpl * tab)
{
+ DBUG_ENTER("GlobalDictCache::put");
+ DBUG_PRINT("enter", ("name: %s, internal_name: %s",
+ name, tab ? tab->m_internalName.c_str() : "tab NULL"));
+
const Uint32 len = strlen(name);
Vector<TableVersion> * vers = m_tableHash.getData(name, len);
if(vers == 0){
// Should always tried to retreive it first
- // and then there should be a record
+ // and thus there should be a record
abort();
}
const Uint32 sz = vers->size();
if(sz == 0){
// Should always tried to retreive it first
- // and then there should be a record
+ // and thus there should be a record
abort();
}
@@ -176,7 +215,7 @@ GlobalDictCache::put(const char * name, NdbTableImpl * tab)
if(tab == 0)
{
- // No table found in db
+ DBUG_PRINT("info", ("No table found in db"));
vers->erase(sz - 1);
}
else if (ver.m_impl == 0) {
@@ -203,74 +242,84 @@ GlobalDictCache::put(const char * name, NdbTableImpl * tab)
abort();
}
NdbCondition_Broadcast(m_waitForTableCondition);
- return tab;
+ DBUG_RETURN(tab);
}
void
GlobalDictCache::drop(NdbTableImpl * tab)
{
+ DBUG_ENTER("GlobalDictCache::drop");
+ DBUG_PRINT("enter", ("internal_name: %s", tab->m_internalName.c_str()));
+
unsigned i;
const Uint32 len = strlen(tab->m_internalName.c_str());
Vector<TableVersion> * vers =
m_tableHash.getData(tab->m_internalName.c_str(), len);
if(vers == 0){
// Should always tried to retreive it first
- // and then there should be a record
+ // and thus there should be a record
abort();
}
const Uint32 sz = vers->size();
if(sz == 0){
// Should always tried to retreive it first
- // and then there should be a record
+ // and thus there should be a record
abort();
}
-
+
for(i = 0; i < sz; i++){
TableVersion & ver = (* vers)[i];
if(ver.m_impl == tab){
- if(ver.m_refCount == 0 || ver.m_status == RETREIVING ||
+ if(ver.m_refCount == 0 || ver.m_status == RETREIVING ||
ver.m_version != tab->m_version){
- ndbout_c("Dropping with refCount=%d status=%d impl=%p",
- ver.m_refCount, ver.m_status, ver.m_impl);
+ DBUG_PRINT("info", ("Dropping with refCount=%d status=%d impl=%p",
+ ver.m_refCount, ver.m_status, ver.m_impl));
break;
}
-
+ DBUG_PRINT("info", ("Found table to drop, i: %d, name: %s",
+ i, ver.m_impl->m_internalName.c_str()));
ver.m_refCount--;
ver.m_status = DROPPED;
if(ver.m_refCount == 0){
+ DBUG_PRINT("info", ("refCount is zero, deleting m_impl"))
delete ver.m_impl;
vers->erase(i);
}
- return;
+ DBUG_VOID_RETURN;
}
}
-
+
for(i = 0; i<sz; i++){
TableVersion & ver = (* vers)[i];
- ndbout_c("%d: version: %d refCount: %d status: %d impl: %p",
- i, ver.m_version, ver.m_refCount, ver.m_status, ver.m_impl);
+ DBUG_PRINT("info", ("%d: version: %d refCount: %d status: %d impl: %p",
+ i, ver.m_version, ver.m_refCount,
+ ver.m_status, ver.m_impl));
}
abort();
}
void
-GlobalDictCache::release(NdbTableImpl * tab){
+GlobalDictCache::release(NdbTableImpl * tab)
+{
+ DBUG_ENTER("GlobalDictCache::release");
+ DBUG_PRINT("enter", ("internal_name: %s", tab->m_internalName.c_str()));
+
unsigned i;
const Uint32 len = strlen(tab->m_internalName.c_str());
Vector<TableVersion> * vers =
m_tableHash.getData(tab->m_internalName.c_str(), len);
if(vers == 0){
// Should always tried to retreive it first
- // and then there should be a record
+ // and thus there should be a record
abort();
}
const Uint32 sz = vers->size();
if(sz == 0){
// Should always tried to retreive it first
- // and then there should be a record
+ // and thus there should be a record
abort();
}
@@ -279,20 +328,21 @@ GlobalDictCache::release(NdbTableImpl * tab){
if(ver.m_impl == tab){
if(ver.m_refCount == 0 || ver.m_status == RETREIVING ||
ver.m_version != tab->m_version){
- ndbout_c("Releasing with refCount=%d status=%d impl=%p",
- ver.m_refCount, ver.m_status, ver.m_impl);
+ DBUG_PRINT("info", ("Releasing with refCount=%d status=%d impl=%p",
+ ver.m_refCount, ver.m_status, ver.m_impl));
break;
}
ver.m_refCount--;
- return;
+ DBUG_VOID_RETURN;
}
}
for(i = 0; i<sz; i++){
TableVersion & ver = (* vers)[i];
- ndbout_c("%d: version: %d refCount: %d status: %d impl: %p",
- i, ver.m_version, ver.m_refCount, ver.m_status, ver.m_impl);
+ DBUG_PRINT("info", ("%d: version: %d refCount: %d status: %d impl: %p",
+ i, ver.m_version, ver.m_refCount,
+ ver.m_status, ver.m_impl));
}
abort();
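GlobalDictCache hands out reference-counted table versions: get() bumps the count on a cached entry, put() installs a freshly retrieved one, and drop()/release() decrement until the last user of a dropped entry deletes it. A much simplified, single-threaded sketch of that life cycle; the real cache also tracks the RETREIVING state, keeps multiple versions per name, and wakes waiters on a condition variable.

#include <iostream>
#include <map>
#include <string>

struct Entry { int refCount = 0; bool dropped = false; std::string impl; };

class MiniCache {
public:
  // get(): hand out the cached object and bump the reference count
  Entry *get(const std::string &name) {
    std::map<std::string, Entry>::iterator it = m_map.find(name);
    if (it == m_map.end() || it->second.dropped) return 0;
    it->second.refCount++;
    return &it->second;
  }
  void put(const std::string &name, const std::string &impl) {
    Entry e; e.refCount = 1; e.impl = impl;   // the retriever holds one reference
    m_map[name] = e;
  }
  // release(): last user of a dropped entry deletes it
  void release(const std::string &name) {
    Entry &e = m_map[name];
    if (--e.refCount == 0 && e.dropped) m_map.erase(name);
  }
  void drop(const std::string &name) { m_map[name].dropped = true; release(name); }
private:
  std::map<std::string, Entry> m_map;
};

int main()
{
  MiniCache c;
  c.put("db/def/t1", "table impl");
  Entry *e = c.get("db/def/t1");
  std::cout << (e ? e->impl : "miss") << "\n";
  c.release("db/def/t1");     // drops the reference taken by get()
  c.drop("db/def/t1");        // drops put()'s reference; count hits 0, entry removed
  return 0;
}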
diff --git a/ndb/src/ndbapi/DictCache.hpp b/ndb/src/ndbapi/DictCache.hpp
index 7f2ee457476..2df6a139542 100644
--- a/ndb/src/ndbapi/DictCache.hpp
+++ b/ndb/src/ndbapi/DictCache.hpp
@@ -79,6 +79,8 @@ public:
};
private:
+ void printCache();
+
struct TableVersion {
Uint32 m_version;
Uint32 m_refCount;
diff --git a/ndb/src/ndbapi/Makefile.am b/ndb/src/ndbapi/Makefile.am
index 1ba80ef7d85..99b75ffbd53 100644
--- a/ndb/src/ndbapi/Makefile.am
+++ b/ndb/src/ndbapi/Makefile.am
@@ -14,15 +14,14 @@ libndbapi_la_SOURCES = \
Ndberr.cpp \
ndberror.c \
NdbErrorOut.cpp \
- NdbConnection.cpp \
- NdbConnectionScan.cpp \
+ NdbTransaction.cpp \
+ NdbTransactionScan.cpp \
NdbOperation.cpp \
NdbOperationSearch.cpp \
NdbOperationScan.cpp \
NdbOperationInt.cpp \
NdbOperationDefine.cpp \
NdbOperationExec.cpp \
- NdbResultSet.cpp \
NdbScanOperation.cpp NdbScanFilter.cpp \
NdbIndexOperation.cpp \
NdbEventOperation.cpp \
diff --git a/ndb/src/ndbapi/Ndb.cpp b/ndb/src/ndbapi/Ndb.cpp
index 1ae1030e463..7893aaae15c 100644
--- a/ndb/src/ndbapi/Ndb.cpp
+++ b/ndb/src/ndbapi/Ndb.cpp
@@ -27,7 +27,7 @@ Name: Ndb.cpp
#include "NdbApiSignal.hpp"
#include "NdbImpl.hpp"
#include <NdbOperation.hpp>
-#include <NdbConnection.hpp>
+#include <NdbTransaction.hpp>
#include <NdbEventOperation.hpp>
#include <NdbRecAttr.hpp>
#include <md5_hash.hpp>
@@ -43,11 +43,13 @@ void connect();
Connect to any node which has no connection at the moment.
****************************************************************************/
-NdbConnection* Ndb::doConnect(Uint32 tConNode)
+NdbTransaction* Ndb::doConnect(Uint32 tConNode)
{
Uint32 tNode;
Uint32 tAnyAlive = 0;
- int TretCode;
+ int TretCode= 0;
+
+ DBUG_ENTER("Ndb::doConnect");
if (tConNode != 0) {
TretCode = NDB_connect(tConNode);
@@ -55,7 +57,7 @@ NdbConnection* Ndb::doConnect(Uint32 tConNode)
//****************************************************************************
// We have connections now to the desired node. Return
//****************************************************************************
- return getConnectedNdbConnection(tConNode);
+ DBUG_RETURN(getConnectedNdbTransaction(tConNode));
} else if (TretCode != 0) {
tAnyAlive = 1;
}//if
@@ -78,10 +80,13 @@ NdbConnection* Ndb::doConnect(Uint32 tConNode)
//****************************************************************************
// We have connections now to the desired node. Return
//****************************************************************************
- return getConnectedNdbConnection(tNode);
+ DBUG_RETURN(getConnectedNdbTransaction(tNode));
} else if (TretCode != 0) {
tAnyAlive= 1;
}//if
+ DBUG_PRINT("info",("tried node %d, TretCode %d, error code %d, %s",
+ tNode, TretCode, getNdbError().code,
+ getNdbError().message));
}
}
else // just do a regular round robin
@@ -103,10 +108,11 @@ NdbConnection* Ndb::doConnect(Uint32 tConNode)
//****************************************************************************
// We have connections now to the desired node. Return
//****************************************************************************
- return getConnectedNdbConnection(tNode);
+ DBUG_RETURN(getConnectedNdbTransaction(tNode));
} else if (TretCode != 0) {
tAnyAlive= 1;
}//if
+ DBUG_PRINT("info",("tried node %d TretCode %d", tNode, TretCode));
} while (Tcount < tNoOfDbNodes);
}
//****************************************************************************
@@ -121,7 +127,7 @@ NdbConnection* Ndb::doConnect(Uint32 tConNode)
} else {
theError.code = 4009;
}//if
- return NULL;
+ DBUG_RETURN(NULL);
}
int
@@ -134,36 +140,38 @@ Ndb::NDB_connect(Uint32 tNode)
int tReturnCode;
TransporterFacade *tp = TransporterFacade::instance();
+ DBUG_ENTER("Ndb::NDB_connect");
+
bool nodeAvail = tp->get_node_alive(tNode);
if(nodeAvail == false){
- return 0;
+ DBUG_RETURN(0);
}
- NdbConnection * tConArray = theConnectionArray[tNode];
+ NdbTransaction * tConArray = theConnectionArray[tNode];
if (tConArray != NULL) {
- return 2;
+ DBUG_RETURN(2);
}
- NdbConnection * tNdbCon = getNdbCon(); // Get free connection object.
+ NdbTransaction * tNdbCon = getNdbCon(); // Get free connection object.
if (tNdbCon == NULL) {
- return 4;
+ DBUG_RETURN(4);
}//if
NdbApiSignal* tSignal = getSignal(); // Get signal object
if (tSignal == NULL) {
releaseNdbCon(tNdbCon);
- return 4;
+ DBUG_RETURN(4);
}//if
if (tSignal->setSignal(GSN_TCSEIZEREQ) == -1) {
releaseNdbCon(tNdbCon);
releaseSignal(tSignal);
- return 4;
+ DBUG_RETURN(4);
}//if
tSignal->setData(tNdbCon->ptr2int(), 1);
//************************************************
-// Set connection pointer as NdbConnection object
+// Set connection pointer as NdbTransaction object
//************************************************
tSignal->setData(theMyRef, 2); // Set my block reference
- tNdbCon->Status(NdbConnection::Connecting); // Set status to connecting
+ tNdbCon->Status(NdbTransaction::Connecting); // Set status to connecting
Uint32 nodeSequence;
{ // send and receive signal
Guard guard(tp->theMutexPtr);
@@ -182,34 +190,37 @@ Ndb::NDB_connect(Uint32 tNode)
tReturnCode = -1;
}//if
}
- if ((tReturnCode == 0) && (tNdbCon->Status() == NdbConnection::Connected)) {
+ if ((tReturnCode == 0) && (tNdbCon->Status() == NdbTransaction::Connected)) {
//************************************************
// Send and receive was successful
//************************************************
- NdbConnection* tPrevFirst = theConnectionArray[tNode];
+ NdbTransaction* tPrevFirst = theConnectionArray[tNode];
tNdbCon->setConnectedNodeId(tNode, nodeSequence);
tNdbCon->setMyBlockReference(theMyRef);
theConnectionArray[tNode] = tNdbCon;
tNdbCon->theNext = tPrevFirst;
- return 1;
+ DBUG_RETURN(1);
} else {
releaseNdbCon(tNdbCon);
//****************************************************************************
// Unsuccessful connect is indicated by 3.
//****************************************************************************
- return 3;
+ DBUG_PRINT("info",
+ ("unsuccessful connect tReturnCode %d, tNdbCon->Status() %d",
+ tReturnCode, tNdbCon->Status()));
+ DBUG_RETURN(3);
}//if
}//Ndb::NDB_connect()
-NdbConnection *
-Ndb::getConnectedNdbConnection(Uint32 nodeId){
- NdbConnection* next = theConnectionArray[nodeId];
+NdbTransaction *
+Ndb::getConnectedNdbTransaction(Uint32 nodeId){
+ NdbTransaction* next = theConnectionArray[nodeId];
theConnectionArray[nodeId] = next->theNext;
next->theNext = NULL;
return next;
-}//Ndb::getConnectedNdbConnection()
+}//Ndb::getConnectedNdbTransaction()
/*****************************************************************************
disconnect();
@@ -219,9 +230,10 @@ Remark: Disconnect all connections to the database.
void
Ndb::doDisconnect()
{
- DBUG_ENTER("Ndb::doDisconnect");
- NdbConnection* tNdbCon;
+ NdbTransaction* tNdbCon;
CHECK_STATUS_MACRO_VOID;
+ /* DBUG_ENTER must be after CHECK_STATUS_MACRO_VOID because of 'return' */
+ DBUG_ENTER("Ndb::doDisconnect");
Uint32 tNoOfDbNodes = theImpl->theNoOfDBnodes;
Uint8 *theDBnodes= theImpl->theDBnodes;
@@ -231,14 +243,14 @@ Ndb::doDisconnect()
Uint32 tNode = theDBnodes[i];
tNdbCon = theConnectionArray[tNode];
while (tNdbCon != NULL) {
- NdbConnection* tmpNdbCon = tNdbCon;
+ NdbTransaction* tmpNdbCon = tNdbCon;
tNdbCon = tNdbCon->theNext;
releaseConnectToNdb(tmpNdbCon);
}//while
}//for
tNdbCon = theTransactionList;
while (tNdbCon != NULL) {
- NdbConnection* tmpNdbCon = tNdbCon;
+ NdbTransaction* tmpNdbCon = tNdbCon;
tNdbCon = tNdbCon->theNext;
releaseConnectToNdb(tmpNdbCon);
}//while
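The doDisconnect hunks above move DBUG_ENTER below CHECK_STATUS_MACRO_VOID because that macro can exit through a bare return, and every DBUG_ENTER must be balanced by a DBUG_RETURN or DBUG_VOID_RETURN before the function leaves. A minimal stand-alone sketch of that invariant, using a hypothetical RAII tracer in place of the real (non-RAII) dbug macros:

    #include <cstdio>

    // Hypothetical stand-in for the dbug call stack: DBUG_ENTER pushes a frame,
    // DBUG_RETURN / DBUG_VOID_RETURN pops it. A bare return (as hidden inside
    // CHECK_STATUS_MACRO_VOID) would leave a frame behind, which is why the
    // patch moves DBUG_ENTER below that macro.
    static int dbug_depth = 0;

    struct DbugFrame {
      const char* func;
      explicit DbugFrame(const char* f) : func(f) {
        std::printf("%*s>%s\n", 2 * dbug_depth++, "", func);
      }
      ~DbugFrame() {
        std::printf("%*s<%s\n", 2 * --dbug_depth, "", func);
      }
    };

    static bool status_ok = false;          // models theInitState being initialised

    static void do_disconnect_sketch() {
      if (!status_ok)                       // models CHECK_STATUS_MACRO_VOID
        return;                             // safe: no frame has been pushed yet
      DbugFrame frame("Ndb::doDisconnect"); // models DBUG_ENTER after the check
      // ... release the per-node connection lists here ...
      (void)frame;
    }                                       // frame popped on every later exit path

    int main() {
      do_disconnect_sketch();               // early return, nothing traced
      status_ok = true;
      do_disconnect_sketch();               // traced enter/leave pair
      return 0;
    }

With the frame created only after the status check, the early-return path never touches the trace stack, which is exactly the ordering the patch enforces.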
@@ -292,34 +304,58 @@ Ndb::waitUntilReady(int timeout)
}
/*****************************************************************************
-NdbConnection* startTransaction();
+NdbTransaction* startTransaction();
Return Value: Returns a pointer to a connection object.
Return NULL otherwise.
Remark: Start transaction. Synchronous.
*****************************************************************************/
-NdbConnection*
-Ndb::startTransaction(Uint32 aPriority, const char * keyData, Uint32 keyLen)
+NdbTransaction*
+Ndb::startTransaction(const NdbDictionary::Table *table,
+ const char * keyData, Uint32 keyLen)
{
DBUG_ENTER("Ndb::startTransaction");
if (theInitState == Initialised) {
theError.code = 0;
checkFailedNode();
- /**
- * If the user supplied key data
- * We will make a qualified quess to which node is the primary for the
- * the fragment and contact that node
- */
+ /**
+ * If the user supplied key data,
+ * we will make a qualified guess as to which node is the primary for
+ * the fragment and contact that node
+ */
Uint32 nodeId;
- if(keyData != 0) {
- nodeId = 0; // guess not supported
- // nodeId = m_ndb_cluster_connection->guess_primary_node(keyData, keyLen);
+ NdbTableImpl* impl;
+ if(table != 0 && keyData != 0 && (impl= &NdbTableImpl::getImpl(*table)))
+ {
+ Uint32 hashValue;
+ {
+ Uint32 buf[4];
+ if((UintPtr(keyData) & 7) == 0 && (keyLen & 3) == 0)
+ {
+ md5_hash(buf, (const Uint64*)keyData, keyLen >> 2);
+ }
+ else
+ {
+ Uint64 tmp[1000];
+ tmp[keyLen/8] = 0;
+ memcpy(tmp, keyData, keyLen);
+ md5_hash(buf, tmp, (keyLen+3) >> 2);
+ }
+ hashValue= buf[1];
+ }
+ const Uint16 *nodes;
+ Uint32 cnt= impl->get_nodes(hashValue, &nodes);
+ if(cnt)
+ nodeId= nodes[0];
+ else
+ nodeId= 0;
} else {
nodeId = 0;
}//if
+
{
- NdbConnection *trans= startTransactionLocal(aPriority, nodeId);
+ NdbTransaction *trans= startTransactionLocal(0, nodeId);
DBUG_PRINT("exit",("start trans: 0x%x transid: 0x%llx",
trans, trans ? trans->getTransactionId() : 0));
DBUG_RETURN(trans);
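The startTransaction change above guesses the transaction coordinator from the supplied key: it md5-hashes the key data (in place when the buffer is 8-byte aligned and a whole number of 32-bit words long, otherwise via a zero-padded copy) and then asks NdbTableImpl::get_nodes for the fragment's nodes, a lookup omitted here. A small stand-alone sketch of just that alignment/padding decision; toy_hash is a runnable placeholder for the cluster's md5_hash, and the length guard on the temporary buffer is an addition of the sketch, not part of the patch:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Runnable placeholder for the cluster's
    // md5_hash(Uint32 dst[4], const Uint64* src, Uint32 len_in_32bit_words).
    static void toy_hash(uint32_t dst[4], const uint64_t* src, uint32_t len_in_words) {
      const unsigned char* p = reinterpret_cast<const unsigned char*>(src);
      uint64_t acc = 0x9e3779b97f4a7c15ULL;
      for (uint32_t i = 0; i < 4 * len_in_words; i++)
        acc = (acc ^ p[i]) * 0xbf58476d1ce4e5b9ULL;
      dst[0] = (uint32_t)acc;   dst[1] = (uint32_t)(acc >> 32);
      dst[2] = dst[0] ^ dst[1]; dst[3] = ~dst[0];
    }

    // Mirrors the patch: hash in place when the key is 8-byte aligned and a whole
    // number of 32-bit words long, otherwise copy into a zero-padded buffer first.
    static uint32_t hash_key(const char* keyData, uint32_t keyLen) {
      uint32_t buf[4];
      if ((reinterpret_cast<uintptr_t>(keyData) & 7) == 0 && (keyLen & 3) == 0) {
        toy_hash(buf, reinterpret_cast<const uint64_t*>(keyData), keyLen >> 2);
      } else {
        uint64_t tmp[1000];                      // same fixed size as the patch
        if (keyLen > sizeof(tmp) - 8) return 0;  // added guard: treat as "no guess"
        tmp[keyLen / 8] = 0;                     // zero the word the tail lands in
        std::memcpy(tmp, keyData, keyLen);
        toy_hash(buf, tmp, (keyLen + 3) >> 2);
      }
      return buf[1];                             // the patch uses word 1 as hashValue
    }

    int main() {
      const char key[] = "example-key";
      std::printf("hashValue=%u\n", hash_key(key, sizeof(key) - 1));
      return 0;
    }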
@@ -330,15 +366,15 @@ Ndb::startTransaction(Uint32 aPriority, const char * keyData, Uint32 keyLen)
}//Ndb::startTransaction()
/*****************************************************************************
-NdbConnection* hupp(NdbConnection* pBuddyTrans);
+NdbTransaction* hupp(NdbTransaction* pBuddyTrans);
Return Value: Returns a pointer to a connection object.
Connected to the same node as pBuddyTrans
and also using the same transaction id
Remark: Start transaction. Synchronous.
*****************************************************************************/
-NdbConnection*
-Ndb::hupp(NdbConnection* pBuddyTrans)
+NdbTransaction*
+Ndb::hupp(NdbTransaction* pBuddyTrans)
{
DBUG_ENTER("Ndb::hupp");
@@ -354,7 +390,7 @@ Ndb::hupp(NdbConnection* pBuddyTrans)
checkFailedNode();
Uint32 nodeId = pBuddyTrans->getConnectedNodeId();
- NdbConnection* pCon = startTransactionLocal(aPriority, nodeId);
+ NdbTransaction* pCon = startTransactionLocal(aPriority, nodeId);
if(pCon == NULL)
DBUG_RETURN(NULL);
@@ -375,8 +411,7 @@ Ndb::hupp(NdbConnection* pBuddyTrans)
}//if
}//Ndb::hupp()
-
-NdbConnection*
+NdbTransaction*
Ndb::startTransactionLocal(Uint32 aPriority, Uint32 nodeId)
{
#ifdef VM_TRACE
@@ -390,13 +425,21 @@ Ndb::startTransactionLocal(Uint32 aPriority, Uint32 nodeId)
DBUG_ENTER("Ndb::startTransactionLocal");
DBUG_PRINT("enter", ("nodeid: %d", nodeId));
- NdbConnection* tConnection;
+ if(unlikely(theRemainingStartTransactions == 0))
+ {
+ theError.code = 4006;
+ DBUG_RETURN(0);
+ }
+
+ NdbTransaction* tConnection;
Uint64 tFirstTransId = theFirstTransId;
tConnection = doConnect(nodeId);
if (tConnection == NULL) {
DBUG_RETURN(NULL);
}//if
- NdbConnection* tConNext = theTransactionList;
+
+ theRemainingStartTransactions--;
+ NdbTransaction* tConNext = theTransactionList;
tConnection->init();
theTransactionList = tConnection; // into a transaction list.
tConnection->next(tConNext); // Add the active connection object
@@ -412,7 +455,7 @@ Ndb::startTransactionLocal(Uint32 aPriority, Uint32 nodeId)
theFirstTransId = tFirstTransId + 1;
}//if
#ifdef VM_TRACE
- if (tConnection->theListState != NdbConnection::NotInList) {
+ if (tConnection->theListState != NdbTransaction::NotInList) {
printState("startTransactionLocal %x", tConnection);
abort();
}
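startTransactionLocal now refuses to hand out a transaction once theRemainingStartTransactions reaches zero, setting error code 4006, and closeTransaction (further down) returns the slot with theRemainingStartTransactions++. A minimal sketch of that counting pattern, with the budget and error code modelled as plain members:

    #include <cstdio>

    // Toy model of the budget added by the patch: decremented when a
    // transaction is started, incremented again when it is closed.
    struct TxnBudget {
      unsigned remaining;
      int last_error;
    };

    static bool start_txn(TxnBudget& b) {
      if (b.remaining == 0) {        // matches the unlikely() check in the patch
        b.last_error = 4006;         // error code set by startTransactionLocal
        return false;
      }
      --b.remaining;
      return true;
    }

    static void close_txn(TxnBudget& b) {
      ++b.remaining;                 // matches theRemainingStartTransactions++
    }

    int main() {
      TxnBudget b{2, 0};
      bool a = start_txn(b), c = start_txn(b), d = start_txn(b);
      std::printf("%d %d %d (error %d)\n", a, c, d, b.last_error);  // 1 1 0 (error 4006)
      close_txn(b);
      std::printf("%d\n", start_txn(b));                            // 1
      return 0;
    }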
@@ -421,17 +464,17 @@ Ndb::startTransactionLocal(Uint32 aPriority, Uint32 nodeId)
}//Ndb::startTransactionLocal()
/*****************************************************************************
-void closeTransaction(NdbConnection* aConnection);
+void closeTransaction(NdbTransaction* aConnection);
Parameters: aConnection: the connection used in the transaction.
Remark: Close transaction by releasing the connection and all operations.
*****************************************************************************/
void
-Ndb::closeTransaction(NdbConnection* aConnection)
+Ndb::closeTransaction(NdbTransaction* aConnection)
{
DBUG_ENTER("Ndb::closeTransaction");
- NdbConnection* tCon;
- NdbConnection* tPreviousCon;
+ NdbTransaction* tCon;
+ NdbTransaction* tPreviousCon;
if (aConnection == NULL) {
//-----------------------------------------------------
@@ -446,6 +489,7 @@ Ndb::closeTransaction(NdbConnection* aConnection)
CHECK_STATUS_MACRO_VOID;
tCon = theTransactionList;
+ theRemainingStartTransactions++;
DBUG_PRINT("info",("close trans: 0x%x transid: 0x%llx",
aConnection, aConnection->getTransactionId()));
@@ -464,12 +508,12 @@ Ndb::closeTransaction(NdbConnection* aConnection)
if(aConnection->theError.code == 4008){
/**
- * When a SCAN timed-out, returning the NdbConnection leads
+ * When a SCAN timed-out, returning the NdbTransaction leads
* to reuse. And TC crashes when the API tries to reuse it to
* something else...
*/
#ifdef VM_TRACE
- printf("Scan timeout:ed NdbConnection-> "
+ printf("Scan timeout:ed NdbTransaction-> "
"not returning it-> memory leak\n");
#endif
DBUG_VOID_RETURN;
@@ -491,12 +535,12 @@ Ndb::closeTransaction(NdbConnection* aConnection)
if(aConnection->theError.code == 4008){
/**
- * Something timed-out, returning the NdbConnection leads
+ * Something timed-out, returning the NdbTransaction leads
* to reuse. And TC crashes when the API tries to reuse it to
* something else...
*/
#ifdef VM_TRACE
- printf("Con timeout:ed NdbConnection-> not returning it-> memory leak\n");
+ printf("Con timeout:ed NdbTransaction-> not returning it-> memory leak\n");
#endif
DBUG_VOID_RETURN;
}
@@ -535,7 +579,7 @@ Remark: Sends a signal to DIH.
int
Ndb::NdbTamper(TamperType aAction, int aNode)
{
- NdbConnection* tNdbConn;
+ NdbTransaction* tNdbConn;
NdbApiSignal tSignal(theMyRef);
int tNode;
int tAction;
@@ -577,7 +621,7 @@ Ndb::NdbTamper(TamperType aAction, int aNode)
tSignal.setData (tAction, 1);
tSignal.setData(tNdbConn->ptr2int(),2);
tSignal.setData(theMyRef,3); // Set return block reference
- tNdbConn->Status(NdbConnection::Connecting); // Set status to connecting
+ tNdbConn->Status(NdbTransaction::Connecting); // Set status to connecting
TransporterFacade *tp = TransporterFacade::instance();
if (tAction == 3) {
tp->lock_mutex();
@@ -610,7 +654,7 @@ Ndb::NdbTamper(TamperType aAction, int aNode)
}//if
ret_code = sendRecSignal(tNode, WAIT_NDB_TAMPER, &tSignal, 0);
if (ret_code == 0) {
- if (tNdbConn->Status() != NdbConnection::Connected) {
+ if (tNdbConn->Status() != NdbTransaction::Connected) {
theRestartGCI = 0;
}//if
releaseNdbCon(tNdbConn);
@@ -716,16 +760,14 @@ Remark: Returns a new TupleId to the application.
The TupleId comes from SYSTAB_0 where SYSKEY_0 = TableId.
It is initialized to (TableId << 48) + 1 in NdbcntrMain.cpp.
****************************************************************************/
-#define DEBUG_TRACE(msg) \
-// ndbout << __FILE__ << " line: " << __LINE__ << " msg: " << msg << endl
-
Uint64
Ndb::getAutoIncrementValue(const char* aTableName, Uint32 cacheSize)
{
DBUG_ENTER("getAutoIncrementValue");
- const char * internalTableName = internalizeTableName(aTableName);
+ BaseString internal_tabname(internalize_table_name(aTableName));
+
Ndb_local_table_info *info=
- theDictionary->get_local_table_info(internalTableName, false);
+ theDictionary->get_local_table_info(internal_tabname, false);
if (info == 0)
DBUG_RETURN(~(Uint64)0);
const NdbTableImpl *table= info->m_table_impl;
@@ -777,7 +819,7 @@ Ndb::getTupleIdFromNdb(Uint32 aTableId, Uint32 cacheSize)
Uint64
Ndb::readAutoIncrementValue(const char* aTableName)
{
- DBUG_ENTER("readtAutoIncrementValue");
+ DBUG_ENTER("readAutoIncrementValue");
const NdbTableImpl* table = theDictionary->getTable(aTableName);
if (table == 0) {
theError= theDictionary->getNdbError();
@@ -791,7 +833,7 @@ Ndb::readAutoIncrementValue(const char* aTableName)
Uint64
Ndb::readAutoIncrementValue(const NdbDictionary::Table * aTable)
{
- DBUG_ENTER("readtAutoIncrementValue");
+ DBUG_ENTER("readAutoIncrementValue");
if (aTable == 0)
DBUG_RETURN(~(Uint64)0);
const NdbTableImpl* table = & NdbTableImpl::getImpl(*aTable);
@@ -813,71 +855,73 @@ Ndb::readTupleIdFromNdb(Uint32 aTableId)
bool
Ndb::setAutoIncrementValue(const char* aTableName, Uint64 val, bool increase)
{
- DEBUG_TRACE("setAutoIncrementValue " << val);
- const char * internalTableName= internalizeTableName(aTableName);
+ DBUG_ENTER("setAutoIncrementValue");
+ BaseString internal_tabname(internalize_table_name(aTableName));
+
Ndb_local_table_info *info=
- theDictionary->get_local_table_info(internalTableName, false);
+ theDictionary->get_local_table_info(internal_tabname, false);
if (info == 0) {
theError= theDictionary->getNdbError();
- return false;
+ DBUG_RETURN(false);
}
const NdbTableImpl* table= info->m_table_impl;
- return setTupleIdInNdb(table->m_tableId, val, increase);
+ DBUG_RETURN(setTupleIdInNdb(table->m_tableId, val, increase));
}
bool
Ndb::setAutoIncrementValue(const NdbDictionary::Table * aTable, Uint64 val, bool increase)
{
- DEBUG_TRACE("setAutoIncrementValue " << val);
+ DBUG_ENTER("setAutoIncrementValue");
if (aTable == 0)
- return ~(Uint64)0;
+ DBUG_RETURN(~(Uint64)0);
const NdbTableImpl* table = & NdbTableImpl::getImpl(*aTable);
- return setTupleIdInNdb(table->m_tableId, val, increase);
+ DBUG_RETURN(setTupleIdInNdb(table->m_tableId, val, increase));
}
-bool
+bool
Ndb::setTupleIdInNdb(const char* aTableName, Uint64 val, bool increase )
{
- DEBUG_TRACE("setTupleIdInNdb");
+ DBUG_ENTER("setTupleIdInNdb(const char*, ...)");
const NdbTableImpl* table = theDictionary->getTable(aTableName);
if (table == 0) {
theError= theDictionary->getNdbError();
- return false;
+ DBUG_RETURN(false);
}
- return setTupleIdInNdb(table->m_tableId, val, increase);
+ DBUG_RETURN(setTupleIdInNdb(table->m_tableId, val, increase));
}
bool
Ndb::setTupleIdInNdb(Uint32 aTableId, Uint64 val, bool increase )
{
- DEBUG_TRACE("setTupleIdInNdb");
+ DBUG_ENTER("setTupleIdInNdb(Uint32, ...)");
if (increase)
{
if (theFirstTupleId[aTableId] != theLastTupleId[aTableId])
{
// We have a cache sequence
if (val <= theFirstTupleId[aTableId]+1)
- return false;
+ DBUG_RETURN(false);
if (val <= theLastTupleId[aTableId])
{
theFirstTupleId[aTableId] = val - 1;
- return true;
+ DBUG_RETURN(true);
}
// else continue;
- }
- return (opTupleIdOnNdb(aTableId, val, 2) == val);
+ }
+ DBUG_RETURN((opTupleIdOnNdb(aTableId, val, 2) == val));
}
else
- return (opTupleIdOnNdb(aTableId, val, 1) == val);
+ DBUG_RETURN((opTupleIdOnNdb(aTableId, val, 1) == val));
}
Uint64
Ndb::opTupleIdOnNdb(Uint32 aTableId, Uint64 opValue, Uint32 op)
{
- DEBUG_TRACE("opTupleIdOnNdb");
+ DBUG_ENTER("Ndb::opTupleIdOnNdb");
+ DBUG_PRINT("enter", ("table=%u value=%llu op=%u", aTableId, opValue, op));
- NdbConnection* tConnection;
- NdbOperation* tOperation;
+ NdbTransaction* tConnection;
+ NdbOperation* tOperation= 0; // Compiler warning if not initialized
Uint64 tValue;
NdbRecAttr* tRecAttrResult;
int result;
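setTupleIdInNdb keeps a cached range of reserved tuple ids per table (theFirstTupleId .. theLastTupleId); moving the auto-increment value forward only touches the cache when the target still falls inside that range, otherwise it falls through to opTupleIdOnNdb, which updates SYSTAB_0. A stand-alone sketch of the cache decision, with the dictionary round trip replaced by a stub that always succeeds:

    #include <cstdint>
    #include <cstdio>

    // Cached per-table auto-increment window: first is the last value handed
    // out (theFirstTupleId), last is the last value reserved from SYSTAB_0
    // (theLastTupleId).
    struct TupleIdCache {
      uint64_t first;
      uint64_t last;
    };

    // Stub for opTupleIdOnNdb(tab, val, 2): pretend the SYSTAB_0 update succeeds.
    static uint64_t set_in_systab(uint64_t val) { return val; }

    // Mirrors setTupleIdInNdb(Uint32, Uint64, true): only move the counter up.
    static bool set_auto_increment(TupleIdCache& c, uint64_t val) {
      if (c.first != c.last) {            // a cached sequence exists
        if (val <= c.first + 1)           // would move backwards: refuse
          return false;
        if (val <= c.last) {              // still inside the cached window
          c.first = val - 1;              // next value handed out will be val
          return true;
        }
        // beyond the window: fall through to the real update
      }
      return set_in_systab(val) == val;
    }

    int main() {
      TupleIdCache c{10, 20};
      bool a = set_auto_increment(c, 5);    // false: would move backwards
      bool b = set_auto_increment(c, 15);   // true: satisfied from the cache
      bool d = set_auto_increment(c, 100);  // true: via the SYSTAB_0 stub
      std::printf("%d %d %d\n", a, b, d);
      return 0;
    }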
@@ -970,7 +1014,7 @@ Ndb::opTupleIdOnNdb(Uint32 aTableId, Uint64 opValue, Uint32 op)
setDatabaseName(currentDb.c_str());
setDatabaseSchemaName(currentSchema.c_str());
- return ret;
+ DBUG_RETURN(ret);
error_handler:
theError.code = tConnection->theError.code;
@@ -980,7 +1024,11 @@ Ndb::opTupleIdOnNdb(Uint32 aTableId, Uint64 opValue, Uint32 op)
setDatabaseName(currentDb.c_str());
setDatabaseSchemaName(currentSchema.c_str());
- return ~(Uint64)0;
+ DBUG_PRINT("error", ("ndb=%d con=%d op=%d",
+ theError.code,
+ tConnection ? tConnection->theError.code : -1,
+ tOperation ? tOperation->theError.code : -1));
+ DBUG_RETURN(~(Uint64)0);
}
Uint32
@@ -1002,39 +1050,31 @@ convertEndian(Uint32 Data)
}
const char * Ndb::getCatalogName() const
{
- return theDataBase;
+ return theImpl->m_dbname.c_str();
}
-
+
+
void Ndb::setCatalogName(const char * a_catalog_name)
{
- if (a_catalog_name) {
- BaseString::snprintf(theDataBase, sizeof(theDataBase), "%s",
- a_catalog_name ? a_catalog_name : "");
-
- int len = BaseString::snprintf(prefixName, sizeof(prefixName), "%s%c%s%c",
- theDataBase, table_name_separator,
- theDataBaseSchema, table_name_separator);
- prefixEnd = prefixName + (len < (int) sizeof(prefixName) ? len :
- sizeof(prefixName) - 1);
+ if (a_catalog_name)
+ {
+ theImpl->m_dbname.assign(a_catalog_name);
+ theImpl->update_prefix();
}
}
-
+
+
const char * Ndb::getSchemaName() const
{
- return theDataBaseSchema;
+ return theImpl->m_schemaname.c_str();
}
-
+
+
void Ndb::setSchemaName(const char * a_schema_name)
{
if (a_schema_name) {
- BaseString::snprintf(theDataBaseSchema, sizeof(theDataBase), "%s",
- a_schema_name ? a_schema_name : "");
-
- int len = BaseString::snprintf(prefixName, sizeof(prefixName), "%s%c%s%c",
- theDataBase, table_name_separator,
- theDataBaseSchema, table_name_separator);
- prefixEnd = prefixName + (len < (int) sizeof(prefixName) ? len :
- sizeof(prefixName) - 1);
+ theImpl->m_schemaname.assign(a_schema_name);
+ theImpl->update_prefix();
}
}
@@ -1111,35 +1151,63 @@ Ndb::externalizeIndexName(const char * internalIndexName)
return externalizeIndexName(internalIndexName, usingFullyQualifiedNames());
}
-const char *
-Ndb::internalizeTableName(const char * externalTableName)
+
+const BaseString
+Ndb::internalize_table_name(const char *external_name) const
{
- if (fullyQualifiedNames) {
- strncpy(prefixEnd, externalTableName, NDB_MAX_TAB_NAME_SIZE);
- return prefixName;
+ BaseString ret;
+ DBUG_ENTER("internalize_table_name");
+ DBUG_PRINT("enter", ("external_name: %s", external_name));
+
+ if (fullyQualifiedNames)
+ {
+ /* Internal table name format <db>/<schema>/<table>
+ <db>/<schema> is already available in m_prefix
+ so just concat the two strings
+ */
+ ret.assfmt("%s%s",
+ theImpl->m_prefix.c_str(),
+ external_name);
}
else
- return externalTableName;
+ ret.assign(external_name);
+
+ DBUG_PRINT("exit", ("internal_name: %s", ret.c_str()));
+ DBUG_RETURN(ret);
}
-
-const char *
-Ndb::internalizeIndexName(const NdbTableImpl * table,
- const char * externalIndexName)
+
+
+const BaseString
+Ndb::internalize_index_name(const NdbTableImpl * table,
+ const char * external_name) const
{
- if (fullyQualifiedNames) {
- char tableId[10];
- sprintf(tableId, "%d", table->m_tableId);
- Uint32 tabIdLen = strlen(tableId);
- strncpy(prefixEnd, tableId, tabIdLen);
- prefixEnd[tabIdLen] = table_name_separator;
- strncpy(prefixEnd + tabIdLen + 1,
- externalIndexName, NDB_MAX_TAB_NAME_SIZE);
- return prefixName;
+ BaseString ret;
+ DBUG_ENTER("internalize_index_name");
+ DBUG_PRINT("enter", ("external_name: %s, table_id: %d",
+ external_name, table ? table->m_tableId : ~0));
+ if (!table)
+ {
+ DBUG_PRINT("error", ("!table"));
+ DBUG_RETURN(ret);
+ }
+
+ if (fullyQualifiedNames)
+ {
+ /* Internal index name format <db>/<schema>/<tabid>/<index> */
+ ret.assfmt("%s%d%c%s",
+ theImpl->m_prefix.c_str(),
+ table->m_tableId,
+ table_name_separator,
+ external_name);
}
else
- return externalIndexName;
+ ret.assign(external_name);
+
+ DBUG_PRINT("exit", ("internal_name: %s", ret.c_str()));
+ DBUG_RETURN(ret);
}
+
const BaseString
Ndb::getDatabaseFromInternalName(const char * internalName)
{
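internalize_table_name and internalize_index_name now build a BaseString from the per-Ndb prefix instead of writing into the old shared prefixName buffer. The formats are <db>/<schema>/<table> for tables and <db>/<schema>/<tabid>/<index> for indexes. A sketch of the same formatting with std::string standing in for BaseString and '/' assumed as table_name_separator; the "test"/"def" names below are only examples:

    #include <cstdio>
    #include <string>

    // std::string stands in for BaseString; '/' is assumed for table_name_separator.
    static const char table_name_separator = '/';

    struct NamePrefix {
      std::string dbname;       // NdbImpl::m_dbname
      std::string schemaname;   // NdbImpl::m_schemaname
      // m_prefix, kept up to date by update_prefix(): "<db>/<schema>/"
      std::string prefix() const {
        return dbname + table_name_separator + schemaname + table_name_separator;
      }
    };

    // Internal table name: <db>/<schema>/<table>
    static std::string internalize_table_name(const NamePrefix& p, const char* ext) {
      return p.prefix() + ext;
    }

    // Internal index name: <db>/<schema>/<tabid>/<index>
    static std::string internalize_index_name(const NamePrefix& p, unsigned table_id,
                                               const char* ext) {
      return p.prefix() + std::to_string(table_id) + table_name_separator + ext;
    }

    int main() {
      NamePrefix p{"test", "def"};
      std::printf("%s\n", internalize_table_name(p, "t1").c_str());           // test/def/t1
      std::printf("%s\n", internalize_index_name(p, 17, "myindex").c_str());  // test/def/17/myindex
      return 0;
    }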
@@ -1182,7 +1250,14 @@ NdbEventOperation* Ndb::createEventOperation(const char* eventName,
tOp = new NdbEventOperation(this, eventName, bufferLength);
- if (tOp->getState() != NdbEventOperation::CREATED) {
+ if (tOp == 0)
+ {
+ theError.code= 4000;
+ return NULL;
+ }
+
+ if (tOp->getState() != NdbEventOperation::EO_CREATED) {
+ theError.code= tOp->getNdbError().code;
delete tOp;
tOp = NULL;
}
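createEventOperation now checks the allocation (error 4000 on failure) and copies the new operation's own error code up to the Ndb object before deleting a handle that never reached EO_CREATED. A small sketch of that propagation pattern with a hypothetical handle type; the 4711 code inside it is purely illustrative:

    #include <cstdio>
    #include <new>

    // Hypothetical stand-in for NdbEventOperation: records whether construction
    // reached the created state and an error code when it did not (4711 is a
    // placeholder, not a real NDB error code).
    struct EventOpSketch {
      bool created;
      int error_code;
      explicit EventOpSketch(bool ok) : created(ok), error_code(ok ? 0 : 4711) {}
    };

    // Mirrors the patched createEventOperation(): 4000 when allocation fails,
    // otherwise propagate the handle's error and delete the half-built handle.
    static EventOpSketch* create_event_op(bool ok, int& ndb_error) {
      EventOpSketch* op = new (std::nothrow) EventOpSketch(ok);
      if (op == 0) {
        ndb_error = 4000;               // memory allocation error in the patch
        return 0;
      }
      if (!op->created) {
        ndb_error = op->error_code;     // theError.code = tOp->getNdbError().code
        delete op;
        return 0;
      }
      return op;
    }

    int main() {
      int err = 0;
      EventOpSketch* op = create_event_op(false, err);
      std::printf("op=%p err=%d\n", static_cast<void*>(op), err);
      delete op;                        // deleting a null pointer is a no-op
      return 0;
    }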
@@ -1218,7 +1293,7 @@ Ndb::pollEvents(int aMillisecondNumber)
extern NdbMutex *ndb_print_state_mutex;
static bool
-checkdups(NdbConnection** list, unsigned no)
+checkdups(NdbTransaction** list, unsigned no)
{
for (unsigned i = 0; i < no; i++)
for (unsigned j = i + 1; j < no; j++)
@@ -1243,7 +1318,7 @@ Ndb::printState(const char* fmt, ...)
#endif
ndbout << endl;
for (unsigned n = 0; n < MAX_NDB_NODES; n++) {
- NdbConnection* con = theConnectionArray[n];
+ NdbTransaction* con = theConnectionArray[n];
if (con != 0) {
ndbout << "conn " << n << ":" << endl;
while (con != 0) {
diff --git a/ndb/src/ndbapi/NdbApiSignal.cpp b/ndb/src/ndbapi/NdbApiSignal.cpp
index 953d87ac7b0..94695185224 100644
--- a/ndb/src/ndbapi/NdbApiSignal.cpp
+++ b/ndb/src/ndbapi/NdbApiSignal.cpp
@@ -232,7 +232,7 @@ NdbApiSignal::setSignal(int aNdbSignalType)
theTrace = TestOrd::TraceAPI;
theReceiversBlockNumber = DBTC;
theVerId_signalNumber = GSN_TCINDXREQ;
- theLength = TcIndxReq::SignalLength;
+ theLength = TcKeyReq::SignalLength;
}
break;
diff --git a/ndb/src/ndbapi/NdbApiSignal.hpp b/ndb/src/ndbapi/NdbApiSignal.hpp
index 9a8326bd666..9d04a8594a8 100644
--- a/ndb/src/ndbapi/NdbApiSignal.hpp
+++ b/ndb/src/ndbapi/NdbApiSignal.hpp
@@ -94,7 +94,7 @@ private:
void setDataPtr(Uint32 *);
- friend class NdbConnection;
+ friend class NdbTransaction;
friend class NdbScanReceiver;
friend class Table;
void copyFrom(const NdbApiSignal * src);
diff --git a/ndb/src/ndbapi/NdbBlob.cpp b/ndb/src/ndbapi/NdbBlob.cpp
index c5692d79e83..d1aa4e61c40 100644
--- a/ndb/src/ndbapi/NdbBlob.cpp
+++ b/ndb/src/ndbapi/NdbBlob.cpp
@@ -16,7 +16,7 @@
#include <Ndb.hpp>
#include <NdbDictionaryImpl.hpp>
-#include <NdbConnection.hpp>
+#include <NdbTransaction.hpp>
#include <NdbOperation.hpp>
#include <NdbIndexOperation.hpp>
#include <NdbRecAttr.hpp>
@@ -24,34 +24,6 @@
#include "NdbBlobImpl.hpp"
#include <NdbScanOperation.hpp>
-#ifdef NDB_BLOB_DEBUG
-#define DBG(x) \
- do { \
- static const char* p = getenv("NDB_BLOB_DEBUG"); \
- if (p == 0 || *p == 0 || *p == '0') break; \
- static char* prefix = "BLOB"; \
- const char* cname = theColumn == NULL ? "-" : theColumn->m_name.c_str(); \
- ndbout << prefix << " " << hex << (void*)this << " " << cname; \
- ndbout << " " << dec << __LINE__ << " " << x << " " << *this << endl; \
- } while (0)
-
-static char*
-ndb_blob_debug(const Uint32* data, unsigned size)
-{
- static char buf[200]; // MT irrelevant
- buf[0] = 0;
- for (unsigned i = 0; i < size; i++) {
- unsigned n = strlen(buf);
- if (n + 10 < sizeof(buf))
- sprintf(buf + n, "%*s%08x", i != 0, "", data[i]);
- }
- return buf;
-}
-
-#else
-#define DBG(x)
-#endif
-
/*
* Reading index table directly (as a table) is faster but there are
* bugs or limitations. Keep the code and make possible to choose.
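The hunk above drops the blob code's private DBG stream macro and its ndb_blob_debug() hex formatter; the rest of the file switches to DBUG_PRINT for formatted text and DBUG_DUMP for raw buffers (note that DBUG_DUMP takes a byte length, hence the 4 * m_keyLenInWords arguments later on). A stand-alone illustration of the hex-dump side only; the function name and output format here are the sketch's own, not the dbug library's:

    #include <cstdint>
    #include <cstdio>

    // Illustrative stand-in for DBUG_DUMP("info", data, byte_len): print a key
    // buffer as hex words, roughly what the removed ndb_blob_debug() helper did.
    static void dump_words(const char* tag, const uint32_t* data, unsigned words) {
      std::printf("%s:", tag);
      for (unsigned i = 0; i < words; i++)
        std::printf(" %08x", data[i]);
      std::printf("\n");
    }

    int main() {
      // e.g. a three-word table key, as dumped before setTableKeyValue()
      uint32_t key_words[3] = { 0x00000001, 0xdeadbeefu, 0x00000000 };
      dump_words("key", key_words, 3);
      return 0;
    }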
@@ -63,8 +35,10 @@ static const bool g_ndb_blob_ok_to_read_index_table = false;
inline void
NdbBlob::setState(State newState)
{
- DBG("setState " << newState);
+ DBUG_ENTER("NdbBlob::setState");
+ DBUG_PRINT("info", ("this=%p newState=%u", this, newState));
theState = newState;
+ DBUG_VOID_RETURN;
}
// define blob table
@@ -100,8 +74,8 @@ NdbBlob::getBlobTable(NdbTableImpl& bt, const NdbTableImpl* t, const NdbColumnIm
bt.setFragmentType(t->getFragmentType());
{ NdbDictionary::Column bc("PK");
bc.setType(NdbDictionary::Column::Unsigned);
- assert(t->m_sizeOfKeysInWords != 0);
- bc.setLength(t->m_sizeOfKeysInWords);
+ assert(t->m_keyLenInWords != 0);
+ bc.setLength(t->m_keyLenInWords);
bc.setPrimaryKey(true);
bc.setDistributionKey(true);
bt.addColumn(bc);
@@ -319,9 +293,9 @@ NdbBlob::getDistKey(Uint32 part)
int
NdbBlob::getTableKeyValue(NdbOperation* anOp)
{
+ DBUG_ENTER("NdbBlob::getTableKeyValue");
Uint32* data = (Uint32*)theKeyBuf.data;
unsigned pos = 0;
- DBG("getTableKeyValue");
for (unsigned i = 0; i < theTable->m_columns.size(); i++) {
NdbColumnImpl* c = theTable->m_columns[i];
assert(c != NULL);
@@ -329,7 +303,7 @@ NdbBlob::getTableKeyValue(NdbOperation* anOp)
unsigned len = c->m_attrSize * c->m_arraySize;
if (anOp->getValue_impl(c, (char*)&data[pos]) == NULL) {
setErrorCode(anOp);
- return -1;
+ DBUG_RETURN(-1);
}
// odd bytes receive no data and must be zeroed
while (len % 4 != 0) {
@@ -340,14 +314,15 @@ NdbBlob::getTableKeyValue(NdbOperation* anOp)
}
}
assert(pos == theKeyBuf.size / 4);
- return 0;
+ DBUG_RETURN(0);
}
int
NdbBlob::setTableKeyValue(NdbOperation* anOp)
{
+ DBUG_ENTER("NdbBlob::setTableKeyValue");
+ DBUG_DUMP("info", theKeyBuf.data, 4 * theTable->m_keyLenInWords);
const Uint32* data = (const Uint32*)theKeyBuf.data;
- DBG("setTableKeyValue key=" << ndb_blob_debug(data, theTable->m_sizeOfKeysInWords));
const unsigned columns = theTable->m_columns.size();
unsigned pos = 0;
for (unsigned i = 0; i < columns; i++) {
@@ -357,20 +332,21 @@ NdbBlob::setTableKeyValue(NdbOperation* anOp)
unsigned len = c->m_attrSize * c->m_arraySize;
if (anOp->equal_impl(c, (const char*)&data[pos], len) == -1) {
setErrorCode(anOp);
- return -1;
+ DBUG_RETURN(-1);
}
pos += (len + 3) / 4;
}
}
assert(pos == theKeyBuf.size / 4);
- return 0;
+ DBUG_RETURN(0);
}
int
NdbBlob::setAccessKeyValue(NdbOperation* anOp)
{
+ DBUG_ENTER("NdbBlob::setAccessKeyValue");
+ DBUG_DUMP("info", theAccessKeyBuf.data, 4 * theAccessTable->m_keyLenInWords);
const Uint32* data = (const Uint32*)theAccessKeyBuf.data;
- DBG("setAccessKeyValue key=" << ndb_blob_debug(data, theAccessTable->m_sizeOfKeysInWords));
const unsigned columns = theAccessTable->m_columns.size();
unsigned pos = 0;
for (unsigned i = 0; i < columns; i++) {
@@ -380,57 +356,60 @@ NdbBlob::setAccessKeyValue(NdbOperation* anOp)
unsigned len = c->m_attrSize * c->m_arraySize;
if (anOp->equal_impl(c, (const char*)&data[pos], len) == -1) {
setErrorCode(anOp);
- return -1;
+ DBUG_RETURN(-1);
}
pos += (len + 3) / 4;
}
}
assert(pos == theAccessKeyBuf.size / 4);
- return 0;
+ DBUG_RETURN(0);
}
int
NdbBlob::setPartKeyValue(NdbOperation* anOp, Uint32 part)
{
+ DBUG_ENTER("NdbBlob::setPartKeyValue");
+ DBUG_PRINT("info", ("dist=%u part=%u key=", getDistKey(part), part));
+ DBUG_DUMP("info", theKeyBuf.data, 4 * theTable->m_keyLenInWords);
Uint32* data = (Uint32*)theKeyBuf.data;
- unsigned size = theTable->m_sizeOfKeysInWords;
- DBG("setPartKeyValue dist=" << getDistKey(part) << " part=" << part << " key=" << ndb_blob_debug(data, size));
+ unsigned size = theTable->m_keyLenInWords;
// TODO use attr ids after compatibility with 4.1.7 not needed
if (anOp->equal("PK", theKeyBuf.data) == -1 ||
anOp->equal("DIST", getDistKey(part)) == -1 ||
anOp->equal("PART", part) == -1) {
setErrorCode(anOp);
- return -1;
+ DBUG_RETURN(-1);
}
- return 0;
+ DBUG_RETURN(0);
}
int
NdbBlob::getHeadInlineValue(NdbOperation* anOp)
{
- DBG("getHeadInlineValue");
+ DBUG_ENTER("NdbBlob::getHeadInlineValue");
theHeadInlineRecAttr = anOp->getValue_impl(theColumn, theHeadInlineBuf.data);
if (theHeadInlineRecAttr == NULL) {
setErrorCode(anOp);
- return -1;
+ DBUG_RETURN(-1);
}
- return 0;
+ DBUG_RETURN(0);
}
void
NdbBlob::getHeadFromRecAttr()
{
+ DBUG_ENTER("NdbBlob::getHeadFromRecAttr");
assert(theHeadInlineRecAttr != NULL);
theNullFlag = theHeadInlineRecAttr->isNULL();
assert(theNullFlag != -1);
theLength = ! theNullFlag ? theHead->length : 0;
- DBG("getHeadFromRecAttr [out]");
+ DBUG_VOID_RETURN;
}
int
NdbBlob::setHeadInlineValue(NdbOperation* anOp)
{
- DBG("setHeadInlineValue");
+ DBUG_ENTER("NdbBlob::setHeadInlineValue");
theHead->length = theLength;
if (theLength < theInlineSize)
memset(theInlineData + theLength, 0, theInlineSize - theLength);
@@ -438,10 +417,10 @@ NdbBlob::setHeadInlineValue(NdbOperation* anOp)
const char* aValue = theNullFlag ? 0 : theHeadInlineBuf.data;
if (anOp->setValue(theColumn, aValue, theHeadInlineBuf.size) == -1) {
setErrorCode(anOp);
- return -1;
+ DBUG_RETURN(-1);
}
theHeadInlineUpdateFlag = false;
- return 0;
+ DBUG_RETURN(0);
}
// getValue/setValue
@@ -449,40 +428,42 @@ NdbBlob::setHeadInlineValue(NdbOperation* anOp)
int
NdbBlob::getValue(void* data, Uint32 bytes)
{
- DBG("getValue data=" << hex << data << " bytes=" << dec << bytes);
+ DBUG_ENTER("NdbBlob::getValue");
+ DBUG_PRINT("info", ("data=%p bytes=%u", data, bytes));
if (theGetFlag || theState != Prepared) {
setErrorCode(NdbBlobImpl::ErrState);
- return -1;
+ DBUG_RETURN(-1);
}
if (! isReadOp() && ! isScanOp()) {
setErrorCode(NdbBlobImpl::ErrUsage);
- return -1;
+ DBUG_RETURN(-1);
}
if (data == NULL && bytes != 0) {
setErrorCode(NdbBlobImpl::ErrUsage);
- return -1;
+ DBUG_RETURN(-1);
}
theGetFlag = true;
theGetBuf = static_cast<char*>(data);
theGetSetBytes = bytes;
- return 0;
+ DBUG_RETURN(0);
}
int
NdbBlob::setValue(const void* data, Uint32 bytes)
{
- DBG("setValue data=" << hex << data << " bytes=" << dec << bytes);
+ DBUG_ENTER("NdbBlob::setValue");
+ DBUG_PRINT("info", ("data=%p bytes=%u", data, bytes));
if (theSetFlag || theState != Prepared) {
setErrorCode(NdbBlobImpl::ErrState);
- return -1;
+ DBUG_RETURN(-1);
}
if (! isInsertOp() && ! isUpdateOp() && ! isWriteOp()) {
setErrorCode(NdbBlobImpl::ErrUsage);
- return -1;
+ DBUG_RETURN(-1);
}
if (data == NULL && bytes != 0) {
setErrorCode(NdbBlobImpl::ErrUsage);
- return -1;
+ DBUG_RETURN(-1);
}
theSetFlag = true;
theSetBuf = static_cast<const char*>(data);
@@ -495,15 +476,15 @@ NdbBlob::setValue(const void* data, Uint32 bytes)
n = theInlineSize;
assert(thePos == 0);
if (writeDataPrivate(theSetBuf, n) == -1)
- return -1;
+ DBUG_RETURN(-1);
} else {
theNullFlag = true;
theLength = 0;
}
if (setHeadInlineValue(theNdbOp) == -1)
- return -1;
+ DBUG_RETURN(-1);
}
- return 0;
+ DBUG_RETURN(0);
}
// activation hook
@@ -511,14 +492,15 @@ NdbBlob::setValue(const void* data, Uint32 bytes)
int
NdbBlob::setActiveHook(ActiveHook activeHook, void* arg)
{
- DBG("setActiveHook hook=" << hex << (void*)activeHook << " arg=" << hex << arg);
+ DBUG_ENTER("NdbBlob::setActiveHook");
+ DBUG_PRINT("info", ("hook=%p arg=%p", (void*)activeHook, arg));
if (theState != Prepared) {
setErrorCode(NdbBlobImpl::ErrState);
- return -1;
+ DBUG_RETURN(-1);
}
theActiveHook = activeHook;
theActiveHookArg = arg;
- return 0;
+ DBUG_RETURN(0);
}
// misc operations
@@ -526,63 +508,64 @@ NdbBlob::setActiveHook(ActiveHook activeHook, void* arg)
int
NdbBlob::getNull(bool& isNull)
{
- DBG("getNull");
+ DBUG_ENTER("NdbBlob::getNull");
if (theState == Prepared && theSetFlag) {
isNull = (theSetBuf == NULL);
- return 0;
+ DBUG_RETURN(0);
}
if (theNullFlag == -1) {
setErrorCode(NdbBlobImpl::ErrState);
- return -1;
+ DBUG_RETURN(-1);
}
isNull = theNullFlag;
- return 0;
+ DBUG_RETURN(0);
}
int
NdbBlob::setNull()
{
- DBG("setNull");
+ DBUG_ENTER("NdbBlob::setNull");
if (theNullFlag == -1) {
if (theState == Prepared) {
- return setValue(0, 0);
+ DBUG_RETURN(setValue(0, 0));
}
setErrorCode(NdbBlobImpl::ErrState);
- return -1;
+ DBUG_RETURN(-1);
}
if (theNullFlag)
- return 0;
+ DBUG_RETURN(0);
if (deleteParts(0, getPartCount()) == -1)
- return -1;
+ DBUG_RETURN(-1);
theNullFlag = true;
theLength = 0;
theHeadInlineUpdateFlag = true;
- return 0;
+ DBUG_RETURN(0);
}
int
NdbBlob::getLength(Uint64& len)
{
- DBG("getLength");
+ DBUG_ENTER("NdbBlob::getLength");
if (theState == Prepared && theSetFlag) {
len = theGetSetBytes;
- return 0;
+ DBUG_RETURN(0);
}
if (theNullFlag == -1) {
setErrorCode(NdbBlobImpl::ErrState);
- return -1;
+ DBUG_RETURN(-1);
}
len = theLength;
- return 0;
+ DBUG_RETURN(0);
}
int
NdbBlob::truncate(Uint64 length)
{
- DBG("truncate [in] length=" << length);
+ DBUG_ENTER("NdbBlob::truncate");
+ DBUG_PRINT("info", ("length=%llu", length));
if (theNullFlag == -1) {
setErrorCode(NdbBlobImpl::ErrState);
- return -1;
+ DBUG_RETURN(-1);
}
if (theLength > length) {
if (length > theInlineSize) {
@@ -590,46 +573,46 @@ NdbBlob::truncate(Uint64 length)
Uint32 part2 = getPartNumber(theLength - 1);
assert(part2 >= part1);
if (part2 > part1 && deleteParts(part1 + 1, part2 - part1) == -1)
- return -1;
+ DBUG_RETURN(-1);
} else {
if (deleteParts(0, getPartCount()) == -1)
- return -1;
+ DBUG_RETURN(-1);
}
theLength = length;
theHeadInlineUpdateFlag = true;
if (thePos > length)
thePos = length;
}
- DBG("truncate [out]");
- return 0;
+ DBUG_RETURN(0);
}
int
NdbBlob::getPos(Uint64& pos)
{
- DBG("getPos");
+ DBUG_ENTER("NdbBlob::getPos");
if (theNullFlag == -1) {
setErrorCode(NdbBlobImpl::ErrState);
- return -1;
+ DBUG_RETURN(-1);
}
pos = thePos;
- return 0;
+ DBUG_RETURN(0);
}
int
NdbBlob::setPos(Uint64 pos)
{
- DBG("setPos pos=" << pos);
+ DBUG_ENTER("NdbBlob::setPos");
+ DBUG_PRINT("info", ("pos=%llu", pos));
if (theNullFlag == -1) {
setErrorCode(NdbBlobImpl::ErrState);
- return -1;
+ DBUG_RETURN(-1);
}
if (pos > theLength) {
setErrorCode(NdbBlobImpl::ErrSeek);
- return -1;
+ DBUG_RETURN(-1);
}
thePos = pos;
- return 0;
+ DBUG_RETURN(0);
}
// read/write
@@ -648,7 +631,8 @@ NdbBlob::readData(void* data, Uint32& bytes)
int
NdbBlob::readDataPrivate(char* buf, Uint32& bytes)
{
- DBG("readData [in] bytes=" << bytes);
+ DBUG_ENTER("NdbBlob::readDataPrivate");
+ DBUG_PRINT("info", ("bytes=%u", bytes));
assert(thePos <= theLength);
Uint64 pos = thePos;
if (bytes > theLength - pos)
@@ -668,20 +652,20 @@ NdbBlob::readDataPrivate(char* buf, Uint32& bytes)
}
if (len > 0 && thePartSize == 0) {
setErrorCode(NdbBlobImpl::ErrSeek);
- return -1;
+ DBUG_RETURN(-1);
}
if (len > 0) {
assert(pos >= theInlineSize);
Uint32 off = (pos - theInlineSize) % thePartSize;
// partial first block
if (off != 0) {
- DBG("partial first block pos=" << pos << " len=" << len);
+ DBUG_PRINT("info", ("partial first block pos=%llu len=%u", pos, len));
Uint32 part = (pos - theInlineSize) / thePartSize;
if (readParts(thePartBuf.data, part, 1) == -1)
- return -1;
+ DBUG_RETURN(-1);
// need result now
if (executePendingBlobReads() == -1)
- return -1;
+ DBUG_RETURN(-1);
Uint32 n = thePartSize - off;
if (n > len)
n = len;
@@ -698,7 +682,7 @@ NdbBlob::readDataPrivate(char* buf, Uint32& bytes)
Uint32 part = (pos - theInlineSize) / thePartSize;
Uint32 count = len / thePartSize;
if (readParts(buf, part, count) == -1)
- return -1;
+ DBUG_RETURN(-1);
Uint32 n = thePartSize * count;
pos += n;
buf += n;
@@ -707,14 +691,14 @@ NdbBlob::readDataPrivate(char* buf, Uint32& bytes)
}
if (len > 0) {
// partial last block
- DBG("partial last block pos=" << pos << " len=" << len);
+ DBUG_PRINT("info", ("partial last block pos=%llu len=%u", pos, len));
assert((pos - theInlineSize) % thePartSize == 0 && len < thePartSize);
Uint32 part = (pos - theInlineSize) / thePartSize;
if (readParts(thePartBuf.data, part, 1) == -1)
- return -1;
+ DBUG_RETURN(-1);
// need result now
if (executePendingBlobReads() == -1)
- return -1;
+ DBUG_RETURN(-1);
memcpy(buf, thePartBuf.data, len);
Uint32 n = len;
pos += n;
@@ -724,8 +708,7 @@ NdbBlob::readDataPrivate(char* buf, Uint32& bytes)
assert(len == 0);
thePos = pos;
assert(thePos <= theLength);
- DBG("readData [out]");
- return 0;
+ DBUG_RETURN(0);
}
int
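readDataPrivate and writeDataPrivate split an access at position pos into the inline bytes kept in the main row, a possibly partial first part, whole parts of size thePartSize, and a possibly partial last part. The part-number and offset arithmetic they rely on is just division and remainder past the inline size, sketched here with example sizes (256-byte inline, 2000-byte parts) that are illustrative rather than taken from the patch:

    #include <cstdint>
    #include <cstdio>

    // Where byte position pos of a blob value lives: part number in the blob
    // parts table plus the byte offset inside that part (the arithmetic used
    // throughout readDataPrivate/writeDataPrivate).
    struct BlobPos {
      uint32_t part;
      uint32_t offset;
    };

    static BlobPos locate(uint64_t pos, uint32_t inline_size, uint32_t part_size) {
      // callers only get here for pos >= inline_size; the first inline_size
      // bytes are served straight from the head+inline buffer
      uint64_t off = pos - inline_size;
      return BlobPos{ (uint32_t)(off / part_size), (uint32_t)(off % part_size) };
    }

    int main() {
      // illustrative sizes: 256 inline bytes, 2000-byte parts
      BlobPos p = locate(256 + 2 * 2000 + 123, 256, 2000);
      std::printf("part=%u offset=%u\n", p.part, p.offset);   // part=2 offset=123
      return 0;
    }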
@@ -742,7 +725,8 @@ NdbBlob::writeData(const void* data, Uint32 bytes)
int
NdbBlob::writeDataPrivate(const char* buf, Uint32 bytes)
{
- DBG("writeData [in] bytes=" << bytes);
+ DBUG_ENTER("NdbBlob::writeDataPrivate");
+ DBUG_PRINT("info", ("bytes=%u", bytes));
assert(thePos <= theLength);
Uint64 pos = thePos;
Uint32 len = bytes;
@@ -766,23 +750,23 @@ NdbBlob::writeDataPrivate(const char* buf, Uint32 bytes)
}
if (len > 0 && thePartSize == 0) {
setErrorCode(NdbBlobImpl::ErrSeek);
- return -1;
+ DBUG_RETURN(-1);
}
if (len > 0) {
assert(pos >= theInlineSize);
Uint32 off = (pos - theInlineSize) % thePartSize;
// partial first block
if (off != 0) {
- DBG("partial first block pos=" << pos << " len=" << len);
+ DBUG_PRINT("info", ("partial first block pos=%llu len=%u", pos, len));
// flush writes to guarantee correct read
if (executePendingBlobWrites() == -1)
- return -1;
+ DBUG_RETURN(-1);
Uint32 part = (pos - theInlineSize) / thePartSize;
if (readParts(thePartBuf.data, part, 1) == -1)
- return -1;
+ DBUG_RETURN(-1);
// need result now
if (executePendingBlobReads() == -1)
- return -1;
+ DBUG_RETURN(-1);
Uint32 n = thePartSize - off;
if (n > len) {
memset(thePartBuf.data + off + len, theFillChar, n - len);
@@ -790,7 +774,7 @@ NdbBlob::writeDataPrivate(const char* buf, Uint32 bytes)
}
memcpy(thePartBuf.data + off, buf, n);
if (updateParts(thePartBuf.data, part, 1) == -1)
- return -1;
+ DBUG_RETURN(-1);
pos += n;
buf += n;
len -= n;
@@ -805,10 +789,10 @@ NdbBlob::writeDataPrivate(const char* buf, Uint32 bytes)
for (unsigned i = 0; i < count; i++) {
if (part + i < getPartCount()) {
if (updateParts(buf, part + i, 1) == -1)
- return -1;
+ DBUG_RETURN(-1);
} else {
if (insertParts(buf, part + i, 1) == -1)
- return -1;
+ DBUG_RETURN(-1);
}
Uint32 n = thePartSize;
pos += n;
@@ -819,30 +803,30 @@ NdbBlob::writeDataPrivate(const char* buf, Uint32 bytes)
}
if (len > 0) {
// partial last block
- DBG("partial last block pos=" << pos << " len=" << len);
+ DBUG_PRINT("info", ("partial last block pos=%llu len=%u", pos, len));
assert((pos - theInlineSize) % thePartSize == 0 && len < thePartSize);
Uint32 part = (pos - theInlineSize) / thePartSize;
if (theLength > pos + len) {
// flush writes to guarantee correct read
if (executePendingBlobWrites() == -1)
- return -1;
+ DBUG_RETURN(-1);
if (readParts(thePartBuf.data, part, 1) == -1)
- return -1;
+ DBUG_RETURN(-1);
// need result now
if (executePendingBlobReads() == -1)
- return -1;
+ DBUG_RETURN(-1);
memcpy(thePartBuf.data, buf, len);
if (updateParts(thePartBuf.data, part, 1) == -1)
- return -1;
+ DBUG_RETURN(-1);
} else {
memcpy(thePartBuf.data, buf, len);
memset(thePartBuf.data + len, theFillChar, thePartSize - len);
if (part < getPartCount()) {
if (updateParts(thePartBuf.data, part, 1) == -1)
- return -1;
+ DBUG_RETURN(-1);
} else {
if (insertParts(thePartBuf.data, part, 1) == -1)
- return -1;
+ DBUG_RETURN(-1);
}
}
Uint32 n = len;
@@ -857,14 +841,14 @@ NdbBlob::writeDataPrivate(const char* buf, Uint32 bytes)
}
thePos = pos;
assert(thePos <= theLength);
- DBG("writeData [out]");
- return 0;
+ DBUG_RETURN(0);
}
int
NdbBlob::readParts(char* buf, Uint32 part, Uint32 count)
{
- DBG("readParts [in] part=" << part << " count=" << count);
+ DBUG_ENTER("NdbBlob::readParts");
+ DBUG_PRINT("info", ("part=%u count=%u", part, count));
Uint32 n = 0;
while (n < count) {
NdbOperation* tOp = theNdbCon->getNdbOperation(theBlobTable);
@@ -873,21 +857,22 @@ NdbBlob::readParts(char* buf, Uint32 part, Uint32 count)
setPartKeyValue(tOp, part + n) == -1 ||
tOp->getValue((Uint32)3, buf) == NULL) {
setErrorCode(tOp);
- return -1;
+ DBUG_RETURN(-1);
}
- tOp->m_abortOption = AbortOnError;
+ tOp->m_abortOption = NdbTransaction::AbortOnError;
buf += thePartSize;
n++;
thePendingBlobOps |= (1 << NdbOperation::ReadRequest);
theNdbCon->thePendingBlobOps |= (1 << NdbOperation::ReadRequest);
}
- return 0;
+ DBUG_RETURN(0);
}
int
NdbBlob::insertParts(const char* buf, Uint32 part, Uint32 count)
{
- DBG("insertParts [in] part=" << part << " count=" << count);
+ DBUG_ENTER("NdbBlob::insertParts");
+ DBUG_PRINT("info", ("part=%u count=%u", part, count));
Uint32 n = 0;
while (n < count) {
NdbOperation* tOp = theNdbCon->getNdbOperation(theBlobTable);
@@ -896,21 +881,22 @@ NdbBlob::insertParts(const char* buf, Uint32 part, Uint32 count)
setPartKeyValue(tOp, part + n) == -1 ||
tOp->setValue((Uint32)3, buf) == -1) {
setErrorCode(tOp);
- return -1;
+ DBUG_RETURN(-1);
}
- tOp->m_abortOption = AbortOnError;
+ tOp->m_abortOption = NdbTransaction::AbortOnError;
buf += thePartSize;
n++;
thePendingBlobOps |= (1 << NdbOperation::InsertRequest);
theNdbCon->thePendingBlobOps |= (1 << NdbOperation::InsertRequest);
}
- return 0;
+ DBUG_RETURN(0);
}
int
NdbBlob::updateParts(const char* buf, Uint32 part, Uint32 count)
{
- DBG("updateParts [in] part=" << part << " count=" << count);
+ DBUG_ENTER("NdbBlob::updateParts");
+ DBUG_PRINT("info", ("part=%u count=%u", part, count));
Uint32 n = 0;
while (n < count) {
NdbOperation* tOp = theNdbCon->getNdbOperation(theBlobTable);
@@ -919,21 +905,22 @@ NdbBlob::updateParts(const char* buf, Uint32 part, Uint32 count)
setPartKeyValue(tOp, part + n) == -1 ||
tOp->setValue((Uint32)3, buf) == -1) {
setErrorCode(tOp);
- return -1;
+ DBUG_RETURN(-1);
}
- tOp->m_abortOption = AbortOnError;
+ tOp->m_abortOption = NdbTransaction::AbortOnError;
buf += thePartSize;
n++;
thePendingBlobOps |= (1 << NdbOperation::UpdateRequest);
theNdbCon->thePendingBlobOps |= (1 << NdbOperation::UpdateRequest);
}
- return 0;
+ DBUG_RETURN(0);
}
int
NdbBlob::deleteParts(Uint32 part, Uint32 count)
{
- DBG("deleteParts [in] part=" << part << " count=" << count);
+ DBUG_ENTER("NdbBlob::deleteParts");
+ DBUG_PRINT("info", ("part=%u count=%u", part, count));
Uint32 n = 0;
while (n < count) {
NdbOperation* tOp = theNdbCon->getNdbOperation(theBlobTable);
@@ -941,14 +928,14 @@ NdbBlob::deleteParts(Uint32 part, Uint32 count)
tOp->deleteTuple() == -1 ||
setPartKeyValue(tOp, part + n) == -1) {
setErrorCode(tOp);
- return -1;
+ DBUG_RETURN(-1);
}
- tOp->m_abortOption = AbortOnError;
+ tOp->m_abortOption = NdbTransaction::AbortOnError;
n++;
thePendingBlobOps |= (1 << NdbOperation::DeleteRequest);
theNdbCon->thePendingBlobOps |= (1 << NdbOperation::DeleteRequest);
}
- return 0;
+ DBUG_RETURN(0);
}
/*
@@ -958,7 +945,8 @@ NdbBlob::deleteParts(Uint32 part, Uint32 count)
int
NdbBlob::deletePartsUnknown(Uint32 part)
{
- DBG("deletePartsUnknown [in] part=" << part << " count=all");
+ DBUG_ENTER("NdbBlob::deletePartsUnknown");
+ DBUG_PRINT("info", ("part=%u count=all", part));
static const unsigned maxbat = 256;
static const unsigned minbat = 1;
unsigned bat = minbat;
@@ -974,26 +962,25 @@ NdbBlob::deletePartsUnknown(Uint32 part)
tOp->deleteTuple() == -1 ||
setPartKeyValue(tOp, part + count + n) == -1) {
setErrorCode(tOp);
- return -1;
+ DBUG_RETURN(-1);
}
- tOp->m_abortOption = AO_IgnoreError;
+ tOp->m_abortOption= NdbTransaction::AO_IgnoreError;
n++;
}
- DBG("deletePartsUnknown: executeNoBlobs [in] bat=" << bat);
- if (theNdbCon->executeNoBlobs(NoCommit) == -1)
- return -1;
- DBG("deletePartsUnknown: executeNoBlobs [out]");
+ DBUG_PRINT("info", ("bat=%u", bat));
+ if (theNdbCon->executeNoBlobs(NdbTransaction::NoCommit) == -1)
+ DBUG_RETURN(-1);
n = 0;
while (n < bat) {
NdbOperation* tOp = tOpList[n];
if (tOp->theError.code != 0) {
if (tOp->theError.code != 626) {
setErrorCode(tOp);
- return -1;
+ DBUG_RETURN(-1);
}
// first non-existent part
- DBG("deletePartsUnknown [out] count=" << count);
- return 0;
+ DBUG_PRINT("info", ("count=%u", count));
+ DBUG_RETURN(0);
}
n++;
count++;
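deletePartsUnknown removes blob parts without knowing how many exist: it issues batches of ignore-error deletes (AO_IgnoreError), executes them with executeNoBlobs(NoCommit), and stops at the first delete that reports tuple-not-found (error 626), growing the batch between rounds from minbat 1 toward maxbat 256. A sketch of that loop with the execute round trip replaced by a stub over a known part count; the exact growth step (doubling here) is the sketch's assumption, since that line falls outside the hunk:

    #include <cstdio>

    // Stub for one executeNoBlobs(NoCommit) round: how many of the bat deletes
    // starting at part number "from" found a tuple, given "existing" real parts.
    // Deletes past "existing" come back with error 626 in the real code.
    static unsigned try_delete(unsigned from, unsigned bat, unsigned existing) {
      unsigned hits = 0;
      while (hits < bat && from + hits < existing) hits++;
      return hits;
    }

    // Mirrors the shape of deletePartsUnknown(): grow the batch until a round
    // comes back short, i.e. some delete hit a non-existent part.
    static unsigned delete_parts_unknown(unsigned existing) {
      const unsigned maxbat = 256, minbat = 1;
      unsigned bat = minbat, count = 0, rounds = 0;
      for (;;) {
        unsigned hits = try_delete(count, bat, existing);
        count += hits;
        rounds++;
        if (hits < bat)                            // first non-existent part reached
          break;
        bat = (2 * bat < maxbat) ? 2 * bat : maxbat;  // growth step assumed here
      }
      std::printf("deleted %u parts in %u round trips\n", count, rounds);
      return count;
    }

    int main() {
      delete_parts_unknown(1000);
      return 0;
    }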
@@ -1009,31 +996,29 @@ NdbBlob::deletePartsUnknown(Uint32 part)
int
NdbBlob::executePendingBlobReads()
{
+ DBUG_ENTER("NdbBlob::executePendingBlobReads");
Uint8 flags = (1 << NdbOperation::ReadRequest);
if (thePendingBlobOps & flags) {
- DBG("executePendingBlobReads: executeNoBlobs [in]");
- if (theNdbCon->executeNoBlobs(NoCommit) == -1)
- return -1;
- DBG("executePendingBlobReads: executeNoBlobs [out]");
+ if (theNdbCon->executeNoBlobs(NdbTransaction::NoCommit) == -1)
+ DBUG_RETURN(-1);
thePendingBlobOps = 0;
theNdbCon->thePendingBlobOps = 0;
}
- return 0;
+ DBUG_RETURN(0);
}
int
NdbBlob::executePendingBlobWrites()
{
+ DBUG_ENTER("NdbBlob::executePendingBlobWrites");
Uint8 flags = 0xFF & ~(1 << NdbOperation::ReadRequest);
if (thePendingBlobOps & flags) {
- DBG("executePendingBlobWrites: executeNoBlobs [in]");
- if (theNdbCon->executeNoBlobs(NoCommit) == -1)
- return -1;
- DBG("executePendingBlobWrites: executeNoBlobs [out]");
+ if (theNdbCon->executeNoBlobs(NdbTransaction::NoCommit) == -1)
+ DBUG_RETURN(-1);
thePendingBlobOps = 0;
theNdbCon->thePendingBlobOps = 0;
}
- return 0;
+ DBUG_RETURN(0);
}
// callbacks
@@ -1041,15 +1026,14 @@ NdbBlob::executePendingBlobWrites()
int
NdbBlob::invokeActiveHook()
{
- DBG("invokeActiveHook [in]");
+ DBUG_ENTER("NdbBlob::invokeActiveHook");
assert(theState == Active && theActiveHook != NULL);
int ret = (*theActiveHook)(this, theActiveHookArg);
- DBG("invokeActiveHook [out] ret=" << ret);
if (ret != 0) {
// no error is set on blob level
- return -1;
+ DBUG_RETURN(-1);
}
- return 0;
+ DBUG_RETURN(0);
}
// blob handle maintenance
@@ -1060,8 +1044,10 @@ NdbBlob::invokeActiveHook()
* data. For read operation adds read of head+inline.
*/
int
-NdbBlob::atPrepare(NdbConnection* aCon, NdbOperation* anOp, const NdbColumnImpl* aColumn)
+NdbBlob::atPrepare(NdbTransaction* aCon, NdbOperation* anOp, const NdbColumnImpl* aColumn)
{
+ DBUG_ENTER("NdbBlob::atPrepare");
+ DBUG_PRINT("info", ("this=%p op=%p con=%p", this, anOp, aCon));
assert(theState == Idle);
// ndb api stuff
theNdb = anOp->theNdb;
@@ -1070,7 +1056,6 @@ NdbBlob::atPrepare(NdbConnection* aCon, NdbOperation* anOp, const NdbColumnImpl*
theTable = anOp->m_currentTable;
theAccessTable = anOp->m_accessTable;
theColumn = aColumn;
- DBG("atPrepare [in]");
NdbDictionary::Column::Type partType = NdbDictionary::Column::Undefined;
switch (theColumn->getType()) {
case NdbDictionary::Column::Blob:
@@ -1083,7 +1068,7 @@ NdbBlob::atPrepare(NdbConnection* aCon, NdbOperation* anOp, const NdbColumnImpl*
break;
default:
setErrorCode(NdbBlobImpl::ErrUsage);
- return -1;
+ DBUG_RETURN(-1);
}
// sizes
theInlineSize = theColumn->getInlineSize();
@@ -1101,13 +1086,13 @@ NdbBlob::atPrepare(NdbConnection* aCon, NdbOperation* anOp, const NdbColumnImpl*
bc->getType() != partType ||
bc->getLength() != (int)thePartSize) {
setErrorCode(NdbBlobImpl::ErrTable);
- return -1;
+ DBUG_RETURN(-1);
}
theBlobTable = &NdbTableImpl::getImpl(*bt);
}
// buffers
- theKeyBuf.alloc(theTable->m_sizeOfKeysInWords << 2);
- theAccessKeyBuf.alloc(theAccessTable->m_sizeOfKeysInWords << 2);
+ theKeyBuf.alloc(theTable->m_keyLenInWords << 2);
+ theAccessKeyBuf.alloc(theAccessTable->m_keyLenInWords << 2);
theHeadInlineBuf.alloc(sizeof(Head) + theInlineSize);
theHeadInlineCopyBuf.alloc(sizeof(Head) + theInlineSize);
thePartBuf.alloc(thePartSize);
@@ -1119,25 +1104,25 @@ NdbBlob::atPrepare(NdbConnection* aCon, NdbOperation* anOp, const NdbColumnImpl*
if (isTableOp()) {
// get table key
Uint32* data = (Uint32*)theKeyBuf.data;
- unsigned size = theTable->m_sizeOfKeysInWords;
+ unsigned size = theTable->m_keyLenInWords;
if (theNdbOp->getKeyFromTCREQ(data, size) == -1) {
setErrorCode(NdbBlobImpl::ErrUsage);
- return -1;
+ DBUG_RETURN(-1);
}
}
if (isIndexOp()) {
// get index key
Uint32* data = (Uint32*)theAccessKeyBuf.data;
- unsigned size = theAccessTable->m_sizeOfKeysInWords;
+ unsigned size = theAccessTable->m_keyLenInWords;
if (theNdbOp->getKeyFromTCREQ(data, size) == -1) {
setErrorCode(NdbBlobImpl::ErrUsage);
- return -1;
+ DBUG_RETURN(-1);
}
}
if (isReadOp()) {
// add read of head+inline in this op
if (getHeadInlineValue(theNdbOp) == -1)
- return -1;
+ DBUG_RETURN(-1);
}
if (isInsertOp()) {
// becomes NULL unless set before execute
@@ -1155,16 +1140,15 @@ NdbBlob::atPrepare(NdbConnection* aCon, NdbOperation* anOp, const NdbColumnImpl*
if (isScanOp()) {
// add read of head+inline in this op
if (getHeadInlineValue(theNdbOp) == -1)
- return -1;
+ DBUG_RETURN(-1);
supportedOp = true;
}
if (! supportedOp) {
setErrorCode(NdbBlobImpl::ErrUsage);
- return -1;
+ DBUG_RETURN(-1);
}
setState(Prepared);
- DBG("atPrepare [out]");
- return 0;
+ DBUG_RETURN(0);
}
/*
@@ -1175,11 +1159,12 @@ NdbBlob::atPrepare(NdbConnection* aCon, NdbOperation* anOp, const NdbColumnImpl*
* back after postExecute.
*/
int
-NdbBlob::preExecute(ExecType anExecType, bool& batch)
+NdbBlob::preExecute(NdbTransaction::ExecType anExecType, bool& batch)
{
- DBG("preExecute [in]");
+ DBUG_ENTER("NdbBlob::preExecute");
+ DBUG_PRINT("info", ("this=%p op=%p con=%p", this, theNdbOp, theNdbCon));
if (theState == Invalid)
- return -1;
+ DBUG_RETURN(-1);
assert(theState == Prepared);
// handle different operation types
assert(isKeyOp());
@@ -1197,7 +1182,7 @@ NdbBlob::preExecute(ExecType anExecType, bool& batch)
Uint32 bytes = theGetSetBytes - theInlineSize;
assert(thePos == theInlineSize);
if (writeDataPrivate(buf, bytes) == -1)
- return -1;
+ DBUG_RETURN(-1);
if (theHeadInlineUpdateFlag) {
// add an operation to update head+inline
NdbOperation* tOp = theNdbCon->getNdbOperation(theTable);
@@ -1206,9 +1191,9 @@ NdbBlob::preExecute(ExecType anExecType, bool& batch)
setTableKeyValue(tOp) == -1 ||
setHeadInlineValue(tOp) == -1) {
setErrorCode(NdbBlobImpl::ErrAbort);
- return -1;
+ DBUG_RETURN(-1);
}
- DBG("add op to update head+inline");
+ DBUG_PRINT("info", ("add op to update head+inline"));
}
}
}
@@ -1221,15 +1206,15 @@ NdbBlob::preExecute(ExecType anExecType, bool& batch)
setTableKeyValue(tOp) == -1 ||
getHeadInlineValue(tOp) == -1) {
setErrorCode(tOp);
- return -1;
+ DBUG_RETURN(-1);
}
if (isWriteOp()) {
- tOp->m_abortOption = AO_IgnoreError;
+ tOp->m_abortOption = NdbTransaction::AO_IgnoreError;
}
theHeadInlineReadOp = tOp;
// execute immediately
batch = true;
- DBG("add op before to read head+inline");
+ DBUG_PRINT("info", ("add op before to read head+inline"));
}
}
if (isIndexOp()) {
@@ -1245,7 +1230,7 @@ NdbBlob::preExecute(ExecType anExecType, bool& batch)
setAccessKeyValue(tOp) == -1 ||
tOp->getValue(pkAttrId, theKeyBuf.data) == NULL) {
setErrorCode(tOp);
- return -1;
+ DBUG_RETURN(-1);
}
} else {
NdbIndexOperation* tOp = theNdbCon->getNdbIndexOperation(theAccessTable->m_index, theTable, theNdbOp);
@@ -1254,11 +1239,11 @@ NdbBlob::preExecute(ExecType anExecType, bool& batch)
setAccessKeyValue(tOp) == -1 ||
getTableKeyValue(tOp) == -1) {
setErrorCode(tOp);
- return -1;
+ DBUG_RETURN(-1);
}
}
}
- DBG("added op before to read table key");
+ DBUG_PRINT("info", ("added op before to read table key"));
if (isUpdateOp() || isDeleteOp()) {
// add op before this one to read head+inline via index
NdbIndexOperation* tOp = theNdbCon->getNdbIndexOperation(theAccessTable->m_index, theTable, theNdbOp);
@@ -1267,15 +1252,15 @@ NdbBlob::preExecute(ExecType anExecType, bool& batch)
setAccessKeyValue(tOp) == -1 ||
getHeadInlineValue(tOp) == -1) {
setErrorCode(tOp);
- return -1;
+ DBUG_RETURN(-1);
}
if (isWriteOp()) {
- tOp->m_abortOption = AO_IgnoreError;
+ tOp->m_abortOption = NdbTransaction::AO_IgnoreError;
}
theHeadInlineReadOp = tOp;
// execute immediately
batch = true;
- DBG("added index op before to read head+inline");
+ DBUG_PRINT("info", ("added index op before to read head+inline"));
}
if (isWriteOp()) {
// XXX until IgnoreError fixed for index op
@@ -1293,10 +1278,10 @@ NdbBlob::preExecute(ExecType anExecType, bool& batch)
n = theInlineSize;
assert(thePos == 0);
if (writeDataPrivate(theSetBuf, n) == -1)
- return -1;
+ DBUG_RETURN(-1);
}
if (setHeadInlineValue(theNdbOp) == -1)
- return -1;
+ DBUG_RETURN(-1);
// the read op before us may overwrite
theHeadInlineCopyBuf.copyfrom(theHeadInlineBuf);
}
@@ -1305,8 +1290,8 @@ NdbBlob::preExecute(ExecType anExecType, bool& batch)
// need blob head for callback
batch = true;
}
- DBG("preExecute [out] batch=" << batch);
- return 0;
+ DBUG_PRINT("info", ("batch=%u", batch));
+ DBUG_RETURN(0);
}
/*
@@ -1316,18 +1301,19 @@ NdbBlob::preExecute(ExecType anExecType, bool& batch)
* any remaining prepared operations.
*/
int
-NdbBlob::postExecute(ExecType anExecType)
+NdbBlob::postExecute(NdbTransaction::ExecType anExecType)
{
- DBG("postExecute [in] type=" << anExecType);
+ DBUG_ENTER("NdbBlob::postExecute");
+ DBUG_PRINT("info", ("this=%p op=%p con=%p anExecType=%u", this, theNdbOp, theNdbCon, anExecType));
if (theState == Invalid)
- return -1;
+ DBUG_RETURN(-1);
if (theState == Active) {
- setState(anExecType == NoCommit ? Active : Closed);
- DBG("postExecute [skip]");
- return 0;
+ setState(anExecType == NdbTransaction::NoCommit ? Active : Closed);
+ DBUG_PRINT("info", ("skip active"));
+ DBUG_RETURN(0);
}
assert(theState == Prepared);
- setState(anExecType == NoCommit ? Active : Closed);
+ setState(anExecType == NdbTransaction::NoCommit ? Active : Closed);
assert(isKeyOp());
if (isIndexOp()) {
NdbBlob* tFirstBlob = theNdbOp->theBlobList;
@@ -1340,42 +1326,43 @@ NdbBlob::postExecute(ExecType anExecType)
if (isReadOp()) {
getHeadFromRecAttr();
if (setPos(0) == -1)
- return -1;
+ DBUG_RETURN(-1);
if (theGetFlag) {
assert(theGetSetBytes == 0 || theGetBuf != 0);
- assert(theGetSetBytes <= theInlineSize || anExecType == NoCommit);
+ assert(theGetSetBytes <= theInlineSize ||
+ anExecType == NdbTransaction::NoCommit);
Uint32 bytes = theGetSetBytes;
if (readDataPrivate(theGetBuf, bytes) == -1)
- return -1;
+ DBUG_RETURN(-1);
}
}
if (isUpdateOp()) {
- assert(anExecType == NoCommit);
+ assert(anExecType == NdbTransaction::NoCommit);
getHeadFromRecAttr();
if (theSetFlag) {
// setValue overwrites everything
if (theSetBuf != NULL) {
if (truncate(0) == -1)
- return -1;
+ DBUG_RETURN(-1);
assert(thePos == 0);
if (writeDataPrivate(theSetBuf, theGetSetBytes) == -1)
- return -1;
+ DBUG_RETURN(-1);
} else {
if (setNull() == -1)
- return -1;
+ DBUG_RETURN(-1);
}
}
}
if (isWriteOp() && isTableOp()) {
- assert(anExecType == NoCommit);
+ assert(anExecType == NdbTransaction::NoCommit);
if (theHeadInlineReadOp->theError.code == 0) {
int tNullFlag = theNullFlag;
Uint64 tLength = theLength;
Uint64 tPos = thePos;
getHeadFromRecAttr();
- DBG("tuple found");
+ DBUG_PRINT("info", ("tuple found"));
if (truncate(0) == -1)
- return -1;
+ DBUG_RETURN(-1);
// restore previous head+inline
theHeadInlineBuf.copyfrom(theHeadInlineCopyBuf);
theNullFlag = tNullFlag;
@@ -1384,16 +1371,16 @@ NdbBlob::postExecute(ExecType anExecType)
} else {
if (theHeadInlineReadOp->theError.code != 626) {
setErrorCode(theHeadInlineReadOp);
- return -1;
+ DBUG_RETURN(-1);
}
- DBG("tuple not found");
+ DBUG_PRINT("info", ("tuple not found"));
/*
* Read found no tuple but it is possible that a tuple was
* created after the read by another transaction. Delete all
* blob parts which may exist.
*/
if (deletePartsUnknown(0) == -1)
- return -1;
+ DBUG_RETURN(-1);
}
if (theSetFlag && theGetSetBytes > theInlineSize) {
assert(theSetBuf != NULL);
@@ -1401,48 +1388,47 @@ NdbBlob::postExecute(ExecType anExecType)
Uint32 bytes = theGetSetBytes - theInlineSize;
assert(thePos == theInlineSize);
if (writeDataPrivate(buf, bytes) == -1)
- return -1;
+ DBUG_RETURN(-1);
}
}
if (isWriteOp() && isIndexOp()) {
// XXX until IgnoreError fixed for index op
if (deletePartsUnknown(0) == -1)
- return -1;
+ DBUG_RETURN(-1);
if (theSetFlag && theGetSetBytes > theInlineSize) {
assert(theSetBuf != NULL);
const char* buf = theSetBuf + theInlineSize;
Uint32 bytes = theGetSetBytes - theInlineSize;
assert(thePos == theInlineSize);
if (writeDataPrivate(buf, bytes) == -1)
- return -1;
+ DBUG_RETURN(-1);
}
}
if (isDeleteOp()) {
- assert(anExecType == NoCommit);
+ assert(anExecType == NdbTransaction::NoCommit);
getHeadFromRecAttr();
if (deleteParts(0, getPartCount()) == -1)
- return -1;
+ DBUG_RETURN(-1);
}
- setState(anExecType == NoCommit ? Active : Closed);
+ setState(anExecType == NdbTransaction::NoCommit ? Active : Closed);
// activation callback
if (theActiveHook != NULL) {
if (invokeActiveHook() == -1)
- return -1;
+ DBUG_RETURN(-1);
}
- if (anExecType == NoCommit && theHeadInlineUpdateFlag) {
+ if (anExecType == NdbTransaction::NoCommit && theHeadInlineUpdateFlag) {
NdbOperation* tOp = theNdbCon->getNdbOperation(theTable);
if (tOp == NULL ||
tOp->updateTuple() == -1 ||
setTableKeyValue(tOp) == -1 ||
setHeadInlineValue(tOp) == -1) {
setErrorCode(NdbBlobImpl::ErrAbort);
- return -1;
+ DBUG_RETURN(-1);
}
- tOp->m_abortOption = AbortOnError;
- DBG("added op to update head+inline");
+ tOp->m_abortOption = NdbTransaction::AbortOnError;
+ DBUG_PRINT("info", ("added op to update head+inline"));
}
- DBG("postExecute [out]");
- return 0;
+ DBUG_RETURN(0);
}
/*
@@ -1452,9 +1438,10 @@ NdbBlob::postExecute(ExecType anExecType)
int
NdbBlob::preCommit()
{
- DBG("preCommit [in]");
+ DBUG_ENTER("NdbBlob::preCommit");
+ DBUG_PRINT("info", ("this=%p op=%p con=%p", this, theNdbOp, theNdbCon));
if (theState == Invalid)
- return -1;
+ DBUG_RETURN(-1);
assert(theState == Active);
assert(isKeyOp());
if (isInsertOp() || isUpdateOp() || isWriteOp()) {
@@ -1466,14 +1453,13 @@ NdbBlob::preCommit()
setTableKeyValue(tOp) == -1 ||
setHeadInlineValue(tOp) == -1) {
setErrorCode(NdbBlobImpl::ErrAbort);
- return -1;
+ DBUG_RETURN(-1);
}
- tOp->m_abortOption = AbortOnError;
- DBG("added op to update head+inline");
+ tOp->m_abortOption = NdbTransaction::AbortOnError;
+ DBUG_PRINT("info", ("added op to update head+inline"));
}
}
- DBG("preCommit [out]");
- return 0;
+ DBUG_RETURN(0);
}
/*
@@ -1482,35 +1468,35 @@ NdbBlob::preCommit()
int
NdbBlob::atNextResult()
{
- DBG("atNextResult [in]");
+ DBUG_ENTER("NdbBlob::atNextResult");
+ DBUG_PRINT("info", ("this=%p op=%p con=%p", this, theNdbOp, theNdbCon));
if (theState == Invalid)
- return -1;
+ DBUG_RETURN(-1);
assert(isScanOp());
// get primary key
{ Uint32* data = (Uint32*)theKeyBuf.data;
- unsigned size = theTable->m_sizeOfKeysInWords;
+ unsigned size = theTable->m_keyLenInWords;
if (((NdbScanOperation*)theNdbOp)->getKeyFromKEYINFO20(data, size) == -1) {
setErrorCode(NdbBlobImpl::ErrUsage);
- return -1;
+ DBUG_RETURN(-1);
}
}
getHeadFromRecAttr();
if (setPos(0) == -1)
- return -1;
+ DBUG_RETURN(-1);
if (theGetFlag) {
assert(theGetSetBytes == 0 || theGetBuf != 0);
Uint32 bytes = theGetSetBytes;
if (readDataPrivate(theGetBuf, bytes) == -1)
- return -1;
+ DBUG_RETURN(-1);
}
setState(Active);
// activation callback
if (theActiveHook != NULL) {
if (invokeActiveHook() == -1)
- return -1;
+ DBUG_RETURN(-1);
}
- DBG("atNextResult [out]");
- return 0;
+ DBUG_RETURN(0);
}
// misc
@@ -1526,13 +1512,15 @@ NdbBlob::getColumn()
void
NdbBlob::setErrorCode(int anErrorCode, bool invalidFlag)
{
- DBG("setErrorCode code=" << anErrorCode);
+ DBUG_ENTER("NdbBlob::setErrorCode");
+ DBUG_PRINT("info", ("this=%p code=%u", this, anErrorCode));
theError.code = anErrorCode;
// conditionally copy error to operation level
if (theNdbOp != NULL && theNdbOp->theError.code == 0)
theNdbOp->setErrorCode(theError.code);
if (invalidFlag)
setState(Invalid);
+ DBUG_VOID_RETURN;
}
void
@@ -1551,7 +1539,7 @@ NdbBlob::setErrorCode(NdbOperation* anOp, bool invalidFlag)
}
void
-NdbBlob::setErrorCode(NdbConnection* aCon, bool invalidFlag)
+NdbBlob::setErrorCode(NdbTransaction* aCon, bool invalidFlag)
{
int code = 0;
if (theNdbCon != NULL && (code = theNdbCon->theError.code) != 0)
diff --git a/ndb/src/ndbapi/NdbCursorOperation.cpp b/ndb/src/ndbapi/NdbCursorOperation.cpp
deleted file mode 100644
index a9f84c4c110..00000000000
--- a/ndb/src/ndbapi/NdbCursorOperation.cpp
+++ /dev/null
@@ -1,51 +0,0 @@
-/* Copyright (C) 2003 MySQL AB
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-
-/*****************************************************************************
- * Name: NdbCursorOperation.cpp
- * Include:
- * Link:
- * Author: UABMASD Martin Sköld INN/V Alzato
- * Date: 2002-04-01
- * Version: 0.1
- * Description: Cursor support
- * Documentation:
- * Adjust: 2002-04-01 UABMASD First version.
- ****************************************************************************/
-
-#include <NdbCursorOperation.hpp>
-#include <NdbResultSet.hpp>
-
-NdbCursorOperation::NdbCursorOperation(Ndb* aNdb) :
-{
-}
-
-NdbCursorOperation::~NdbCursorOperation()
-{
- if (m_resultSet)
- delete m_resultSet;
-}
-
-void NdbCursorOperation::cursInit()
-{
- // Initialize result set
-}
-
-NdbResultSet* NdbCursorOperation::getResultSet()
-{
-}
-
-
diff --git a/ndb/src/ndbapi/NdbDictionary.cpp b/ndb/src/ndbapi/NdbDictionary.cpp
index 00db5704949..79b6fb4c0e8 100644
--- a/ndb/src/ndbapi/NdbDictionary.cpp
+++ b/ndb/src/ndbapi/NdbDictionary.cpp
@@ -176,52 +176,16 @@ NdbDictionary::Column::getPrimaryKey() const {
return m_impl.m_pk;
}
-void
-NdbDictionary::Column::setTupleKey(bool val){
- m_impl.m_tupleKey = val;
-}
-
-bool
-NdbDictionary::Column::getTupleKey() const {
- return m_impl.m_tupleKey;
-}
-
void
-NdbDictionary::Column::setDistributionKey(bool val){
+NdbDictionary::Column::setPartitionKey(bool val){
m_impl.m_distributionKey = val;
}
bool
-NdbDictionary::Column::getDistributionKey() const{
+NdbDictionary::Column::getPartitionKey() const{
return m_impl.m_distributionKey;
}
-void
-NdbDictionary::Column::setDistributionGroup(bool val, int bits){
- m_impl.m_distributionGroup = val;
- m_impl.m_distributionGroupBits = bits;
-}
-
-bool
-NdbDictionary::Column::getDistributionGroup() const {
- return m_impl.m_distributionGroup;
-}
-
-int
-NdbDictionary::Column::getDistributionGroupBits() const{
- return m_impl.m_distributionGroupBits;
-}
-
-void
-NdbDictionary::Column::setIndexOnlyStorage(bool val){
- m_impl.m_indexOnly = val;
-}
-
-bool
-NdbDictionary::Column::getIndexOnlyStorage() const {
- return m_impl.m_indexOnly;
-}
-
const NdbDictionary::Table *
NdbDictionary::Column::getBlobTable() const {
NdbTableImpl * t = m_impl.m_blobTable;
@@ -267,6 +231,12 @@ NdbDictionary::Column::equal(const NdbDictionary::Column & col) const {
return m_impl.equal(col.m_impl);
}
+int
+NdbDictionary::Column::getSizeInBytes() const
+{
+ return m_impl.m_attrSize * m_impl.m_arraySize;
+}
+
/*****************************************************************
* Table facade
*/
@@ -462,13 +432,17 @@ NdbDictionary::Table::getRowSizeInBytes() const {
int sz = 0;
for(int i = 0; i<getNoOfColumns(); i++){
const NdbDictionary::Column * c = getColumn(i);
- const NdbColumnImpl & col = NdbColumnImpl::getImpl(* c);
- sz += (((col.m_attrSize * col.m_arraySize) + 3) / 4);
+ sz += (c->getSizeInBytes()+ 3) / 4;
}
return sz * 4;
}
int
+NdbDictionary::Table::getReplicaCount() const {
+ return m_impl.m_replicaCount;
+}
+
+int
NdbDictionary::Table::createTableInDb(Ndb* pNdb, bool equalOk) const {
const NdbDictionary::Table * pTab =
pNdb->getDictionary()->getTable(getName());
@@ -621,6 +595,13 @@ NdbDictionary::Event::Event(const char * name)
setName(name);
}
+NdbDictionary::Event::Event(const char * name, const Table& table)
+ : m_impl(* new NdbEventImpl(* this))
+{
+ setName(name);
+ setTable(table);
+}
+
NdbDictionary::Event::Event(NdbEventImpl & impl)
: m_impl(impl)
{
@@ -640,12 +621,30 @@ NdbDictionary::Event::setName(const char * name)
m_impl.setName(name);
}
+const char *
+NdbDictionary::Event::getName() const
+{
+ return m_impl.getName();
+}
+
+void
+NdbDictionary::Event::setTable(const Table& table)
+{
+ m_impl.setTable(table);
+}
+
void
NdbDictionary::Event::setTable(const char * table)
{
m_impl.setTable(table);
}
+const char*
+NdbDictionary::Event::getTableName() const
+{
+ return m_impl.getTableName();
+}
+
void
NdbDictionary::Event::addTableEvent(const TableEvent t)
{
@@ -653,11 +652,17 @@ NdbDictionary::Event::addTableEvent(const TableEvent t)
}
void
-NdbDictionary::Event::setDurability(const EventDurability d)
+NdbDictionary::Event::setDurability(EventDurability d)
{
m_impl.setDurability(d);
}
+NdbDictionary::Event::EventDurability
+NdbDictionary::Event::getDurability() const
+{
+ return m_impl.getDurability();
+}
+
void
NdbDictionary::Event::addColumn(const Column & c){
NdbColumnImpl* col = new NdbColumnImpl;
@@ -685,6 +690,11 @@ NdbDictionary::Event::addEventColumns(int n, const char ** names)
addEventColumn(names[i]);
}
+int NdbDictionary::Event::getNoOfEventColumns() const
+{
+ return m_impl.getNoOfEventColumns();
+}
+
NdbDictionary::Object::Status
NdbDictionary::Event::getObjectStatus() const
{
@@ -742,7 +752,8 @@ NdbDictionary::Dictionary::alterTable(const Table & t){
}
const NdbDictionary::Table *
-NdbDictionary::Dictionary::getTable(const char * name, void **data){
+NdbDictionary::Dictionary::getTable(const char * name, void **data) const
+{
NdbTableImpl * t = m_impl.getTable(name, data);
if(t)
return t->m_facade;
@@ -755,15 +766,18 @@ void NdbDictionary::Dictionary::set_local_table_data_size(unsigned sz)
}
const NdbDictionary::Table *
-NdbDictionary::Dictionary::getTable(const char * name){
+NdbDictionary::Dictionary::getTable(const char * name) const
+{
return getTable(name, 0);
}
void
NdbDictionary::Dictionary::invalidateTable(const char * name){
+ DBUG_ENTER("NdbDictionaryImpl::invalidateTable");
NdbTableImpl * t = m_impl.getTable(name);
if(t)
m_impl.invalidateObject(* t);
+ DBUG_VOID_RETURN;
}
void
@@ -788,7 +802,7 @@ NdbDictionary::Dictionary::dropIndex(const char * indexName,
const NdbDictionary::Index *
NdbDictionary::Dictionary::getIndex(const char * indexName,
- const char * tableName)
+ const char * tableName) const
{
NdbIndexImpl * i = m_impl.getIndex(indexName, tableName);
if(i)
@@ -799,11 +813,13 @@ NdbDictionary::Dictionary::getIndex(const char * indexName,
void
NdbDictionary::Dictionary::invalidateIndex(const char * indexName,
const char * tableName){
+ DBUG_ENTER("NdbDictionaryImpl::invalidateIndex");
NdbIndexImpl * i = m_impl.getIndex(indexName, tableName);
if(i) {
assert(i->m_table != 0);
m_impl.invalidateObject(* i->m_table);
}
+ DBUG_VOID_RETURN;
}
void
@@ -818,7 +834,7 @@ NdbDictionary::Dictionary::removeCachedIndex(const char * indexName,
const NdbDictionary::Table *
NdbDictionary::Dictionary::getIndexTable(const char * indexName,
- const char * tableName)
+ const char * tableName) const
{
NdbIndexImpl * i = m_impl.getIndex(indexName, tableName);
NdbTableImpl * t = m_impl.getTable(tableName);
@@ -858,6 +874,12 @@ NdbDictionary::Dictionary::listObjects(List& list, Object::Type type)
}
int
+NdbDictionary::Dictionary::listObjects(List& list, Object::Type type) const
+{
+ return m_impl.listObjects(list, type);
+}
+
+int
NdbDictionary::Dictionary::listIndexes(List& list, const char * tableName)
{
const NdbDictionary::Table* tab= getTable(tableName);
@@ -868,6 +890,18 @@ NdbDictionary::Dictionary::listIndexes(List& list, const char * tableName)
return m_impl.listIndexes(list, tab->getTableId());
}
+int
+NdbDictionary::Dictionary::listIndexes(List& list,
+ const char * tableName) const
+{
+ const NdbDictionary::Table* tab= getTable(tableName);
+ if(tab == 0)
+ {
+ return -1;
+ }
+ return m_impl.listIndexes(list, tab->getTableId());
+}
+
const struct NdbError &
NdbDictionary::Dictionary::getNdbError() const {
return m_impl.getNdbError();
@@ -924,6 +958,12 @@ operator<<(NdbOut& out, const NdbDictionary::Column& col)
case NdbDictionary::Column::Olddecimalunsigned:
out << "Olddecimalunsigned(" << col.getPrecision() << "," << col.getScale() << ")";
break;
+ case NdbDictionary::Column::Decimal:
+ out << "Decimal(" << col.getPrecision() << "," << col.getScale() << ")";
+ break;
+ case NdbDictionary::Column::Decimalunsigned:
+ out << "Decimalunsigned(" << col.getPrecision() << "," << col.getScale() << ")";
+ break;
case NdbDictionary::Column::Char:
out << "Char(" << col.getLength() << ";" << csname << ")";
break;
@@ -962,10 +1002,37 @@ operator<<(NdbOut& out, const NdbDictionary::Column& col)
case NdbDictionary::Column::Undefined:
out << "Undefined";
break;
+ case NdbDictionary::Column::Bit:
+ out << "Bit(" << col.getLength() << ")";
+ break;
+ case NdbDictionary::Column::Longvarchar:
+ out << "Longvarchar(" << col.getLength() << ";" << csname << ")";
+ break;
+ case NdbDictionary::Column::Longvarbinary:
+ out << "Longvarbinary(" << col.getLength() << ")";
+ break;
default:
out << "Type" << (Uint32)col.getType();
break;
}
+ // show unusual (non-MySQL) array size
+ if (col.getLength() != 1) {
+ switch (col.getType()) {
+ case NdbDictionary::Column::Char:
+ case NdbDictionary::Column::Varchar:
+ case NdbDictionary::Column::Binary:
+ case NdbDictionary::Column::Varbinary:
+ case NdbDictionary::Column::Blob:
+ case NdbDictionary::Column::Text:
+ case NdbDictionary::Column::Bit:
+ case NdbDictionary::Column::Longvarchar:
+ case NdbDictionary::Column::Longvarbinary:
+ break;
+ default:
+ out << " [" << col.getLength() << "]";
+ break;
+ }
+ }
if (col.getPrimaryKey())
out << " PRIMARY KEY";
else if (! col.getNullable())
@@ -973,13 +1040,15 @@ operator<<(NdbOut& out, const NdbDictionary::Column& col)
else
out << " NULL";
- if (col.getDistributionKey())
+ if(col.getDistributionKey())
out << " DISTRIBUTION KEY";
-
+
return out;
}
const NdbDictionary::Column * NdbDictionary::Column::FRAGMENT = 0;
+const NdbDictionary::Column * NdbDictionary::Column::FRAGMENT_MEMORY = 0;
const NdbDictionary::Column * NdbDictionary::Column::ROW_COUNT = 0;
const NdbDictionary::Column * NdbDictionary::Column::COMMIT_COUNT = 0;
-
+const NdbDictionary::Column * NdbDictionary::Column::ROW_SIZE = 0;
+const NdbDictionary::Column * NdbDictionary::Column::RANGE_NO = 0;
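/*
 * Illustrative sketch only: the row-size arithmetic behind the new
 * Column::getSizeInBytes() / Table::getRowSizeInBytes() pair in the hunks
 * above.  Each column contributes attrSize * arraySize bytes, rounded up to
 * a whole number of 32-bit words, and the row size is the word total in bytes.
 */
static int row_size_in_bytes(const int* col_bytes, int n_columns)
{
  int words = 0;
  for (int i = 0; i < n_columns; i++)
    words += (col_bytes[i] + 3) / 4;   /* round each column up to whole words */
  return words * 4;                    /* e.g. columns of 1, 8 and 10 bytes -> 4+8+12 = 24 */
}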
diff --git a/ndb/src/ndbapi/NdbDictionaryImpl.cpp b/ndb/src/ndbapi/NdbDictionaryImpl.cpp
index 8b1847502d9..8f37c9fe4a0 100644
--- a/ndb/src/ndbapi/NdbDictionaryImpl.cpp
+++ b/ndb/src/ndbapi/NdbDictionaryImpl.cpp
@@ -70,16 +70,11 @@ NdbColumnImpl::operator=(const NdbColumnImpl& col)
m_scale = col.m_scale;
m_length = col.m_length;
m_pk = col.m_pk;
- m_tupleKey = col.m_tupleKey;
m_distributionKey = col.m_distributionKey;
- m_distributionGroup = col.m_distributionGroup;
- m_distributionGroupBits = col.m_distributionGroupBits;
m_nullable = col.m_nullable;
- m_indexOnly = col.m_indexOnly;
m_autoIncrement = col.m_autoIncrement;
m_autoIncrementInitialValue = col.m_autoIncrementInitialValue;
m_defaultValue = col.m_defaultValue;
- m_attrType = col.m_attrType;
m_attrSize = col.m_attrSize;
m_arraySize = col.m_arraySize;
m_keyInfoPos = col.m_keyInfoPos;
@@ -116,6 +111,8 @@ NdbColumnImpl::init(Type t)
break;
case Olddecimal:
case Olddecimalunsigned:
+ case Decimal:
+ case Decimalunsigned:
m_precision = 10;
m_scale = 0;
m_length = 1;
@@ -157,17 +154,32 @@ NdbColumnImpl::init(Type t)
m_length = 1;
m_cs = NULL;
break;
+ case Bit:
+ m_precision = 0;
+ m_scale = 0;
+ m_length = 1;
+ m_cs = NULL;
+ break;
+ case Longvarchar:
+ m_precision = 0;
+ m_scale = 0;
+ m_length = 1; // legal
+ m_cs = default_cs;
+ break;
+ case Longvarbinary:
+ m_precision = 0;
+ m_scale = 0;
+ m_length = 1; // legal
+ m_cs = NULL;
+ break;
+ default:
case Undefined:
assert(false);
break;
}
m_pk = false;
m_nullable = false;
- m_tupleKey = false;
- m_indexOnly = false;
m_distributionKey = false;
- m_distributionGroup = false;
- m_distributionGroupBits = 8;
m_keyInfoPos = 0;
// next 2 are set at run time
m_attrSize = 0;
@@ -184,50 +196,40 @@ NdbColumnImpl::~NdbColumnImpl()
bool
NdbColumnImpl::equal(const NdbColumnImpl& col) const
{
+ DBUG_ENTER("NdbColumnImpl::equal");
if(strcmp(m_name.c_str(), col.m_name.c_str()) != 0){
- return false;
+ DBUG_RETURN(false);
}
if(m_type != col.m_type){
- return false;
+ DBUG_RETURN(false);
}
if(m_pk != col.m_pk){
- return false;
+ DBUG_RETURN(false);
}
if(m_nullable != col.m_nullable){
- return false;
+ DBUG_RETURN(false);
}
+#ifdef ndb_dictionary_dkey_fixed
if(m_pk){
- if(m_tupleKey != col.m_tupleKey){
- return false;
- }
- if(m_indexOnly != col.m_indexOnly){
- return false;
- }
if(m_distributionKey != col.m_distributionKey){
- return false;
- }
- if(m_distributionGroup != col.m_distributionGroup){
- return false;
- }
- if(m_distributionGroup &&
- (m_distributionGroupBits != col.m_distributionGroupBits)){
- return false;
+ DBUG_RETURN(false);
}
}
+#endif
if (m_precision != col.m_precision ||
m_scale != col.m_scale ||
m_length != col.m_length ||
m_cs != col.m_cs) {
- return false;
+ DBUG_RETURN(false);
}
if (m_autoIncrement != col.m_autoIncrement){
- return false;
+ DBUG_RETURN(false);
}
if(strcmp(m_defaultValue.c_str(), col.m_defaultValue.c_str()) != 0){
- return false;
+ DBUG_RETURN(false);
}
- return true;
+ DBUG_RETURN(true);
}
NdbDictionary::Column *
@@ -239,6 +241,11 @@ NdbColumnImpl::create_psuedo(const char * name){
col->m_impl.m_attrId = AttributeHeader::FRAGMENT;
col->m_impl.m_attrSize = 4;
col->m_impl.m_arraySize = 1;
+ } else if(!strcmp(name, "NDB$FRAGMENT_MEMORY")){
+ col->setType(NdbDictionary::Column::Bigunsigned);
+ col->m_impl.m_attrId = AttributeHeader::FRAGMENT_MEMORY;
+ col->m_impl.m_attrSize = 8;
+ col->m_impl.m_arraySize = 1;
} else if(!strcmp(name, "NDB$ROW_COUNT")){
col->setType(NdbDictionary::Column::Bigunsigned);
col->m_impl.m_attrId = AttributeHeader::ROW_COUNT;
@@ -249,6 +256,16 @@ NdbColumnImpl::create_psuedo(const char * name){
col->m_impl.m_attrId = AttributeHeader::COMMIT_COUNT;
col->m_impl.m_attrSize = 8;
col->m_impl.m_arraySize = 1;
+ } else if(!strcmp(name, "NDB$ROW_SIZE")){
+ col->setType(NdbDictionary::Column::Unsigned);
+ col->m_impl.m_attrId = AttributeHeader::ROW_SIZE;
+ col->m_impl.m_attrSize = 4;
+ col->m_impl.m_arraySize = 1;
+ } else if(!strcmp(name, "NDB$RANGE_NO")){
+ col->setType(NdbDictionary::Column::Unsigned);
+ col->m_impl.m_attrId = AttributeHeader::RANGE_NO;
+ col->m_impl.m_attrSize = 4;
+ col->m_impl.m_arraySize = 1;
} else {
abort();
}
@@ -283,69 +300,86 @@ NdbTableImpl::~NdbTableImpl()
void
NdbTableImpl::init(){
- clearNewProperties();
+ m_changeMask= 0;
+ m_tableId= RNIL;
m_frm.clear();
- m_fragmentType = NdbDictionary::Object::FragAllSmall;
- m_logging = true;
- m_kvalue = 6;
- m_minLoadFactor = 78;
- m_maxLoadFactor = 80;
-
- m_index = 0;
- m_indexType = NdbDictionary::Index::Undefined;
-
- m_noOfKeys = 0;
- m_fragmentCount = 0;
- m_sizeOfKeysInWords = 0;
- m_noOfBlobs = 0;
+ m_fragmentType= NdbDictionary::Object::FragAllSmall;
+ m_hashValueMask= 0;
+ m_hashpointerValue= 0;
+ m_logging= true;
+ m_kvalue= 6;
+ m_minLoadFactor= 78;
+ m_maxLoadFactor= 80;
+ m_keyLenInWords= 0;
+ m_fragmentCount= 0;
+ m_dictionary= NULL;
+ m_index= NULL;
+ m_indexType= NdbDictionary::Index::Undefined;
+ m_noOfKeys= 0;
+ m_noOfDistributionKeys= 0;
+ m_noOfBlobs= 0;
+ m_replicaCount= 0;
}
bool
NdbTableImpl::equal(const NdbTableImpl& obj) const
{
+ DBUG_ENTER("NdbTableImpl::equal");
if ((m_internalName.c_str() == NULL) ||
(strcmp(m_internalName.c_str(), "") == 0) ||
(obj.m_internalName.c_str() == NULL) ||
(strcmp(obj.m_internalName.c_str(), "") == 0)) {
// Shallow equal
if(strcmp(getName(), obj.getName()) != 0){
- return false;
+ DBUG_PRINT("info",("name %s != %s",getName(),obj.getName()));
+ DBUG_RETURN(false);
}
} else
// Deep equal
if(strcmp(m_internalName.c_str(), obj.m_internalName.c_str()) != 0){
- return false;
+ {
+ DBUG_PRINT("info",("m_internalName %s != %s",
+ m_internalName.c_str(),obj.m_internalName.c_str()));
+ DBUG_RETURN(false);
+ }
}
if(m_fragmentType != obj.m_fragmentType){
- return false;
+ DBUG_PRINT("info",("m_fragmentType %d != %d",m_fragmentType,obj.m_fragmentType));
+ DBUG_RETURN(false);
}
if(m_columns.size() != obj.m_columns.size()){
- return false;
+ DBUG_PRINT("info",("m_columns.size %d != %d",m_columns.size(),obj.m_columns.size()));
+ DBUG_RETURN(false);
}
for(unsigned i = 0; i<obj.m_columns.size(); i++){
if(!m_columns[i]->equal(* obj.m_columns[i])){
- return false;
+ DBUG_PRINT("info",("m_columns [%d] != [%d]",i,i));
+ DBUG_RETURN(false);
}
}
if(m_logging != obj.m_logging){
- return false;
+ DBUG_PRINT("info",("m_logging %d != %d",m_logging,obj.m_logging));
+ DBUG_RETURN(false);
}
if(m_kvalue != obj.m_kvalue){
- return false;
+ DBUG_PRINT("info",("m_kvalue %d != %d",m_kvalue,obj.m_kvalue));
+ DBUG_RETURN(false);
}
if(m_minLoadFactor != obj.m_minLoadFactor){
- return false;
+ DBUG_PRINT("info",("m_minLoadFactor %d != %d",m_minLoadFactor,obj.m_minLoadFactor));
+ DBUG_RETURN(false);
}
if(m_maxLoadFactor != obj.m_maxLoadFactor){
- return false;
+ DBUG_PRINT("info",("m_maxLoadFactor %d != %d",m_maxLoadFactor,obj.m_maxLoadFactor));
+ DBUG_RETURN(false);
}
- return true;
+ DBUG_RETURN(true);
}
void
@@ -375,8 +409,9 @@ NdbTableImpl::assign(const NdbTableImpl& org)
delete m_index;
m_index = org.m_index;
+ m_noOfDistributionKeys = org.m_noOfDistributionKeys;
m_noOfKeys = org.m_noOfKeys;
- m_sizeOfKeysInWords = org.m_sizeOfKeysInWords;
+ m_keyLenInWords = org.m_keyLenInWords;
m_noOfBlobs = org.m_noOfBlobs;
m_version = org.m_version;
@@ -397,19 +432,6 @@ NdbTableImpl::getName() const
return m_newExternalName.c_str();
}
-void NdbTableImpl::clearNewProperties()
-{
- m_newExternalName.assign("");
- m_changeMask = 0;
-}
-
-void NdbTableImpl::copyNewProperties()
-{
- if (!m_newExternalName.empty()) {
- m_externalName.assign(m_newExternalName);
- AlterTableReq::setNameFlag(m_changeMask, true);
- }
-}
void
NdbTableImpl::buildColumnHash(){
@@ -477,6 +499,26 @@ NdbTableImpl::buildColumnHash(){
}
#endif
}
+
+Uint32
+NdbTableImpl::get_nodes(Uint32 hashValue, const Uint16 ** nodes) const
+{
+ if(m_replicaCount > 0)
+ {
+ Uint32 fragmentId = hashValue & m_hashValueMask;
+ if(fragmentId < m_hashpointerValue)
+ {
+ fragmentId = hashValue & ((m_hashValueMask << 1) + 1);
+ }
+ Uint32 pos = fragmentId * m_replicaCount;
+ if(pos + m_replicaCount <= m_fragments.size())
+ {
+ * nodes = m_fragments.getBase()+pos;
+ return m_replicaCount;
+ }
+ }
+ return 0;
+}
/**
* NdbIndexImpl
@@ -486,14 +528,22 @@ NdbIndexImpl::NdbIndexImpl() :
NdbDictionary::Index(* this),
m_facade(this)
{
- m_logging = true;
+ init();
}
NdbIndexImpl::NdbIndexImpl(NdbDictionary::Index & f) :
NdbDictionary::Index(* this),
m_facade(&f)
{
- m_logging = true;
+ init();
+}
+
+void NdbIndexImpl::init()
+{
+ m_indexId= RNIL;
+ m_type= NdbDictionary::Index::Undefined;
+ m_logging= true;
+ m_table= NULL;
}
NdbIndexImpl::~NdbIndexImpl(){
@@ -538,20 +588,26 @@ NdbEventImpl::NdbEventImpl() :
NdbDictionary::Event(* this),
m_facade(this)
{
- mi_type = 0;
- m_dur = NdbDictionary::Event::ED_UNDEFINED;
- eventOp = NULL;
- m_tableImpl = NULL;
+ init();
}
NdbEventImpl::NdbEventImpl(NdbDictionary::Event & f) :
NdbDictionary::Event(* this),
m_facade(&f)
{
- mi_type = 0;
- m_dur = NdbDictionary::Event::ED_UNDEFINED;
- eventOp = NULL;
- m_tableImpl = NULL;
+ init();
+}
+
+void NdbEventImpl::init()
+{
+ m_eventId= RNIL;
+ m_eventKey= RNIL;
+ m_tableId= RNIL;
+ mi_type= 0;
+ m_dur= NdbDictionary::Event::ED_UNDEFINED;
+ m_tableImpl= NULL;
+ m_bufferId= RNIL;
+ eventOp= NULL;
}
NdbEventImpl::~NdbEventImpl()
@@ -565,24 +621,30 @@ void NdbEventImpl::setName(const char * name)
m_externalName.assign(name);
}
+const char *NdbEventImpl::getName() const
+{
+ return m_externalName.c_str();
+}
+
+void
+NdbEventImpl::setTable(const NdbDictionary::Table& table)
+{
+ m_tableImpl= &NdbTableImpl::getImpl(table);
+ m_tableName.assign(m_tableImpl->getName());
+}
+
void
NdbEventImpl::setTable(const char * table)
{
m_tableName.assign(table);
}
-const char *
-NdbEventImpl::getTable() const
+const char *
+NdbEventImpl::getTableName() const
{
return m_tableName.c_str();
}
-const char *
-NdbEventImpl::getName() const
-{
- return m_externalName.c_str();
-}
-
void
NdbEventImpl::addTableEvent(const NdbDictionary::Event::TableEvent t = NdbDictionary::Event::TE_ALL)
{
@@ -595,11 +657,22 @@ NdbEventImpl::addTableEvent(const NdbDictionary::Event::TableEvent t = NdbDicti
}
void
-NdbEventImpl::setDurability(const NdbDictionary::Event::EventDurability d)
+NdbEventImpl::setDurability(NdbDictionary::Event::EventDurability d)
{
m_dur = d;
}
+NdbDictionary::Event::EventDurability
+NdbEventImpl::getDurability() const
+{
+ return m_dur;
+}
+
+int NdbEventImpl::getNoOfEventColumns() const
+{
+ return m_attrIds.size() + m_columns.size();
+}
+
/**
* NdbDictionaryImpl
*/
@@ -643,11 +716,17 @@ NdbDictionaryImpl::~NdbDictionaryImpl()
m_globalHash->lock();
if(--f_dictionary_count == 0){
delete NdbDictionary::Column::FRAGMENT;
+ delete NdbDictionary::Column::FRAGMENT_MEMORY;
delete NdbDictionary::Column::ROW_COUNT;
delete NdbDictionary::Column::COMMIT_COUNT;
+ delete NdbDictionary::Column::ROW_SIZE;
+ delete NdbDictionary::Column::RANGE_NO;
NdbDictionary::Column::FRAGMENT= 0;
+ NdbDictionary::Column::FRAGMENT_MEMORY= 0;
NdbDictionary::Column::ROW_COUNT= 0;
NdbDictionary::Column::COMMIT_COUNT= 0;
+ NdbDictionary::Column::ROW_SIZE= 0;
+ NdbDictionary::Column::RANGE_NO= 0;
}
m_globalHash->unlock();
} else {
@@ -656,19 +735,19 @@ NdbDictionaryImpl::~NdbDictionaryImpl()
}
Ndb_local_table_info *
-NdbDictionaryImpl::fetchGlobalTableImpl(const char * internalTableName)
+NdbDictionaryImpl::fetchGlobalTableImpl(const BaseString& internalTableName)
{
NdbTableImpl *impl;
m_globalHash->lock();
- impl = m_globalHash->get(internalTableName);
+ impl = m_globalHash->get(internalTableName.c_str());
m_globalHash->unlock();
if (impl == 0){
impl = m_receiver.getTable(internalTableName,
m_ndb.usingFullyQualifiedNames());
m_globalHash->lock();
- m_globalHash->put(internalTableName, impl);
+ m_globalHash->put(internalTableName.c_str(), impl);
m_globalHash->unlock();
if(impl == 0){
@@ -679,7 +758,7 @@ NdbDictionaryImpl::fetchGlobalTableImpl(const char * internalTableName)
Ndb_local_table_info *info=
Ndb_local_table_info::create(impl, m_local_table_data_size);
- m_localHash.put(internalTableName, info);
+ m_localHash.put(internalTableName.c_str(), info);
m_ndb.theFirstTupleId[impl->getTableId()] = ~0;
m_ndb.theLastTupleId[impl->getTableId()] = ~0;
@@ -710,10 +789,16 @@ NdbDictionaryImpl::setTransporter(class Ndb* ndb,
if(f_dictionary_count++ == 0){
NdbDictionary::Column::FRAGMENT=
NdbColumnImpl::create_psuedo("NDB$FRAGMENT");
+ NdbDictionary::Column::FRAGMENT_MEMORY=
+ NdbColumnImpl::create_psuedo("NDB$FRAGMENT_MEMORY");
NdbDictionary::Column::ROW_COUNT=
NdbColumnImpl::create_psuedo("NDB$ROW_COUNT");
NdbDictionary::Column::COMMIT_COUNT=
NdbColumnImpl::create_psuedo("NDB$COMMIT_COUNT");
+ NdbDictionary::Column::ROW_SIZE=
+ NdbColumnImpl::create_psuedo("NDB$ROW_SIZE");
+ NdbDictionary::Column::RANGE_NO=
+ NdbColumnImpl::create_psuedo("NDB$RANGE_NO");
}
m_globalHash->unlock();
return true;
@@ -721,14 +806,13 @@ NdbDictionaryImpl::setTransporter(class Ndb* ndb,
return false;
}
-NdbTableImpl *
-NdbDictionaryImpl::getIndexTable(NdbIndexImpl * index,
+NdbTableImpl *
+NdbDictionaryImpl::getIndexTable(NdbIndexImpl * index,
NdbTableImpl * table)
{
- const char * internalName =
- m_ndb.internalizeIndexName(table, index->getName());
-
- return getTable(m_ndb.externalizeTableName(internalName));
+ const BaseString internalName(
+ m_ndb.internalize_index_name(table, index->getName()));
+ return getTable(m_ndb.externalizeTableName(internalName.c_str()));
}
#if 0
@@ -745,7 +829,7 @@ NdbDictInterface::setTransporter(class TransporterFacade * tf)
execNodeStatus);
if ( m_blockNumber == -1 ) {
- m_error.code = 4105;
+ m_error.code= 4105;
return false; // no more free blocknumbers
}//if
Uint32 theNode = tf->ownId();
@@ -899,7 +983,7 @@ NdbDictInterface::dictSignal(NdbApiSignal* signal,
aNodeId = m_transporter->get_an_alive_node();
}
if(aNodeId == 0){
- m_error.code = 4009;
+ m_error.code= 4009;
m_transporter->unlock_mutex();
DBUG_RETURN(-1);
}
@@ -926,7 +1010,7 @@ NdbDictInterface::dictSignal(NdbApiSignal* signal,
}
}
- m_error.code = 0;
+ m_error.code= 0;
m_waiter.m_node = aNodeId;
m_waiter.m_state = wst;
@@ -960,7 +1044,7 @@ NdbDictInterface::dictSignal(NdbApiSignal* signal,
for (int j=0; j < noerrcodes; j++)
if(m_error.code == errcodes[j]) {
doContinue = 1;
- continue;
+ break;
}
if (doContinue)
continue;
@@ -970,79 +1054,91 @@ NdbDictInterface::dictSignal(NdbApiSignal* signal,
}
DBUG_RETURN(-1);
}
+#if 0
+/*
+ Get dictionary information for a table using table id as reference
-/*****************************************************************
- * get tab info
+ DESCRIPTION
+ Sends a GET_TABINFOREQ signal containing the table id
*/
-NdbTableImpl *
+NdbTableImpl *
NdbDictInterface::getTable(int tableId, bool fullyQualifiedNames)
{
NdbApiSignal tSignal(m_reference);
- GetTabInfoReq * const req = CAST_PTR(GetTabInfoReq, tSignal.getDataPtrSend());
-
+ GetTabInfoReq* const req = CAST_PTR(GetTabInfoReq, tSignal.getDataPtrSend());
+
req->senderRef = m_reference;
req->senderData = 0;
- req->requestType =
+ req->requestType =
GetTabInfoReq::RequestById | GetTabInfoReq::LongSignalConf;
req->tableId = tableId;
tSignal.theReceiversBlockNumber = DBDICT;
tSignal.theVerId_signalNumber = GSN_GET_TABINFOREQ;
tSignal.theLength = GetTabInfoReq::SignalLength;
-
+
return getTable(&tSignal, 0, 0, fullyQualifiedNames);
}
+#endif
+
+
+/*
+ Get dictionary information for a table using table name as the reference
+
+ DESCRIPTION
+ Send GET_TABINFOREQ signal with the table name in the first
+ long section part
+*/
-NdbTableImpl *
-NdbDictInterface::getTable(const char * name, bool fullyQualifiedNames)
+NdbTableImpl *
+NdbDictInterface::getTable(const BaseString& name, bool fullyQualifiedNames)
{
NdbApiSignal tSignal(m_reference);
- GetTabInfoReq * const req = CAST_PTR(GetTabInfoReq, tSignal.getDataPtrSend());
-
- const Uint32 strLen = strlen(name) + 1; // NULL Terminated
- if(strLen > MAX_TAB_NAME_SIZE) {//sizeof(req->tableName)){
- m_error.code = 4307;
- return 0;
- }
+ GetTabInfoReq* const req = CAST_PTR(GetTabInfoReq, tSignal.getDataPtrSend());
- // avoid alignment problem and memory overrun
- Uint32 name_buf[(MAX_TAB_NAME_SIZE + 3) / 4];
- strncpy((char*)name_buf, name, sizeof(name_buf)); // strncpy null-pads
- name = (char*)name_buf;
+ const Uint32 namelen= name.length() + 1; // NULL terminated
+ const Uint32 namelen_words= (namelen + 3) >> 2; // Size in words
- req->senderRef = m_reference;
- req->senderData = 0;
- req->requestType =
+ req->senderRef= m_reference;
+ req->senderData= 0;
+ req->requestType=
GetTabInfoReq::RequestByName | GetTabInfoReq::LongSignalConf;
- req->tableNameLen = strLen;
- tSignal.theReceiversBlockNumber = DBDICT;
- tSignal.theVerId_signalNumber = GSN_GET_TABINFOREQ;
- // tSignal.theLength = GetTabInfoReq::HeaderLength + ((strLen + 3) / 4);
- tSignal.theLength = GetTabInfoReq::SignalLength;
+ req->tableNameLen= namelen;
+ tSignal.theReceiversBlockNumber= DBDICT;
+ tSignal.theVerId_signalNumber= GSN_GET_TABINFOREQ;
+ tSignal.theLength= GetTabInfoReq::SignalLength;
+
+ // Copy name to m_buffer to get a word sized buffer
+ m_buffer.clear();
+ m_buffer.grow(namelen_words*4);
+ m_buffer.append(name.c_str(), namelen);
+
LinearSectionPtr ptr[1];
- ptr[0].p = (Uint32*)name;
- ptr[0].sz = (strLen + 3) / 4;
-
+ ptr[0].p= (Uint32*)m_buffer.get_data();
+ ptr[0].sz= namelen_words;
+
return getTable(&tSignal, ptr, 1, fullyQualifiedNames);
}
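/*
 * Illustrative sketch only: the word-aligned name packing used by the new
 * getTable(const BaseString&) just above.  The NULL-terminated table name is
 * copied into a buffer grown to a multiple of 4 bytes, and the GET_TABINFOREQ
 * long-signal section length is given in 32-bit words, i.e. (len + 3) >> 2.
 */
#include <string.h>

static unsigned name_section_words(const char* name)
{
  unsigned len = (unsigned)strlen(name) + 1;   /* include the NULL terminator   */
  return (len + 3) >> 2;                       /* e.g. "t1" -> 3 bytes -> 1 word */
}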
+
NdbTableImpl *
-NdbDictInterface::getTable(class NdbApiSignal * signal,
+NdbDictInterface::getTable(class NdbApiSignal * signal,
LinearSectionPtr ptr[3],
Uint32 noOfSections, bool fullyQualifiedNames)
{
- //GetTabInfoReq * const req = CAST_PTR(GetTabInfoReq, signal->getDataPtrSend());
+ int errCodes[] = {GetTabInfoRef::Busy };
+
int r = dictSignal(signal,ptr,noOfSections,
0/*do not use masternode id*/,
100,
WAIT_GET_TAB_INFO_REQ,
WAITFOR_RESPONSE_TIMEOUT,
- NULL,0);
+ errCodes, 1);
if (r) return 0;
NdbTableImpl * rt = 0;
- m_error.code = parseTableInfo(&rt,
- (Uint32*)m_buffer.get_data(),
- m_buffer.length() / 4, fullyQualifiedNames);
+ m_error.code= parseTableInfo(&rt,
+ (Uint32*)m_buffer.get_data(),
+ m_buffer.length() / 4, fullyQualifiedNames);
rt->buildColumnHash();
return rt;
}
@@ -1077,7 +1173,7 @@ NdbDictInterface::execGET_TABINFO_REF(NdbApiSignal * signal,
{
const GetTabInfoRef* ref = CAST_CONSTPTR(GetTabInfoRef, signal->getDataPtr());
- m_error.code = ref->errorCode;
+ m_error.code= ref->errorCode;
m_waiter.signal(NO_WAIT);
}
@@ -1133,8 +1229,6 @@ objectTypeMapping[] = {
{ DictTabInfo::SystemTable, NdbDictionary::Object::SystemTable },
{ DictTabInfo::UserTable, NdbDictionary::Object::UserTable },
{ DictTabInfo::UniqueHashIndex, NdbDictionary::Object::UniqueHashIndex },
- { DictTabInfo::HashIndex, NdbDictionary::Object::HashIndex },
- { DictTabInfo::UniqueOrderedIndex, NdbDictionary::Object::UniqueOrderedIndex },
{ DictTabInfo::OrderedIndex, NdbDictionary::Object::OrderedIndex },
{ DictTabInfo::HashIndexTrigger, NdbDictionary::Object::HashIndexTrigger },
{ DictTabInfo::IndexTrigger, NdbDictionary::Object::IndexTrigger },
@@ -1167,49 +1261,17 @@ static const
ApiKernelMapping
indexTypeMapping[] = {
{ DictTabInfo::UniqueHashIndex, NdbDictionary::Index::UniqueHashIndex },
- { DictTabInfo::HashIndex, NdbDictionary::Index::HashIndex },
- { DictTabInfo::UniqueOrderedIndex, NdbDictionary::Index::UniqueOrderedIndex},
{ DictTabInfo::OrderedIndex, NdbDictionary::Index::OrderedIndex },
{ -1, -1 }
};
-// TODO: remove, api-kernel type codes must match now
-static const
-ApiKernelMapping
-columnTypeMapping[] = {
- { DictTabInfo::ExtTinyint, NdbDictionary::Column::Tinyint },
- { DictTabInfo::ExtTinyunsigned, NdbDictionary::Column::Tinyunsigned },
- { DictTabInfo::ExtSmallint, NdbDictionary::Column::Smallint },
- { DictTabInfo::ExtSmallunsigned, NdbDictionary::Column::Smallunsigned },
- { DictTabInfo::ExtMediumint, NdbDictionary::Column::Mediumint },
- { DictTabInfo::ExtMediumunsigned, NdbDictionary::Column::Mediumunsigned },
- { DictTabInfo::ExtInt, NdbDictionary::Column::Int },
- { DictTabInfo::ExtUnsigned, NdbDictionary::Column::Unsigned },
- { DictTabInfo::ExtBigint, NdbDictionary::Column::Bigint },
- { DictTabInfo::ExtBigunsigned, NdbDictionary::Column::Bigunsigned },
- { DictTabInfo::ExtFloat, NdbDictionary::Column::Float },
- { DictTabInfo::ExtDouble, NdbDictionary::Column::Double },
- { DictTabInfo::ExtOlddecimal, NdbDictionary::Column::Olddecimal },
- { DictTabInfo::ExtOlddecimalunsigned, NdbDictionary::Column::Olddecimalunsigned },
- { DictTabInfo::ExtChar, NdbDictionary::Column::Char },
- { DictTabInfo::ExtVarchar, NdbDictionary::Column::Varchar },
- { DictTabInfo::ExtBinary, NdbDictionary::Column::Binary },
- { DictTabInfo::ExtVarbinary, NdbDictionary::Column::Varbinary },
- { DictTabInfo::ExtDatetime, NdbDictionary::Column::Datetime },
- { DictTabInfo::ExtDate, NdbDictionary::Column::Date },
- { DictTabInfo::ExtBlob, NdbDictionary::Column::Blob },
- { DictTabInfo::ExtText, NdbDictionary::Column::Text },
- { DictTabInfo::ExtTime, NdbDictionary::Column::Time },
- { DictTabInfo::ExtYear, NdbDictionary::Column::Year },
- { DictTabInfo::ExtTimestamp, NdbDictionary::Column::Timestamp },
- { -1, -1 }
-};
-
int
NdbDictInterface::parseTableInfo(NdbTableImpl ** ret,
const Uint32 * data, Uint32 len,
bool fullyQualifiedNames)
{
+ DBUG_ENTER("NdbDictInterface::parseTableInfo");
+
SimplePropertiesLinearReader it(data, len);
DictTabInfo::Table tableDesc; tableDesc.init();
SimpleProperties::UnpackStatus s;
@@ -1219,7 +1281,7 @@ NdbDictInterface::parseTableInfo(NdbTableImpl ** ret,
true, true);
if(s != SimpleProperties::Break){
- return 703;
+ DBUG_RETURN(703);
}
const char * internalName = tableDesc.TableName;
const char * externalName = Ndb::externalizeTableName(internalName, fullyQualifiedNames);
@@ -1242,7 +1304,6 @@ NdbDictInterface::parseTableInfo(NdbTableImpl ** ret,
impl->m_kvalue = tableDesc.TableKValue;
impl->m_minLoadFactor = tableDesc.MinLoadFactor;
impl->m_maxLoadFactor = tableDesc.MaxLoadFactor;
- impl->m_fragmentCount = tableDesc.FragmentCount;
impl->m_indexType = (NdbDictionary::Index::Type)
getApiConstant(tableDesc.TableType,
@@ -1259,8 +1320,10 @@ NdbDictInterface::parseTableInfo(NdbTableImpl ** ret,
Uint32 keyInfoPos = 0;
Uint32 keyCount = 0;
Uint32 blobCount = 0;
+ Uint32 distKeys = 0;
- for(Uint32 i = 0; i < tableDesc.NoOfAttributes; i++) {
+ Uint32 i;
+ for(i = 0; i < tableDesc.NoOfAttributes; i++) {
DictTabInfo::Attribute attrDesc; attrDesc.init();
s = SimpleProperties::unpack(it,
&attrDesc,
@@ -1269,21 +1332,19 @@ NdbDictInterface::parseTableInfo(NdbTableImpl ** ret,
true, true);
if(s != SimpleProperties::Break){
delete impl;
- return 703;
+ DBUG_RETURN(703);
}
NdbColumnImpl * col = new NdbColumnImpl();
col->m_attrId = attrDesc.AttributeId;
col->setName(attrDesc.AttributeName);
- col->m_type = (NdbDictionary::Column::Type)
- getApiConstant(attrDesc.AttributeExtType,
- columnTypeMapping,
- NdbDictionary::Column::Undefined);
- if (col->m_type == NdbDictionary::Column::Undefined) {
+
+ // check type and compute attribute size and array size
+ if (! attrDesc.translateExtType()) {
delete impl;
- return 703;
+ DBUG_RETURN(703);
}
- col->m_extType = attrDesc.AttributeExtType;
+ col->m_type = (NdbDictionary::Column::Type)attrDesc.AttributeExtType;
col->m_precision = (attrDesc.AttributeExtPrecision & 0xFFFF);
col->m_scale = attrDesc.AttributeExtScale;
col->m_length = attrDesc.AttributeExtLength;
@@ -1292,32 +1353,26 @@ NdbDictInterface::parseTableInfo(NdbTableImpl ** ret,
// charset is defined exactly for char types
if (col->getCharType() != (cs_number != 0)) {
delete impl;
- return 703;
+ DBUG_RETURN(703);
}
if (col->getCharType()) {
col->m_cs = get_charset(cs_number, MYF(0));
if (col->m_cs == NULL) {
delete impl;
- return 743;
+ DBUG_RETURN(743);
}
}
-
- // translate to old kernel types and sizes
- if (! attrDesc.translateExtType()) {
- delete impl;
- return 703;
- }
- col->m_attrType =attrDesc.AttributeType;
col->m_attrSize = (1 << attrDesc.AttributeSize) / 8;
col->m_arraySize = attrDesc.AttributeArraySize;
+ if(attrDesc.AttributeSize == 0)
+ {
+ col->m_attrSize = 4;
+ col->m_arraySize = (attrDesc.AttributeArraySize + 31) >> 5;
+ }
col->m_pk = attrDesc.AttributeKeyFlag;
- col->m_tupleKey = 0;
col->m_distributionKey = attrDesc.AttributeDKey;
- col->m_distributionGroup = attrDesc.AttributeDGroup;
- col->m_distributionGroupBits = 16;
col->m_nullable = attrDesc.AttributeNullableFlag;
- col->m_indexOnly = (attrDesc.AttributeStoredInd ? false : true);
col->m_autoIncrement = (attrDesc.AttributeAutoIncrement ? true : false);
col->m_autoIncrementInitialValue = ~0;
col->m_defaultValue.assign(attrDesc.AttributeDefaultValue);
@@ -1326,6 +1381,9 @@ NdbDictInterface::parseTableInfo(NdbTableImpl ** ret,
col->m_keyInfoPos = keyInfoPos + 1;
keyInfoPos += ((col->m_attrSize * col->m_arraySize + 3) / 4);
keyCount++;
+
+ if(attrDesc.AttributeDKey)
+ distKeys++;
} else {
col->m_keyInfoPos = 0;
}
@@ -1336,7 +1394,7 @@ NdbDictInterface::parseTableInfo(NdbTableImpl ** ret,
if(impl->m_columns[attrDesc.AttributeId] != 0){
delete col;
delete impl;
- return 703;
+ DBUG_RETURN(703);
}
impl->m_columns[attrDesc.AttributeId] = col;
it.next();
@@ -1344,10 +1402,49 @@ NdbDictInterface::parseTableInfo(NdbTableImpl ** ret,
impl->m_noOfKeys = keyCount;
impl->m_keyLenInWords = keyInfoPos;
- impl->m_sizeOfKeysInWords = keyInfoPos;
impl->m_noOfBlobs = blobCount;
+ impl->m_noOfDistributionKeys = distKeys;
+
+ if(tableDesc.FragmentDataLen > 0)
+ {
+ Uint32 replicaCount = tableDesc.FragmentData[0];
+ Uint32 fragCount = tableDesc.FragmentData[1];
+
+ impl->m_replicaCount = replicaCount;
+ impl->m_fragmentCount = fragCount;
+
+ for(i = 0; i<(fragCount*replicaCount); i++)
+ {
+ impl->m_fragments.push_back(tableDesc.FragmentData[i+2]);
+ }
+
+ Uint32 topBit = (1 << 31);
+ for(; topBit && !(fragCount & topBit); ){
+ topBit >>= 1;
+ }
+ impl->m_hashValueMask = topBit - 1;
+ impl->m_hashpointerValue = fragCount - (impl->m_hashValueMask + 1);
+ }
+ else
+ {
+ impl->m_fragmentCount = tableDesc.FragmentCount;
+ impl->m_replicaCount = 0;
+ impl->m_hashValueMask = 0;
+ impl->m_hashpointerValue = 0;
+ }
+
+ if(distKeys == 0)
+ {
+ for(i = 0; i < tableDesc.NoOfAttributes; i++)
+ {
+ if(impl->m_columns[i]->getPrimaryKey())
+ impl->m_columns[i]->m_distributionKey = true;
+ }
+ }
+
* ret = impl;
- return 0;
+
+ DBUG_RETURN(0);
}
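/*
 * Illustrative sketch only: how the FragmentData fields set up in
 * parseTableInfo() above (m_hashValueMask, m_hashpointerValue) feed the
 * fragment lookup in NdbTableImpl::get_nodes().  With fragCount = 6 the mask
 * is 3 and the hash pointer is 2: hash values are first reduced with mask 3,
 * and the buckets below the pointer are re-reduced with the wider mask 7,
 * so the result always lands in 0..5.
 */
static unsigned fragment_of(unsigned hashValue, unsigned mask, unsigned hashPointer)
{
  unsigned fragmentId = hashValue & mask;
  if (fragmentId < hashPointer)
    fragmentId = hashValue & ((mask << 1) + 1);  /* wider mask for split buckets        */
  return fragmentId;                             /* e.g. hash 12, mask 3, ptr 2 -> 4     */
}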
/*****************************************************************
@@ -1362,15 +1459,15 @@ NdbDictionaryImpl::createTable(NdbTableImpl &t)
return 0;
// update table def from DICT
Ndb_local_table_info *info=
- get_local_table_info(t.m_internalName.c_str(),false);
+ get_local_table_info(t.m_internalName,false);
if (info == NULL) {
- m_error.code = 709;
+ m_error.code= 709;
return -1;
}
if (createBlobTables(*(info->m_table_impl)) != 0) {
int save_code = m_error.code;
(void)dropTable(t);
- m_error.code = save_code;
+ m_error.code= save_code;
return -1;
}
return 0;
@@ -1389,7 +1486,7 @@ NdbDictionaryImpl::createBlobTables(NdbTableImpl &t)
return -1;
// Save BLOB table handle
Ndb_local_table_info *info=
- get_local_table_info(bt.m_internalName.c_str(),false);
+ get_local_table_info(bt.m_internalName, false);
if (info == 0) {
return -1;
}
@@ -1433,10 +1530,8 @@ NdbDictInterface::createTable(Ndb & ndb,
int NdbDictionaryImpl::alterTable(NdbTableImpl &impl)
{
- BaseString internalName = impl.m_internalName;
+ BaseString internalName(impl.m_internalName);
const char * originalInternalName = internalName.c_str();
- BaseString externalName = impl.m_externalName;
- const char * originalExternalName = externalName.c_str();
DBUG_ENTER("NdbDictionaryImpl::alterTable");
Ndb_local_table_info * local = 0;
@@ -1474,47 +1569,54 @@ NdbDictInterface::createOrAlterTable(Ndb & ndb,
DBUG_ENTER("NdbDictInterface::createOrAlterTable");
unsigned i;
if((unsigned)impl.getNoOfPrimaryKeys() > NDB_MAX_NO_OF_ATTRIBUTES_IN_KEY){
- m_error.code = 4317;
+ m_error.code= 4317;
DBUG_RETURN(-1);
}
unsigned sz = impl.m_columns.size();
if (sz > NDB_MAX_ATTRIBUTES_IN_TABLE){
- m_error.code = 4318;
+ m_error.code= 4318;
DBUG_RETURN(-1);
}
- impl.copyNewProperties();
+ if (!impl.m_newExternalName.empty()) {
+ impl.m_externalName.assign(impl.m_newExternalName);
+ AlterTableReq::setNameFlag(impl.m_changeMask, true);
+ }
+
//validate();
//aggregate();
- const char * internalName =
- ndb.internalizeTableName(impl.m_externalName.c_str());
+ const BaseString internalName(
+ ndb.internalize_table_name(impl.m_externalName.c_str()));
impl.m_internalName.assign(internalName);
UtilBufferWriter w(m_buffer);
DictTabInfo::Table tmpTab; tmpTab.init();
- BaseString::snprintf(tmpTab.TableName,
- sizeof(tmpTab.TableName),
- internalName);
+ BaseString::snprintf(tmpTab.TableName,
+ sizeof(tmpTab.TableName),
+ internalName.c_str());
bool haveAutoIncrement = false;
Uint64 autoIncrementValue = 0;
+ Uint32 distKeys= 0;
for(i = 0; i<sz; i++){
const NdbColumnImpl * col = impl.m_columns[i];
if(col == 0)
continue;
if (col->m_autoIncrement) {
if (haveAutoIncrement) {
- m_error.code = 4335;
+ m_error.code= 4335;
DBUG_RETURN(-1);
}
haveAutoIncrement = true;
autoIncrementValue = col->m_autoIncrementInitialValue;
- }
+ }
+ if (col->m_distributionKey)
+ distKeys++;
}
// Check max length of frm data
if (impl.m_frm.length() > MAX_FRM_DATA_SIZE){
- m_error.code = 1229;
+ m_error.code= 1229;
DBUG_RETURN(-1);
}
tmpTab.FrmLen = impl.m_frm.length();
@@ -1542,6 +1644,10 @@ NdbDictInterface::createOrAlterTable(Ndb & ndb,
abort();
}
+ if (distKeys == impl.m_noOfKeys)
+ distKeys= 0;
+ impl.m_noOfDistributionKeys= distKeys;
+
for(i = 0; i<sz; i++){
const NdbColumnImpl * col = impl.m_columns[i];
if(col == 0)
@@ -1551,27 +1657,33 @@ NdbDictInterface::createOrAlterTable(Ndb & ndb,
BaseString::snprintf(tmpAttr.AttributeName, sizeof(tmpAttr.AttributeName),
col->m_name.c_str());
tmpAttr.AttributeId = i;
- tmpAttr.AttributeKeyFlag = col->m_pk || col->m_tupleKey;
+ tmpAttr.AttributeKeyFlag = col->m_pk;
tmpAttr.AttributeNullableFlag = col->m_nullable;
- tmpAttr.AttributeStoredInd = (col->m_indexOnly ? 0 : 1);
- tmpAttr.AttributeDKey = col->m_distributionKey;
- tmpAttr.AttributeDGroup = col->m_distributionGroup;
-
- tmpAttr.AttributeExtType =
- getKernelConstant(col->m_type,
- columnTypeMapping,
- DictTabInfo::ExtUndefined);
+ tmpAttr.AttributeDKey = distKeys ? col->m_distributionKey : 0;
+
+ tmpAttr.AttributeExtType = (Uint32)col->m_type;
tmpAttr.AttributeExtPrecision = ((unsigned)col->m_precision & 0xFFFF);
tmpAttr.AttributeExtScale = col->m_scale;
tmpAttr.AttributeExtLength = col->m_length;
+
+ // check type and compute attribute size and array size
+ if (! tmpAttr.translateExtType()) {
+ m_error.code= 703;
+ DBUG_RETURN(-1);
+ }
// charset is defined exactly for char types
if (col->getCharType() != (col->m_cs != NULL)) {
- m_error.code = 703;
+ m_error.code= 703;
DBUG_RETURN(-1);
}
// primary key type check
if (col->m_pk && ! NdbSqlUtil::usable_in_pk(col->m_type, col->m_cs)) {
- m_error.code = 743;
+ m_error.code= (col->m_cs != 0 ? 743 : 739);
+ DBUG_RETURN(-1);
+ }
+ // distribution key not supported for Char attribute
+ if (distKeys && col->m_distributionKey && col->m_cs != NULL) {
+ m_error.code= 745;
DBUG_RETURN(-1);
}
// charset in upper half of precision
@@ -1579,9 +1691,6 @@ NdbDictInterface::createOrAlterTable(Ndb & ndb,
tmpAttr.AttributeExtPrecision |= (col->m_cs->number << 16);
}
- // DICT will ignore and recompute this
- (void)tmpAttr.translateExtType();
-
tmpAttr.AttributeAutoIncrement = col->m_autoIncrement;
BaseString::snprintf(tmpAttr.AttributeDefaultValue,
sizeof(tmpAttr.AttributeDefaultValue),
@@ -1596,7 +1705,7 @@ NdbDictInterface::createOrAlterTable(Ndb & ndb,
NdbApiSignal tSignal(m_reference);
tSignal.theReceiversBlockNumber = DBDICT;
- LinearSectionPtr ptr[3];
+ LinearSectionPtr ptr[1];
ptr[0].p = (Uint32*)m_buffer.get_data();
ptr[0].sz = m_buffer.length() / 4;
int ret;
@@ -1632,7 +1741,7 @@ NdbDictInterface::createOrAlterTable(Ndb & ndb,
if (!ndb.setAutoIncrementValue(impl.m_externalName.c_str(),
autoIncrementValue)) {
if (ndb.theError.code == 0) {
- m_error.code = 4336;
+ m_error.code= 4336;
ndb.theError = m_error;
} else
m_error= ndb.theError;
@@ -1668,11 +1777,12 @@ void
NdbDictInterface::execCREATE_TABLE_CONF(NdbApiSignal * signal,
LinearSectionPtr ptr[3])
{
+#if 0
const CreateTableConf* const conf=
CAST_CONSTPTR(CreateTableConf, signal->getDataPtr());
Uint32 tableId= conf->tableId;
Uint32 tableVersion= conf->tableVersion;
-
+#endif
m_waiter.signal(NO_WAIT);
}
@@ -1682,7 +1792,7 @@ NdbDictInterface::execCREATE_TABLE_REF(NdbApiSignal * signal,
{
const CreateTableRef* const ref=
CAST_CONSTPTR(CreateTableRef, signal->getDataPtr());
- m_error.code = ref->errorCode;
+ m_error.code= ref->errorCode;
m_masterNodeId = ref->masterNodeId;
m_waiter.signal(NO_WAIT);
}
@@ -1726,7 +1836,7 @@ NdbDictInterface::execALTER_TABLE_REF(NdbApiSignal * signal,
{
const AlterTableRef * const ref =
CAST_CONSTPTR(AlterTableRef, signal->getDataPtr());
- m_error.code = ref->errorCode;
+ m_error.code= ref->errorCode;
m_masterNodeId = ref->masterNodeId;
m_waiter.signal(NO_WAIT);
}
@@ -1741,20 +1851,20 @@ NdbDictionaryImpl::dropTable(const char * name)
DBUG_PRINT("enter",("name: %s", name));
NdbTableImpl * tab = getTable(name);
if(tab == 0){
- return -1;
+ DBUG_RETURN(-1);
}
int ret = dropTable(* tab);
// If table stored in cache is incompatible with the one in the kernel
// we must clear the cache and try again
if (ret == INCOMPATIBLE_VERSION) {
- const char * internalTableName = m_ndb.internalizeTableName(name);
+ const BaseString internalTableName(m_ndb.internalize_table_name(name));
- DBUG_PRINT("info",("INCOMPATIBLE_VERSION internal_name: %s", internalTableName));
- m_localHash.drop(internalTableName);
+ DBUG_PRINT("info",("INCOMPATIBLE_VERSION internal_name: %s", internalTableName.c_str()));
+ m_localHash.drop(internalTableName.c_str());
m_globalHash->lock();
tab->m_status = NdbDictionary::Object::Invalid;
m_globalHash->drop(tab);
- m_globalHash->unlock();
+ m_globalHash->unlock();
DBUG_RETURN(dropTable(name));
}
@@ -1771,7 +1881,7 @@ NdbDictionaryImpl::dropTable(NdbTableImpl & impl)
}
if (impl.m_indexType != NdbDictionary::Index::Undefined) {
- m_receiver.m_error.code = 1228;
+ m_receiver.m_error.code= 1228;
return -1;
}
@@ -1872,32 +1982,37 @@ void
NdbDictInterface::execDROP_TABLE_CONF(NdbApiSignal * signal,
LinearSectionPtr ptr[3])
{
+ DBUG_ENTER("NdbDictInterface::execDROP_TABLE_CONF");
//DropTableConf* const conf = CAST_CONSTPTR(DropTableConf, signal->getDataPtr());
m_waiter.signal(NO_WAIT);
+ DBUG_VOID_RETURN;
}
void
NdbDictInterface::execDROP_TABLE_REF(NdbApiSignal * signal,
LinearSectionPtr ptr[3])
{
+ DBUG_ENTER("NdbDictInterface::execDROP_TABLE_REF");
const DropTableRef* const ref = CAST_CONSTPTR(DropTableRef, signal->getDataPtr());
- m_error.code = ref->errorCode;
+ m_error.code= ref->errorCode;
m_masterNodeId = ref->masterNodeId;
m_waiter.signal(NO_WAIT);
+ DBUG_VOID_RETURN;
}
int
NdbDictionaryImpl::invalidateObject(NdbTableImpl & impl)
{
const char * internalTableName = impl.m_internalName.c_str();
-
- m_localHash.drop(internalTableName);
+ DBUG_ENTER("NdbDictionaryImpl::invalidateObject");
+ DBUG_PRINT("enter", ("internal_name: %s", internalTableName));
+ m_localHash.drop(internalTableName);
m_globalHash->lock();
impl.m_status = NdbDictionary::Object::Invalid;
m_globalHash->drop(&impl);
m_globalHash->unlock();
- return 0;
+ DBUG_RETURN(0);
}
int
@@ -1916,8 +2031,8 @@ NdbDictionaryImpl::removeCachedObject(NdbTableImpl & impl)
* Get index info
*/
NdbIndexImpl*
-NdbDictionaryImpl::getIndexImpl(const char * externalName,
- const char * internalName)
+NdbDictionaryImpl::getIndexImpl(const char * externalName,
+ const BaseString& internalName)
{
Ndb_local_table_info * info = get_local_table_info(internalName,
false);
@@ -1938,7 +2053,7 @@ NdbDictionaryImpl::getIndexImpl(const char * externalName,
m_error.code = 4243;
return 0;
}
-
+
/**
* Create index impl
*/
@@ -1957,7 +2072,7 @@ NdbDictionaryImpl::getIndexImpl(const char * externalName,
int
NdbDictInterface::create_index_obj_from_table(NdbIndexImpl** dst,
- const NdbTableImpl* tab,
+ NdbTableImpl* tab,
const NdbTableImpl* prim){
NdbIndexImpl *idx = new NdbIndexImpl();
idx->m_version = tab->m_version;
@@ -1965,22 +2080,49 @@ NdbDictInterface::create_index_obj_from_table(NdbIndexImpl** dst,
idx->m_indexId = tab->m_tableId;
idx->m_externalName.assign(tab->getName());
idx->m_tableName.assign(prim->m_externalName);
- idx->m_type = tab->m_indexType;
+ NdbDictionary::Index::Type type = idx->m_type = tab->m_indexType;
idx->m_logging = tab->m_logging;
// skip last attribute (NDB$PK or NDB$TNODE)
- for(unsigned i = 0; i+1<tab->m_columns.size(); i++){
+
+ const Uint32 distKeys = prim->m_noOfDistributionKeys;
+ Uint32 keyCount = (distKeys ? distKeys : prim->m_noOfKeys);
+
+ unsigned i;
+ for(i = 0; i+1<tab->m_columns.size(); i++){
+ NdbColumnImpl* org = tab->m_columns[i];
+
NdbColumnImpl* col = new NdbColumnImpl;
// Copy column definition
- *col = *tab->m_columns[i];
+ *col = * org;
idx->m_columns.push_back(col);
+
/**
* reverse map
*/
- int key_id = prim->getColumn(col->getName())->getColumnNo();
+ const NdbColumnImpl* primCol = prim->getColumn(col->getName());
+ int key_id = primCol->getColumnNo();
int fill = -1;
idx->m_key_ids.fill(key_id, fill);
idx->m_key_ids[key_id] = i;
col->m_keyInfoPos = key_id;
+
+ if(type == NdbDictionary::Index::OrderedIndex &&
+ (primCol->m_distributionKey ||
+ (distKeys == 0 && primCol->getPrimaryKey())))
+ {
+ keyCount--;
+ org->m_distributionKey = 1;
+ }
+ }
+
+ if(keyCount == 0)
+ {
+ tab->m_noOfDistributionKeys = (distKeys ? distKeys : prim->m_noOfKeys);
+ }
+ else
+ {
+ for(i = 0; i+1<tab->m_columns.size(); i++)
+ tab->m_columns[i]->m_distributionKey = 0;
}
* dst = idx;
@@ -2016,12 +2158,11 @@ NdbDictInterface::createIndex(Ndb & ndb,
m_error.code = 4241;
return -1;
}
- const char * internalName =
- ndb.internalizeIndexName(&table, impl.getName());
-
+ const BaseString internalName(
+ ndb.internalize_index_name(&table, impl.getName()));
impl.m_internalName.assign(internalName);
- w.add(DictTabInfo::TableName, internalName);
+ w.add(DictTabInfo::TableName, internalName.c_str());
w.add(DictTabInfo::TableLoggedFlag, impl.m_logging);
NdbApiSignal tSignal(m_reference);
@@ -2059,10 +2200,6 @@ NdbDictInterface::createIndex(Ndb & ndb,
// Copy column definition
*impl.m_columns[i] = *col;
- if(col->m_pk && col->m_indexOnly){
- m_error.code = 4245;
- return -1;
- }
// index key type check
if (it == DictTabInfo::UniqueHashIndex &&
! NdbSqlUtil::usable_in_hash_index(col->m_type, col->m_cs) ||
@@ -2073,27 +2210,7 @@ NdbDictInterface::createIndex(Ndb & ndb,
}
attributeList.id[i] = col->m_attrId;
}
- if (it == DictTabInfo::UniqueHashIndex) {
- // Sort index attributes according to primary table (using insertion sort)
- for(i = 1; i < attributeList.sz; i++) {
- unsigned int temp = attributeList.id[i];
- unsigned int j = i;
- while((j > 0) && (attributeList.id[j - 1] > temp)) {
- attributeList.id[j] = attributeList.id[j - 1];
- j--;
- }
- attributeList.id[j] = temp;
- }
- // Check for illegal duplicate attributes
- for(i = 0; i<attributeList.sz; i++) {
- if ((i != (attributeList.sz - 1)) &&
- (attributeList.id[i] == attributeList.id[i+1])) {
- m_error.code = 4258;
- return -1;
- }
- }
- }
- LinearSectionPtr ptr[3];
+ LinearSectionPtr ptr[2];
ptr[0].p = (Uint32*)&attributeList;
ptr[0].sz = 1 + attributeList.sz;
ptr[1].p = (Uint32*)m_buffer.get_data();
@@ -2151,20 +2268,20 @@ NdbDictionaryImpl::dropIndex(const char * indexName,
// If index stored in cache is incompatible with the one in the kernel
// we must clear the cache and try again
if (ret == INCOMPATIBLE_VERSION) {
- const char * internalIndexName = (tableName)
+ const BaseString internalIndexName((tableName)
?
- m_ndb.internalizeIndexName(getTable(tableName), indexName)
+ m_ndb.internalize_index_name(getTable(tableName), indexName)
:
- m_ndb.internalizeTableName(indexName); // Index is also a table
-
- m_localHash.drop(internalIndexName);
+ m_ndb.internalize_table_name(indexName)); // Index is also a table
+
+ m_localHash.drop(internalIndexName.c_str());
m_globalHash->lock();
idx->m_table->m_status = NdbDictionary::Object::Invalid;
m_globalHash->drop(idx->m_table);
- m_globalHash->unlock();
+ m_globalHash->unlock();
return dropIndex(indexName, tableName);
}
-
+
return ret;
}
@@ -2180,19 +2297,19 @@ NdbDictionaryImpl::dropIndex(NdbIndexImpl & impl, const char * tableName)
return -1;
}
- const char * internalIndexName = (tableName)
+ const BaseString internalIndexName((tableName)
?
- m_ndb.internalizeIndexName(getTable(tableName), indexName)
+ m_ndb.internalize_index_name(getTable(tableName), indexName)
:
- m_ndb.internalizeTableName(indexName); // Index is also a table
+ m_ndb.internalize_table_name(indexName)); // Index is also a table
if(impl.m_status == NdbDictionary::Object::New){
return dropIndex(indexName, tableName);
}
-
+
int ret = m_receiver.dropIndex(impl, *timpl);
if(ret == 0){
- m_localHash.drop(internalIndexName);
+ m_localHash.drop(internalIndexName.c_str());
m_globalHash->lock();
impl.m_table->m_status = NdbDictionary::Object::Invalid;
m_globalHash->drop(impl.m_table);
@@ -2269,13 +2386,12 @@ int
NdbDictionaryImpl::createEvent(NdbEventImpl & evnt)
{
int i;
- NdbTableImpl* tab = getTable(evnt.getTable());
+ NdbTableImpl* tab = getTable(evnt.getTableName());
if(tab == 0){
- // m_error.code = 3249;
- ndbout_c(":createEvent: table %s not found", evnt.getTable());
#ifdef EVENT_DEBUG
- ndbout_c("NdbDictionaryImpl::createEvent: table not found: %s", evnt.getTable());
+ ndbout_c("NdbDictionaryImpl::createEvent: table not found: %s",
+ evnt.getTableName());
#endif
return -1;
}
@@ -2297,7 +2413,8 @@ NdbDictionaryImpl::createEvent(NdbEventImpl & evnt)
evnt.m_facade->addColumn(*(col_impl->m_facade));
} else {
ndbout_c("Attr id %u in table %s not found", evnt.m_attrIds[i],
- evnt.getTable());
+ evnt.getTableName());
+ m_error.code= 4713;
return -1;
}
}
@@ -2316,7 +2433,7 @@ NdbDictionaryImpl::createEvent(NdbEventImpl & evnt)
const NdbColumnImpl* col =
table.getColumn(evnt.m_columns[i]->m_name.c_str());
if(col == 0){
- m_error.code = 4247;
+ m_error.code= 4247;
return -1;
}
// Copy column definition
@@ -2342,7 +2459,7 @@ NdbDictionaryImpl::createEvent(NdbEventImpl & evnt)
// Check for illegal duplicate attributes
for(i = 1; i<attributeList_sz; i++) {
if (evnt.m_columns[i-1]->m_attrId == evnt.m_columns[i]->m_attrId) {
- m_error.code = 4258;
+ m_error.code= 4258;
return -1;
}
}
@@ -2390,17 +2507,21 @@ NdbDictInterface::createEvent(class Ndb & ndb,
const size_t len = strlen(evnt.m_externalName.c_str()) + 1;
if(len > MAX_TAB_NAME_SIZE) {
- m_error.code = 4241;
+ m_error.code= 4241;
return -1;
}
w.add(SimpleProperties::StringValue, evnt.m_externalName.c_str());
if (getFlag == 0)
+ {
+ const BaseString internal_tabname(
+ ndb.internalize_table_name(evnt.m_tableName.c_str()));
w.add(SimpleProperties::StringValue,
- ndb.internalizeTableName(evnt.m_tableName.c_str()));
+ internal_tabname.c_str());
+ }
- LinearSectionPtr ptr[3];
+ LinearSectionPtr ptr[1];
ptr[0].p = (Uint32*)m_buffer.get_data();
ptr[0].sz = (m_buffer.length()+3) >> 2;
@@ -2463,6 +2584,7 @@ int
NdbDictInterface::executeSubscribeEvent(class Ndb & ndb,
NdbEventImpl & evnt)
{
+ DBUG_ENTER("NdbDictInterface::executeSubscribeEvent");
NdbApiSignal tSignal(m_reference);
// tSignal.theReceiversBlockNumber = SUMA;
tSignal.theReceiversBlockNumber = DBDICT;
@@ -2477,7 +2599,7 @@ NdbDictInterface::executeSubscribeEvent(class Ndb & ndb,
sumaStart->subscriberData = evnt.m_bufferId & 0xFF;
sumaStart->subscriberRef = m_reference;
- return executeSubscribeEvent(&tSignal, NULL);
+ DBUG_RETURN(executeSubscribeEvent(&tSignal, NULL));
}
int
@@ -2503,9 +2625,7 @@ int
NdbDictInterface::stopSubscribeEvent(class Ndb & ndb,
NdbEventImpl & evnt)
{
-#ifdef EVENT_DEBUG
- ndbout_c("SUB_STOP_REQ");
-#endif
+ DBUG_ENTER("NdbDictInterface::stopSubscribeEvent");
NdbApiSignal tSignal(m_reference);
// tSignal.theReceiversBlockNumber = SUMA;
@@ -2521,7 +2641,7 @@ NdbDictInterface::stopSubscribeEvent(class Ndb & ndb,
sumaStop->part = (Uint32) SubscriptionData::TableData;
sumaStop->subscriberRef = m_reference;
- return stopSubscribeEvent(&tSignal, NULL);
+ DBUG_RETURN(stopSubscribeEvent(&tSignal, NULL));
}
int
@@ -2555,8 +2675,8 @@ NdbDictionaryImpl::getEvent(const char * eventName)
}
// We only have the table name with internal name
- ev->setTable(m_ndb.externalizeTableName(ev->getTable()));
- ev->m_tableImpl = getTable(ev->getTable());
+ ev->setTable(m_ndb.externalizeTableName(ev->getTableName()));
+ ev->m_tableImpl = getTable(ev->getTableName());
// get the columns from the attrListBitmask
@@ -2581,7 +2701,7 @@ NdbDictionaryImpl::getEvent(const char * eventName)
#ifdef EVENT_DEBUG
ndbout_c("NdbDictionaryImpl::getEvent could not find column id %d", id);
#endif
- m_error.code = 4247;
+ m_error.code= 4247;
delete ev;
return NULL;
}
@@ -2599,9 +2719,8 @@ void
NdbDictInterface::execCREATE_EVNT_CONF(NdbApiSignal * signal,
LinearSectionPtr ptr[3])
{
-#ifdef EVENT_DEBUG
- ndbout << "NdbDictionaryImpl.cpp: execCREATE_EVNT_CONF" << endl;
-#endif
+ DBUG_ENTER("NdbDictInterface::execCREATE_EVNT_CONF");
+
m_buffer.clear();
unsigned int len = signal->getLength() << 2;
m_buffer.append((char *)&len, sizeof(len));
@@ -2611,118 +2730,123 @@ NdbDictInterface::execCREATE_EVNT_CONF(NdbApiSignal * signal,
m_buffer.append((char *)ptr[0].p, strlen((char *)ptr[0].p)+1);
}
+ const CreateEvntConf * const createEvntConf=
+ CAST_CONSTPTR(CreateEvntConf, signal->getDataPtr());
+
+ Uint32 subscriptionId = createEvntConf->getEventId();
+ Uint32 subscriptionKey = createEvntConf->getEventKey();
+
+ DBUG_PRINT("info",("subscriptionId=%d,subscriptionKey=%d",
+ subscriptionId,subscriptionKey));
m_waiter.signal(NO_WAIT);
+ DBUG_VOID_RETURN;
}
void
NdbDictInterface::execCREATE_EVNT_REF(NdbApiSignal * signal,
LinearSectionPtr ptr[3])
{
-#ifdef EVENT_DEBUG
- ndbout << "NdbDictionaryImpl.cpp: execCREATE_EVNT_REF" << endl;
- ndbout << "Exiting" << endl;
- exit(-1);
-#endif
+ DBUG_ENTER("NdbDictInterface::execCREATE_EVNT_REF");
- const CreateEvntRef* const ref = CAST_CONSTPTR(CreateEvntRef, signal->getDataPtr());
- m_error.code = ref->getErrorCode();
-#ifdef EVENT_DEBUG
- ndbout_c("execCREATE_EVNT_REF");
- ndbout_c("ErrorCode %u", ref->getErrorCode());
- ndbout_c("Errorline %u", ref->getErrorLine());
- ndbout_c("ErrorNode %u", ref->getErrorNode());
-#endif
- m_waiter.signal(NO_WAIT);
+ const CreateEvntRef* const ref=
+ CAST_CONSTPTR(CreateEvntRef, signal->getDataPtr());
+ m_error.code= ref->getErrorCode();
+ DBUG_PRINT("error",("error=%d,line=%d,node=%d",ref->getErrorCode(),
+ ref->getErrorLine(),ref->getErrorNode()));
+ m_waiter.signal(NO_WAIT);
+ DBUG_VOID_RETURN;
}
void
NdbDictInterface::execSUB_STOP_CONF(NdbApiSignal * signal,
LinearSectionPtr ptr[3])
{
-#ifdef EVENT_DEBUG
- ndbout << "Got GSN_SUB_STOP_CONF" << endl;
-#endif
- // SubRemoveConf * const sumaRemoveConf = CAST_CONSTPTR(SubRemoveConf, signal->getDataPtr());
+ DBUG_ENTER("NdbDictInterface::execSUB_STOP_CONF");
+ const SubStopConf * const subStopConf=
+ CAST_CONSTPTR(SubStopConf, signal->getDataPtr());
- // Uint32 subscriptionId = sumaRemoveConf->subscriptionId;
- // Uint32 subscriptionKey = sumaRemoveConf->subscriptionKey;
- // Uint32 senderData = sumaRemoveConf->senderData;
+ Uint32 subscriptionId = subStopConf->subscriptionId;
+ Uint32 subscriptionKey = subStopConf->subscriptionKey;
+ Uint32 subscriberData = subStopConf->subscriberData;
+ DBUG_PRINT("info",("subscriptionId=%d,subscriptionKey=%d,subscriberData=%d",
+ subscriptionId,subscriptionKey,subscriberData));
m_waiter.signal(NO_WAIT);
+ DBUG_VOID_RETURN;
}
void
NdbDictInterface::execSUB_STOP_REF(NdbApiSignal * signal,
LinearSectionPtr ptr[3])
{
-#ifdef EVENT_DEBUG
- ndbout << "Got GSN_SUB_STOP_REF" << endl;
-#endif
- // SubRemoveConf * const sumaRemoveRef = CAST_CONSTPTR(SubRemoveRef, signal->getDataPtr());
+ DBUG_ENTER("NdbDictInterface::execSUB_STOP_REF");
+ const SubStopRef * const subStopRef=
+ CAST_CONSTPTR(SubStopRef, signal->getDataPtr());
- // Uint32 subscriptionId = sumaRemoveRef->subscriptionId;
- // Uint32 subscriptionKey = sumaRemoveRef->subscriptionKey;
- // Uint32 senderData = sumaRemoveRef->senderData;
+ Uint32 subscriptionId = subStopRef->subscriptionId;
+ Uint32 subscriptionKey = subStopRef->subscriptionKey;
+ Uint32 subscriberData = subStopRef->subscriberData;
+ m_error.code= subStopRef->errorCode;
- m_error.code = 1;
+ DBUG_PRINT("error",("subscriptionId=%d,subscriptionKey=%d,subscriberData=%d,error=%d",
+ subscriptionId,subscriptionKey,subscriberData,m_error.code));
m_waiter.signal(NO_WAIT);
+ DBUG_VOID_RETURN;
}
void
NdbDictInterface::execSUB_START_CONF(NdbApiSignal * signal,
LinearSectionPtr ptr[3])
{
-#ifdef EVENT_DEBUG
- ndbout << "Got GSN_SUB_START_CONF" << endl;
-#endif
- const SubStartConf * const sumaStartConf = CAST_CONSTPTR(SubStartConf, signal->getDataPtr());
+ DBUG_ENTER("NdbDictInterface::execSUB_START_CONF");
+ const SubStartConf * const subStartConf=
+ CAST_CONSTPTR(SubStartConf, signal->getDataPtr());
- // Uint32 subscriptionId = sumaStartConf->subscriptionId;
- // Uint32 subscriptionKey = sumaStartConf->subscriptionKey;
+ Uint32 subscriptionId = subStartConf->subscriptionId;
+ Uint32 subscriptionKey = subStartConf->subscriptionKey;
SubscriptionData::Part part =
- (SubscriptionData::Part)sumaStartConf->part;
- // Uint32 subscriberData = sumaStartConf->subscriberData;
+ (SubscriptionData::Part)subStartConf->part;
+ Uint32 subscriberData = subStartConf->subscriberData;
switch(part) {
case SubscriptionData::MetaData: {
-#ifdef EVENT_DEBUG
- ndbout << "SubscriptionData::MetaData" << endl;
-#endif
- m_error.code = 1;
+ DBUG_PRINT("error",("SubscriptionData::MetaData"));
+ m_error.code= 1;
break;
}
case SubscriptionData::TableData: {
-#ifdef EVENT_DEBUG
- ndbout << "SubscriptionData::TableData" << endl;
-#endif
+ DBUG_PRINT("info",("SubscriptionData::TableData"));
break;
}
default: {
-#ifdef EVENT_DEBUG
- ndbout_c("NdbDictInterface::execSUB_START_CONF wrong data");
-#endif
- m_error.code = 1;
+ DBUG_PRINT("error",("wrong data"));
+ m_error.code= 2;
break;
}
}
+ DBUG_PRINT("info",("subscriptionId=%d,subscriptionKey=%d,subscriberData=%d",
+ subscriptionId,subscriptionKey,subscriberData));
m_waiter.signal(NO_WAIT);
+ DBUG_VOID_RETURN;
}
void
NdbDictInterface::execSUB_START_REF(NdbApiSignal * signal,
LinearSectionPtr ptr[3])
{
-#ifdef EVENT_DEBUG
- ndbout << "Got GSN_SUB_START_REF" << endl;
-#endif
- m_error.code = 1;
- m_waiter.signal(NO_WAIT);
+ DBUG_ENTER("NdbDictInterface::execSUB_START_REF");
+ const SubStartRef * const subStartRef=
+ CAST_CONSTPTR(SubStartRef, signal->getDataPtr());
+ m_error.code= subStartRef->errorCode;
+ m_waiter.signal(NO_WAIT);
+ DBUG_VOID_RETURN;
}
void
NdbDictInterface::execSUB_GCP_COMPLETE_REP(NdbApiSignal * signal,
LinearSectionPtr ptr[3])
{
- const SubGcpCompleteRep * const rep = CAST_CONSTPTR(SubGcpCompleteRep, signal->getDataPtr());
+ const SubGcpCompleteRep * const rep=
+ CAST_CONSTPTR(SubGcpCompleteRep, signal->getDataPtr());
const Uint32 gci = rep->gci;
// const Uint32 senderRef = rep->senderRef;
@@ -2733,7 +2857,8 @@ NdbDictInterface::execSUB_GCP_COMPLETE_REP(NdbApiSignal * signal,
const Uint32 ref = signal->theSendersBlockRef;
NdbApiSignal tSignal(m_reference);
- SubGcpCompleteAcc * acc = CAST_PTR(SubGcpCompleteAcc, tSignal.getDataPtrSend());
+ SubGcpCompleteAcc * acc=
+ CAST_PTR(SubGcpCompleteAcc, tSignal.getDataPtrSend());
acc->rep = *rep;
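The exec* handlers above all follow the same shape: reinterpret the fixed-format signal payload returned by getDataPtr() as the matching signal class and read out the fields of interest. An illustrative sketch with a made-up payload struct; CAST_CONSTPTR is assumed here to be nothing more than a const reinterpretation of the data pointer:

    #include <cstdint>
    #include <cstdio>

    /* made-up payload layout, standing in for SubStartRef and friends */
    struct ExampleRef {
      uint32_t subscriptionId;
      uint32_t subscriptionKey;
      uint32_t errorCode;
    };

    static void handle_ref(const uint32_t* signalData)
    {
      /* the real handlers use CAST_CONSTPTR(SubStartRef, signal->getDataPtr()) for this step */
      const ExampleRef* ref = reinterpret_cast<const ExampleRef*>(signalData);
      std::printf("error=%u on subscription %u/%u\n",
                  ref->errorCode, ref->subscriptionId, ref->subscriptionKey);
    }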
@@ -2791,9 +2916,9 @@ NdbDictInterface::execSUB_TABLE_DATA(NdbApiSignal * signal,
int
NdbDictionaryImpl::dropEvent(const char * eventName)
{
- NdbEventImpl *ev = new NdbEventImpl();
+ NdbEventImpl *ev= new NdbEventImpl();
ev->setName(eventName);
- int ret = m_receiver.dropEvent(*ev);
+ int ret= m_receiver.dropEvent(*ev);
delete ev;
// printf("__________________RET %u\n", ret);
@@ -2842,31 +2967,25 @@ void
NdbDictInterface::execDROP_EVNT_CONF(NdbApiSignal * signal,
LinearSectionPtr ptr[3])
{
-#ifdef EVENT_DEBUG
- ndbout << "NdbDictionaryImpl.cpp: execDROP_EVNT_CONF" << endl;
-#endif
-
+ DBUG_ENTER("NdbDictInterface::execDROP_EVNT_CONF");
m_waiter.signal(NO_WAIT);
+ DBUG_VOID_RETURN;
}
void
NdbDictInterface::execDROP_EVNT_REF(NdbApiSignal * signal,
LinearSectionPtr ptr[3])
{
-#ifdef EVENT_DEBUG
- ndbout << "NdbDictionaryImpl.cpp: execDROP_EVNT_REF" << endl;
-#endif
- const DropEvntRef* const ref = CAST_CONSTPTR(DropEvntRef, signal->getDataPtr());
- m_error.code = ref->getErrorCode();
+ DBUG_ENTER("NdbDictInterface::execDROP_EVNT_REF");
+ const DropEvntRef* const ref=
+ CAST_CONSTPTR(DropEvntRef, signal->getDataPtr());
+ m_error.code= ref->getErrorCode();
-#if 0
- ndbout_c("execDROP_EVNT_REF");
- ndbout_c("ErrorCode %u", ref->getErrorCode());
- ndbout_c("Errorline %u", ref->getErrorLine());
- ndbout_c("ErrorNode %u", ref->getErrorNode());
-#endif
+ DBUG_PRINT("info",("ErrorCode=%u Errorline=%u ErrorNode=%u",
+ ref->getErrorCode(), ref->getErrorLine(), ref->getErrorNode()));
- m_waiter.signal(NO_WAIT);
+ m_waiter.signal(NO_WAIT);
+ DBUG_VOID_RETURN;
}
/*****************************************************************
@@ -2931,7 +3050,7 @@ NdbDictInterface::listObjects(NdbDictionary::Dictionary::List& list,
}
if (! ok) {
// bad signal data
- m_error.code = 4213;
+ m_error.code= 4213;
return -1;
}
list.count = count;
@@ -2953,8 +3072,6 @@ NdbDictInterface::listObjects(NdbDictionary::Dictionary::List& list,
BaseString schemaName;
BaseString objectName;
if ((element.type == NdbDictionary::Object::UniqueHashIndex) ||
- (element.type == NdbDictionary::Object::HashIndex) ||
- (element.type == NdbDictionary::Object::UniqueOrderedIndex) ||
(element.type == NdbDictionary::Object::OrderedIndex)) {
char * indexName = new char[n << 2];
memcpy(indexName, &data[pos], n << 2);
@@ -2999,7 +3116,7 @@ NdbDictInterface::listObjects(NdbApiSignal* signal)
m_transporter->lock_mutex();
Uint16 aNodeId = m_transporter->get_an_alive_node();
if (aNodeId == 0) {
- m_error.code = 4009;
+ m_error.code= 4009;
m_transporter->unlock_mutex();
return -1;
}
@@ -3007,7 +3124,7 @@ NdbDictInterface::listObjects(NdbApiSignal* signal)
m_transporter->unlock_mutex();
continue;
}
- m_error.code = 0;
+ m_error.code= 0;
m_waiter.m_node = aNodeId;
m_waiter.m_state = WAIT_LIST_TABLES_CONF;
m_waiter.wait(WAITFOR_RESPONSE_TIMEOUT);
@@ -3036,6 +3153,7 @@ NdbDictInterface::execLIST_TABLES_CONF(NdbApiSignal* signal,
}
template class Vector<int>;
+template class Vector<Uint16>;
template class Vector<Uint32>;
template class Vector<Vector<Uint32> >;
template class Vector<NdbTableImpl*>;
diff --git a/ndb/src/ndbapi/NdbDictionaryImpl.hpp b/ndb/src/ndbapi/NdbDictionaryImpl.hpp
index 602a2d6b6ca..754d0000718 100644
--- a/ndb/src/ndbapi/NdbDictionaryImpl.hpp
+++ b/ndb/src/ndbapi/NdbDictionaryImpl.hpp
@@ -19,7 +19,6 @@
#include <ndb_types.h>
#include <kernel_types.h>
-#include <ndb_limits.h>
#include <NdbError.hpp>
#include <BaseString.hpp>
#include <Vector.hpp>
@@ -63,12 +62,8 @@ public:
CHARSET_INFO * m_cs; // not const in MySQL
bool m_pk;
- bool m_tupleKey;
bool m_distributionKey;
- bool m_distributionGroup;
- int m_distributionGroupBits;
bool m_nullable;
- bool m_indexOnly;
bool m_autoIncrement;
Uint64 m_autoIncrementInitialValue;
BaseString m_defaultValue;
@@ -77,13 +72,13 @@ public:
/**
* Internal types and sizes, and aggregates
*/
- Uint32 m_attrType; // type outsize API and DICT
Uint32 m_attrSize; // element size (size when arraySize==1)
Uint32 m_arraySize; // length or length+2 for Var* types
Uint32 m_keyInfoPos;
- Uint32 m_extType; // used by restore (kernel type in versin v2x)
+ // TODO: use bits in attr desc 2
bool getInterpretableType() const ;
bool getCharType() const;
+ bool getStringType() const;
bool getBlobType() const;
/**
@@ -123,13 +118,20 @@ public:
Vector<Uint32> m_columnHash;
Vector<NdbColumnImpl *> m_columns;
void buildColumnHash();
-
+
+ /**
+ * Fragment info
+ */
+ Uint32 m_hashValueMask;
+ Uint32 m_hashpointerValue;
+ Vector<Uint16> m_fragments;
+
bool m_logging;
int m_kvalue;
int m_minLoadFactor;
int m_maxLoadFactor;
- int m_keyLenInWords;
- int m_fragmentCount;
+ Uint16 m_keyLenInWords;
+ Uint16 m_fragmentCount;
NdbDictionaryImpl * m_dictionary;
NdbIndexImpl * m_index;
@@ -147,21 +149,26 @@ public:
/**
* Aggregates
*/
- Uint32 m_noOfKeys;
- unsigned short m_sizeOfKeysInWords;
- unsigned short m_noOfBlobs;
+ Uint8 m_noOfKeys;
+ Uint8 m_noOfDistributionKeys;
+ Uint8 m_noOfBlobs;
+
+ Uint8 m_replicaCount;
/**
* Equality/assign
*/
bool equal(const NdbTableImpl&) const;
void assign(const NdbTableImpl&);
- void clearNewProperties();
- void copyNewProperties();
static NdbTableImpl & getImpl(NdbDictionary::Table & t);
static NdbTableImpl & getImpl(const NdbDictionary::Table & t);
NdbDictionary::Table * m_facade;
+
+ /**
+ * Return count
+ */
+ Uint32 get_nodes(Uint32 hashValue, const Uint16** nodes) const ;
};
class NdbIndexImpl : public NdbDictionary::Index, public NdbDictObjectImpl {
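The new m_hashValueMask, m_hashpointerValue and m_fragments members together with get_nodes() suggest the table object can now map a key's hash value to a fragment and its replica nodes locally. The kernel's exact rule is not shown in this hunk; the sketch below is only a generic linear-hashing style bucket choice, included as an assumption:

    #include <cstdint>

    /* Illustrative only, not necessarily the kernel's mapping: mask is 2^k - 1
       for the current hashing round, fragmentCount the number of existing fragments. */
    static uint32_t pick_fragment(uint32_t hashValue, uint32_t mask, uint32_t fragmentCount)
    {
      uint32_t frag = hashValue & mask;      /* try the larger bucket table first */
      if (frag >= fragmentCount)
        frag = hashValue & (mask >> 1);      /* bucket not split yet: fall back to the smaller mask */
      return frag;
    }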
@@ -170,6 +177,7 @@ public:
NdbIndexImpl(NdbDictionary::Index &);
~NdbIndexImpl();
+ void init();
void setName(const char * name);
const char * getName() const;
void setTable(const char * table);
@@ -199,13 +207,17 @@ public:
NdbEventImpl(NdbDictionary::Event &);
~NdbEventImpl();
+ void init();
void setName(const char * name);
const char * getName() const;
+ void setTable(const NdbDictionary::Table& table);
void setTable(const char * table);
- const char * getTable() const;
+ const char * getTableName() const;
void addTableEvent(const NdbDictionary::Event::TableEvent t);
- void setDurability(const NdbDictionary::Event::EventDurability d);
+ void setDurability(NdbDictionary::Event::EventDurability d);
+ NdbDictionary::Event::EventDurability getDurability() const;
void addEventColumn(const NdbColumnImpl &c);
+ int getNoOfEventColumns() const;
void print() {
ndbout_c("NdbEventImpl: id=%d, key=%d",
@@ -296,8 +308,8 @@ public:
int listObjects(NdbDictionary::Dictionary::List& list, Uint32 requestData, bool fullyQualifiedNames);
int listObjects(NdbApiSignal* signal);
- NdbTableImpl * getTable(int tableId, bool fullyQualifiedNames);
- NdbTableImpl * getTable(const char * name, bool fullyQualifiedNames);
+/* NdbTableImpl * getTable(int tableId, bool fullyQualifiedNames); */
+ NdbTableImpl * getTable(const BaseString& name, bool fullyQualifiedNames);
NdbTableImpl * getTable(class NdbApiSignal * signal,
LinearSectionPtr ptr[3],
Uint32 noOfSections, bool fullyQualifiedNames);
@@ -307,8 +319,8 @@ public:
bool fullyQualifiedNames);
static int create_index_obj_from_table(NdbIndexImpl ** dst,
- const NdbTableImpl*,
- const NdbTableImpl*);
+ NdbTableImpl* index_table,
+ const NdbTableImpl* primary_table);
NdbError & m_error;
private:
@@ -365,7 +377,7 @@ public:
bool setTransporter(class Ndb * ndb, class TransporterFacade * tf);
bool setTransporter(class TransporterFacade * tf);
-
+
int createTable(NdbTableImpl &t);
int createBlobTables(NdbTableImpl &);
int addBlobTables(NdbTableImpl &);
@@ -391,13 +403,12 @@ public:
int listObjects(List& list, NdbDictionary::Object::Type type);
int listIndexes(List& list, Uint32 indexId);
-
+
NdbTableImpl * getTable(const char * tableName, void **data= 0);
- Ndb_local_table_info * get_local_table_info(const char * internalName,
- bool do_add_blob_tables);
+ Ndb_local_table_info* get_local_table_info(
+ const BaseString& internalTableName, bool do_add_blob_tables);
NdbIndexImpl * getIndex(const char * indexName,
const char * tableName);
- NdbIndexImpl * getIndexImpl(const char * name, const char * internalName);
NdbEventImpl * getEvent(const char * eventName);
NdbEventImpl * getEventImpl(const char * internalName);
@@ -415,7 +426,9 @@ public:
NdbDictInterface m_receiver;
Ndb & m_ndb;
private:
- Ndb_local_table_info * fetchGlobalTableImpl(const char * internalName);
+ NdbIndexImpl * getIndexImpl(const char * name,
+ const BaseString& internalName);
+ Ndb_local_table_info * fetchGlobalTableImpl(const BaseString& internalName);
};
inline
@@ -454,7 +467,19 @@ bool
NdbColumnImpl::getCharType() const {
return (m_type == NdbDictionary::Column::Char ||
m_type == NdbDictionary::Column::Varchar ||
- m_type == NdbDictionary::Column::Text);
+ m_type == NdbDictionary::Column::Text ||
+ m_type == NdbDictionary::Column::Longvarchar);
+}
+
+inline
+bool
+NdbColumnImpl::getStringType() const {
+ return (m_type == NdbDictionary::Column::Char ||
+ m_type == NdbDictionary::Column::Varchar ||
+ m_type == NdbDictionary::Column::Longvarchar ||
+ m_type == NdbDictionary::Column::Binary ||
+ m_type == NdbDictionary::Column::Varbinary ||
+ m_type == NdbDictionary::Column::Longvarbinary);
}
inline
@@ -535,7 +560,7 @@ NdbTableImpl::getColumn(const char * name){
do {
if(hashValue == (tmp & 0xFFFE)){
NdbColumnImpl* col = cols[tmp >> 16];
- if(strcmp(name, col->m_name.c_str()) == 0){
+ if(strncmp(name, col->m_name.c_str(), col->m_name.length()) == 0){
return col;
}
}
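getColumn() above probes a precomputed hash whose entries pack a column index and a name hash into one 32-bit word: the index sits in the top half and the hash in the low half, with bit 0 apparently reserved (hence the 0xFFFE mask). A small sketch of that packing; the meaning of bit 0 is an assumption here:

    #include <cstdint>

    /* entry layout: high 16 bits = column index, bits 1..15 = name hash,
       bit 0 assumed to flag that more entries follow in the same bucket */
    static uint32_t pack_entry(uint16_t columnIndex, uint16_t nameHash, bool hasNext)
    {
      return (uint32_t(columnIndex) << 16) | (nameHash & 0xFFFE) | (hasNext ? 1u : 0u);
    }

    static bool entry_matches(uint32_t entry, uint16_t nameHash)
    {
      return (entry & 0xFFFE) == (nameHash & 0xFFFE);   /* cheap filter before the string compare */
    }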
@@ -611,26 +636,27 @@ NdbDictionaryImpl::getImpl(const NdbDictionary::Dictionary & t){
*/
inline
-NdbTableImpl *
-NdbDictionaryImpl::getTable(const char * tableName, void **data)
+NdbTableImpl *
+NdbDictionaryImpl::getTable(const char * table_name, void **data)
{
+ const BaseString internal_tabname(m_ndb.internalize_table_name(table_name));
Ndb_local_table_info *info=
- get_local_table_info(m_ndb.internalizeTableName(tableName), true);
- if (info == 0) {
+ get_local_table_info(internal_tabname, true);
+ if (info == 0)
return 0;
- }
- if (data) {
+
+ if (data)
*data= info->m_local_data;
- }
+
return info->m_table_impl;
}
inline
Ndb_local_table_info *
-NdbDictionaryImpl::get_local_table_info(const char * internalTableName,
+NdbDictionaryImpl::get_local_table_info(const BaseString& internalTableName,
bool do_add_blob_tables)
{
- Ndb_local_table_info *info= m_localHash.get(internalTableName);
+ Ndb_local_table_info *info= m_localHash.get(internalTableName.c_str());
if (info == 0) {
info= fetchGlobalTableImpl(internalTableName);
if (info == 0) {
@@ -645,34 +671,35 @@ NdbDictionaryImpl::get_local_table_info(const char * internalTableName,
inline
NdbIndexImpl *
-NdbDictionaryImpl::getIndex(const char * indexName,
- const char * tableName)
+NdbDictionaryImpl::getIndex(const char * index_name,
+ const char * table_name)
{
- if (tableName || m_ndb.usingFullyQualifiedNames()) {
- const char * internalIndexName = 0;
- if (tableName) {
- NdbTableImpl * t = getTable(tableName);
- if (t != 0)
- internalIndexName = m_ndb.internalizeIndexName(t, indexName);
- } else {
- internalIndexName =
- m_ndb.internalizeTableName(indexName); // Index is also a table
- }
- if (internalIndexName) {
- Ndb_local_table_info * info = get_local_table_info(internalIndexName,
- false);
- if (info) {
- NdbTableImpl * tab = info->m_table_impl;
+ if (table_name || m_ndb.usingFullyQualifiedNames())
+ {
+ const BaseString internal_indexname(
+ (table_name)
+ ?
+ m_ndb.internalize_index_name(getTable(table_name), index_name)
+ :
+ m_ndb.internalize_table_name(index_name)); // Index is also a table
+
+ if (internal_indexname.length())
+ {
+ Ndb_local_table_info * info=
+ get_local_table_info(internal_indexname, false);
+ if (info)
+ {
+ NdbTableImpl * tab= info->m_table_impl;
if (tab->m_index == 0)
- tab->m_index = getIndexImpl(indexName, internalIndexName);
+ tab->m_index= getIndexImpl(index_name, internal_indexname);
if (tab->m_index != 0)
- tab->m_index->m_table = tab;
+ tab->m_index->m_table= tab;
return tab->m_index;
}
}
}
- m_error.code = 4243;
+ m_error.code= 4243;
return 0;
}
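getTable() and get_local_table_info() above form a two-level lookup: the per-Ndb local hash is consulted first, and only on a miss is the shared dictionary asked via fetchGlobalTableImpl(). A hedged sketch of that caching shape with generic stand-in types:

    #include <map>
    #include <string>

    struct TableInfo { /* stand-in for Ndb_local_table_info */ };

    class LocalDict {
    public:
      /* local cache first, slow global fetch only on a miss */
      TableInfo* get(const std::string& internalName)
      {
        std::map<std::string, TableInfo*>::iterator it = m_local.find(internalName);
        if (it != m_local.end())
          return it->second;
        TableInfo* info = fetch_global(internalName);   /* may return 0 on error */
        if (info)
          m_local[internalName] = info;
        return info;
      }
    private:
      TableInfo* fetch_global(const std::string&) { return 0; }   /* placeholder for the real fetch */
      std::map<std::string, TableInfo*> m_local;
    };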
diff --git a/ndb/src/ndbapi/NdbEventOperation.cpp b/ndb/src/ndbapi/NdbEventOperation.cpp
index d209293f8b0..e99cad918c5 100644
--- a/ndb/src/ndbapi/NdbEventOperation.cpp
+++ b/ndb/src/ndbapi/NdbEventOperation.cpp
@@ -15,23 +15,9 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-/*****************************************************************************
- * Name: NdbEventOperation.cpp
- * Include:
- * Link:
- * Author: Tomas Ulin MySQL AB
- * Date: 2003-11-21
- * Version: 0.1
- * Description: Event support
- * Documentation:
- * Adjust: 2003-11-21 Tomas Ulin First version.
- ****************************************************************************/
-
#include <Ndb.hpp>
-#include <signaldata/SumaImpl.hpp>
+#include <NdbError.hpp>
#include <portlib/NdbMem.h>
-#include <transporter/TransporterDefinitions.hpp>
-#include <NdbEventOperation.hpp>
#include "NdbEventOperationImpl.hpp"
#include "NdbDictionaryImpl.hpp"
@@ -123,3 +109,7 @@ NdbEventOperation::wait(void *p, int aMillisecondNumber)
NdbEventOperation::NdbEventOperation(NdbEventOperationImpl& impl)
: m_impl(impl) {}
+const struct NdbError &
+NdbEventOperation::getNdbError() const {
+ return m_impl.getNdbError();
+}
diff --git a/ndb/src/ndbapi/NdbEventOperationImpl.cpp b/ndb/src/ndbapi/NdbEventOperationImpl.cpp
index 87bbca5fc71..208525bfc15 100644
--- a/ndb/src/ndbapi/NdbEventOperationImpl.cpp
+++ b/ndb/src/ndbapi/NdbEventOperationImpl.cpp
@@ -55,14 +55,17 @@ NdbEventOperationImpl::NdbEventOperationImpl(NdbEventOperation &N,
const char* eventName,
const int bufferLength)
: NdbEventOperation(*this), m_ndb(theNdb),
- m_state(ERROR), m_bufferL(bufferLength)
+ m_state(EO_ERROR), m_bufferL(bufferLength)
{
-
m_eventId = 0;
- theFirstRecAttrs[0] = NULL;
- theCurrentRecAttrs[0] = NULL;
- theFirstRecAttrs[1] = NULL;
- theCurrentRecAttrs[1] = NULL;
+ theFirstPkAttrs[0] = NULL;
+ theCurrentPkAttrs[0] = NULL;
+ theFirstPkAttrs[1] = NULL;
+ theCurrentPkAttrs[1] = NULL;
+ theFirstDataAttrs[0] = NULL;
+ theCurrentDataAttrs[0] = NULL;
+ theFirstDataAttrs[1] = NULL;
+ theCurrentDataAttrs[1] = NULL;
sdata = NULL;
ptr[0].p = NULL;
ptr[1].p = NULL;
@@ -71,16 +74,17 @@ NdbEventOperationImpl::NdbEventOperationImpl(NdbEventOperation &N,
// we should lookup id in Dictionary, TODO
// also make sure we only have one listener on each event
- if (!m_ndb) { ndbout_c("m_ndb=NULL"); return; }
+ if (!m_ndb) abort();
NdbDictionary::Dictionary *myDict = m_ndb->getDictionary();
- if (!myDict) { ndbout_c("getDictionary=NULL"); return; }
+ if (!myDict) { m_error.code= m_ndb->getNdbError().code; return; }
const NdbDictionary::Event *myEvnt = myDict->getEvent(eventName);
- if (!myEvnt) { ndbout_c("getEvent()=NULL"); return; }
+ if (!myEvnt) { m_error.code= myDict->getNdbError().code; return; }
m_eventImpl = &myEvnt->m_impl;
- if (!m_eventImpl) { ndbout_c("m_impl=NULL"); return; }
+
+ m_eventId = m_eventImpl->m_eventId;
m_bufferHandle = m_ndb->getGlobalEventBufferHandle();
if (m_bufferHandle->m_bufferL > 0)
@@ -88,25 +92,30 @@ NdbEventOperationImpl::NdbEventOperationImpl(NdbEventOperation &N,
else
m_bufferHandle->m_bufferL = m_bufferL;
- m_state = CREATED;
+ m_state = EO_CREATED;
}
NdbEventOperationImpl::~NdbEventOperationImpl()
{
int i;
- if (sdata) NdbMem_Free(sdata);
- for (i=0 ; i<3; i++) {
- if (ptr[i].p) NdbMem_Free(ptr[i].p);
+ if (sdata) NdbMem_Free((char*)sdata);
+ for (i=0 ; i<2; i++) {
+ NdbRecAttr *p = theFirstPkAttrs[i];
+ while (p) {
+ NdbRecAttr *p_next = p->next();
+ m_ndb->releaseRecAttr(p);
+ p = p_next;
+ }
}
for (i=0 ; i<2; i++) {
- NdbRecAttr *p = theFirstRecAttrs[i];
+ NdbRecAttr *p = theFirstDataAttrs[i];
while (p) {
NdbRecAttr *p_next = p->next();
m_ndb->releaseRecAttr(p);
p = p_next;
}
}
- if (m_state == NdbEventOperation::EXECUTING) {
+ if (m_state == EO_EXECUTING) {
stop();
// m_bufferHandle->dropSubscribeEvent(m_bufferId);
; // We should send stop signal here
@@ -122,36 +131,50 @@ NdbEventOperationImpl::getState()
NdbRecAttr*
NdbEventOperationImpl::getValue(const char *colName, char *aValue, int n)
{
- if (m_state != NdbEventOperation::CREATED) {
+ DBUG_ENTER("NdbEventOperationImpl::getValue");
+ if (m_state != EO_CREATED) {
ndbout_c("NdbEventOperationImpl::getValue may only be called between instantiation and execute()");
- return NULL;
+ DBUG_RETURN(NULL);
}
NdbColumnImpl *tAttrInfo = m_eventImpl->m_tableImpl->getColumn(colName);
if (tAttrInfo == NULL) {
ndbout_c("NdbEventOperationImpl::getValue attribute %s not found",colName);
- return NULL;
+ DBUG_RETURN(NULL);
}
- return NdbEventOperationImpl::getValue(tAttrInfo, aValue, n);
+ DBUG_RETURN(NdbEventOperationImpl::getValue(tAttrInfo, aValue, n));
}
NdbRecAttr*
NdbEventOperationImpl::getValue(const NdbColumnImpl *tAttrInfo, char *aValue, int n)
{
+ DBUG_ENTER("NdbEventOperationImpl::getValue");
// Insert Attribute Id into ATTRINFO part.
- NdbRecAttr *&theFirstRecAttr = theFirstRecAttrs[n];
- NdbRecAttr *&theCurrentRecAttr = theCurrentRecAttrs[n];
-
+
+ NdbRecAttr **theFirstAttr;
+ NdbRecAttr **theCurrentAttr;
+
+ if (tAttrInfo->getPrimaryKey())
+ {
+ theFirstAttr = &theFirstPkAttrs[n];
+ theCurrentAttr = &theCurrentPkAttrs[n];
+ }
+ else
+ {
+ theFirstAttr = &theFirstDataAttrs[n];
+ theCurrentAttr = &theCurrentDataAttrs[n];
+ }
+
/************************************************************************
* Get a Receive Attribute object and link it into the operation object.
************************************************************************/
- NdbRecAttr *tRecAttr = m_ndb->getRecAttr();
- if (tRecAttr == NULL) {
+ NdbRecAttr *tAttr = m_ndb->getRecAttr();
+ if (tAttr == NULL) {
exit(-1);
//setErrorCodeAbort(4000);
- return NULL;
+ DBUG_RETURN(NULL);
}
/**********************************************************************
@@ -159,63 +182,65 @@ NdbEventOperationImpl::getValue(const NdbColumnImpl *tAttrInfo, char *aValue, in
* the RecAttr object
* Also set attribute size, array size and attribute type
********************************************************************/
- if (tRecAttr->setup(tAttrInfo, aValue)) {
+ if (tAttr->setup(tAttrInfo, aValue)) {
//setErrorCodeAbort(4000);
- m_ndb->releaseRecAttr(tRecAttr);
+ m_ndb->releaseRecAttr(tAttr);
exit(-1);
- return NULL;
+ DBUG_RETURN(NULL);
}
//theErrorLine++;
- tRecAttr->setNULL();
+ tAttr->setUNDEFINED();
// We want to keep the list sorted to make data insertion easier later
- if (theFirstRecAttr == NULL) {
- theFirstRecAttr = tRecAttr;
- theCurrentRecAttr = tRecAttr;
- tRecAttr->next(NULL);
+
+ if (*theFirstAttr == NULL) {
+ *theFirstAttr = tAttr;
+ *theCurrentAttr = tAttr;
+ tAttr->next(NULL);
} else {
Uint32 tAttrId = tAttrInfo->m_attrId;
- if (tAttrId > theCurrentRecAttr->attrId()) { // right order
- theCurrentRecAttr->next(tRecAttr);
- tRecAttr->next(NULL);
- theCurrentRecAttr = tRecAttr;
- } else if (theFirstRecAttr->next() == NULL || // only one in list
- theFirstRecAttr->attrId() > tAttrId) {// or first
- tRecAttr->next(theFirstRecAttr);
- theFirstRecAttr = tRecAttr;
+ if (tAttrId > (*theCurrentAttr)->attrId()) { // right order
+ (*theCurrentAttr)->next(tAttr);
+ tAttr->next(NULL);
+ *theCurrentAttr = tAttr;
+ } else if ((*theFirstAttr)->next() == NULL || // only one in list
+ (*theFirstAttr)->attrId() > tAttrId) {// or first
+ tAttr->next(*theFirstAttr);
+ *theFirstAttr = tAttr;
} else { // at least 2 in list and not first and not last
- NdbRecAttr *p = theFirstRecAttr;
+ NdbRecAttr *p = *theFirstAttr;
NdbRecAttr *p_next = p->next();
while (tAttrId > p_next->attrId()) {
p = p_next;
p_next = p->next();
}
if (tAttrId == p_next->attrId()) { // Using same attribute twice
- tRecAttr->release(); // do I need to do this?
- m_ndb->releaseRecAttr(tRecAttr);
+ tAttr->release(); // do I need to do this?
+ m_ndb->releaseRecAttr(tAttr);
exit(-1);
- return NULL;
+ DBUG_RETURN(NULL);
}
// this is it, between p and p_next
- p->next(tRecAttr);
- tRecAttr->next(p_next);
+ p->next(tAttr);
+ tAttr->next(p_next);
}
}
-
- return tRecAttr;
+ DBUG_RETURN(tAttr);
}
int
NdbEventOperationImpl::execute()
{
+ DBUG_ENTER("NdbEventOperationImpl::execute");
NdbDictionary::Dictionary *myDict = m_ndb->getDictionary();
if (!myDict) {
- ndbout_c("NdbEventOperation::execute(): getDictionary=NULL");
- return 0;
+ m_error.code= m_ndb->getNdbError().code;
+ DBUG_RETURN(-1);
}
- if (theFirstRecAttrs[0] == NULL) { // defaults to get all
+ if (theFirstPkAttrs[0] == NULL &&
+ theFirstDataAttrs[0] == NULL) { // defaults to get all
}
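getValue() above keeps each RecAttr chain ordered by attribute id so that a later pass can walk the incoming signal data and the chain in lock step; the real code also tracks a tail pointer so the common append-in-order case stays O(1). A minimal standalone sketch of the ordered insert into a singly linked list:

    #include <cstdint>

    struct Attr {
      uint32_t attrId;
      Attr*    next;
    };

    /* insert keeping the list sorted ascending by attrId;
       returns false if the same attribute is requested twice */
    static bool sorted_insert(Attr*& head, Attr* a)
    {
      Attr** link = &head;
      while (*link != 0 && (*link)->attrId < a->attrId)
        link = &(*link)->next;
      if (*link != 0 && (*link)->attrId == a->attrId)
        return false;
      a->next = *link;
      *link = a;
      return true;
    }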
@@ -223,13 +248,18 @@ NdbEventOperationImpl::execute()
int hasSubscriber;
- m_bufferId =
- m_bufferHandle->prepareAddSubscribeEvent(m_eventImpl->m_eventId,
- hasSubscriber /* return value */);
+ int r= m_bufferHandle->prepareAddSubscribeEvent(this,
+ hasSubscriber /*return value*/);
+ m_error.code= 4709;
+
+ if (r < 0)
+ {
+ DBUG_RETURN(-1);
+ }
- m_eventImpl->m_bufferId = m_bufferId;
+ m_eventImpl->m_bufferId = m_bufferId = (Uint32)r;
- int r = -1;
+ r = -1;
if (m_bufferId >= 0) {
// now we check if there's already a subscriber
@@ -241,30 +271,33 @@ NdbEventOperationImpl::execute()
if (r) {
//Error
m_bufferHandle->unprepareAddSubscribeEvent(m_bufferId);
- m_state = NdbEventOperation::ERROR;
+ m_state = EO_ERROR;
} else {
m_bufferHandle->addSubscribeEvent(m_bufferId, this);
- m_state = NdbEventOperation::EXECUTING;
+ m_state = EO_EXECUTING;
}
} else {
//Error
- m_state = NdbEventOperation::ERROR;
+ m_state = EO_ERROR;
}
- return r;
+ DBUG_RETURN(r);
}
int
NdbEventOperationImpl::stop()
{
- if (m_state != NdbEventOperation::EXECUTING)
- return -1;
+ DBUG_ENTER("NdbEventOperationImpl::stop");
+ if (m_state != EO_EXECUTING)
+ {
+ DBUG_RETURN(-1);
+ }
// ndbout_c("NdbEventOperation::stopping()");
NdbDictionary::Dictionary *myDict = m_ndb->getDictionary();
if (!myDict) {
- ndbout_c("NdbEventOperation::stop(): getDictionary=NULL");
- return 0;
+ m_error.code= m_ndb->getNdbError().code;
+ DBUG_RETURN(-1);
}
NdbDictionaryImpl & myDictImpl = NdbDictionaryImpl::getImpl(*myDict);
@@ -275,8 +308,8 @@ NdbEventOperationImpl::stop()
hasSubscriber /* return value */);
if (ret < 0) {
- ndbout_c("prepareDropSubscribeEvent failed");
- return -1;
+ m_error.code= 4712;
+ DBUG_RETURN(-1);
}
// m_eventImpl->m_bufferId = m_bufferId;
@@ -293,17 +326,17 @@ NdbEventOperationImpl::stop()
if (r) {
//Error
m_bufferHandle->unprepareDropSubscribeEvent(m_bufferId);
- m_state = NdbEventOperation::ERROR;
+ m_error.code= myDictImpl.m_error.code;
+ m_state = EO_ERROR;
} else {
#ifdef EVENT_DEBUG
ndbout_c("NdbEventOperation::dropping()");
#endif
m_bufferHandle->dropSubscribeEvent(m_bufferId);
- m_state = NdbEventOperation::CREATED;
+ m_state = EO_CREATED;
}
-
- return r;
+ DBUG_RETURN(r);
}
bool
@@ -327,6 +360,7 @@ NdbEventOperationImpl::getLatestGCI()
int
NdbEventOperationImpl::next(int *pOverrun)
{
+ DBUG_ENTER("NdbEventOperationImpl::next");
int nr = 10000; // a high value
int tmpOverrun = 0;
int *ptmpOverrun;
@@ -343,7 +377,10 @@ NdbEventOperationImpl::next(int *pOverrun)
*pOverrun = tmpOverrun;
}
- if (r <= 0) return r; // no data
+ if (r <= 0)
+ {
+ DBUG_RETURN(r); // no data
+ }
if (r < nr) r = nr; else nr--; // we don't want to be stuck here forever
@@ -352,8 +389,13 @@ NdbEventOperationImpl::next(int *pOverrun)
#endif
// now move the data into the RecAttrs
- if ((theFirstRecAttrs[0] == NULL) &&
- (theFirstRecAttrs[1] == NULL)) return r;
+ if ((theFirstPkAttrs[0] == NULL) &&
+ (theFirstPkAttrs[1] == NULL) &&
+ (theFirstDataAttrs[0] == NULL) &&
+ (theFirstDataAttrs[1] == NULL))
+ {
+ DBUG_RETURN(r);
+ }
// no copying since no RecAttr's
@@ -364,20 +406,37 @@ NdbEventOperationImpl::next(int *pOverrun)
#ifdef EVENT_DEBUG
int i;
printf("after values sz=%u\n", ptr[1].sz);
- for (i=0; i < ptr[1].sz; i++)
+ for(i=0; i < (int)ptr[1].sz; i++)
printf ("H'%.8X ",ptr[1].p[i]);
printf("\n");
printf("before values sz=%u\n", ptr[2].sz);
- for (i=0; i < ptr[2].sz; i++)
+ for(i=0; i < (int)ptr[2].sz; i++)
printf ("H'%.8X ",ptr[2].p[i]);
printf("\n");
#endif
- NdbRecAttr *tWorkingRecAttr = theFirstRecAttrs[0];
-
// copy data into the RecAttr's
// we assume that the respective attribute lists are sorted
+ // first the pk's
+ {
+ NdbRecAttr *tAttr= theFirstPkAttrs[0];
+ while(tAttr)
+ {
+ assert(aAttrPtr < aAttrEndPtr);
+ unsigned tDataSz= AttributeHeader(*aAttrPtr).getDataSize();
+ assert(tAttr->attrId() ==
+ AttributeHeader(*aAttrPtr).getAttributeId());
+ assert(tAttr->receive_data(aDataPtr, tDataSz));
+ // next
+ aAttrPtr++;
+ aDataPtr+= tDataSz;
+ tAttr= tAttr->next();
+ }
+ }
+
+ NdbRecAttr *tWorkingRecAttr = theFirstDataAttrs[0];
+
Uint32 tRecAttrId;
Uint32 tAttrId;
Uint32 tDataSz;
@@ -389,7 +448,7 @@ NdbEventOperationImpl::next(int *pOverrun)
while (tAttrId > tRecAttrId) {
//printf("[%u] %u %u [%u]\n", tAttrId, tDataSz, *aDataPtr, tRecAttrId);
- tWorkingRecAttr->setNULL();
+ tWorkingRecAttr->setUNDEFINED();
tWorkingRecAttr = tWorkingRecAttr->next();
if (tWorkingRecAttr == NULL)
break;
@@ -401,32 +460,25 @@ NdbEventOperationImpl::next(int *pOverrun)
//printf("[%u] %u %u [%u]\n", tAttrId, tDataSz, *aDataPtr, tRecAttrId);
if (tAttrId == tRecAttrId) {
- if (!m_eventImpl->m_tableImpl->getColumn(tRecAttrId)->getPrimaryKey())
- hasSomeData++;
+ hasSomeData++;
//printf("set!\n");
- tWorkingRecAttr->receive_data(aDataPtr, tDataSz);
-
- // move forward, data has already moved forward
- aAttrPtr++;
- aDataPtr += tDataSz;
+ assert(tWorkingRecAttr->receive_data(aDataPtr, tDataSz));
tWorkingRecAttr = tWorkingRecAttr->next();
- } else {
- // move only attr forward
- aAttrPtr++;
- aDataPtr += tDataSz;
}
+ aAttrPtr++;
+ aDataPtr += tDataSz;
}
while (tWorkingRecAttr != NULL) {
tRecAttrId = tWorkingRecAttr->attrId();
//printf("set undefined [%u] %u %u [%u]\n", tAttrId, tDataSz, *aDataPtr, tRecAttrId);
- tWorkingRecAttr->setNULL();
+ tWorkingRecAttr->setUNDEFINED();
tWorkingRecAttr = tWorkingRecAttr->next();
}
- tWorkingRecAttr = theFirstRecAttrs[1];
+ tWorkingRecAttr = theFirstDataAttrs[1];
aDataPtr = ptr[2].p;
Uint32 *aDataEndPtr = aDataPtr + ptr[2].sz;
while ((aDataPtr < aDataEndPtr) && (tWorkingRecAttr != NULL)) {
@@ -435,7 +487,7 @@ NdbEventOperationImpl::next(int *pOverrun)
tDataSz = AttributeHeader(*aDataPtr).getDataSize();
aDataPtr++;
while (tAttrId > tRecAttrId) {
- tWorkingRecAttr->setNULL();
+ tWorkingRecAttr->setUNDEFINED();
tWorkingRecAttr = tWorkingRecAttr->next();
if (tWorkingRecAttr == NULL)
break;
@@ -444,27 +496,25 @@ NdbEventOperationImpl::next(int *pOverrun)
if (tWorkingRecAttr == NULL)
break;
if (tAttrId == tRecAttrId) {
- if (!m_eventImpl->m_tableImpl->getColumn(tRecAttrId)->getPrimaryKey())
- hasSomeData++;
+ assert(!m_eventImpl->m_tableImpl->getColumn(tRecAttrId)->getPrimaryKey());
+ hasSomeData++;
- tWorkingRecAttr->receive_data(aDataPtr, tDataSz);
- aDataPtr += tDataSz;
- // move forward, data+attr has already moved forward
+ assert(tWorkingRecAttr->receive_data(aDataPtr, tDataSz));
tWorkingRecAttr = tWorkingRecAttr->next();
- } else {
- // move only data+attr forward
- aDataPtr += tDataSz;
}
+ aDataPtr += tDataSz;
}
while (tWorkingRecAttr != NULL) {
- tWorkingRecAttr->setNULL();
+ tWorkingRecAttr->setUNDEFINED();
tWorkingRecAttr = tWorkingRecAttr->next();
}
if (hasSomeData)
- return r;
+ {
+ DBUG_RETURN(r);
+ }
}
- return 0;
+ DBUG_RETURN(0);
}
NdbDictionary::Event::TableEvent
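next() above merges the id-ordered incoming attribute stream into the equally ordered RecAttr chain: requested attributes that were not delivered are marked undefined, matches receive their data, and delivered attributes nobody asked for are skipped. A standalone sketch of that merge; the copy is done unconditionally here (rather than inside an assert) so the sketch behaves the same in NDEBUG builds:

    #include <cstdint>

    struct Rec {
      uint32_t attrId;
      Rec*     next;
      bool     defined;
      uint32_t value;
    };

    /* stream[i] = { attrId, value }; both the stream and the chain are sorted by attrId */
    static void merge_stream(const uint32_t (*stream)[2], unsigned n, Rec* rec)
    {
      unsigned i = 0;
      while (rec != 0 && i < n) {
        if (rec->attrId < stream[i][0]) {          /* requested but not delivered */
          rec->defined = false;
          rec = rec->next;
        } else if (rec->attrId == stream[i][0]) {  /* match: copy the value in */
          rec->value   = stream[i][1];
          rec->defined = true;
          rec = rec->next;
          i++;
        } else {                                   /* delivered but not requested: skip */
          i++;
        }
      }
      for (; rec != 0; rec = rec->next)            /* anything left was not delivered */
        rec->defined = false;
    }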
@@ -487,10 +537,20 @@ NdbEventOperationImpl::getEventType()
void
NdbEventOperationImpl::print()
{
+ int i;
ndbout << "EventId " << m_eventId << "\n";
- for (int i = 0; i < 2; i++) {
- NdbRecAttr *p = theFirstRecAttrs[i];
+ for (i = 0; i < 2; i++) {
+ NdbRecAttr *p = theFirstPkAttrs[i];
+ ndbout << " %u " << i;
+ while (p) {
+ ndbout << " : " << p->attrId() << " = " << *p;
+ p = p->next();
+ }
+ ndbout << "\n";
+ }
+ for (i = 0; i < 2; i++) {
+ NdbRecAttr *p = theFirstDataAttrs[i];
ndbout << " %u " << i;
while (p) {
ndbout << " : " << p->attrId() << " = " << *p;
@@ -639,23 +699,28 @@ NdbGlobalEventBufferHandle::~NdbGlobalEventBufferHandle()
void
NdbGlobalEventBufferHandle::addBufferId(int bufferId)
{
+ DBUG_ENTER("NdbGlobalEventBufferHandle::addBufferId");
+ DBUG_PRINT("enter",("bufferId=%d",bufferId));
if (m_nids >= NDB_MAX_ACTIVE_EVENTS) {
ndbout_c("NdbGlobalEventBufferHandle::addBufferId error in parameter setting");
exit(-1);
}
m_bufferIds[m_nids] = bufferId;
m_nids++;
+ DBUG_VOID_RETURN;
}
void
NdbGlobalEventBufferHandle::dropBufferId(int bufferId)
{
+ DBUG_ENTER("NdbGlobalEventBufferHandle::dropBufferId");
+ DBUG_PRINT("enter",("bufferId=%d",bufferId));
for (int i = 0; i < m_nids; i++)
if (m_bufferIds[i] == bufferId) {
m_nids--;
for (; i < m_nids; i++)
m_bufferIds[i] = m_bufferIds[i+1];
- return;
+ DBUG_VOID_RETURN;
}
ndbout_c("NdbGlobalEventBufferHandle::dropBufferId %d does not exist",
bufferId);
@@ -674,10 +739,11 @@ NdbGlobalEventBufferHandle::drop(NdbGlobalEventBufferHandle *handle)
}
*/
int
-NdbGlobalEventBufferHandle::prepareAddSubscribeEvent(Uint32 eventId,
- int& hasSubscriber)
+NdbGlobalEventBufferHandle::prepareAddSubscribeEvent
+(NdbEventOperationImpl *eventOp, int& hasSubscriber)
{
- ADD_DROP_LOCK_GUARDR(int,real_prepareAddSubscribeEvent(this, eventId, hasSubscriber));
+ ADD_DROP_LOCK_GUARDR(int,real_prepareAddSubscribeEvent(this, eventOp,
+ hasSubscriber));
}
void
NdbGlobalEventBufferHandle::addSubscribeEvent
@@ -830,57 +896,68 @@ NdbGlobalEventBuffer::~NdbGlobalEventBuffer()
// NdbMem_Deallocate(m_eventBufferIdToEventId);
}
void
-NdbGlobalEventBuffer::real_init (NdbGlobalEventBufferHandle *h,
+NdbGlobalEventBuffer::real_init (NdbGlobalEventBufferHandle *h,
int MAX_NUMBER_ACTIVE_EVENTS)
{
- if (m_handlers.size() == 0) { // First init
+ DBUG_ENTER("NdbGlobalEventBuffer::real_init");
+ DBUG_PRINT("enter",("m_handles.size()=%u %u", m_handlers.size(), h));
+ if (m_handlers.size() == 0)
+ { // First init
+ DBUG_PRINT("info",("first to come"));
m_max = MAX_NUMBER_ACTIVE_EVENTS;
m_buf = new BufItem[m_max];
- // (BufItem *)NdbMem_Allocate(m_max*sizeof(BufItem));
-
for (int i=0; i<m_max; i++) {
- m_buf[i].gId = 0;
+ m_buf[i].gId= 0;
}
}
+ assert(m_max == MAX_NUMBER_ACTIVE_EVENTS);
// TODO make sure we don't hit roof
- // m_handlers[m_nhandlers] = h;
m_handlers.push_back(h);
- // ndbout_c("NdbGlobalEventBuffer::real_init(), m_handles=%u %u", m_nhandlers, h);
+ DBUG_VOID_RETURN;
}
void
NdbGlobalEventBuffer::real_remove(NdbGlobalEventBufferHandle *h)
{
- // ndbout_c("NdbGlobalEventBuffer::real_init_remove(), m_handles=%u %u", m_nhandlers, h);
- for (Uint32 i=0 ; i < m_handlers.size(); i++) {
- // ndbout_c("%u %u %u", i, m_handlers[i], h);
- if (m_handlers[i] == h) {
+ DBUG_ENTER("NdbGlobalEventBuffer::real_remove");
+ DBUG_PRINT("enter",("m_handles.size()=%u %u", m_handlers.size(), h));
+ for (Uint32 i=0 ; i < m_handlers.size(); i++)
+ {
+ DBUG_PRINT("info",("m_handlers[%u] %u", i, m_handlers[i]));
+ if (m_handlers[i] == h)
+ {
m_handlers.erase(i);
- if (m_handlers.size() == 0) {
- // ndbout_c("last to go");
+ if (m_handlers.size() == 0)
+ {
+ DBUG_PRINT("info",("last to go"));
delete[] m_buf;
m_buf = NULL;
- // NdbMem_Free((char*)m_buf);
}
- return;
+ DBUG_VOID_RETURN;
}
}
- ndbout_c("NdbGlobalEventBuffer::real_init_remove() non-existing handle");
- exit(-1);
+ ndbout_c("NdbGlobalEventBuffer::real_remove() non-existing handle");
+ DBUG_PRINT("error",("non-existing handle"));
+ abort();
+ DBUG_VOID_RETURN;
}
-int
+int
NdbGlobalEventBuffer::real_prepareAddSubscribeEvent
-(NdbGlobalEventBufferHandle *aHandle, Uint32 eventId, int& hasSubscriber)
+(NdbGlobalEventBufferHandle *aHandle, NdbEventOperationImpl *eventOp,
+ int& hasSubscriber)
{
+ DBUG_ENTER("NdbGlobalEventBuffer::real_prepareAddSubscribeEvent");
int i;
- int bufferId = -1;
+ int bufferId= -1;
+ Uint32 eventId= eventOp->m_eventId;
+ DBUG_PRINT("enter",("eventId: %u", eventId));
// add_drop_lock(); // only one thread can do add or drop at a time
// Find place where eventId already set
for (i=0; i<m_no; i++) {
if (m_buf[i].gId == eventId) {
- bufferId = i;
+ bufferId= i;
break;
}
}
@@ -888,53 +965,55 @@ NdbGlobalEventBuffer::real_prepareAddSubscribeEvent
// find space for new bufferId
for (i=0; i<m_no; i++) {
if (m_buf[i].gId == 0) {
- bufferId = i; // we found an empty spot
- break;
+ bufferId= i; // we found an empty spot
+ goto found_bufferId;
}
}
if (bufferId < 0 &&
m_no < m_max) {
// room for more so get that
- bufferId=m_no;
- m_buf[m_no].gId = 0;
+ bufferId= m_no;
+ m_buf[m_no].gId= 0;
m_no++;
} else {
- ndbout_c("prepareAddSubscribeEvent: Can't accept more subscribers");
- // add_drop_unlock();
- return -1;
+ // add_drop_unlock();
+ DBUG_PRINT("error",("Can't accept more subscribers:"
+ " bufferId=%d, m_no=%d, m_max=%d",
+ bufferId, m_no, m_max));
+ DBUG_RETURN(-1);
}
}
+found_bufferId:
- BufItem &b = m_buf[ID(bufferId)];
+ BufItem &b= m_buf[ID(bufferId)];
if (b.gId == 0) { // first subscriber needs some initialization
- bufferId = NO_ID(0, bufferId);
+ bufferId= NO_ID(0, bufferId);
- b.gId = eventId;
+ b.gId= eventId;
+ b.eventType= (Uint32)eventOp->m_eventImpl->mi_type;
- if ((b.p_buf_mutex = NdbMutex_Create()) == NULL) {
+ if ((b.p_buf_mutex= NdbMutex_Create()) == NULL) {
ndbout_c("NdbGlobalEventBuffer: NdbMutex_Create() failed");
- exit(-1);
+ abort();
}
- b.subs = 0;
- b.f = 0;
- b.sz = 0;
- b.max_sz = aHandle->m_bufferL;
- b.data =
+ b.subs= 0;
+ b.f= 0;
+ b.sz= 0;
+ b.max_sz= aHandle->m_bufferL;
+ b.data=
(BufItem::Data *)NdbMem_Allocate(b.max_sz*sizeof(BufItem::Data));
for (int i = 0; i < b.max_sz; i++) {
- b.data[i].sdata = NULL;
- b.data[i].ptr[0].p = NULL;
- b.data[i].ptr[1].p = NULL;
- b.data[i].ptr[2].p = NULL;
+ b.data[i].sdata= NULL;
+ b.data[i].ptr[0].p= NULL;
+ b.data[i].ptr[1].p= NULL;
+ b.data[i].ptr[2].p= NULL;
}
} else {
-#ifdef EVENT_DEBUG
- ndbout_c("NdbGlobalEventBuffer::prepareAddSubscribeEvent: TRYING handle one subscriber per event b.subs = %u", b.subs);
-#endif
-
+ DBUG_PRINT("info",
+ ("TRYING handle one subscriber per event b.subs=%u",b.subs));
int ni = -1;
for(int i=0; i < b.subs;i++) {
if (b.ps[i].theHandle == NULL) {
@@ -946,9 +1025,10 @@ NdbGlobalEventBuffer::real_prepareAddSubscribeEvent
if (b.subs < MAX_SUBSCRIBERS_PER_EVENT) {
ni = b.subs;
} else {
- ndbout_c("prepareAddSubscribeEvent: Can't accept more subscribers");
+ DBUG_PRINT("error",
+ ("Can't accept more subscribers: b.subs=%d",b.subs));
// add_drop_unlock();
- return -1;
+ DBUG_RETURN(-1);
}
}
bufferId = NO_ID(ni, bufferId);
@@ -969,23 +1049,25 @@ NdbGlobalEventBuffer::real_prepareAddSubscribeEvent
else
hasSubscriber = 0;
-#ifdef EVENT_DEBUG
- ndbout_c("prepareAddSubscribeEvent: handed out bufferId %d for eventId %d",
- bufferId, eventId);
-#endif
+ DBUG_PRINT("info",("handed out bufferId=%d for eventId=%d hasSubscriber=%d",
+ bufferId, eventId, hasSubscriber));
/* we now have a lock on the prepare so that no one can mess with this
* unlock comes in unprepareAddSubscribeEvent or addSubscribeEvent
*/
- return bufferId;
+ DBUG_RETURN(bufferId);
}
void
NdbGlobalEventBuffer::real_unprepareAddSubscribeEvent(int bufferId)
{
+ DBUG_ENTER("NdbGlobalEventBuffer::real_unprepareAddSubscribeEvent");
BufItem &b = m_buf[ID(bufferId)];
int n = NO(bufferId);
+ DBUG_PRINT("enter", ("bufferId=%d,ID(bufferId)=%d,NO(bufferId)=%d",
+ bufferId, ID(bufferId), NO(bufferId)));
+
b.ps[n].theHandle = NULL;
// remove subscribers from the end,
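real_prepareAddSubscribeEvent() hands out a bufferId that encodes both a buffer slot (extracted with ID) and a subscriber number within that slot (NO, built with NO_ID). The macro definitions are not part of these hunks, so the packing below is only an assumed, illustrative layout:

    /* assumed layout: low 16 bits = buffer slot, high 16 bits = subscriber number */
    static inline int pack_id(int subscriberNo, int slot) { return (subscriberNo << 16) | (slot & 0xFFFF); }
    static inline int slot_of(int bufferId)               { return bufferId & 0xFFFF; }
    static inline int subscriber_of(int bufferId)         { return (bufferId >> 16) & 0xFFFF; }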
@@ -998,10 +1080,8 @@ NdbGlobalEventBuffer::real_unprepareAddSubscribeEvent(int bufferId)
break;
if (b.subs == 0) {
-#ifdef EVENT_DEBUG
- ndbout_c("unprepareAddSubscribeEvent: no more subscribers left on eventId %d", b.gId);
-#endif
- b.gId = 0; // We don't have any subscribers, reuse BufItem
+ DBUG_PRINT("info",("no more subscribers left on eventId %d", b.gId));
+ b.gId= 0; // We don't have any subscribers, reuse BufItem
if (b.data) {
NdbMem_Free((void *)b.data);
b.data = NULL;
@@ -1012,12 +1092,14 @@ NdbGlobalEventBuffer::real_unprepareAddSubscribeEvent(int bufferId)
}
}
// add_drop_unlock();
+ DBUG_VOID_RETURN;
}
void
NdbGlobalEventBuffer::real_addSubscribeEvent(int bufferId,
void *ndbEventOperation)
{
+ DBUG_ENTER("NdbGlobalEventBuffer::real_addSubscribeEvent");
BufItem &b = m_buf[ID(bufferId)];
int n = NO(bufferId);
@@ -1025,9 +1107,8 @@ NdbGlobalEventBuffer::real_addSubscribeEvent(int bufferId,
b.ps[n].theHandle->addBufferId(bufferId);
// add_drop_unlock();
-#ifdef EVENT_DEBUG
- ndbout_c("addSubscribeEvent:: added bufferId %d", bufferId);
-#endif
+ DBUG_PRINT("info",("added bufferId %d", bufferId));
+ DBUG_VOID_RETURN;
}
void
@@ -1040,6 +1121,7 @@ int
NdbGlobalEventBuffer::real_prepareDropSubscribeEvent(int bufferId,
int& hasSubscriber)
{
+ DBUG_ENTER("NdbGlobalEventBuffer::real_prepareDropSubscribeEvent");
// add_drop_lock(); // only one thread can do add or drop at a time
BufItem &b = m_buf[ID(bufferId)];
@@ -1055,14 +1137,17 @@ NdbGlobalEventBuffer::real_prepareDropSubscribeEvent(int bufferId,
else if (n == 1)
hasSubscriber = 0;
else
- return -1;
+ {
+ DBUG_RETURN(-1);
+ }
- return 0;
+ DBUG_RETURN(0);
}
void
NdbGlobalEventBuffer::real_dropSubscribeEvent(int bufferId)
{
+ DBUG_ENTER("NdbGlobalEventBuffer::real_dropSubscribeEvent");
// add_drop_lock(); // only one thread can do add-drop at a time
BufItem &b = m_buf[ID(bufferId)];
@@ -1078,6 +1163,7 @@ NdbGlobalEventBuffer::real_dropSubscribeEvent(int bufferId)
#ifdef EVENT_DEBUG
ndbout_c("dropSubscribeEvent:: dropped bufferId %d", bufferId);
#endif
+ DBUG_VOID_RETURN;
}
void
@@ -1100,10 +1186,13 @@ NdbGlobalEventBuffer::real_insertDataL(int bufferId,
const SubTableData * const sdata,
LinearSectionPtr ptr[3])
{
+ DBUG_ENTER("NdbGlobalEventBuffer::real_insertDataL");
BufItem &b = m_buf[ID(bufferId)];
#ifdef EVENT_DEBUG
int n = NO(bufferId);
#endif
+
+ if ( b.eventType & (1 << (Uint32)sdata->operation) )
{
if (b.subs) {
#ifdef EVENT_DEBUG
@@ -1112,7 +1201,9 @@ NdbGlobalEventBuffer::real_insertDataL(int bufferId,
// move front forward
if (copy_data_alloc(sdata, ptr,
b.data[b.f].sdata, b.data[b.f].ptr))
- return -1;
+ {
+ DBUG_RETURN(-1);
+ }
for (int i=0; i < b.subs; i++) {
NdbGlobalEventBuffer::BufItem::Ps &e = b.ps[i];
if (e.theHandle) { // active subscriber
@@ -1120,7 +1211,7 @@ NdbGlobalEventBuffer::real_insertDataL(int bufferId,
if (e.bufferempty == 0) {
e.overrun++; // another item has been overwritten
e.b++; // move next-to-read next since old item was overwritten
- if (e.b == b.max_sz) e.b = 0; // start from beginning
+ if (e.b == b.max_sz) e.b= 0; // start from beginning
}
}
e.bufferempty = 0;
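The check added near the top of real_insertDataL() treats BufItem::eventType as a bitmask of subscribed operation types, so an incoming change is buffered only when the bit for its operation is set. A small sketch of that filter with hypothetical operation codes (the real ones come from the SUMA signal definitions):

    #include <cstdint>

    enum Op { OP_INSERT = 0, OP_DELETE = 1, OP_UPDATE = 2 };   /* hypothetical codes */

    static bool wants_operation(uint32_t eventTypeMask, uint32_t operation)
    {
      return (eventTypeMask & (1u << operation)) != 0;   /* bit i set => operation i subscribed */
    }

    /* e.g. a subscriber interested in inserts and updates only:
         uint32_t mask = (1u << OP_INSERT) | (1u << OP_UPDATE);
         wants_operation(mask, OP_DELETE) is then false            */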
@@ -1140,21 +1231,35 @@ NdbGlobalEventBuffer::real_insertDataL(int bufferId,
#endif
}
}
- return 0;
+ else
+ {
+#ifdef EVENT_DEBUG
+ ndbout_c("skipped");
+#endif
+ }
+
+ DBUG_RETURN(0);
}
int NdbGlobalEventBuffer::hasData(int bufferId) {
+ DBUG_ENTER("NdbGlobalEventBuffer::hasData");
BufItem &b = m_buf[ID(bufferId)];
int n = NO(bufferId);
NdbGlobalEventBuffer::BufItem::Ps &e = b.ps[n];
if(e.bufferempty)
- return 0;
+ {
+ DBUG_RETURN(0);
+ }
if (b.f <= e.b)
- return b.max_sz-e.b + b.f;
+ {
+ DBUG_RETURN(b.max_sz-e.b + b.f);
+ }
else
- return b.f-e.b;
+ {
+ DBUG_RETURN(b.f-e.b);
+ }
}
int NdbGlobalEventBuffer::real_getDataL(const int bufferId,
@@ -1162,6 +1267,7 @@ int NdbGlobalEventBuffer::real_getDataL(const int bufferId,
LinearSectionPtr ptr[3],
int *pOverrun)
{
+ DBUG_ENTER("NdbGlobalEventBuffer::real_getDataL");
BufItem &b = m_buf[ID(bufferId)];
int n = NO(bufferId);
NdbGlobalEventBuffer::BufItem::Ps &e = b.ps[n];
@@ -1172,13 +1278,20 @@ int NdbGlobalEventBuffer::real_getDataL(const int bufferId,
}
if (e.bufferempty)
- return 0; // nothing to get
+ {
+ DBUG_RETURN(0); // nothing to get
+ }
+
+ DBUG_PRINT("info",("ID(bufferId) %d NO(bufferId) %d e.b %d",
+ ID(bufferId), NO(bufferId), e.b));
if (copy_data_alloc(b.data[e.b].sdata, b.data[e.b].ptr,
sdata, ptr))
- return -1;
+ {
+ DBUG_RETURN(-1);
+ }
- e.b++; if (e.b == b.max_sz) e.b = 0; // move next-to-read forward
+ e.b++; if (e.b == b.max_sz) e.b= 0; // move next-to-read forward
if (b.f == e.b) // back has caught up with front
e.bufferempty = 1;
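hasData() and real_getDataL() above treat each subscription as a ring buffer: the writer index b.f and the per-subscriber read index e.b wrap at b.max_sz, and a separate bufferempty flag disambiguates the case where the two indices coincide. A standalone sketch of the occupancy computation:

    /* items available between read index r and write index f in a ring of size n;
       'empty' distinguishes a completely empty ring from a completely full one when f == r */
    static int ring_available(int f, int r, int n, bool empty)
    {
      if (empty)
        return 0;
      if (f <= r)            /* the writer has wrapped around past the reader */
        return n - r + f;
      return f - r;
    }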
@@ -1187,7 +1300,7 @@ int NdbGlobalEventBuffer::real_getDataL(const int bufferId,
ndbout_c("getting data from buffer %d with eventId %d", bufferId, b.gId);
#endif
- return hasData(bufferId)+1;
+ DBUG_RETURN(hasData(bufferId)+1);
}
int
NdbGlobalEventBuffer::copy_data_alloc(const SubTableData * const f_sdata,
@@ -1195,49 +1308,59 @@ NdbGlobalEventBuffer::copy_data_alloc(const SubTableData * const f_sdata,
SubTableData * &t_sdata,
LinearSectionPtr t_ptr[3])
{
- if (t_sdata == NULL) {
- t_sdata = (SubTableData *)NdbMem_Allocate(sizeof(SubTableData));
- }
+ DBUG_ENTER("NdbGlobalEventBuffer::copy_data_alloc");
+ unsigned sz4= (sizeof(SubTableData)+3)>>2;
+ Uint32 *ptr= (Uint32*)NdbMem_Allocate((sz4 +
+ f_ptr[0].sz +
+ f_ptr[1].sz +
+ f_ptr[2].sz) * sizeof(Uint32));
+ if (t_sdata)
+ NdbMem_Free((char*)t_sdata);
+ t_sdata= (SubTableData *)ptr;
memcpy(t_sdata,f_sdata,sizeof(SubTableData));
+ ptr+= sz4;
+
for (int i = 0; i < 3; i++) {
LinearSectionPtr & f_p = f_ptr[i];
LinearSectionPtr & t_p = t_ptr[i];
if (f_p.sz > 0) {
- if (t_p.p == NULL) {
- t_p.p = (Uint32 *)NdbMem_Allocate(sizeof(Uint32)*f_p.sz);
- } else if (t_p.sz != f_p.sz) {
- NdbMem_Free(t_p.p);
- t_p.p = (Uint32 *)NdbMem_Allocate(sizeof(Uint32)*f_p.sz);
- }
+ t_p.p= (Uint32 *)ptr;
memcpy(t_p.p, f_p.p, sizeof(Uint32)*f_p.sz);
- } else if (t_p.p != NULL) {
- NdbMem_Free(t_p.p);
- t_p.p = NULL;
+ ptr+= f_p.sz;
+ t_p.sz= f_p.sz;
+ } else {
+ t_p.p= NULL;
+ t_p.sz= 0;
}
- t_p.sz = f_p.sz;
}
- return 0;
+ DBUG_RETURN(0);
}
int
NdbGlobalEventBuffer::real_wait(NdbGlobalEventBufferHandle *h,
int aMillisecondNumber)
{
+ DBUG_ENTER("NdbGlobalEventBuffer::real_wait");
// check if there are anything in any of the buffers
int i;
int n = 0;
for (i = 0; i < h->m_nids; i++)
n += hasData(h->m_bufferIds[i]);
- if (n) return n;
+ if (n)
+ {
+ DBUG_RETURN(n);
+ }
int r = NdbCondition_WaitTimeout(h->p_cond, ndb_global_event_buffer_mutex,
aMillisecondNumber);
if (r > 0)
- return -1;
+ {
+ DBUG_RETURN(-1);
+ }
n = 0;
for (i = 0; i < h->m_nids; i++)
n += hasData(h->m_bufferIds[i]);
- return n;
+ DBUG_RETURN(n);
}
template class Vector<NdbGlobalEventBufferHandle*>;
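copy_data_alloc() above now takes a single contiguous allocation for the fixed header plus all three variable-length sections, replacing the per-section buffers that had to be resized individually. A hedged standalone sketch of that layout with generic stand-in types; the caller owns and frees the returned block:

    #include <cstdint>
    #include <cstdlib>
    #include <cstring>

    struct Header  { uint32_t gci; uint32_t operation; };   /* stand-in for SubTableData */
    struct Section { const uint32_t* p; unsigned sz; };     /* stand-in for LinearSectionPtr */

    static Header* copy_one_block(const Header& h, const Section src[3], Section dst[3])
    {
      const unsigned hdr_words = (sizeof(Header) + 3) / 4;   /* round the header up to whole words */
      const unsigned total = hdr_words + src[0].sz + src[1].sz + src[2].sz;
      uint32_t* block = static_cast<uint32_t*>(std::malloc(total * sizeof(uint32_t)));
      if (block == 0)
        return 0;
      std::memcpy(block, &h, sizeof(Header));
      uint32_t* cur = block + hdr_words;
      for (int i = 0; i < 3; i++) {
        if (src[i].sz) {
          std::memcpy(cur, src[i].p, src[i].sz * sizeof(uint32_t));
          dst[i].p = cur;                                    /* sections point into the same block */
          cur += src[i].sz;
        } else {
          dst[i].p = 0;
        }
        dst[i].sz = src[i].sz;
      }
      return reinterpret_cast<Header*>(block);
    }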
diff --git a/ndb/src/ndbapi/NdbEventOperationImpl.hpp b/ndb/src/ndbapi/NdbEventOperationImpl.hpp
index f67c998e639..96958979c76 100644
--- a/ndb/src/ndbapi/NdbEventOperationImpl.hpp
+++ b/ndb/src/ndbapi/NdbEventOperationImpl.hpp
@@ -14,21 +14,13 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-/*****************************************************************************
- * Name: NdbEventOperationImpl.hpp
- * Include:
- * Link:
- * Author: Tomas Ulin MySQL AB
- * Date: 2003-11-21
- * Version: 0.1
- * Description: Event support
- * Documentation:
- * Adjust: 2003-11-21 Tomas Ulin First version.
- ****************************************************************************/
-
#ifndef NdbEventOperationImpl_H
#define NdbEventOperationImpl_H
+#include <NdbEventOperation.hpp>
+#include <signaldata/SumaImpl.hpp>
+#include <transporter/TransporterDefinitions.hpp>
+
class NdbGlobalEventBufferHandle;
class NdbEventOperationImpl : public NdbEventOperation {
public:
@@ -61,12 +53,17 @@ public:
void print();
void printAll();
+ const NdbError & getNdbError() const;
+ NdbError m_error;
+
Ndb *m_ndb;
NdbEventImpl *m_eventImpl;
NdbGlobalEventBufferHandle *m_bufferHandle;
- NdbRecAttr *theFirstRecAttrs[2];
- NdbRecAttr *theCurrentRecAttrs[2];
+ NdbRecAttr *theFirstPkAttrs[2];
+ NdbRecAttr *theCurrentPkAttrs[2];
+ NdbRecAttr *theFirstDataAttrs[2];
+ NdbRecAttr *theCurrentDataAttrs[2];
NdbEventOperation::State m_state;
Uint32 m_eventId;
@@ -84,7 +81,7 @@ public:
//static NdbGlobalEventBufferHandle *init(int MAX_NUMBER_ACTIVE_EVENTS);
// returns bufferId 0-N if ok otherwise -1
- int prepareAddSubscribeEvent(Uint32 eventId, int& hasSubscriber);
+ int prepareAddSubscribeEvent(NdbEventOperationImpl *, int& hasSubscriber);
void unprepareAddSubscribeEvent(int bufferId);
void addSubscribeEvent(int bufferId,
NdbEventOperationImpl *ndbEventOperationImpl);
@@ -138,7 +135,8 @@ private:
int MAX_NUMBER_ACTIVE_EVENTS);
int real_prepareAddSubscribeEvent(NdbGlobalEventBufferHandle *h,
- Uint32 eventId, int& hasSubscriber);
+ NdbEventOperationImpl *,
+ int& hasSubscriber);
void real_unprepareAddSubscribeEvent(int bufferId);
void real_addSubscribeEvent(int bufferId, void *ndbEventOperation);
@@ -182,6 +180,7 @@ private:
// local mutex for each event/buffer
NdbMutex *p_buf_mutex;
Uint32 gId;
+ Uint32 eventType;
struct Data {
SubTableData *sdata;
LinearSectionPtr ptr[3];
diff --git a/ndb/src/ndbapi/NdbImpl.hpp b/ndb/src/ndbapi/NdbImpl.hpp
index 33aaca8de96..d50f1fa84fe 100644
--- a/ndb/src/ndbapi/NdbImpl.hpp
+++ b/ndb/src/ndbapi/NdbImpl.hpp
@@ -75,11 +75,22 @@ public:
int m_optimized_node_selection;
+ BaseString m_dbname; // Database name
+ BaseString m_schemaname; // Schema name
+
+ BaseString m_prefix; // Buffer for preformatted internal name <db>/<schema>/
+
+ void update_prefix()
+ {
+ m_prefix.assfmt("%s%c%s%c", m_dbname.c_str(), table_name_separator,
+ m_schemaname.c_str(), table_name_separator);
+ }
+
/**
* NOTE free lists must be _after_ theNdbObjectIdMap take
* assure that destructors are run in correct order
*/
- Ndb_free_list_t<NdbConnection> theConIdleList;
+ Ndb_free_list_t<NdbTransaction> theConIdleList;
Ndb_free_list_t<NdbOperation> theOpIdleList;
Ndb_free_list_t<NdbIndexScanOperation> theScanOpIdleList;
Ndb_free_list_t<NdbIndexOperation> theIndexOpIdleList;
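The new m_prefix member caches the preformatted "<db><sep><schema><sep>" string so internal table names can be produced by a cheap concatenation instead of reformatting on every lookup. A sketch with std::string and an assumed '/' separator (the actual value of table_name_separator is not shown here):

    #include <string>

    static const char table_sep = '/';         /* assumed separator */

    struct NameCache {
      std::string db, schema, prefix;

      void update_prefix()                     /* call whenever db or schema changes */
      {
        prefix = db + table_sep + schema + table_sep;
      }

      std::string internalize(const std::string& table) const
      {
        return prefix + table;                 /* e.g. "test/def/t1" */
      }
    };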
@@ -121,9 +132,9 @@ Ndb::void2rec(void* val){
}
inline
-NdbConnection *
+NdbTransaction *
Ndb::void2con(void* val){
- return (NdbConnection*)val;
+ return (NdbTransaction*)val;
}
inline
@@ -139,7 +150,7 @@ Ndb::void2rec_iop(void* val){
}
inline
-NdbConnection *
+NdbTransaction *
NdbReceiver::getTransaction(){
return ((NdbOperation*)m_owner)->theNdbCon;
}
diff --git a/ndb/src/ndbapi/NdbIndexOperation.cpp b/ndb/src/ndbapi/NdbIndexOperation.cpp
index 39da9c5f5d0..4cedffed4a2 100644
--- a/ndb/src/ndbapi/NdbIndexOperation.cpp
+++ b/ndb/src/ndbapi/NdbIndexOperation.cpp
@@ -16,9 +16,8 @@
#include <ndb_global.h>
#include <NdbIndexOperation.hpp>
-#include <NdbResultSet.hpp>
#include <Ndb.hpp>
-#include <NdbConnection.hpp>
+#include <NdbTransaction.hpp>
#include "NdbApiSignal.hpp"
#include <AttributeHeader.hpp>
#include <signaldata/TcIndx.hpp>
@@ -28,9 +27,7 @@
NdbIndexOperation::NdbIndexOperation(Ndb* aNdb) :
NdbOperation(aNdb),
- m_theIndex(NULL),
- m_theIndexLen(0),
- m_theNoOfIndexDefined(0)
+ m_theIndex(NULL)
{
m_tcReqGSN = GSN_TCINDXREQ;
m_attrInfoGSN = GSN_INDXATTRINFO;
@@ -56,7 +53,7 @@ NdbIndexOperation::~NdbIndexOperation()
int
NdbIndexOperation::indxInit(const NdbIndexImpl * anIndex,
const NdbTableImpl * aTable,
- NdbConnection* myConnection)
+ NdbTransaction* myConnection)
{
NdbOperation::init(aTable, myConnection);
@@ -64,25 +61,13 @@ NdbIndexOperation::indxInit(const NdbIndexImpl * anIndex,
case(NdbDictionary::Index::UniqueHashIndex):
break;
case(NdbDictionary::Index::Undefined):
- case(NdbDictionary::Index::HashIndex):
- case(NdbDictionary::Index::UniqueOrderedIndex):
case(NdbDictionary::Index::OrderedIndex):
setErrorCodeAbort(4003);
return -1;
}
m_theIndex = anIndex;
- m_thePrimaryTable = aTable;
m_accessTable = anIndex->m_table;
- m_theIndexLen = 0;
- m_theNoOfIndexDefined = 0;
- for (Uint32 i=0; i<NDB_MAX_ATTRIBUTES_IN_INDEX; i++)
- for (int j=0; j<3; j++)
- m_theIndexDefined[i][j] = false;
-
- TcIndxReq * const tcIndxReq = CAST_PTR(TcIndxReq, theTCREQ->getDataPtrSend());
- tcIndxReq->scanInfo = 0;
- theKEYINFOptr = &tcIndxReq->keyInfo[0];
- theATTRINFOptr = &tcIndxReq->attrInfo[0];
+ theNoOfTupKeyLeft = m_accessTable->getNoOfPrimaryKeys();
return 0;
}
@@ -179,312 +164,6 @@ int NdbIndexOperation::interpretedDeleteTuple()
return NdbOperation::interpretedDeleteTuple();
}
-int NdbIndexOperation::equal_impl(const NdbColumnImpl* tAttrInfo,
- const char* aValuePassed,
- Uint32 aVariableKeyLen)
-{
- register Uint32 tAttrId;
-
- Uint32 tData;
- Uint32 tKeyInfoPosition;
- const char* aValue = aValuePassed;
- Uint32 xfrmData[1024];
- Uint32 tempData[1024];
-
- if ((theStatus == OperationDefined) &&
- (aValue != NULL) &&
- (tAttrInfo != NULL )) {
- /************************************************************************
- * Start by checking that the attribute is an index key.
- * This value is also the word order in the tuple key of this
- * tuple key attribute.
- * Then check that this tuple key has not already been defined.
- * Finally check if all tuple key attributes have been defined. If
- * this is true then set Operation state to tuple key defined.
- ************************************************************************/
- tAttrId = tAttrInfo->m_attrId;
- tKeyInfoPosition = tAttrInfo->m_keyInfoPos;
- Uint32 i = 0;
-
- // Check that the attribute is part if the index attributes
- // by checking if it is a primary key attribute of index table
- if (tAttrInfo->m_pk) {
- Uint32 tKeyDefined = theTupleKeyDefined[0][2];
- Uint32 tKeyAttrId = theTupleKeyDefined[0][0];
- do {
- if (tKeyDefined == false) {
- goto keyEntryFound;
- } else {
- if (tKeyAttrId != tAttrId) {
- /******************************************************************
- * We read the key defined variable in advance.
- * It could potentially read outside its area when
- * i = MAXNROFTUPLEKEY - 1,
- * it is not a problem as long as the variable
- * theTupleKeyDefined is defined
- * in the middle of the object.
- * Reading wrong data and not using it causes no problems.
- *****************************************************************/
- i++;
- tKeyAttrId = theTupleKeyDefined[i][0];
- tKeyDefined = theTupleKeyDefined[i][2];
- continue;
- } else {
- goto equal_error2;
- }//if
- }//if
- } while (i < NDB_MAX_ATTRIBUTES_IN_INDEX);
- goto equal_error2;
- } else {
- goto equal_error1;
- }
- /**************************************************************************
- * Now it is time to retrieve the tuple key data from the pointer supplied
- * by the application.
- * We have to retrieve the size of the attribute in words and bits.
- *************************************************************************/
- keyEntryFound:
- m_theIndexDefined[i][0] = tAttrId;
- m_theIndexDefined[i][1] = tKeyInfoPosition;
- m_theIndexDefined[i][2] = true;
-
- Uint32 sizeInBytes = tAttrInfo->m_attrSize * tAttrInfo->m_arraySize;
- {
- /*************************************************************************
- * Check if the pointer of the value passed is aligned on a 4 byte
- * boundary. If so only assign the pointer to the internal variable
- * aValue. If it is not aligned then we start by copying the value to
- * tempData and use this as aValue instead.
- *************************************************************************/
- const int attributeSize = sizeInBytes;
- const int slack = sizeInBytes & 3;
- if ((((UintPtr)aValue & 3) != 0) || (slack != 0)){
- memcpy(&tempData[0], aValue, attributeSize);
- aValue = (char*)&tempData[0];
- if(slack != 0) {
- char * tmp = (char*)&tempData[0];
- memset(&tmp[attributeSize], 0, (4 - slack));
- }//if
- }//if
- }
- const char* aValueToWrite = aValue;
-
- CHARSET_INFO* cs = tAttrInfo->m_cs;
- if (cs != 0) {
- // current limitation: strxfrm does not increase length
- assert(cs->strxfrm_multiply <= 1);
- unsigned n =
- (*cs->coll->strnxfrm)(cs,
- (uchar*)xfrmData, sizeof(xfrmData),
- (const uchar*)aValue, sizeInBytes);
- while (n < sizeInBytes)
- ((uchar*)xfrmData)[n++] = 0x20;
- aValue = (char*)xfrmData;
- }
-
- Uint32 bitsInLastWord = 8 * (sizeInBytes & 3) ;
- Uint32 totalSizeInWords = (sizeInBytes + 3)/4;// Inc. bits in last word
- Uint32 sizeInWords = sizeInBytes / 4; // Exc. bits in last word
-
- if (true){ //tArraySize != 0) {
- Uint32 tIndexLen = m_theIndexLen;
-
- m_theIndexLen = tIndexLen + totalSizeInWords;
- if ((aVariableKeyLen == sizeInBytes) ||
- (aVariableKeyLen == 0)) {
- ;
- } else {
- goto equal_error3;
- }
- }
-#if 0
- else {
- /************************************************************************
- * The attribute is a variable array. We need to use the length parameter
- * to know the size of this attribute in the key information and
- * variable area. A key is however not allowed to be larger than 4
- * kBytes and this is checked for variable array attributes
- * used as keys.
- ***********************************************************************/
- Uint32 tMaxVariableKeyLenInWord = (MAXTUPLEKEYLENOFATTERIBUTEINWORD -
- tKeyInfoPosition);
- tAttrSizeInBits = aVariableKeyLen << 3;
- tAttrSizeInWords = tAttrSizeInBits >> 5;
- tAttrBitsInLastWord = tAttrSizeInBits - (tAttrSizeInWords << 5);
- tAttrLenInWords = ((tAttrSizeInBits + 31) >> 5);
- if (tAttrLenInWords > tMaxVariableKeyLenInWord) {
- setErrorCodeAbort(4207);
- return -1;
- }//if
- m_theIndexLen = m_theIndexLen + tAttrLenInWords;
- }//if
-#endif
- int tDistrKey = tAttrInfo->m_distributionKey;
- int tDistrGroup = tAttrInfo->m_distributionGroup;
- OperationType tOpType = theOperationType;
- if ((tDistrKey != 1) && (tDistrGroup != 1)) {
- ;
- } else if (tDistrKey == 1) {
- theDistrKeySize += totalSizeInWords;
- theDistrKeyIndicator = 1;
- } else {
- Uint32 TsizeInBytes = sizeInBytes;
- Uint32 TbyteOrderFix = 0;
- char* TcharByteOrderFix = (char*)&TbyteOrderFix;
- if (tAttrInfo->m_distributionGroupBits == 8) {
- char tFirstChar = aValue[TsizeInBytes - 2];
- char tSecondChar = aValue[TsizeInBytes - 2];
- TcharByteOrderFix[0] = tFirstChar;
- TcharByteOrderFix[1] = tSecondChar;
- TcharByteOrderFix[2] = 0x30;
- TcharByteOrderFix[3] = 0x30;
- theDistrGroupType = 0;
- } else {
- TbyteOrderFix = ((aValue[TsizeInBytes - 2] - 0x30) * 10)
- + (aValue[TsizeInBytes - 1] - 0x30);
- theDistrGroupType = 1;
- }//if
- theDistributionGroup = TbyteOrderFix;
- theDistrGroupIndicator = 1;
- }//if
- /**************************************************************************
- * If the operation is a write request and the attribute is stored then
- * we also set the value in the stored part through putting the
- * information in the INDXATTRINFO signals.
- *************************************************************************/
- if ((tOpType == WriteRequest)) {
- if (!tAttrInfo->m_indexOnly){
- int dummy_error;
- // invalid data can crash kernel
- if (cs != NULL &&
- (*cs->cset->well_formed_len)(cs,
- aValueToWrite,
- aValueToWrite + sizeInBytes,
- sizeInBytes,
- &dummy_error) != sizeInBytes)
- goto equal_error4;
- Uint32 ahValue;
- Uint32 sz = totalSizeInWords;
- /*
- * XXX should be linked in metadata but cannot now because
- * things can be defined in arbitrary order
- */
- const NdbColumnImpl* primaryCol = m_thePrimaryTable->getColumn(tAttrInfo->m_name.c_str());
- assert(primaryCol != NULL);
- AttributeHeader::init(&ahValue, primaryCol->m_attrId, sz);
- insertATTRINFO( ahValue );
- insertATTRINFOloop((Uint32*)aValueToWrite, sizeInWords);
- if (bitsInLastWord != 0) {
- tData = *(Uint32*)(aValueToWrite + (sizeInWords << 2));
- tData = convertEndian(tData);
- tData = tData & ((1 << bitsInLastWord) - 1);
- tData = convertEndian(tData);
- insertATTRINFO( tData );
- }//if
- }//if
- }//if
- /**************************************************************************
- * Store the Key information in the TCINDXREQ and INDXKEYINFO signals.
- *************************************************************************/
- if (insertKEYINFO(aValue, tKeyInfoPosition,
- totalSizeInWords, bitsInLastWord) != -1) {
- /************************************************************************
- * Add one to number of tuple key attributes defined.
- * If all have been defined then set the operation state to indicate
- * that tuple key is defined.
- * Thereby no more search conditions are allowed in this version.
- ***********************************************************************/
- Uint32 tNoIndexDef = m_theNoOfIndexDefined;
- Uint32 tErrorLine = theErrorLine;
- int tNoIndexAttrs = m_theIndex->m_columns.size();
- unsigned char tInterpretInd = theInterpretIndicator;
- tNoIndexDef++;
- m_theNoOfIndexDefined = tNoIndexDef;
- tErrorLine++;
- theErrorLine = tErrorLine;
- if (int(tNoIndexDef) == tNoIndexAttrs) {
- if (tOpType == UpdateRequest) {
- if (tInterpretInd == 1) {
- theStatus = GetValue;
- } else {
- theStatus = SetValue;
- }//if
- return 0;
- } else if ((tOpType == ReadRequest) || (tOpType == DeleteRequest) ||
- (tOpType == ReadExclusive)) {
- theStatus = GetValue;
- // create blob handles automatically
- if (tOpType == DeleteRequest && m_currentTable->m_noOfBlobs != 0) {
- for (unsigned i = 0; i < m_currentTable->m_columns.size(); i++) {
- NdbColumnImpl* c = m_currentTable->m_columns[i];
- assert(c != 0);
- if (c->getBlobType()) {
- if (getBlobHandle(theNdbCon, c) == NULL)
- return -1;
- }
- }
- }
- return 0;
- } else if ((tOpType == InsertRequest) || (tOpType == WriteRequest)) {
- theStatus = SetValue;
- return 0;
- } else {
- setErrorCodeAbort(4005);
- return -1;
- }//if
- }//if
- return 0;
- } else {
-
- return -1;
- }//if
- } else {
- if (theStatus != OperationDefined) {
- return -1;
- }//if
-
- if (aValue == NULL) {
- setErrorCodeAbort(4505);
- return -1;
- }//if
-
- if ( tAttrInfo == NULL ) {
- setErrorCodeAbort(4004);
- return -1;
- }//if
- }//if
- return -1;
-
- equal_error1:
- setErrorCodeAbort(4205);
- return -1;
-
- equal_error2:
- setErrorCodeAbort(4206);
- return -1;
-
- equal_error3:
- setErrorCodeAbort(4209);
- return -1;
-
- equal_error4:
- setErrorCodeAbort(744);
- return -1;
-}
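
For write requests the removed code above also streams the key value into the stored row via the INDXATTRINFO signals: an attribute header word (id plus length in words) followed by the value words, keeping only the valid bits of a partial last word. A simplified sketch of that packing into a flat word buffer; the header layout shown is a stand-in, not the real AttributeHeader encoding:

    #include <cstring>
    #include <vector>

    // Sketch of ATTRINFO packing for a write: one header word (attribute id
    // plus length in words) followed by the value words, with the unused
    // bytes of a partial last word cleared.
    static void pack_attribute(std::vector<unsigned>& attrInfo,
                               unsigned attrId, const void* value,
                               unsigned sizeInBytes)
    {
      const unsigned totalWords = (sizeInBytes + 3) / 4;
      attrInfo.push_back((attrId << 16) | totalWords);  // illustrative header
      if (totalWords == 0)
        return;

      std::vector<unsigned> words(totalWords, 0);
      std::memcpy(&words[0], value, sizeInBytes);       // tail bytes stay zero
      attrInfo.insert(attrInfo.end(), words.begin(), words.end());
    }
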
-
-int NdbIndexOperation::executeCursor(int aProcessorId)
-{
- printf("NdbIndexOperation::executeCursor NYI\n");
- // NYI
- return -1;
-}
-void
-NdbIndexOperation::setLastFlag(NdbApiSignal* signal, Uint32 lastFlag)
-{
- TcIndxReq * const req = CAST_PTR(TcIndxReq, signal->getDataPtrSend());
- TcKeyReq::setExecuteFlag(req->requestInfo, lastFlag);
-}
-
int
NdbIndexOperation::prepareSend(Uint32 aTC_ConnectPtr, Uint64 aTransactionId)
{
@@ -525,18 +204,18 @@ NdbIndexOperation::prepareSend(Uint32 aTC_ConnectPtr, Uint64 aTransactionId)
// We start by filling in the first 8 unconditional words of the
// TCINDXREQ signal.
//-------------------------------------------------------------
- TcIndxReq * const tcIndxReq =
- CAST_PTR(TcIndxReq, theTCREQ->getDataPtrSend());
+ TcKeyReq * tcKeyReq =
+ CAST_PTR(TcKeyReq, theTCREQ->getDataPtrSend());
Uint32 tTotalCurrAI_Len = theTotalCurrAI_Len;
Uint32 tIndexId = m_theIndex->m_indexId;
Uint32 tSchemaVersion = m_theIndex->m_version;
- tcIndxReq->apiConnectPtr = aTC_ConnectPtr;
- tcIndxReq->senderData = ptr2int();
- tcIndxReq->attrLen = tTotalCurrAI_Len;
- tcIndxReq->indexId = tIndexId;
- tcIndxReq->indexSchemaVersion = tSchemaVersion;
+ tcKeyReq->apiConnectPtr = aTC_ConnectPtr;
+ tcKeyReq->senderData = ptr2int();
+ tcKeyReq->attrLen = tTotalCurrAI_Len;
+ tcKeyReq->tableId = tIndexId;
+ tcKeyReq->tableSchemaVersion = tSchemaVersion;
tTransId1 = (Uint32) aTransactionId;
tTransId2 = (Uint32) (aTransactionId >> 32);
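
This hunk moves the index operation from the TCINDXREQ layout to the generic TCKEYREQ layout: the index id and version now go into the tableId and tableSchemaVersion slots of the same fixed header, and the 64-bit transaction id is split across two words. A rough sketch of such a header fill (the struct is a stand-in; the real definition is signaldata/TcKeyReq.hpp):

    // Stand-in for the first fixed words of the key request; the real
    // TcKeyReq struct in signaldata/TcKeyReq.hpp has more fields.
    struct KeyReqHeaderSketch {
      unsigned apiConnectPtr;
      unsigned senderData;
      unsigned attrLen;
      unsigned tableId;             // for an index operation: the index id
      unsigned tableSchemaVersion;  // for an index operation: the index version
      unsigned transId1;
      unsigned transId2;
    };

    static void fill_header(KeyReqHeaderSketch& h,
                            unsigned connectPtr, unsigned senderData,
                            unsigned attrLen, unsigned objectId,
                            unsigned objectVersion, unsigned long long transId)
    {
      h.apiConnectPtr      = connectPtr;
      h.senderData         = senderData;
      h.attrLen            = attrLen;
      h.tableId            = objectId;
      h.tableSchemaVersion = objectVersion;
      h.transId1           = (unsigned) transId;          // low 32 bits
      h.transId2           = (unsigned) (transId >> 32);  // high 32 bits
    }
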
@@ -564,59 +243,53 @@ NdbIndexOperation::prepareSend(Uint32 aTC_ConnectPtr, Uint64 aTransactionId)
Uint8 tSimpleState = tReadInd & tSimpleAlt;
//theNdbCon->theSimpleState = tSimpleState;
- tcIndxReq->transId1 = tTransId1;
- tcIndxReq->transId2 = tTransId2;
+ tcKeyReq->transId1 = tTransId1;
+ tcKeyReq->transId2 = tTransId2;
tReqInfo = 0;
- if (tTotalCurrAI_Len <= TcIndxReq::MaxAttrInfo) {
- tcIndxReq->setAIInTcIndxReq(tReqInfo, tTotalCurrAI_Len);
+ if (tTotalCurrAI_Len <= TcKeyReq::MaxAttrInfo) {
+ tcKeyReq->setAIInTcKeyReq(tReqInfo, tTotalCurrAI_Len);
} else {
- tcIndxReq->setAIInTcIndxReq(tReqInfo, TcIndxReq::MaxAttrInfo);
+ tcKeyReq->setAIInTcKeyReq(tReqInfo, TcKeyReq::MaxAttrInfo);
}//if
- tcIndxReq->setSimpleFlag(tReqInfo, tSimpleIndicator);
- tcIndxReq->setCommitFlag(tReqInfo, tCommitIndicator);
- tcIndxReq->setStartFlag(tReqInfo, tStartIndicator);
+ tcKeyReq->setSimpleFlag(tReqInfo, tSimpleIndicator);
+ tcKeyReq->setCommitFlag(tReqInfo, tCommitIndicator);
+ tcKeyReq->setStartFlag(tReqInfo, tStartIndicator);
const Uint8 tInterpretIndicator = theInterpretIndicator;
- tcIndxReq->setInterpretedFlag(tReqInfo, tInterpretIndicator);
+ tcKeyReq->setInterpretedFlag(tReqInfo, tInterpretIndicator);
Uint8 tDirtyIndicator = theDirtyIndicator;
OperationType tOperationType = theOperationType;
- Uint32 tIndexLen = m_theIndexLen;
+ Uint32 tIndexLen = theTupKeyLen;
Uint8 abortOption = theNdbCon->m_abortOption;
- tcIndxReq->setDirtyFlag(tReqInfo, tDirtyIndicator);
- tcIndxReq->setOperationType(tReqInfo, tOperationType);
- tcIndxReq->setIndexLength(tReqInfo, tIndexLen);
- tcIndxReq->setCommitType(tReqInfo, abortOption);
+ tcKeyReq->setDirtyFlag(tReqInfo, tDirtyIndicator);
+ tcKeyReq->setOperationType(tReqInfo, tOperationType);
+ tcKeyReq->setKeyLength(tReqInfo, tIndexLen);
+ tcKeyReq->setAbortOption(tReqInfo, abortOption);
- Uint8 tDistrKeyIndicator = theDistrKeyIndicator;
- Uint8 tDistrGroupIndicator = theDistrGroupIndicator;
- Uint8 tDistrGroupType = theDistrGroupType;
+ Uint8 tDistrKeyIndicator = theDistrKeyIndicator_;
Uint8 tScanIndicator = theScanInfo & 1;
- tcIndxReq->setDistributionGroupFlag(tReqInfo, tDistrGroupIndicator);
- tcIndxReq->setDistributionGroupTypeFlag(tReqInfo, tDistrGroupType);
- tcIndxReq->setDistributionKeyFlag(tReqInfo, tDistrKeyIndicator);
- tcIndxReq->setScanIndFlag(tReqInfo, tScanIndicator);
+ tcKeyReq->setDistributionKeyFlag(tReqInfo, tDistrKeyIndicator);
+ tcKeyReq->setScanIndFlag(tReqInfo, tScanIndicator);
- tcIndxReq->requestInfo = tReqInfo;
+ tcKeyReq->requestInfo = tReqInfo;
//-------------------------------------------------------------
// The next step is to fill in the up to three conditional words.
//-------------------------------------------------------------
- Uint32* tOptionalDataPtr = &tcIndxReq->scanInfo;
+ Uint32* tOptionalDataPtr = &tcKeyReq->scanInfo;
Uint32 tDistrGHIndex = tScanIndicator;
- Uint32 tDistrKeyIndex = tDistrGHIndex + tDistrGroupIndicator;
+ Uint32 tDistrKeyIndex = tDistrGHIndex;
Uint32 tScanInfo = theScanInfo;
- Uint32 tDistributionGroup = theDistributionGroup;
- Uint32 tDistrKeySize = theDistrKeySize;
+ Uint32 tDistrKey = theDistributionKey;
tOptionalDataPtr[0] = tScanInfo;
- tOptionalDataPtr[tDistrGHIndex] = tDistributionGroup;
- tOptionalDataPtr[tDistrKeyIndex] = tDistrKeySize;
+ tOptionalDataPtr[tDistrKeyIndex] = tDistrKey;
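
The requestInfo word assembled above packs several one-bit flags and small fields (commit, start, dirty, interpreted, operation type, key length, abort option) into a single 32-bit word through the TcKeyReq::set* helpers. A generic sketch of that style of bit packing, with made-up field positions:

    // Hypothetical field positions; the real TcKeyReq encoding differs.
    enum {
      POS_COMMIT = 0, POS_START = 1, POS_DIRTY = 2, POS_INTERPRETED = 3,
      POS_OPTYPE = 4,   // 3 bits
      POS_KEYLEN = 8    // 12 bits
    };

    static inline void set_flag(unsigned& info, unsigned pos, unsigned val)
    {
      info |= (val & 1) << pos;
    }

    static inline void set_field(unsigned& info, unsigned pos, unsigned bits,
                                 unsigned val)
    {
      info |= (val & ((1u << bits) - 1)) << pos;
    }

    static unsigned build_request_info(bool commit, bool start, bool dirty,
                                       bool interpreted, unsigned opType,
                                       unsigned keyLenWords)
    {
      unsigned info = 0;
      set_flag(info, POS_COMMIT, commit);
      set_flag(info, POS_START, start);
      set_flag(info, POS_DIRTY, dirty);
      set_flag(info, POS_INTERPRETED, interpreted);
      set_field(info, POS_OPTYPE, 3, opType);
      set_field(info, POS_KEYLEN, 12, keyLenWords);
      return info;
    }
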
//-------------------------------------------------------------
// The next step is to compress the key data part of the
@@ -624,10 +297,10 @@ NdbIndexOperation::prepareSend(Uint32 aTC_ConnectPtr, Uint64 aTransactionId)
//-------------------------------------------------------------
Uint32 tKeyIndex = tDistrKeyIndex + tDistrKeyIndicator;
Uint32* tKeyDataPtr = &tOptionalDataPtr[tKeyIndex];
- Uint32 Tdata1 = tcIndxReq->keyInfo[0];
- Uint32 Tdata2 = tcIndxReq->keyInfo[1];
- Uint32 Tdata3 = tcIndxReq->keyInfo[2];
- Uint32 Tdata4 = tcIndxReq->keyInfo[3];
+ Uint32 Tdata1 = tcKeyReq->keyInfo[0];
+ Uint32 Tdata2 = tcKeyReq->keyInfo[1];
+ Uint32 Tdata3 = tcKeyReq->keyInfo[2];
+ Uint32 Tdata4 = tcKeyReq->keyInfo[3];
Uint32 Tdata5;
tKeyDataPtr[0] = Tdata1;
@@ -635,10 +308,10 @@ NdbIndexOperation::prepareSend(Uint32 aTC_ConnectPtr, Uint64 aTransactionId)
tKeyDataPtr[2] = Tdata3;
tKeyDataPtr[3] = Tdata4;
if (tIndexLen > 4) {
- Tdata1 = tcIndxReq->keyInfo[4];
- Tdata2 = tcIndxReq->keyInfo[5];
- Tdata3 = tcIndxReq->keyInfo[6];
- Tdata4 = tcIndxReq->keyInfo[7];
+ Tdata1 = tcKeyReq->keyInfo[4];
+ Tdata2 = tcKeyReq->keyInfo[5];
+ Tdata3 = tcKeyReq->keyInfo[6];
+ Tdata4 = tcKeyReq->keyInfo[7];
tKeyDataPtr[4] = Tdata1;
tKeyDataPtr[5] = Tdata2;
@@ -652,12 +325,12 @@ NdbIndexOperation::prepareSend(Uint32 aTC_ConnectPtr, Uint64 aTransactionId)
//-------------------------------------------------------------
Uint32 tAttrInfoIndex;
- if (tIndexLen > TcIndxReq::MaxKeyInfo) {
+ if (tIndexLen > TcKeyReq::MaxKeyInfo) {
/**
* Set transid and TC connect ptr in the INDXKEYINFO signals
*/
- NdbApiSignal* tSignal = theFirstKEYINFO;
- Uint32 remainingKey = tIndexLen - TcIndxReq::MaxKeyInfo;
+ NdbApiSignal* tSignal = theTCREQ->next();
+ Uint32 remainingKey = tIndexLen - TcKeyReq::MaxKeyInfo;
do {
Uint32* tSigDataPtr = tSignal->getDataPtrSend();
@@ -678,7 +351,7 @@ NdbIndexOperation::prepareSend(Uint32 aTC_ConnectPtr, Uint64 aTransactionId)
}
tSignal = tnextSignal;
} while (tSignal != NULL);
- tAttrInfoIndex = tKeyIndex + TcIndxReq::MaxKeyInfo;
+ tAttrInfoIndex = tKeyIndex + TcKeyReq::MaxKeyInfo;
} else {
tAttrInfoIndex = tKeyIndex + tIndexLen;
}//if
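
Only the first TcKeyReq::MaxKeyInfo words of the key travel inside the TCKEYREQ signal itself; the rest is carried by a chain of KEYINFO signals that now hangs directly off theTCREQ via next(). A simplified sketch of the split (the constants are stand-ins for TcKeyReq::MaxKeyInfo and KeyInfo::DataLength):

    #include <algorithm>
    #include <vector>

    static const unsigned MAX_INLINE = 8;        // stand-in for TcKeyReq::MaxKeyInfo
    static const unsigned WORDS_PER_SIGNAL = 20; // stand-in for KeyInfo::DataLength

    struct KeyChunks {
      std::vector<unsigned> inlineKey;                    // inside the request
      std::vector<std::vector<unsigned> > extraSignals;   // chained payloads
    };

    static KeyChunks split_key(const unsigned* key, unsigned lenWords)
    {
      KeyChunks c;
      const unsigned n = std::min(lenWords, MAX_INLINE);
      c.inlineKey.assign(key, key + n);

      for (unsigned pos = n; pos < lenWords; pos += WORDS_PER_SIGNAL) {
        const unsigned take = std::min(WORDS_PER_SIGNAL, lenWords - pos);
        c.extraSignals.push_back(std::vector<unsigned>(key + pos,
                                                       key + pos + take));
      }
      return c;
    }
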
@@ -688,14 +361,14 @@ NdbIndexOperation::prepareSend(Uint32 aTC_ConnectPtr, Uint64 aTransactionId)
// above.
//-------------------------------------------------------------
Uint32* tAIDataPtr = &tOptionalDataPtr[tAttrInfoIndex];
- Tdata1 = tcIndxReq->attrInfo[0];
- Tdata2 = tcIndxReq->attrInfo[1];
- Tdata3 = tcIndxReq->attrInfo[2];
- Tdata4 = tcIndxReq->attrInfo[3];
- Tdata5 = tcIndxReq->attrInfo[4];
-
- theTCREQ->setLength(tcIndxReq->getAIInTcIndxReq(tReqInfo) +
- tAttrInfoIndex + TcIndxReq::StaticLength);
+ Tdata1 = tcKeyReq->attrInfo[0];
+ Tdata2 = tcKeyReq->attrInfo[1];
+ Tdata3 = tcKeyReq->attrInfo[2];
+ Tdata4 = tcKeyReq->attrInfo[3];
+ Tdata5 = tcKeyReq->attrInfo[4];
+
+ theTCREQ->setLength(tcKeyReq->getAIInTcKeyReq(tReqInfo) +
+ tAttrInfoIndex + TcKeyReq::StaticLength);
tAIDataPtr[0] = Tdata1;
tAIDataPtr[1] = Tdata2;
tAIDataPtr[2] = Tdata3;
@@ -724,11 +397,6 @@ NdbIndexOperation::prepareSend(Uint32 aTC_ConnectPtr, Uint64 aTransactionId)
return 0;
}
-void NdbIndexOperation::closeScan()
-{
- printf("NdbIndexOperation::closeScan NYI\n");
-}
-
/***************************************************************************
int receiveTCINDXREF( NdbApiSignal* aSignal)
@@ -740,17 +408,5 @@ Remark: Handles the reception of the TCKEYREF signal.
int
NdbIndexOperation::receiveTCINDXREF( NdbApiSignal* aSignal)
{
- const TcIndxRef * const tcIndxRef = CAST_CONSTPTR(TcIndxRef, aSignal->getDataPtr());
-
- if (checkState_TransId(aSignal) == -1) {
- return -1;
- }//if
-
- theStatus = Finished;
-
- theNdbCon->theReturnStatus = NdbConnection::ReturnFailure;
- Uint32 errorCode = tcIndxRef->errorCode;
- theError.code = errorCode;
- theNdbCon->setOperationErrorCodeAbort(errorCode);
- return theNdbCon->OpCompleteFailure(theNdbCon->m_abortOption);
+ return receiveTCKEYREF(aSignal);
}//NdbIndexOperation::receiveTCINDXREF()
diff --git a/ndb/src/ndbapi/NdbLinHash.hpp b/ndb/src/ndbapi/NdbLinHash.hpp
index f245a261a04..05670534c95 100644
--- a/ndb/src/ndbapi/NdbLinHash.hpp
+++ b/ndb/src/ndbapi/NdbLinHash.hpp
@@ -192,7 +192,7 @@ template <class C>
inline
Int32
NdbLinHash<C>::insertKey( const char* str, Uint32 len, Uint32 lkey1, C* data )
-{
+{
const Uint32 hash = Hash(str, len);
int dir, seg;
getBucket(hash, &dir, &seg);
@@ -219,8 +219,9 @@ NdbLinHash<C>::insertKey( const char* str, Uint32 len, Uint32 lkey1, C* data )
chain->localkey1 = lkey1;
chain->next = 0;
chain->theData = data;
+ len++; // Null terminated
chain->str = new Uint32[((len + 3) >> 2)];
- memcpy( &chain->str[0], str, len );
+ memcpy( &chain->str[0], str, len);
if (oldChain != 0)
oldChain->next = chain;
else
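
The NdbLinHash fix above counts the terminating NUL byte into the stored key length before rounding the allocation up to whole 32-bit words, so the copy kept in the hash chain is always NUL terminated. A standalone sketch of the same idea:

    #include <cstring>

    // Store a key so that a trailing '\0' is always present in the word buffer.
    static unsigned* store_key(const char* str, unsigned len)
    {
      len++;                                        // include the NUL terminator
      unsigned* buf = new unsigned[(len + 3) >> 2]; // round up to whole words
      std::memset(buf, 0, ((len + 3) >> 2) * 4);    // zero the padding bytes too
      std::memcpy(buf, str, len);
      return buf;
    }
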
diff --git a/ndb/src/ndbapi/NdbOperation.cpp b/ndb/src/ndbapi/NdbOperation.cpp
index 88d8a000d50..c9143444908 100644
--- a/ndb/src/ndbapi/NdbOperation.cpp
+++ b/ndb/src/ndbapi/NdbOperation.cpp
@@ -15,7 +15,7 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
#include <ndb_global.h>
-#include <NdbConnection.hpp>
+#include <NdbTransaction.hpp>
#include <NdbOperation.hpp>
#include "NdbApiSignal.hpp"
#include "NdbRecAttr.hpp"
@@ -49,7 +49,6 @@ NdbOperation::NdbOperation(Ndb* aNdb) :
theCurrentATTRINFO(NULL),
theTotalCurrAI_Len(0),
theAI_LenInCurrAI(0),
- theFirstKEYINFO(NULL),
theLastKEYINFO(NULL),
theFirstLabel(NULL),
@@ -68,13 +67,11 @@ NdbOperation::NdbOperation(Ndb* aNdb) :
//theSchemaVersion(0),
theTotalNrOfKeyWordInSignal(8),
theTupKeyLen(0),
- theNoOfTupKeyDefined(0),
+ theNoOfTupKeyLeft(0),
theOperationType(NotDefined),
theStatus(Init),
theMagicNumber(0xFE11D0),
theScanInfo(0),
- theDistrKeySize(0),
- theDistributionGroup(0),
m_tcReqGSN(GSN_TCKEYREQ),
m_keyInfoGSN(GSN_KEYINFO),
m_attrInfoGSN(GSN_ATTRINFO),
@@ -131,7 +128,7 @@ NdbOperation::setErrorCodeAbort(int anErrorCode)
*****************************************************************************/
int
-NdbOperation::init(const NdbTableImpl* tab, NdbConnection* myConnection){
+NdbOperation::init(const NdbTableImpl* tab, NdbTransaction* myConnection){
NdbApiSignal* tSignal;
theStatus = Init;
theError.code = 0;
@@ -145,14 +142,11 @@ NdbOperation::init(const NdbTableImpl* tab, NdbConnection* myConnection){
theFirstATTRINFO = NULL;
theCurrentATTRINFO = NULL;
- theFirstKEYINFO = NULL;
theLastKEYINFO = NULL;
- theTupKeyLen = 0;
- theNoOfTupKeyDefined = 0;
- theDistrKeySize = 0;
- theDistributionGroup = 0;
+ theTupKeyLen = 0;
+ theNoOfTupKeyLeft = tab->getNoOfPrimaryKeys();
theTotalCurrAI_Len = 0;
theAI_LenInCurrAI = 0;
@@ -161,9 +155,7 @@ NdbOperation::init(const NdbTableImpl* tab, NdbConnection* myConnection){
theSimpleIndicator = 0;
theDirtyIndicator = 0;
theInterpretIndicator = 0;
- theDistrGroupIndicator= 0;
- theDistrGroupType = 0;
- theDistrKeyIndicator = 0;
+ theDistrKeyIndicator_ = 0;
theScanInfo = 0;
theTotalNrOfKeyWordInSignal = 8;
theMagicNumber = 0xABCDEF01;
@@ -210,11 +202,16 @@ NdbOperation::release()
NdbBlob* tBlob;
NdbBlob* tSaveBlob;
- if (theTCREQ != NULL)
+ tSignal = theTCREQ;
+ while (tSignal != NULL)
{
- theNdb->releaseSignal(theTCREQ);
- }
+ tSaveSignal = tSignal;
+ tSignal = tSignal->next();
+ theNdb->releaseSignal(tSaveSignal);
+ }
theTCREQ = NULL;
+ theLastKEYINFO = NULL;
+
tSignal = theFirstATTRINFO;
while (tSignal != NULL)
{
@@ -224,15 +221,7 @@ NdbOperation::release()
}
theFirstATTRINFO = NULL;
theCurrentATTRINFO = NULL;
- tSignal = theFirstKEYINFO;
- while (tSignal != NULL)
- {
- tSaveSignal = tSignal;
- tSignal = tSignal->next();
- theNdb->releaseSignal(tSaveSignal);
- }
- theFirstKEYINFO = NULL;
- theLastKEYINFO = NULL;
+
if (theInterpretIndicator == 1)
{
tBranch = theFirstBranch;
@@ -403,3 +392,9 @@ NdbOperation::getTableName() const
{
return m_currentTable->m_externalName.c_str();
}
+
+const NdbDictionary::Table*
+NdbOperation::getTable() const
+{
+ return m_currentTable;
+}
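
With the KEYINFO signals linked behind theTCREQ, release() now walks one chain starting at the request signal instead of keeping a separate theFirstKEYINFO list. The traversal pattern, sketched over a minimal stand-in signal type:

    #include <cstddef>

    struct Signal {
      Signal* theNext;
      Signal* next() { return theNext; }
    };

    struct SignalPool {
      void releaseSignal(Signal* s) { delete s; }
    };

    // Walk the chain from the head request signal and release every link,
    // reading the next pointer before the node is recycled.
    static void release_chain(Signal*& head, SignalPool& pool)
    {
      Signal* cur = head;
      while (cur != NULL) {
        Signal* next = cur->next();
        pool.releaseSignal(cur);
        cur = next;
      }
      head = NULL;
    }
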
diff --git a/ndb/src/ndbapi/NdbOperationDefine.cpp b/ndb/src/ndbapi/NdbOperationDefine.cpp
index dfc54133e6c..835e33dfb40 100644
--- a/ndb/src/ndbapi/NdbOperationDefine.cpp
+++ b/ndb/src/ndbapi/NdbOperationDefine.cpp
@@ -14,28 +14,17 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-
-/*****************************************************************************
- * Name: NdbOperationDefine.C
- * Include:
- * Link:
- * Author: UABMNST Mona Natterkvist UAB/B/SD
- * Date: 970829
- * Version: 0.1
- * Description: Interface between TIS and NDB
- * Documentation:
- * Adjust: 971022 UABMNST First version.
- *****************************************************************************/
-#include "NdbOperation.hpp"
+#include <ndb_global.h>
+#include <NdbOperation.hpp>
#include "NdbApiSignal.hpp"
-#include "NdbConnection.hpp"
-#include "Ndb.hpp"
-#include "NdbRecAttr.hpp"
+#include <NdbTransaction.hpp>
+#include <Ndb.hpp>
+#include <NdbRecAttr.hpp>
#include "NdbUtil.hpp"
#include "NdbOut.hpp"
#include "NdbImpl.hpp"
#include <NdbIndexScanOperation.hpp>
-#include "NdbBlob.hpp"
+#include <NdbBlob.hpp>
#include <Interpreter.hpp>
@@ -48,7 +37,7 @@
int
NdbOperation::insertTuple()
{
- NdbConnection* tNdbCon = theNdbCon;
+ NdbTransaction* tNdbCon = theNdbCon;
int tErrorLine = theErrorLine;
if (theStatus == Init) {
theStatus = OperationDefined;
@@ -68,7 +57,7 @@ NdbOperation::insertTuple()
int
NdbOperation::updateTuple()
{
- NdbConnection* tNdbCon = theNdbCon;
+ NdbTransaction* tNdbCon = theNdbCon;
int tErrorLine = theErrorLine;
if (theStatus == Init) {
theStatus = OperationDefined;
@@ -88,7 +77,7 @@ NdbOperation::updateTuple()
int
NdbOperation::writeTuple()
{
- NdbConnection* tNdbCon = theNdbCon;
+ NdbTransaction* tNdbCon = theNdbCon;
int tErrorLine = theErrorLine;
if (theStatus == Init) {
theStatus = OperationDefined;
@@ -128,7 +117,7 @@ NdbOperation::readTuple(NdbOperation::LockMode lm)
int
NdbOperation::readTuple()
{
- NdbConnection* tNdbCon = theNdbCon;
+ NdbTransaction* tNdbCon = theNdbCon;
int tErrorLine = theErrorLine;
if (theStatus == Init) {
theStatus = OperationDefined;
@@ -149,7 +138,7 @@ NdbOperation::readTuple()
int
NdbOperation::deleteTuple()
{
- NdbConnection* tNdbCon = theNdbCon;
+ NdbTransaction* tNdbCon = theNdbCon;
int tErrorLine = theErrorLine;
if (theStatus == Init) {
theStatus = OperationDefined;
@@ -170,7 +159,7 @@ NdbOperation::deleteTuple()
int
NdbOperation::readTupleExclusive()
{
- NdbConnection* tNdbCon = theNdbCon;
+ NdbTransaction* tNdbCon = theNdbCon;
int tErrorLine = theErrorLine;
if (theStatus == Init) {
theStatus = OperationDefined;
@@ -247,7 +236,7 @@ NdbOperation::committedRead()
int
NdbOperation::dirtyUpdate()
{
- NdbConnection* tNdbCon = theNdbCon;
+ NdbTransaction* tNdbCon = theNdbCon;
int tErrorLine = theErrorLine;
if (theStatus == Init) {
theStatus = OperationDefined;
@@ -270,7 +259,7 @@ NdbOperation::dirtyUpdate()
int
NdbOperation::dirtyWrite()
{
- NdbConnection* tNdbCon = theNdbCon;
+ NdbTransaction* tNdbCon = theNdbCon;
int tErrorLine = theErrorLine;
if (theStatus == Init) {
theStatus = OperationDefined;
@@ -293,7 +282,7 @@ NdbOperation::dirtyWrite()
int
NdbOperation::interpretedUpdateTuple()
{
- NdbConnection* tNdbCon = theNdbCon;
+ NdbTransaction* tNdbCon = theNdbCon;
int tErrorLine = theErrorLine;
if (theStatus == Init) {
theStatus = OperationDefined;
@@ -316,7 +305,7 @@ NdbOperation::interpretedUpdateTuple()
int
NdbOperation::interpretedDeleteTuple()
{
- NdbConnection* tNdbCon = theNdbCon;
+ NdbTransaction* tNdbCon = theNdbCon;
int tErrorLine = theErrorLine;
if (theStatus == Init) {
theStatus = OperationDefined;
@@ -350,7 +339,6 @@ NdbOperation::getValue_impl(const NdbColumnImpl* tAttrInfo, char* aValue)
{
NdbRecAttr* tRecAttr;
if ((tAttrInfo != NULL) &&
- (!tAttrInfo->m_indexOnly) &&
(theStatus != Init)){
if (theStatus != GetValue) {
if (theInterpretIndicator == 1) {
@@ -398,10 +386,6 @@ NdbOperation::getValue_impl(const NdbColumnImpl* tAttrInfo, char* aValue)
setErrorCodeAbort(4004);
return NULL;
}//if
- if (tAttrInfo->m_indexOnly){
- setErrorCodeAbort(4208);
- return NULL;
- }//if
}//if
setErrorCodeAbort(4200);
return NULL;
@@ -422,6 +406,14 @@ int
NdbOperation::setValue( const NdbColumnImpl* tAttrInfo,
const char* aValuePassed, Uint32 len)
{
+ DBUG_ENTER("NdbOperation::setValue");
+ DBUG_PRINT("enter", ("col=%s op=%d val=0x%x len=%u",
+ tAttrInfo->m_name.c_str(),
+ theOperationType,
+ aValuePassed, len));
+ if (aValuePassed != NULL)
+ DBUG_DUMP("value", (char*)aValuePassed, len);
+
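
The added tracing follows the MySQL dbug conventions: DBUG_ENTER names the frame, DBUG_PRINT and DBUG_DUMP emit keyed trace output, and every exit must go through DBUG_RETURN so the frame stack stays balanced, which is why the plain return statements in the rest of this function are converted below. A minimal usage sketch with a made-up function:

    #include <ndb_global.h>   // makes the dbug macros available, as above

    static int checked_copy(char* dst, const char* src, unsigned len)
    {
      DBUG_ENTER("checked_copy");
      DBUG_PRINT("enter", ("dst=0x%lx src=0x%lx len=%u",
                           (long) dst, (long) src, len));
      if (src == NULL)
        DBUG_RETURN(-1);              // never use a bare return after DBUG_ENTER
      DBUG_DUMP("value", (char*) src, len);
      memcpy(dst, src, len);
      DBUG_RETURN(0);
    }
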
int tReturnCode;
Uint32 tAttrId;
Uint32 tData;
@@ -437,7 +429,7 @@ NdbOperation::setValue( const NdbColumnImpl* tAttrInfo,
;
} else {
setErrorCodeAbort(4234);
- return -1;
+ DBUG_RETURN(-1);
}//if
} else {
if (tStatus == GetValue) {
@@ -448,7 +440,7 @@ NdbOperation::setValue( const NdbColumnImpl* tAttrInfo,
// to set values in the tuple by setValue.
//--------------------------------------------------------------------
if (insertATTRINFO(Interpreter::EXIT_OK) == -1){
- return -1;
+ DBUG_RETURN(-1);
}
theInterpretedSize = theTotalCurrAI_Len -
(theInitialReadSize + 5);
@@ -459,47 +451,47 @@ NdbOperation::setValue( const NdbColumnImpl* tAttrInfo,
// setValue used in the wrong context. Application coding error.
//-------------------------------------------------------------------
setErrorCodeAbort(4234); //Wrong error code
- return -1;
+ DBUG_RETURN(-1);
}//if
theStatus = SetValueInterpreted;
}//if
} else if (tOpType == InsertRequest) {
if ((theStatus != SetValue) && (theStatus != OperationDefined)) {
setErrorCodeAbort(4234);
- return -1;
+ DBUG_RETURN(-1);
}//if
} else if (tOpType == ReadRequest || tOpType == ReadExclusive) {
setErrorCodeAbort(4504);
- return -1;
+ DBUG_RETURN(-1);
} else if (tOpType == DeleteRequest) {
setErrorCodeAbort(4504);
- return -1;
+ DBUG_RETURN(-1);
} else if (tOpType == OpenScanRequest || tOpType == OpenRangeScanRequest) {
setErrorCodeAbort(4228);
- return -1;
+ DBUG_RETURN(-1);
} else {
//---------------------------------------------------------------------
// setValue with undefined operation type.
// Probably application coding error.
//---------------------------------------------------------------------
setErrorCodeAbort(4108);
- return -1;
+ DBUG_RETURN(-1);
}//if
if (tAttrInfo == NULL) {
setErrorCodeAbort(4004);
- return -1;
+ DBUG_RETURN(-1);
}//if
if (tAttrInfo->m_pk) {
if (theOperationType == InsertRequest) {
- return equal_impl(tAttrInfo, aValuePassed, len);
+ DBUG_RETURN(equal_impl(tAttrInfo, aValuePassed, len));
} else {
setErrorCodeAbort(4202);
- return -1;
+ DBUG_RETURN(-1);
}//if
}//if
if (len > 8000) {
setErrorCodeAbort(4216);
- return -1;
+ DBUG_RETURN(-1);
}//if
tAttrId = tAttrInfo->m_attrId;
@@ -512,33 +504,19 @@ NdbOperation::setValue( const NdbColumnImpl* tAttrInfo,
insertATTRINFO(ahValue);
// Insert Attribute Id with the value
// NULL into ATTRINFO part.
- return 0;
+ DBUG_RETURN(0);
} else {
/***********************************************************************
* Setting a NULL value on a NOT NULL attribute is not allowed.
**********************************************************************/
setErrorCodeAbort(4203);
- return -1;
+ DBUG_RETURN(-1);
}//if
}//if
// Insert Attribute Id into ATTRINFO part.
const Uint32 sizeInBytes = tAttrInfo->m_attrSize * tAttrInfo->m_arraySize;
- CHARSET_INFO* cs = tAttrInfo->m_cs;
- int dummy_error;
- // invalid data can crash kernel
- if (cs != NULL &&
- // fast fix bug#7340
- tAttrInfo->m_type != NdbDictionary::Column::Text &&
- (*cs->cset->well_formed_len)(cs,
- aValue,
- aValue + sizeInBytes,
- sizeInBytes,
- &dummy_error) != sizeInBytes) {
- setErrorCodeAbort(744);
- return -1;
- }
#if 0
tAttrSize = tAttrInfo->theAttrSize;
tArraySize = tAttrInfo->theArraySize;
@@ -552,7 +530,7 @@ NdbOperation::setValue( const NdbColumnImpl* tAttrInfo,
const Uint32 bitsInLastWord = 8 * (sizeInBytes & 3) ;
if (len != sizeInBytes && (len != 0)) {
setErrorCodeAbort(4209);
- return -1;
+ DBUG_RETURN(-1);
}//if
const Uint32 totalSizeInWords = (sizeInBytes + 3)/4; // Including bits in last word
const Uint32 sizeInWords = sizeInBytes / 4; // Excluding bits in last word
@@ -580,7 +558,7 @@ NdbOperation::setValue( const NdbColumnImpl* tAttrInfo,
tReturnCode = insertATTRINFOloop((Uint32*)aValue, sizeInWords);
if (tReturnCode == -1) {
- return tReturnCode;
+ DBUG_RETURN(tReturnCode);
}//if
if (bitsInLastWord != 0) {
tData = *(Uint32*)(aValue + sizeInWords*4);
@@ -589,15 +567,15 @@ NdbOperation::setValue( const NdbColumnImpl* tAttrInfo,
tData = convertEndian(tData);
tReturnCode = insertATTRINFO(tData);
if (tReturnCode == -1) {
- return tReturnCode;
+ DBUG_RETURN(tReturnCode);
}//if
}//if
theErrorLine++;
- return 0;
+ DBUG_RETURN(0);
}//NdbOperation::setValue()
NdbBlob*
-NdbOperation::getBlobHandle(NdbConnection* aCon, const NdbColumnImpl* tAttrInfo)
+NdbOperation::getBlobHandle(NdbTransaction* aCon, const NdbColumnImpl* tAttrInfo)
{
NdbBlob* tBlob = theBlobList;
NdbBlob* tLastBlob = NULL;
diff --git a/ndb/src/ndbapi/NdbOperationExec.cpp b/ndb/src/ndbapi/NdbOperationExec.cpp
index 48882a479b9..58a816e3c1a 100644
--- a/ndb/src/ndbapi/NdbOperationExec.cpp
+++ b/ndb/src/ndbapi/NdbOperationExec.cpp
@@ -16,7 +16,7 @@
#include <ndb_global.h>
#include <NdbOperation.hpp>
-#include <NdbConnection.hpp>
+#include <NdbTransaction.hpp>
#include "NdbApiSignal.hpp"
#include <Ndb.hpp>
#include <NdbRecAttr.hpp>
@@ -65,7 +65,7 @@ NdbOperation::doSend(int aNodeId, Uint32 lastFlag)
if (tReturnCode == -1) {
return -1;
}
- NdbApiSignal *tSignal = theFirstKEYINFO;
+ NdbApiSignal *tSignal = theTCREQ->next();
while (tSignal != NULL) {
NdbApiSignal* tnextSignal = tSignal->next();
tReturnCode = tp->sendSignal(tSignal, aNodeId);
@@ -208,13 +208,9 @@ NdbOperation::prepareSend(Uint32 aTC_ConnectPtr, Uint64 aTransId)
abortOption = tSimpleIndicator ? AO_IgnoreError : abortOption;
tcKeyReq->setAbortOption(tReqInfo, abortOption);
- Uint8 tDistrKeyIndicator = theDistrKeyIndicator;
- Uint8 tDistrGroupIndicator = theDistrGroupIndicator;
- Uint8 tDistrGroupType = theDistrGroupType;
+ Uint8 tDistrKeyIndicator = theDistrKeyIndicator_;
Uint8 tScanIndicator = theScanInfo & 1;
- tcKeyReq->setDistributionGroupFlag(tReqInfo, tDistrGroupIndicator);
- tcKeyReq->setDistributionGroupTypeFlag(tReqInfo, tDistrGroupType);
tcKeyReq->setDistributionKeyFlag(tReqInfo, tDistrKeyIndicator);
tcKeyReq->setScanIndFlag(tReqInfo, tScanIndicator);
@@ -225,15 +221,13 @@ NdbOperation::prepareSend(Uint32 aTC_ConnectPtr, Uint64 aTransId)
//-------------------------------------------------------------
Uint32* tOptionalDataPtr = &tcKeyReq->scanInfo;
Uint32 tDistrGHIndex = tScanIndicator;
- Uint32 tDistrKeyIndex = tDistrGHIndex + tDistrGroupIndicator;
+ Uint32 tDistrKeyIndex = tDistrGHIndex;
Uint32 tScanInfo = theScanInfo;
- Uint32 tDistributionGroup = theDistributionGroup;
- Uint32 tDistrKeySize = theDistrKeySize;
+ Uint32 tDistrKey = theDistributionKey;
tOptionalDataPtr[0] = tScanInfo;
- tOptionalDataPtr[tDistrGHIndex] = tDistributionGroup;
- tOptionalDataPtr[tDistrKeyIndex] = tDistrKeySize;
+ tOptionalDataPtr[tDistrKeyIndex] = tDistrKey;
//-------------------------------------------------------------
// The next step is to compress the key data part of the
@@ -273,7 +267,7 @@ NdbOperation::prepareSend(Uint32 aTC_ConnectPtr, Uint64 aTransId)
/**
* Set transid, TC connect ptr and length in the KEYINFO signals
*/
- NdbApiSignal* tSignal = theFirstKEYINFO;
+ NdbApiSignal* tSignal = theTCREQ->next();
Uint32 remainingKey = tTupKeyLen - TcKeyReq::MaxKeyInfo;
do {
Uint32* tSigDataPtr = tSignal->getDataPtrSend();
@@ -555,10 +549,11 @@ NdbOperation::receiveTCKEYREF( NdbApiSignal* aSignal)
theStatus = Finished;
// blobs want this
if (m_abortOption != AO_IgnoreError)
- theNdbCon->theReturnStatus = NdbConnection::ReturnFailure;
-
+ {
+ theNdbCon->theReturnStatus = NdbTransaction::ReturnFailure;
+ }
theError.code = aSignal->readData(4);
- theNdbCon->setOperationErrorCodeAbort(aSignal->readData(4), m_abortOption);
+ theNdbCon->setOperationErrorCodeAbort(aSignal->readData(4), ao);
if(theOperationType != ReadRequest || !theSimpleIndicator) // not simple read
return theNdbCon->OpCompleteFailure(ao, m_abortOption != AO_IgnoreError);
diff --git a/ndb/src/ndbapi/NdbOperationInt.cpp b/ndb/src/ndbapi/NdbOperationInt.cpp
index ace90e35ca4..41e0cb1d140 100644
--- a/ndb/src/ndbapi/NdbOperationInt.cpp
+++ b/ndb/src/ndbapi/NdbOperationInt.cpp
@@ -14,13 +14,12 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-
#include <ndb_global.h>
#include <NdbOperation.hpp>
#include "NdbApiSignal.hpp"
-#include <NdbConnection.hpp>
+#include <NdbTransaction.hpp>
#include <Ndb.hpp>
-#include "NdbRecAttr.hpp"
+#include <NdbRecAttr.hpp>
#include "NdbUtil.hpp"
#include "Interpreter.hpp"
#include <NdbIndexScanOperation.hpp>
@@ -84,7 +83,7 @@ NdbOperation::incCheck(const NdbColumnImpl* tNdbColumnImpl)
}
return tNdbColumnImpl->m_attrId;
} else {
- if (theNdbCon->theCommitStatus == NdbConnection::Started)
+ if (theNdbCon->theCommitStatus == NdbTransaction::Started)
setErrorCodeAbort(4200);
}
return -1;
@@ -136,7 +135,7 @@ NdbOperation::write_attrCheck(const NdbColumnImpl* tNdbColumnImpl)
}
return tNdbColumnImpl->m_attrId;
} else {
- if (theNdbCon->theCommitStatus == NdbConnection::Started)
+ if (theNdbCon->theCommitStatus == NdbTransaction::Started)
setErrorCodeAbort(4200);
}
return -1;
@@ -184,7 +183,7 @@ NdbOperation::read_attrCheck(const NdbColumnImpl* tNdbColumnImpl)
}
return tNdbColumnImpl->m_attrId;
} else {
- if (theNdbCon->theCommitStatus == NdbConnection::Started)
+ if (theNdbCon->theCommitStatus == NdbTransaction::Started)
setErrorCodeAbort(4200);
}
return -1;
@@ -220,7 +219,7 @@ NdbOperation::initial_interpreterCheck()
}
return 0;
} else {
- if (theNdbCon->theCommitStatus == NdbConnection::Started)
+ if (theNdbCon->theCommitStatus == NdbTransaction::Started)
setErrorCodeAbort(4200);
}
return -1;
@@ -246,7 +245,7 @@ NdbOperation::labelCheck()
}
return 0;
} else {
- if (theNdbCon->theCommitStatus == NdbConnection::Started)
+ if (theNdbCon->theCommitStatus == NdbTransaction::Started)
setErrorCodeAbort(4200);
}
return -1;
@@ -266,7 +265,7 @@ NdbOperation::intermediate_interpreterCheck()
}
return 0;
} else {
- if (theNdbCon->theCommitStatus == NdbConnection::Started)
+ if (theNdbCon->theCommitStatus == NdbTransaction::Started)
setErrorCodeAbort(4200);
}
return -1;
@@ -1012,33 +1011,56 @@ NdbOperation::insertCall(Uint32 aCall)
int
NdbOperation::branch_col(Uint32 type,
- Uint32 ColId, const char * val, Uint32 len,
+ Uint32 ColId, const void * val, Uint32 len,
bool nopad, Uint32 Label){
+ DBUG_ENTER("NdbOperation::branch_col");
+ DBUG_PRINT("enter", ("type=%u col=%u val=0x%x len=%u label=%u",
+ type, ColId, val, len, Label));
+ if (val != NULL)
+ DBUG_DUMP("value", (char*)val, len);
+
if (initial_interpreterCheck() == -1)
- return -1;
+ DBUG_RETURN(-1);
Interpreter::BinaryCondition c = (Interpreter::BinaryCondition)type;
-
- const NdbDictionary::Column * col =
+
+ const NdbColumnImpl * col =
m_currentTable->getColumn(ColId);
if(col == 0){
abort();
}
- Uint32 vc = col->getType() == NdbDictionary::Column::Varchar;
- Uint32 colLen = col->getLength() + 2 * vc;
- Uint32 al = (4 - (colLen & 3)) & 0x3;
-
- if (insertATTRINFO(Interpreter::BranchCol(c, al, vc, nopad)) == -1)
- return -1;
+ if (val == NULL)
+ len = 0;
+ else {
+ if (! col->getStringType()) {
+ // prevent assert in NdbSqlUtil on length error
+ Uint32 sizeInBytes = col->m_attrSize * col->m_arraySize;
+ if (len != 0 && len != sizeInBytes)
+ {
+ setErrorCodeAbort(4209);
+ DBUG_RETURN(-1);
+ }
+ len = sizeInBytes;
+ }
+ }
+
+ Uint32 tempData[2000];
+ if (((UintPtr)val & 3) != 0) {
+ memcpy(tempData, val, len);
+ val = tempData;
+ }
+
+ if (insertATTRINFO(Interpreter::BranchCol(c, 0, 0, false)) == -1)
+ DBUG_RETURN(-1);
if (insertBranch(Label) == -1)
- return -1;
+ DBUG_RETURN(-1);
if (insertATTRINFO(Interpreter::BranchCol_2(ColId, len)))
- return -1;
+ DBUG_RETURN(-1);
Uint32 len2 = Interpreter::mod4(len);
if(len2 == len){
@@ -1049,64 +1071,64 @@ NdbOperation::branch_col(Uint32 type,
Uint32 tmp = 0;
for (Uint32 i = 0; i < len-len2; i++) {
char* p = (char*)&tmp;
- p[i] = val[len2+i];
+ p[i] = ((char*)val)[len2+i];
}
insertATTRINFO(tmp);
}
theErrorLine++;
- return 0;
+ DBUG_RETURN(0);
}
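
branch_col emits an interpreted-program instruction that compares a column against an inline constant and jumps to a label; the typed wrappers below it only select the comparison code. A hedged usage sketch of the new const void* signature (column id, constant and label are invented, and the key columns are assumed to be defined separately):

    #include <NdbApi.hpp>

    // Sketch only: inside an interpreted update, succeed only when a
    // hypothetical status column equals the given value.
    static int add_status_guard(NdbOperation* op)
    {
      const Uint32 STATUS_COL = 2;          // hypothetical column id
      const Uint32 wanted = 1;              // aligned 4-byte constant

      // jump to label 0 when STATUS_COL == wanted
      if (op->branch_col_eq(STATUS_COL, &wanted, sizeof(wanted), false, 0) == -1)
        return -1;
      if (op->interpret_exit_nok() == -1)   // no match: fail this operation
        return -1;
      if (op->def_label(0) == -1)
        return -1;
      return op->interpret_exit_ok();
    }
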
int
-NdbOperation::branch_col_eq(Uint32 ColId, const char * val, Uint32 len,
+NdbOperation::branch_col_eq(Uint32 ColId, const void * val, Uint32 len,
bool nopad, Uint32 Label){
INT_DEBUG(("branch_col_eq %u %.*s(%u,%d) -> %u", ColId, len, val, len, nopad, Label));
return branch_col(Interpreter::EQ, ColId, val, len, nopad, Label);
}
int
-NdbOperation::branch_col_ne(Uint32 ColId, const char * val, Uint32 len,
+NdbOperation::branch_col_ne(Uint32 ColId, const void * val, Uint32 len,
bool nopad, Uint32 Label){
INT_DEBUG(("branch_col_ne %u %.*s(%u,%d) -> %u", ColId, len, val, len, nopad, Label));
return branch_col(Interpreter::NE, ColId, val, len, nopad, Label);
}
int
-NdbOperation::branch_col_lt(Uint32 ColId, const char * val, Uint32 len,
+NdbOperation::branch_col_lt(Uint32 ColId, const void * val, Uint32 len,
bool nopad, Uint32 Label){
INT_DEBUG(("branch_col_lt %u %.*s(%u,%d) -> %u", ColId, len, val, len, nopad, Label));
return branch_col(Interpreter::LT, ColId, val, len, nopad, Label);
}
int
-NdbOperation::branch_col_le(Uint32 ColId, const char * val, Uint32 len,
+NdbOperation::branch_col_le(Uint32 ColId, const void * val, Uint32 len,
bool nopad, Uint32 Label){
INT_DEBUG(("branch_col_le %u %.*s(%u,%d) -> %u", ColId, len, val, len, nopad, Label));
return branch_col(Interpreter::LE, ColId, val, len, nopad, Label);
}
int
-NdbOperation::branch_col_gt(Uint32 ColId, const char * val, Uint32 len,
+NdbOperation::branch_col_gt(Uint32 ColId, const void * val, Uint32 len,
bool nopad, Uint32 Label){
INT_DEBUG(("branch_col_gt %u %.*s(%u,%d) -> %u", ColId, len, val, len, nopad, Label));
return branch_col(Interpreter::GT, ColId, val, len, nopad, Label);
}
int
-NdbOperation::branch_col_ge(Uint32 ColId, const char * val, Uint32 len,
+NdbOperation::branch_col_ge(Uint32 ColId, const void * val, Uint32 len,
bool nopad, Uint32 Label){
INT_DEBUG(("branch_col_ge %u %.*s(%u,%d) -> %u", ColId, len, val, len, nopad, Label));
return branch_col(Interpreter::GE, ColId, val, len, nopad, Label);
}
int
-NdbOperation::branch_col_like(Uint32 ColId, const char * val, Uint32 len,
+NdbOperation::branch_col_like(Uint32 ColId, const void * val, Uint32 len,
bool nopad, Uint32 Label){
INT_DEBUG(("branch_col_like %u %.*s(%u,%d) -> %u", ColId, len, val, len, nopad, Label));
return branch_col(Interpreter::LIKE, ColId, val, len, nopad, Label);
}
int
-NdbOperation::branch_col_notlike(Uint32 ColId, const char * val, Uint32 len,
- bool nopad, Uint32 Label){
+NdbOperation::branch_col_notlike(Uint32 ColId, const void * val, Uint32 len,
+ bool nopad, Uint32 Label){
INT_DEBUG(("branch_col_notlike %u %.*s(%u,%d) -> %u", ColId,len,val,len,nopad,Label));
return branch_col(Interpreter::NOT_LIKE, ColId, val, len, nopad, Label);
}
diff --git a/ndb/src/ndbapi/NdbOperationSearch.cpp b/ndb/src/ndbapi/NdbOperationSearch.cpp
index 97b2393e5ed..06d8ddd412b 100644
--- a/ndb/src/ndbapi/NdbOperationSearch.cpp
+++ b/ndb/src/ndbapi/NdbOperationSearch.cpp
@@ -31,14 +31,16 @@ Adjust: 971022 UABMNST First version.
#include <NdbOperation.hpp>
#include "NdbApiSignal.hpp"
-#include <NdbConnection.hpp>
+#include <NdbTransaction.hpp>
#include <Ndb.hpp>
#include "NdbImpl.hpp"
#include <NdbOut.hpp>
#include <AttributeHeader.hpp>
#include <signaldata/TcKeyReq.hpp>
+#include <signaldata/KeyInfo.hpp>
#include "NdbDictionaryImpl.hpp"
+#include <md5_hash.hpp>
/******************************************************************************
CondIdType equal(const char* anAttrName, char* aValue, Uint32 aVarKeylen);
@@ -55,13 +57,20 @@ NdbOperation::equal_impl(const NdbColumnImpl* tAttrInfo,
const char* aValuePassed,
Uint32 aVariableKeyLen)
{
- register Uint32 tAttrId;
+ DBUG_ENTER("NdbOperation::equal_impl");
+ DBUG_PRINT("enter", ("col=%s op=%d val=0x%x len=%u",
+ tAttrInfo->m_name.c_str(),
+ theOperationType,
+ aValuePassed, aVariableKeyLen));
+ if (aValuePassed != NULL)
+ DBUG_DUMP("value", (char*)aValuePassed, aVariableKeyLen);
+ register Uint32 tAttrId;
+
Uint32 tData;
Uint32 tKeyInfoPosition;
const char* aValue = aValuePassed;
- Uint32 xfrmData[1024];
- Uint32 tempData[1024];
+ Uint64 tempData[512];
if ((theStatus == OperationDefined) &&
(aValue != NULL) &&
@@ -76,6 +85,8 @@ NdbOperation::equal_impl(const NdbColumnImpl* tAttrInfo,
*****************************************************************************/
tAttrId = tAttrInfo->m_attrId;
tKeyInfoPosition = tAttrInfo->m_keyInfoPos;
+ bool tDistrKey = tAttrInfo->m_distributionKey;
+
Uint32 i = 0;
if (tAttrInfo->m_pk) {
Uint32 tKeyDefined = theTupleKeyDefined[0][2];
@@ -117,44 +128,29 @@ NdbOperation::equal_impl(const NdbColumnImpl* tAttrInfo,
theTupleKeyDefined[i][1] = tKeyInfoPosition;
theTupleKeyDefined[i][2] = true;
+ OperationType tOpType = theOperationType;
Uint32 sizeInBytes = tAttrInfo->m_attrSize * tAttrInfo->m_arraySize;
+
{
- /***************************************************************************
- * Check if the pointer of the value passed is aligned on a 4 byte
- * boundary. If so only assign the pointer to the internal variable
- * aValue. If it is not aligned then we start by copying the value to
- * tempData and use this as aValue instead.
- *****************************************************************************/
+ /************************************************************************
+ * Check if the pointer of the value passed is aligned on a 4 byte
+ * boundary. If so only assign the pointer to the internal variable
+ * aValue. If it is not aligned then we start by copying the value to
+ * tempData and use this as aValue instead.
+ ***********************************************************************/
const int attributeSize = sizeInBytes;
const int slack = sizeInBytes & 3;
-
- if ((((UintPtr)aValue & 3) != 0) || (slack != 0)){
+ const int align = UintPtr(aValue) & 7;
+
+ if (((align & 3) != 0) || (slack != 0) || (tDistrKey && (align != 0)))
+ {
+ ((Uint32*)tempData)[attributeSize >> 2] = 0;
memcpy(&tempData[0], aValue, attributeSize);
aValue = (char*)&tempData[0];
- if(slack != 0) {
- char * tmp = (char*)&tempData[0];
- memset(&tmp[attributeSize], 0, (4 - slack));
- }//if
}//if
}
- const char* aValueToWrite = aValue;
-
- CHARSET_INFO* cs = tAttrInfo->m_cs;
- if (cs != 0) {
- // current limitation: strxfrm does not increase length
- assert(cs->strxfrm_multiply <= 1);
- unsigned n =
- (*cs->coll->strnxfrm)(cs,
- (uchar*)xfrmData, sizeof(xfrmData),
- (const uchar*)aValue, sizeInBytes);
- while (n < sizeInBytes)
- ((uchar*)xfrmData)[n++] = 0x20;
- aValue = (char*)xfrmData;
- }
- Uint32 bitsInLastWord = 8 * (sizeInBytes & 3) ;
Uint32 totalSizeInWords = (sizeInBytes + 3)/4; // Inc. bits in last word
- Uint32 sizeInWords = sizeInBytes / 4; // Exc. bits in last word
if (true){ //tArraySize != 0) {
Uint32 tTupKeyLen = theTupKeyLen;
@@ -190,93 +186,58 @@ NdbOperation::equal_impl(const NdbColumnImpl* tAttrInfo,
}//if
#endif
- int tDistrKey = tAttrInfo->m_distributionKey;
- int tDistrGroup = tAttrInfo->m_distributionGroup;
- OperationType tOpType = theOperationType;
- if ((tDistrKey != 1) && (tDistrGroup != 1)) {
- ;
- } else if (tDistrKey == 1) {
- theDistrKeySize += totalSizeInWords;
- theDistrKeyIndicator = 1;
- } else {
- Uint32 TsizeInBytes = sizeInBytes;
- Uint32 TbyteOrderFix = 0;
- char* TcharByteOrderFix = (char*)&TbyteOrderFix;
- if (tAttrInfo->m_distributionGroupBits == 8) {
- char tFirstChar = aValue[TsizeInBytes - 2];
- char tSecondChar = aValue[TsizeInBytes - 2];
- TcharByteOrderFix[0] = tFirstChar;
- TcharByteOrderFix[1] = tSecondChar;
- TcharByteOrderFix[2] = 0x30;
- TcharByteOrderFix[3] = 0x30;
- theDistrGroupType = 0;
- } else {
- TbyteOrderFix = ((aValue[TsizeInBytes - 2] - 0x30) * 10)
- + (aValue[TsizeInBytes - 1] - 0x30);
- theDistrGroupType = 1;
- }//if
- theDistributionGroup = TbyteOrderFix;
- theDistrGroupIndicator = 1;
- }//if
- /******************************************************************************
+ /**************************************************************************
* If the operation is an insert request and the attribute is stored then
* we also set the value in the stored part through putting the
* information in the ATTRINFO signals.
- *****************************************************************************/
+ *************************************************************************/
if ((tOpType == InsertRequest) ||
(tOpType == WriteRequest)) {
- if (!tAttrInfo->m_indexOnly){
- int dummy_error;
- // invalid data can crash kernel
- if (cs != NULL &&
- (*cs->cset->well_formed_len)(cs,
- aValueToWrite,
- aValueToWrite + sizeInBytes,
- sizeInBytes,
- &dummy_error) != sizeInBytes)
- goto equal_error4;
- Uint32 ahValue;
- const Uint32 sz = totalSizeInWords;
+ Uint32 ahValue;
+ const Uint32 sz = totalSizeInWords;
+
+ // XXX
+ if(m_accessTable == m_currentTable)
+ {
AttributeHeader::init(&ahValue, tAttrId, sz);
- insertATTRINFO( ahValue );
- insertATTRINFOloop((Uint32*)aValueToWrite, sizeInWords);
- if (bitsInLastWord != 0) {
- tData = *(Uint32*)(aValueToWrite + (sizeInWords << 2));
- tData = convertEndian(tData);
- tData = tData & ((1 << bitsInLastWord) - 1);
- tData = convertEndian(tData);
- insertATTRINFO( tData );
- }//if
- }//if
+ }
+ else
+ {
+ assert(m_accessTable->m_index);
+ int attr_id_current_table =
+ m_accessTable->m_index->m_columns[tAttrId]->m_keyInfoPos;
+ AttributeHeader::init(&ahValue, attr_id_current_table, sz);
+ }
+
+ insertATTRINFO( ahValue );
+ insertATTRINFOloop((Uint32*)aValue, sz);
}//if
- /***************************************************************************
+ /**************************************************************************
* Store the Key information in the TCKEYREQ and KEYINFO signals.
- **************************************************************************/
- if (insertKEYINFO(aValue, tKeyInfoPosition,
- totalSizeInWords, bitsInLastWord) != -1) {
- /*************************************************************************
+ *************************************************************************/
+ if (insertKEYINFO(aValue, tKeyInfoPosition, totalSizeInWords) != -1) {
+ /************************************************************************
* Add one to number of tuple key attributes defined.
* If all have been defined then set the operation state to indicate
* that tuple key is defined.
* Thereby no more search conditions are allowed in this version.
- ************************************************************************/
- Uint32 tNoKeysDef = theNoOfTupKeyDefined;
+ ***********************************************************************/
+ Uint32 tNoKeysDef = theNoOfTupKeyLeft - 1;
Uint32 tErrorLine = theErrorLine;
- int tNoTableKeys = m_currentTable->m_noOfKeys;
unsigned char tInterpretInd = theInterpretIndicator;
- tNoKeysDef++;
- theNoOfTupKeyDefined = tNoKeysDef;
+ theNoOfTupKeyLeft = tNoKeysDef;
tErrorLine++;
theErrorLine = tErrorLine;
- if (int(tNoKeysDef) == tNoTableKeys) {
+
+ if (tNoKeysDef == 0) {
if (tOpType == UpdateRequest) {
if (tInterpretInd == 1) {
theStatus = GetValue;
} else {
theStatus = SetValue;
}//if
- return 0;
+ DBUG_RETURN(0);
} else if ((tOpType == ReadRequest) || (tOpType == DeleteRequest) ||
(tOpType == ReadExclusive)) {
theStatus = GetValue;
@@ -287,89 +248,62 @@ NdbOperation::equal_impl(const NdbColumnImpl* tAttrInfo,
assert(c != 0);
if (c->getBlobType()) {
if (getBlobHandle(theNdbCon, c) == NULL)
- return -1;
+ DBUG_RETURN(-1);
}
}
}
- return 0;
+ DBUG_RETURN(0);
} else if ((tOpType == InsertRequest) || (tOpType == WriteRequest)) {
theStatus = SetValue;
- return 0;
+ DBUG_RETURN(0);
} else {
setErrorCodeAbort(4005);
- return -1;
+ DBUG_RETURN(-1);
}//if
+ DBUG_RETURN(0);
}//if
- return 0;
} else {
- return -1;
+ DBUG_RETURN(-1);
}//if
+ DBUG_RETURN(0);
}
-
+
if (aValue == NULL) {
// NULL value in primary key
setErrorCodeAbort(4505);
- return -1;
+ DBUG_RETURN(-1);
}//if
if ( tAttrInfo == NULL ) {
// Attribute name not found in table
setErrorCodeAbort(4004);
- return -1;
+ DBUG_RETURN(-1);
}//if
if (theStatus == GetValue || theStatus == SetValue){
// All pk's defined
setErrorCodeAbort(4225);
- return -1;
+ DBUG_RETURN(-1);
}//if
+
+ ndbout_c("theStatus: %d", theStatus);
// If we come here, set a general errorcode
// and exit
setErrorCodeAbort(4200);
- return -1;
+ DBUG_RETURN(-1);
equal_error1:
setErrorCodeAbort(4205);
- return -1;
+ DBUG_RETURN(-1);
equal_error2:
setErrorCodeAbort(4206);
- return -1;
+ DBUG_RETURN(-1);
equal_error3:
setErrorCodeAbort(4209);
- return -1;
-
- equal_error4:
- setErrorCodeAbort(744);
- return -1;
-}
-
-/******************************************************************************
- * Uint64 setTupleId( void )
- *
- * Return Value: Return > 0: OK
- * Return 0 : setTupleId failed
- * Parameters:
- * Remark:
- *****************************************************************************/
-Uint64
-NdbOperation::setTupleId()
-{
- if (theStatus != OperationDefined)
- {
- return 0;
- }
- Uint64 tTupleId = theNdb->getTupleIdFromNdb(m_currentTable->m_tableId);
- if (tTupleId == ~(Uint64)0){
- setErrorCodeAbort(theNdb->theError.code);
- return 0;
- }
- if (equal((Uint32)0, tTupleId) == -1)
- return 0;
-
- return tTupleId;
+ DBUG_RETURN(-1);
}
/******************************************************************************
@@ -389,8 +323,7 @@ NdbOperation::setTupleId()
int
NdbOperation::insertKEYINFO(const char* aValue,
register Uint32 aStartPosition,
- register Uint32 anAttrSizeInWords,
- register Uint32 anAttrBitsInLastWord)
+ register Uint32 anAttrSizeInWords)
{
NdbApiSignal* tSignal;
NdbApiSignal* tCurrentKEYINFO;
@@ -410,7 +343,7 @@ NdbOperation::insertKEYINFO(const char* aValue,
*****************************************************************************/
tEndPos = aStartPosition + anAttrSizeInWords - 1;
- if ((tEndPos < 9) && (anAttrBitsInLastWord == 0)) {
+ if ((tEndPos < 9)) {
register Uint32 tkeyData = *(Uint32*)aValue;
//TcKeyReq* tcKeyReq = CAST_PTR(TcKeyReq, tTCREQ->getDataPtrSend());
register Uint32* tDataPtr = (Uint32*)aValue;
@@ -451,10 +384,11 @@ NdbOperation::insertKEYINFO(const char* aValue,
setErrorCodeAbort(4001);
return -1;
}
- if (theFirstKEYINFO != NULL)
+ if (theTCREQ->next() != NULL)
theLastKEYINFO->next(tSignal);
else
- theFirstKEYINFO = tSignal;
+ theTCREQ->next(tSignal);
+
theLastKEYINFO = tSignal;
theLastKEYINFO->next(NULL);
theTotalNrOfKeyWordInSignal += 20;
@@ -467,7 +401,7 @@ NdbOperation::insertKEYINFO(const char* aValue,
* this is the first word in a KEYINFO signal. *
*****************************************************************************/
tPosition = aStartPosition;
- tCurrentKEYINFO = theFirstKEYINFO;
+ tCurrentKEYINFO = theTCREQ->next();
/*****************************************************************************
* Start by filling up Key information in the 8 words allocated in the *
@@ -520,39 +454,20 @@ NdbOperation::insertKEYINFO(const char* aValue,
} while (1);
LastWordLabel:
-
-/*****************************************************************************
- * There could be a last word that only contains partial data. This word*
- * will contain zeroes in the rest of the bits since the index expects *
- * a certain number of words and do not care for parts of words. *
- *****************************************************************************/
- if (anAttrBitsInLastWord != 0) {
- tData = *(Uint32*)(aValue + (anAttrSizeInWords - 1) * 4);
- tData = convertEndian(tData);
- tData = tData & ((1 << anAttrBitsInLastWord) - 1);
- tData = convertEndian(tData);
- if (tPosition > 8) {
- tCurrentKEYINFO->setData(tData, signalCounter);
- signalCounter++;
- } else {
- theTCREQ->setData(tData, (12 + tPosition));
- }//if
- }//if
-
return 0;
}
int
NdbOperation::getKeyFromTCREQ(Uint32* data, unsigned size)
{
- assert(m_accessTable != 0 && m_accessTable->m_sizeOfKeysInWords != 0);
- assert(m_accessTable->m_sizeOfKeysInWords == size);
+ assert(m_accessTable != 0 && m_accessTable->m_keyLenInWords != 0);
+ assert(m_accessTable->m_keyLenInWords == size);
unsigned pos = 0;
while (pos < 8 && pos < size) {
data[pos] = theKEYINFOptr[pos];
pos++;
}
- NdbApiSignal* tSignal = theFirstKEYINFO;
+ NdbApiSignal* tSignal = theTCREQ->next();
unsigned n = 0;
while (pos < size) {
if (n == 20) {
@@ -563,3 +478,115 @@ NdbOperation::getKeyFromTCREQ(Uint32* data, unsigned size)
}
return 0;
}
+
+int
+NdbOperation::handle_distribution_key(const Uint64* value, Uint32 len)
+{
+ if(theDistrKeyIndicator_ == 1 ||
+ (theNoOfTupKeyLeft > 0 && m_accessTable->m_noOfDistributionKeys > 1))
+ {
+ return 0;
+ }
+
+ if(m_accessTable->m_noOfDistributionKeys == 1)
+ {
+ setPartitionHash(value, len);
+ }
+ else if(theTCREQ->readSignalNumber() == GSN_TCKEYREQ)
+ {
+ // No support for combined distribution key and scan
+
+ /**
+ * Copy distribution key to linear memory
+ */
+ NdbColumnImpl* const * cols = m_accessTable->m_columns.getBase();
+ Uint32 len = 0;
+ Uint64 tmp[1000];
+
+ Uint32 chunk = 8;
+ Uint32* dst = (Uint32*)tmp;
+ NdbApiSignal* tSignal = theTCREQ;
+ Uint32* src = ((TcKeyReq*)tSignal->getDataPtrSend())->keyInfo;
+ if(tSignal->readSignalNumber() == GSN_SCAN_TABREQ)
+ {
+ tSignal = tSignal->next();
+ src = ((KeyInfo*)tSignal->getDataPtrSend())->keyData;
+ chunk = KeyInfo::DataLength;
+ }
+
+ for(unsigned i = m_accessTable->m_columns.size(); i>0; cols++, i--)
+ {
+ if (!(* cols)->getPrimaryKey())
+ continue;
+
+ NdbColumnImpl* tAttrInfo = * cols;
+ Uint32 sizeInBytes = tAttrInfo->m_attrSize * tAttrInfo->m_arraySize;
+ Uint32 currLen = (sizeInBytes + 3) >> 2;
+ if (tAttrInfo->getDistributionKey())
+ {
+ while (currLen >= chunk)
+ {
+ memcpy(dst, src, 4*chunk);
+ dst += chunk;
+ tSignal = tSignal->next();
+ src = ((KeyInfo*)tSignal->getDataPtrSend())->keyData;
+ currLen -= chunk;
+ chunk = KeyInfo::DataLength;
+ }
+
+ memcpy(dst, src, 4*currLen);
+ dst += currLen;
+ src += currLen;
+ chunk -= currLen;
+ }
+ else
+ {
+ while (currLen >= chunk)
+ {
+ tSignal = tSignal->next();
+ src = ((KeyInfo*)tSignal->getDataPtrSend())->keyData;
+ currLen -= chunk;
+ chunk = KeyInfo::DataLength;
+ }
+
+ src += currLen;
+ chunk -= currLen;
+ }
+ }
+ setPartitionHash(tmp, dst - (Uint32*)tmp);
+ }
+ return 0;
+}
+
+void
+NdbOperation::setPartitionHash(Uint32 value)
+{
+ union {
+ Uint32 tmp32;
+ Uint64 tmp64;
+ };
+
+ tmp32 = value;
+ setPartitionHash(&tmp64, 1);
+}
+
+void
+NdbOperation::setPartitionHash(const Uint64* value, Uint32 len)
+{
+ Uint32 buf[4];
+ md5_hash(buf, value, len);
+ setPartitionId(buf[1]);
+}
+
+void
+NdbOperation::setPartitionId(Uint32 value)
+{
+ theDistributionKey = value;
+ theDistrKeyIndicator_ = 1;
+}
+
+Uint32
+NdbOperation::getPartitionId() const
+{
+ return theDistributionKey;
+}
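
handle_distribution_key above gathers the distribution-key columns into linear memory, hashes them with md5_hash and keeps one word of the digest as the partition hint passed on to setPartitionId. A standalone sketch of the flow with a stand-in hash function:

    #include <cstring>
    #include <vector>

    // Stand-in hash; the code above uses md5_hash() over 64-bit words and
    // keeps one word of the digest.
    static unsigned toy_hash(const unsigned long long* words, unsigned len)
    {
      unsigned long long h = 1469598103934665603ULL;   // FNV-style, illustration
      for (unsigned i = 0; i < len; i++) {
        h ^= words[i];
        h *= 1099511628211ULL;
      }
      return (unsigned)(h >> 32);
    }

    struct KeyPart {
      const void* data;
      unsigned sizeInBytes;
      bool isDistributionKey;
    };

    // Linearise the distribution-key columns (each padded to whole 32-bit
    // words, the total padded to whole 64-bit words) and hash the result.
    static unsigned partition_hint(const std::vector<KeyPart>& parts)
    {
      unsigned byteLen = 0;
      for (unsigned i = 0; i < parts.size(); i++)
        if (parts[i].isDistributionKey)
          byteLen += (parts[i].sizeInBytes + 3) & ~3u;
      if (byteLen == 0)
        return 0;

      std::vector<unsigned long long> linear((byteLen + 7) / 8, 0);
      char* dst = (char*)&linear[0];
      for (unsigned i = 0; i < parts.size(); i++) {
        if (!parts[i].isDistributionKey)
          continue;
        std::memcpy(dst, parts[i].data, parts[i].sizeInBytes);
        dst += (parts[i].sizeInBytes + 3) & ~3u;
      }
      return toy_hash(&linear[0], (unsigned) linear.size());
    }
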
diff --git a/ndb/src/ndbapi/NdbPool.cpp b/ndb/src/ndbapi/NdbPool.cpp
index ba58520c1dc..a8263f564f1 100644
--- a/ndb/src/ndbapi/NdbPool.cpp
+++ b/ndb/src/ndbapi/NdbPool.cpp
@@ -21,14 +21,16 @@
static NdbPool* m_pool = 0;
bool
-create_instance(Uint32 max_ndb_objects,
+create_instance(Ndb_cluster_connection* cc,
+ Uint32 max_ndb_objects,
Uint32 no_conn_obj,
Uint32 init_no_ndb_objects)
{
if (m_pool != NULL) {
return false;
}
- m_pool = NdbPool::create_instance(max_ndb_objects,
+ m_pool = NdbPool::create_instance(cc,
+ max_ndb_objects,
no_conn_obj,
init_no_ndb_objects);
if (m_pool == NULL) {
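
After this change the pool is tied to an Ndb_cluster_connection and every Ndb object it hands out is created against that connection. A hedged usage sketch, spelling out the default arguments visible in NdbPoolImpl.hpp further down:

    #include "NdbPoolImpl.hpp"   // declares NdbPool::create_instance

    // Sketch: bind the shared Ndb object pool to one cluster connection at
    // startup. The numeric arguments are the header defaults, spelled out
    // here only for illustration.
    static NdbPool* init_ndb_pool(Ndb_cluster_connection* conn)
    {
      return NdbPool::create_instance(conn,
                                      240,  // max_ndb_objects
                                      4,    // no_conn_obj
                                      8);   // init_no_ndb_objects
    }
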
diff --git a/ndb/src/ndbapi/NdbPoolImpl.cpp b/ndb/src/ndbapi/NdbPoolImpl.cpp
index 131edc74246..32e0a6f1410 100644
--- a/ndb/src/ndbapi/NdbPoolImpl.cpp
+++ b/ndb/src/ndbapi/NdbPoolImpl.cpp
@@ -20,7 +20,8 @@ NdbMutex *NdbPool::pool_mutex = NULL;
NdbPool *the_pool = NULL;
NdbPool*
-NdbPool::create_instance(Uint32 max_ndb_obj,
+NdbPool::create_instance(Ndb_cluster_connection* cc,
+ Uint32 max_ndb_obj,
Uint32 no_conn_obj,
Uint32 init_no_ndb_objects)
{
@@ -32,7 +33,7 @@ NdbPool::create_instance(Uint32 max_ndb_obj,
if (the_pool != NULL) {
a_pool = NULL;
} else {
- the_pool = new NdbPool(max_ndb_obj, no_conn_obj);
+ the_pool = new NdbPool(cc, max_ndb_obj, no_conn_obj);
if (!the_pool->init(init_no_ndb_objects)) {
delete the_pool;
the_pool = NULL;
@@ -76,7 +77,8 @@ NdbPool::initPoolMutex()
return ret_result;
}
-NdbPool::NdbPool(Uint32 max_no_objects,
+NdbPool::NdbPool(Ndb_cluster_connection* cc,
+ Uint32 max_no_objects,
Uint32 no_conn_objects)
{
if (no_conn_objects > 1024) {
@@ -101,6 +103,7 @@ NdbPool::NdbPool(Uint32 max_no_objects,
m_output_queue = 0;
m_input_queue = 0;
m_signal_count = 0;
+ m_cluster_connection = cc;
}
NdbPool::~NdbPool()
@@ -294,9 +297,9 @@ NdbPool::allocate_ndb(Uint32 &id,
return false;
}
if (a_schema_name) {
- a_ndb = new Ndb(a_schema_name, a_catalog_name);
+ a_ndb = new Ndb(m_cluster_connection, a_schema_name, a_catalog_name);
} else {
- a_ndb = new Ndb("");
+ a_ndb = new Ndb(m_cluster_connection, "");
}
if (a_ndb == NULL) {
return false;
diff --git a/ndb/src/ndbapi/NdbPoolImpl.hpp b/ndb/src/ndbapi/NdbPoolImpl.hpp
index af6cf4708cf..cd36f30e90b 100644
--- a/ndb/src/ndbapi/NdbPoolImpl.hpp
+++ b/ndb/src/ndbapi/NdbPoolImpl.hpp
@@ -92,7 +92,8 @@ class NdbPool {
Uint16 prev_db_object;
};
public:
- static NdbPool* create_instance(Uint32 max_ndb_objects = 240,
+ static NdbPool* create_instance(Ndb_cluster_connection*,
+ Uint32 max_ndb_objects = 240,
Uint32 no_conn_obj = 4,
Uint32 init_no_ndb_objects = 8);
static void drop_instance();
@@ -104,7 +105,8 @@ class NdbPool {
bool init(Uint32 initial_no_of_ndb_objects = 8);
void release_all();
static bool initPoolMutex();
- NdbPool(Uint32 max_no_of_ndb_objects, Uint32 no_conn_objects);
+ NdbPool(Ndb_cluster_connection*,
+ Uint32 max_no_of_ndb_objects, Uint32 no_conn_objects);
~NdbPool();
/*
We have three lists:
@@ -158,5 +160,7 @@ class NdbPool {
Uint16 m_input_queue;
Uint16 m_output_queue;
Uint16 m_signal_count;
+
+ Ndb_cluster_connection * m_cluster_connection;
};
#endif
diff --git a/ndb/src/ndbapi/NdbRecAttr.cpp b/ndb/src/ndbapi/NdbRecAttr.cpp
index f993c652bf9..5201c6c9c04 100644
--- a/ndb/src/ndbapi/NdbRecAttr.cpp
+++ b/ndb/src/ndbapi/NdbRecAttr.cpp
@@ -15,17 +15,6 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-/************************************************************************************************
-Name: NdbRecAttr.C
-Include:
-Link:
-Author: UABRONM Mikael Ronström UAB/B/SD
-Date: 971206
-Version: 0.1
-Description: Interface between TIS and NDB
-Documentation:
-Adjust: 971206 UABRONM First version
-************************************************************************************************/
#include <ndb_global.h>
#include <NdbOut.hpp>
#include <NdbRecAttr.hpp>
@@ -148,6 +137,40 @@ NdbRecAttr::receive_data(const Uint32 * data, Uint32 sz){
return false;
}
+static void
+ndbrecattr_print_string(NdbOut& out, const char *type,
+ const char *aref, unsigned sz)
+{
+ const unsigned char* ref = (const unsigned char*)aref;
+ int i, len, printable= 1;
+ // trailing zeroes are not printed
+ for (i=sz-1; i >= 0; i--)
+ if (ref[i] == 0) sz--;
+ else break;
+ if (sz == 0) return; // empty
+
+ for (len=0; len < (int)sz && ref[i] != 0; len++)
+ if (printable && !isprint((int)ref[i]))
+ printable= 0;
+
+ if (printable)
+ out.print("%.*s", len, ref);
+ else
+ {
+ out.print("0x");
+ for (i=0; i < len; i++)
+ out.print("%02X", (int)ref[i]);
+ }
+ if (len != (int)sz)
+ {
+ out.print("[");
+ for (i= len+1; ref[i] != 0; i++)
+ out.print("%u]",len-i);
+ assert((int)sz > i);
+ ndbrecattr_print_string(out,type,aref+i,sz-i);
+ }
+}
+
NdbOut& operator<<(NdbOut& out, const NdbRecAttr &r)
{
if (r.isNULL())
@@ -171,6 +194,9 @@ NdbOut& operator<<(NdbOut& out, const NdbRecAttr &r)
case NdbDictionary::Column::Bigunsigned:
out << r.u_64_value();
break;
+ case NdbDictionary::Column::Bit:
+ out << hex << "H'" << r.u_32_value() << dec;
+ break;
case NdbDictionary::Column::Unsigned:
out << r.u_32_value();
break;
@@ -192,17 +218,21 @@ NdbOut& operator<<(NdbOut& out, const NdbRecAttr &r)
case NdbDictionary::Column::Tinyint:
out << (int) r.char_value();
break;
+ case NdbDictionary::Column::Binary:
+ ndbrecattr_print_string(out,"Binary",r.aRef(),r.arraySize());
+ j = r.arraySize();
+ break;
case NdbDictionary::Column::Char:
- out.print("%.*s", r.arraySize(), r.aRef());
+ ndbrecattr_print_string(out,"Char",r.aRef(),r.arraySize());
j = length;
break;
case NdbDictionary::Column::Varchar:
- {
- short len = ntohs(r.u_short_value());
- out.print("%.*s", len, r.aRef()+2);
- }
- j = length;
- break;
+ {
+ unsigned len = *(const unsigned char*)r.aRef();
+ ndbrecattr_print_string(out,"Varchar", r.aRef()+1,len);
+ j = length;
+ }
+ break;
case NdbDictionary::Column::Float:
out << r.float_value();
break;
@@ -221,6 +251,10 @@ NdbOut& operator<<(NdbOut& out, const NdbRecAttr &r)
out.print("%.*s", len, r.aRef());
}
break;
+ case NdbDictionary::Column::Decimal:
+ case NdbDictionary::Column::Decimalunsigned:
+ goto unknown; // TODO
+ break;
// for dates cut-and-paste from field.cc
case NdbDictionary::Column::Datetime:
{
@@ -323,6 +357,14 @@ NdbOut& operator<<(NdbOut& out, const NdbRecAttr &r)
j = length;
}
break;
+ case NdbDictionary::Column::Longvarchar:
+ {
+ unsigned len = uint2korr(r.aRef());
+ ndbrecattr_print_string(out,"Longvarchar", r.aRef()+2,len);
+ j = length;
+ }
+ break;
+ unknown:
default: /* no print functions for the rest, just print type */
out << (int) r.getType();
j = length;
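
The new ndbrecattr_print_string helper prints Char/Binary data as text when every byte is printable and falls back to a hex dump otherwise, with trailing zero padding trimmed; the Varchar and Longvarchar cases now read the 1- and 2-byte length prefixes directly instead of ntohs() on a short. Below is a self-contained sketch of the same printable-or-hex rule, simplified in that the embedded-zero handling of the function above is omitted.

    #include <cctype>
    #include <cstdio>

    static void print_printable_or_hex(const unsigned char *buf, unsigned sz)
    {
      while (sz > 0 && buf[sz - 1] == 0)        // trim trailing zero padding
        sz--;
      bool printable = true;
      for (unsigned i = 0; i < sz; i++)
        if (!isprint((int)buf[i])) { printable = false; break; }
      if (printable)
        printf("%.*s", (int)sz, (const char *)buf);
      else {
        printf("0x");
        for (unsigned i = 0; i < sz; i++)
          printf("%02X", (int)buf[i]);
      }
    }
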
diff --git a/ndb/src/ndbapi/NdbReceiver.cpp b/ndb/src/ndbapi/NdbReceiver.cpp
index cad247512b2..df16ae66915 100644
--- a/ndb/src/ndbapi/NdbReceiver.cpp
+++ b/ndb/src/ndbapi/NdbReceiver.cpp
@@ -20,7 +20,7 @@
#include "NdbDictionaryImpl.hpp"
#include <NdbRecAttr.hpp>
#include <AttributeHeader.hpp>
-#include <NdbConnection.hpp>
+#include <NdbTransaction.hpp>
#include <TransporterFacade.hpp>
#include <signaldata/TcKeyConf.hpp>
@@ -140,7 +140,10 @@ NdbReceiver::calculate_batch_size(Uint32 key_size,
}
void
-NdbReceiver::do_get_value(NdbReceiver * org, Uint32 rows, Uint32 key_size){
+NdbReceiver::do_get_value(NdbReceiver * org,
+ Uint32 rows,
+ Uint32 key_size,
+ Uint32 range_no){
if(rows > m_defined_rows){
delete[] m_rows;
m_defined_rows = rows;
@@ -155,7 +158,7 @@ NdbReceiver::do_get_value(NdbReceiver * org, Uint32 rows, Uint32 key_size){
key.m_attrSize = 4;
key.m_nullable = true; // So that receive works w.r.t KEYINFO20
}
- m_key_info = key_size;
+ m_hidden_count = (key_size ? 1 : 0) + range_no ;
for(Uint32 i = 0; i<rows; i++){
NdbRecAttr * prev = theCurrentRecAttr;
@@ -167,6 +170,12 @@ NdbReceiver::do_get_value(NdbReceiver * org, Uint32 rows, Uint32 key_size){
return ; // -1
}
+ if(range_no &&
+ !getValue(&NdbColumnImpl::getImpl(* NdbDictionary::Column::RANGE_NO),0))
+ {
+ abort();
+ }
+
NdbRecAttr* tRecAttr = org->theFirstRecAttr;
while(tRecAttr != 0){
if(getValue(&NdbColumnImpl::getImpl(*tRecAttr->m_column), (char*)0) != 0)
@@ -192,14 +201,14 @@ NdbReceiver::do_get_value(NdbReceiver * org, Uint32 rows, Uint32 key_size){
return;
}
-void
+NdbRecAttr*
NdbReceiver::copyout(NdbReceiver & dstRec){
- NdbRecAttr* src = m_rows[m_current_row++];
- NdbRecAttr* dst = dstRec.theFirstRecAttr;
- Uint32 tmp = m_key_info;
- if(tmp > 0){
+ NdbRecAttr *src = m_rows[m_current_row++];
+ NdbRecAttr *dst = dstRec.theFirstRecAttr;
+ NdbRecAttr *start = src;
+ Uint32 tmp = m_hidden_count;
+ while(tmp--)
src = src->next();
- }
while(dst){
Uint32 len = ((src->theAttrSize * src->theArraySize)+3)/4;
@@ -207,6 +216,8 @@ NdbReceiver::copyout(NdbReceiver & dstRec){
src = src->next();
dst = dst->next();
}
+
+ return start;
}
int
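
With range_no support a received row can start with up to two hidden attributes (the KEYINFO20 pseudo column when key info is requested, and RANGE_NO when SF_ReadRangeNo is set); copyout() now skips m_hidden_count of them when copying user values and returns the head of the row so the hidden attributes stay reachable. A stand-alone sketch of that skip-then-copy pattern; LinkedAttr is a stand-in type, not part of the API.

    struct LinkedAttr { unsigned value; LinkedAttr *next; };

    // Returns the head of the row (hidden attrs included) while copying only
    // the user-visible attributes, mirroring NdbReceiver::copyout() above.
    LinkedAttr *copy_visible(LinkedAttr *row, unsigned hidden_count,
                             unsigned *dst, unsigned dst_len)
    {
      LinkedAttr *start = row;
      while (hidden_count-- && row != NULL)
        row = row->next;                        // skip KEYINFO20 / RANGE_NO
      for (unsigned i = 0; i < dst_len && row != NULL; i++, row = row->next)
        dst[i] = row->value;
      return start;
    }
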
diff --git a/ndb/src/ndbapi/NdbResultSet.cpp b/ndb/src/ndbapi/NdbResultSet.cpp
deleted file mode 100644
index 87b304126ba..00000000000
--- a/ndb/src/ndbapi/NdbResultSet.cpp
+++ /dev/null
@@ -1,103 +0,0 @@
-/* Copyright (C) 2003 MySQL AB
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-
-/*****************************************************************************
- * Name: NdbResultSet.cpp
- * Include:
- * Link:
- * Author: UABMASD Martin Sköld INN/V Alzato
- * Date: 2002-04-01
- * Version: 0.1
- * Description: Cursor class
- * Documentation:
- * Adjust: 2002-04-01 UABMASD First version.
- ****************************************************************************/
-
-#include <Ndb.hpp>
-#include <NdbConnection.hpp>
-#include <NdbResultSet.hpp>
-#include <NdbBlob.hpp>
-
-NdbResultSet::NdbResultSet(NdbScanOperation *owner)
-: m_operation(owner)
-{
-}
-
-NdbResultSet::~NdbResultSet()
-{
-}
-
-void NdbResultSet::init()
-{
-}
-
-int NdbResultSet::nextResult(bool fetchAllowed, bool forceSend)
-{
- int res;
- if ((res = m_operation->nextResult(fetchAllowed, forceSend)) == 0) {
- // handle blobs
- NdbBlob* tBlob = m_operation->theBlobList;
- while (tBlob != 0) {
- if (tBlob->atNextResult() == -1)
- return -1;
- tBlob = tBlob->theNext;
- }
- /*
- * Flush blob part ops on behalf of user because
- * - nextResult is analogous to execute(NoCommit)
- * - user is likely to want blob value before next execute
- */
- if (m_operation->m_transConnection->executePendingBlobOps() == -1)
- return -1;
- return 0;
- }
- return res;
-}
-
-void NdbResultSet::close(bool forceSend)
-{
- m_operation->closeScan(forceSend, true);
-}
-
-NdbOperation*
-NdbResultSet::updateTuple(){
- return updateTuple(m_operation->m_transConnection);
-}
-
-NdbOperation*
-NdbResultSet::updateTuple(NdbConnection* takeOverTrans){
- return m_operation->takeOverScanOp(NdbOperation::UpdateRequest,
- takeOverTrans);
-}
-
-int
-NdbResultSet::deleteTuple(){
- return deleteTuple(m_operation->m_transConnection);
-}
-
-int
-NdbResultSet::deleteTuple(NdbConnection * takeOverTrans){
- void * res = m_operation->takeOverScanOp(NdbOperation::DeleteRequest,
- takeOverTrans);
- if(res == 0)
- return -1;
- return 0;
-}
-
-int
-NdbResultSet::restart(bool forceSend){
- return m_operation->restart(forceSend);
-}
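
With NdbResultSet removed, its nextResult/close/takeover behaviour lives on NdbScanOperation itself (the blob flushing moves into NdbScanOperation::nextResult, shown further below). A caller-side sketch of the resulting scan flow; the table and column names and the plain NoCommit enum are illustrative assumptions, not defined by this patch.

    #include <cstdio>
    #include <NdbApi.hpp>

    int scan_table(Ndb *ndb)
    {
      NdbTransaction *trans = ndb->startTransaction();
      if (trans == NULL) return -1;

      NdbScanOperation *op = trans->getNdbScanOperation("MY_TABLE");
      if (op == NULL || op->readTuples(NdbOperation::LM_Read) != 0) {
        ndb->closeTransaction(trans); return -1;
      }
      NdbRecAttr *val = op->getValue("MY_COL");
      if (val == NULL || trans->execute(NoCommit) != 0) {
        ndb->closeTransaction(trans); return -1;
      }

      int rc;
      while ((rc = op->nextResult(true)) == 0)   // was rs->nextResult()
        printf("%u\n", (unsigned)val->u_32_value());

      op->close();                               // was rs->close()
      ndb->closeTransaction(trans);
      return (rc == 1) ? 0 : -1;                 // 1 == end of scan
    }
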
diff --git a/ndb/src/ndbapi/NdbScanFilter.cpp b/ndb/src/ndbapi/NdbScanFilter.cpp
index 0c851427ba5..b39fd10fe95 100644
--- a/ndb/src/ndbapi/NdbScanFilter.cpp
+++ b/ndb/src/ndbapi/NdbScanFilter.cpp
@@ -48,11 +48,8 @@ public:
int cond_col(Interpreter::UnaryCondition, Uint32 attrId);
- template<typename T>
- int cond_col_const(Interpreter::BinaryCondition, Uint32 attrId, T value);
-
int cond_col_const(Interpreter::BinaryCondition, Uint32 attrId,
- const char * value, Uint32 len, bool nopad);
+ const void * value, Uint32 len);
};
const Uint32 LabelExit = ~0;
@@ -247,68 +244,7 @@ NdbScanFilter::isfalse(){
typedef int (NdbOperation:: * Branch1)(Uint32, Uint32 label);
-typedef int (NdbOperation:: * Branch2)(Uint32, Uint32, Uint32 label);
-typedef int (NdbOperation:: * StrBranch2)(Uint32, const char*,Uint32,bool,Uint32);
-
-struct tab {
- Branch2 m_branches[5];
-};
-
-static const tab table[] = {
- /**
- * EQ (AND, OR, NAND, NOR)
- */
- { { 0,
- &NdbOperation::branch_ne,
- &NdbOperation::branch_eq,
- &NdbOperation::branch_eq,
- &NdbOperation::branch_ne } }
-
- /**
- * NEQ
- */
- ,{ { 0,
- &NdbOperation::branch_eq,
- &NdbOperation::branch_ne,
- &NdbOperation::branch_ne,
- &NdbOperation::branch_eq } }
-
- /**
- * LT
- */
- ,{ { 0,
- &NdbOperation::branch_le,
- &NdbOperation::branch_gt,
- &NdbOperation::branch_gt,
- &NdbOperation::branch_le } }
-
- /**
- * LE
- */
- ,{ { 0,
- &NdbOperation::branch_lt,
- &NdbOperation::branch_ge,
- &NdbOperation::branch_ge,
- &NdbOperation::branch_lt } }
-
- /**
- * GT
- */
- ,{ { 0,
- &NdbOperation::branch_ge,
- &NdbOperation::branch_lt,
- &NdbOperation::branch_lt,
- &NdbOperation::branch_ge } }
-
- /**
- * GE
- */
- ,{ { 0,
- &NdbOperation::branch_gt,
- &NdbOperation::branch_le,
- &NdbOperation::branch_le,
- &NdbOperation::branch_gt } }
-};
+typedef int (NdbOperation:: * StrBranch2)(Uint32, const void*, Uint32, bool, Uint32);
struct tab2 {
Branch1 m_branches[5];
@@ -334,134 +270,9 @@ static const tab2 table2[] = {
&NdbOperation::branch_col_eq_null } }
};
-const int tab_sz = sizeof(table)/sizeof(table[0]);
const int tab2_sz = sizeof(table2)/sizeof(table2[0]);
int
-matchType(const NdbDictionary::Column * col){
- return 1;
-}
-
-template<typename T> int load_const(NdbOperation* op, T value, Uint32 reg);
-
-template<>
-int
-load_const(NdbOperation* op, Uint32 value, Uint32 reg){
- return op->load_const_u32(reg, value);
-}
-
-template<>
-int
-load_const(NdbOperation* op, Uint64 value, Uint32 reg){
- return op->load_const_u64(reg, value);
-}
-
-template<typename T>
-int
-NdbScanFilterImpl::cond_col_const(Interpreter::BinaryCondition op,
- Uint32 AttrId, T value){
-
- if(op < 0 || op >= tab_sz){
- m_operation->setErrorCodeAbort(4262);
- return -1;
- }
-
- if(m_current.m_group < NdbScanFilter::AND ||
- m_current.m_group > NdbScanFilter::NOR){
- m_operation->setErrorCodeAbort(4260);
- return -1;
- }
-
- Branch2 branch = table[op].m_branches[m_current.m_group];
- const NdbDictionary::Column * col =
- m_operation->m_currentTable->getColumn(AttrId);
-
- if(col == 0){
- m_operation->setErrorCodeAbort(4261);
- return -1;
- }
-
- if(!matchType(col)){
- /**
- * Code not reached
- */
- return -1;
- }
-
- if(m_latestAttrib != AttrId){
- m_operation->read_attr(&NdbColumnImpl::getImpl(* col), 4);
- m_latestAttrib = AttrId;
- }
-
- load_const<T>(m_operation, value, 5);
- (m_operation->* branch)(4, 5, m_current.m_ownLabel);
-
- return 0;
-}
-
-int
-NdbScanFilter::eq(int AttrId, Uint32 value){
- return m_impl.cond_col_const(Interpreter::EQ, AttrId, value);
-}
-
-int
-NdbScanFilter::ne(int AttrId, Uint32 value){
- return m_impl.cond_col_const(Interpreter::NE, AttrId, value);
-}
-
-int
-NdbScanFilter::lt(int AttrId, Uint32 value){
- return m_impl.cond_col_const(Interpreter::LT, AttrId, value);
-}
-
-int
-NdbScanFilter::le(int AttrId, Uint32 value){
- return m_impl.cond_col_const(Interpreter::LE, AttrId, value);
-}
-
-int
-NdbScanFilter::gt(int AttrId, Uint32 value){
- return m_impl.cond_col_const(Interpreter::GT, AttrId, value);
-}
-
-int
-NdbScanFilter::ge(int AttrId, Uint32 value){
- return m_impl.cond_col_const(Interpreter::GE, AttrId, value);
-}
-
-
-int
-NdbScanFilter::eq(int AttrId, Uint64 value){
- return m_impl.cond_col_const(Interpreter::EQ, AttrId, value);
-}
-
-int
-NdbScanFilter::ne(int AttrId, Uint64 value){
- return m_impl.cond_col_const(Interpreter::NE, AttrId, value);
-}
-
-int
-NdbScanFilter::lt(int AttrId, Uint64 value){
- return m_impl.cond_col_const(Interpreter::LT, AttrId, value);
-}
-
-int
-NdbScanFilter::le(int AttrId, Uint64 value){
- return m_impl.cond_col_const(Interpreter::LE, AttrId, value);
-}
-
-int
-NdbScanFilter::gt(int AttrId, Uint64 value){
- return m_impl.cond_col_const(Interpreter::GT, AttrId, value);
-}
-
-int
-NdbScanFilter::ge(int AttrId, Uint64 value){
- return m_impl.cond_col_const(Interpreter::GE, AttrId, value);
-}
-
-
-int
NdbScanFilterImpl::cond_col(Interpreter::UnaryCondition op, Uint32 AttrId){
if(op < 0 || op >= tab2_sz){
@@ -570,11 +381,10 @@ static const tab3 table3[] = {
const int tab3_sz = sizeof(table3)/sizeof(table3[0]);
-
int
NdbScanFilterImpl::cond_col_const(Interpreter::BinaryCondition op,
Uint32 AttrId,
- const char * value, Uint32 len, bool nopad){
+ const void * value, Uint32 len){
if(op < 0 || op >= tab3_sz){
m_operation->setErrorCodeAbort(4260);
return -1;
@@ -595,49 +405,35 @@ NdbScanFilterImpl::cond_col_const(Interpreter::BinaryCondition op,
return -1;
}
- (m_operation->* branch)(AttrId, value, len, nopad, m_current.m_ownLabel);
- return 0;
-}
-
-int
-NdbScanFilter::eq(int ColId, const char * val, Uint32 len, bool nopad){
- return m_impl.cond_col_const(Interpreter::EQ, ColId, val, len, nopad);
-}
-
-int
-NdbScanFilter::ne(int ColId, const char * val, Uint32 len, bool nopad){
- return m_impl.cond_col_const(Interpreter::NE, ColId, val, len, nopad);
-}
-
-int
-NdbScanFilter::lt(int ColId, const char * val, Uint32 len, bool nopad){
- return m_impl.cond_col_const(Interpreter::LT, ColId, val, len, nopad);
+ int ret = (m_operation->* branch)(AttrId, value, len, false, m_current.m_ownLabel);
+ return ret;
}
int
-NdbScanFilter::le(int ColId, const char * val, Uint32 len, bool nopad){
- return m_impl.cond_col_const(Interpreter::LE, ColId, val, len, nopad);
-}
-
-int
-NdbScanFilter::gt(int ColId, const char * val, Uint32 len, bool nopad){
- return m_impl.cond_col_const(Interpreter::GT, ColId, val, len, nopad);
-}
-
-int
-NdbScanFilter::ge(int ColId, const char * val, Uint32 len, bool nopad){
- return m_impl.cond_col_const(Interpreter::GE, ColId, val, len, nopad);
-}
-
-int
-NdbScanFilter::like(int ColId, const char * val, Uint32 len, bool nopad){
- return m_impl.cond_col_const(Interpreter::LIKE, ColId, val, len, nopad);
-}
+NdbScanFilter::cmp(BinaryCondition cond, int ColId,
+ const void *val, Uint32 len)
+{
+ switch(cond){
+ case COND_LE:
+ return m_impl.cond_col_const(Interpreter::LE, ColId, val, len);
+ case COND_LT:
+ return m_impl.cond_col_const(Interpreter::LT, ColId, val, len);
+ case COND_GE:
+ return m_impl.cond_col_const(Interpreter::GE, ColId, val, len);
+ case COND_GT:
+ return m_impl.cond_col_const(Interpreter::GT, ColId, val, len);
+ case COND_EQ:
+ return m_impl.cond_col_const(Interpreter::EQ, ColId, val, len);
+ case COND_NE:
+ return m_impl.cond_col_const(Interpreter::NE, ColId, val, len);
+ case COND_LIKE:
+ return m_impl.cond_col_const(Interpreter::LIKE, ColId, val, len);
+ case COND_NOT_LIKE:
+ return m_impl.cond_col_const(Interpreter::NOT_LIKE, ColId, val, len);
+ }
+ return -1;
+}
-int
-NdbScanFilter::notlike(int ColId, const char * val, Uint32 len, bool nopad){
- return m_impl.cond_col_const(Interpreter::NOT_LIKE, ColId, val, len, nopad);
-}
#if 0
int
@@ -778,10 +574,4 @@ main(void){
#endif
template class Vector<NdbScanFilterImpl::State>;
-#if __SUNPRO_CC != 0x560
-#ifndef _FORTEC_
-template int NdbScanFilterImpl::cond_col_const(Interpreter::BinaryCondition, Uint32 attrId, Uint32);
-template int NdbScanFilterImpl::cond_col_const(Interpreter::BinaryCondition, Uint32 attrId, Uint64);
-#endif
-#endif
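
The typed eq/ne/lt/le/gt/ge/like/notlike entry points are folded into the single NdbScanFilter::cmp(BinaryCondition, column, value, length) shown in the switch above. A brief sketch of a filter built through the new call; the column ids and the sizeof-based length for Uint32 constants are illustrative.

    #include <NdbApi.hpp>
    #include <NdbScanFilter.hpp>

    int add_filter(NdbScanOperation *op)
    {
      NdbScanFilter filter(op);
      Uint32 limit = 100;
      if (filter.begin(NdbScanFilter::AND) < 0 ||
          // previously: filter.lt(0, (Uint32)100)
          filter.cmp(NdbScanFilter::COND_LT, 0, &limit, sizeof(limit)) < 0 ||
          // previously: filter.ne(1, (Uint32)100)
          filter.cmp(NdbScanFilter::COND_NE, 1, &limit, sizeof(limit)) < 0 ||
          filter.end() < 0)
        return -1;
      return 0;
    }
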
diff --git a/ndb/src/ndbapi/NdbScanOperation.cpp b/ndb/src/ndbapi/NdbScanOperation.cpp
index fc5a22cce17..e0a480e02f7 100644
--- a/ndb/src/ndbapi/NdbScanOperation.cpp
+++ b/ndb/src/ndbapi/NdbScanOperation.cpp
@@ -18,11 +18,11 @@
#include <Ndb.hpp>
#include <NdbScanOperation.hpp>
#include <NdbIndexScanOperation.hpp>
-#include <NdbConnection.hpp>
-#include <NdbResultSet.hpp>
+#include <NdbTransaction.hpp>
#include "NdbApiSignal.hpp"
#include <NdbOut.hpp>
#include "NdbDictionaryImpl.hpp"
+#include <NdbBlob.hpp>
#include <NdbRecAttr.hpp>
#include <NdbReceiver.hpp>
@@ -39,7 +39,6 @@
NdbScanOperation::NdbScanOperation(Ndb* aNdb) :
NdbOperation(aNdb),
- m_resultSet(0),
m_transConnection(NULL)
{
theParallelism = 0;
@@ -60,24 +59,11 @@ NdbScanOperation::~NdbScanOperation()
theNdb->releaseNdbScanRec(m_receivers[i]);
}
delete[] m_array;
- if (m_resultSet)
- delete m_resultSet;
}
-NdbResultSet*
-NdbScanOperation::getResultSet()
-{
- if (!m_resultSet)
- m_resultSet = new NdbResultSet(this);
-
- return m_resultSet;
-}
-
-
-
void
NdbScanOperation::setErrorCode(int aErrorCode){
- NdbConnection* tmp = theNdbCon;
+ NdbTransaction* tmp = theNdbCon;
theNdbCon = m_transConnection;
NdbOperation::setErrorCode(aErrorCode);
theNdbCon = tmp;
@@ -85,7 +71,7 @@ NdbScanOperation::setErrorCode(int aErrorCode){
void
NdbScanOperation::setErrorCodeAbort(int aErrorCode){
- NdbConnection* tmp = theNdbCon;
+ NdbTransaction* tmp = theNdbCon;
theNdbCon = m_transConnection;
NdbOperation::setErrorCodeAbort(aErrorCode);
theNdbCon = tmp;
@@ -100,18 +86,21 @@ NdbScanOperation::setErrorCodeAbort(int aErrorCode){
* Remark: Initiates operation record after allocation.
*****************************************************************************/
int
-NdbScanOperation::init(const NdbTableImpl* tab, NdbConnection* myConnection)
+NdbScanOperation::init(const NdbTableImpl* tab, NdbTransaction* myConnection)
{
m_transConnection = myConnection;
//NdbConnection* aScanConnection = theNdb->startTransaction(myConnection);
- NdbConnection* aScanConnection = theNdb->hupp(myConnection);
+ theNdb->theRemainingStartTransactions++; // will be checked in hupp...
+ NdbTransaction* aScanConnection = theNdb->hupp(myConnection);
if (!aScanConnection){
+ theNdb->theRemainingStartTransactions--;
setErrorCodeAbort(theNdb->getNdbError().code);
return -1;
}
// NOTE! The hupped trans becomes the owner of the operation
if(NdbOperation::init(tab, aScanConnection) != 0){
+ theNdb->theRemainingStartTransactions--;
return -1;
}
@@ -120,16 +109,17 @@ NdbScanOperation::init(const NdbTableImpl* tab, NdbConnection* myConnection)
theStatus = GetValue;
theOperationType = OpenScanRequest;
theNdbCon->theMagicNumber = 0xFE11DF;
-
+ theNoOfTupKeyLeft = tab->m_noOfDistributionKeys;
+ m_read_range_no = 0;
return 0;
}
-NdbResultSet* NdbScanOperation::readTuples(NdbScanOperation::LockMode lm,
- Uint32 batch,
- Uint32 parallel)
+int
+NdbScanOperation::readTuples(NdbScanOperation::LockMode lm,
+ Uint32 scan_flags,
+ Uint32 parallel)
{
- m_ordered = 0;
-
+ m_ordered = m_descending = false;
Uint32 fragCount = m_currentTable->m_fragmentCount;
if (parallel > fragCount || parallel == 0) {
@@ -142,7 +132,7 @@ NdbResultSet* NdbScanOperation::readTuples(NdbScanOperation::LockMode lm,
// 3. theScanOp contains a NdbScanOperation
if (theNdbCon->theScanningOp != NULL){
setErrorCode(4605);
- return 0;
+ return -1;
}
theNdbCon->theScanningOp = this;
@@ -167,14 +157,14 @@ NdbResultSet* NdbScanOperation::readTuples(NdbScanOperation::LockMode lm,
break;
default:
setErrorCode(4003);
- return 0;
+ return -1;
}
m_keyInfo = lockExcl ? 1 : 0;
- bool range = false;
- if (m_accessTable->m_indexType == NdbDictionary::Index::OrderedIndex ||
- m_accessTable->m_indexType == NdbDictionary::Index::UniqueOrderedIndex){
+ bool rangeScan = false;
+ if (m_accessTable->m_indexType == NdbDictionary::Index::OrderedIndex)
+ {
if (m_currentTable == m_accessTable){
// Old way of scanning indexes, should not be allowed
m_currentTable = theNdb->theDictionary->
@@ -185,22 +175,27 @@ NdbResultSet* NdbScanOperation::readTuples(NdbScanOperation::LockMode lm,
// Modify operation state
theStatus = GetValue;
theOperationType = OpenRangeScanRequest;
- range = true;
+ rangeScan = true;
}
+
+ bool tupScan = (scan_flags & SF_TupScan);
+ if (tupScan && rangeScan)
+ tupScan = false;
theParallelism = parallel;
if(fix_receivers(parallel) == -1){
setErrorCodeAbort(4000);
- return 0;
+ return -1;
}
theSCAN_TABREQ = (!theSCAN_TABREQ ? theNdb->getSignal() : theSCAN_TABREQ);
if (theSCAN_TABREQ == NULL) {
setErrorCodeAbort(4000);
- return 0;
+ return -1;
}//if
+ theSCAN_TABREQ->setSignal(GSN_SCAN_TABREQ);
ScanTabReq * req = CAST_PTR(ScanTabReq, theSCAN_TABREQ->getDataPtrSend());
req->apiConnectPtr = theNdbCon->theTCConPtr;
req->tableId = m_accessTable->m_tableId;
@@ -214,17 +209,19 @@ NdbResultSet* NdbScanOperation::readTuples(NdbScanOperation::LockMode lm,
ScanTabReq::setLockMode(reqInfo, lockExcl);
ScanTabReq::setHoldLockFlag(reqInfo, lockHoldMode);
ScanTabReq::setReadCommittedFlag(reqInfo, readCommitted);
- ScanTabReq::setRangeScanFlag(reqInfo, range);
+ ScanTabReq::setRangeScanFlag(reqInfo, rangeScan);
+ ScanTabReq::setTupScanFlag(reqInfo, tupScan);
req->requestInfo = reqInfo;
Uint64 transId = theNdbCon->getTransactionId();
req->transId1 = (Uint32) transId;
req->transId2 = (Uint32) (transId >> 32);
- NdbApiSignal* tSignal =
- theFirstKEYINFO;
-
- theFirstKEYINFO = (tSignal ? tSignal : tSignal = theNdb->getSignal());
+ NdbApiSignal* tSignal = theSCAN_TABREQ->next();
+ if(!tSignal)
+ {
+ theSCAN_TABREQ->next(tSignal = theNdb->getSignal());
+ }
theLastKEYINFO = tSignal;
tSignal->setSignal(GSN_KEYINFO);
@@ -232,7 +229,7 @@ NdbResultSet* NdbScanOperation::readTuples(NdbScanOperation::LockMode lm,
theTotalNrOfKeyWordInSignal= 0;
getFirstATTRINFOScan();
- return getResultSet();
+ return 0;
}
int
@@ -356,65 +353,11 @@ NdbScanOperation::getFirstATTRINFOScan()
#define FAKE_PTR 2
#define API_PTR 3
-
-/*
- * After setBound() are done, move the accumulated ATTRINFO signals to
- * a separate list. Then continue with normal scan.
- */
-#if 0
-int
-NdbIndexScanOperation::saveBoundATTRINFO()
-{
- theCurrentATTRINFO->setLength(theAI_LenInCurrAI);
- theBoundATTRINFO = theFirstATTRINFO;
- theTotalBoundAI_Len = theTotalCurrAI_Len;
- theTotalCurrAI_Len = 5;
- theBoundATTRINFO->setData(theTotalBoundAI_Len, 4);
- theBoundATTRINFO->setData(0, 5);
- theBoundATTRINFO->setData(0, 6);
- theBoundATTRINFO->setData(0, 7);
- theBoundATTRINFO->setData(0, 8);
- theStatus = GetValue;
-
- int res = getFirstATTRINFOScan();
-
- /**
- * Define each key with getValue (if ordered)
- * unless the one's with EqBound
- */
- if(!res && m_ordered){
-
- /**
- * If setBound EQ
- */
- Uint32 i = 0;
- while(theTupleKeyDefined[i][0] == SETBOUND_EQ)
- i++;
-
-
- Uint32 cnt = m_accessTable->getNoOfColumns() - 1;
- m_sort_columns = cnt - i;
- for(; i<cnt; i++){
- const NdbColumnImpl* key = m_accessTable->m_index->m_columns[i];
- const NdbColumnImpl* col = m_currentTable->getColumn(key->m_keyInfoPos);
- NdbRecAttr* tmp = NdbScanOperation::getValue_impl(col, (char*)-1);
- UintPtr newVal = UintPtr(tmp);
- theTupleKeyDefined[i][0] = FAKE_PTR;
- theTupleKeyDefined[i][1] = (newVal & 0xFFFFFFFF);
-#if (SIZEOF_CHARP == 8)
- theTupleKeyDefined[i][2] = (newVal >> 32);
-#endif
- }
- }
- return res;
-}
-#endif
-
#define WAITFOR_SCAN_TIMEOUT 120000
int
NdbScanOperation::executeCursor(int nodeId){
- NdbConnection * tCon = theNdbCon;
+ NdbTransaction * tCon = theNdbCon;
TransporterFacade* tp = TransporterFacade::instance();
Guard guard(tp->theMutexPtr);
@@ -448,7 +391,7 @@ NdbScanOperation::executeCursor(int nodeId){
TRACE_DEBUG("The node is stopping when attempting to start a scan");
setErrorCode(4030);
}//if
- tCon->theCommitStatus = NdbConnection::Aborted;
+ tCon->theCommitStatus = NdbTransaction::Aborted;
}//if
return -1;
}
@@ -456,6 +399,29 @@ NdbScanOperation::executeCursor(int nodeId){
int NdbScanOperation::nextResult(bool fetchAllowed, bool forceSend)
{
+ int res;
+ if ((res = nextResultImpl(fetchAllowed, forceSend)) == 0) {
+ // handle blobs
+ NdbBlob* tBlob = theBlobList;
+ while (tBlob != 0) {
+ if (tBlob->atNextResult() == -1)
+ return -1;
+ tBlob = tBlob->theNext;
+ }
+ /*
+ * Flush blob part ops on behalf of user because
+ * - nextResult is analogous to execute(NoCommit)
+ * - user is likely to want blob value before next execute
+ */
+ if (m_transConnection->executePendingBlobOps() == -1)
+ return -1;
+ return 0;
+ }
+ return res;
+}
+
+int NdbScanOperation::nextResultImpl(bool fetchAllowed, bool forceSend)
+{
if(m_ordered)
return ((NdbIndexScanOperation*)this)->next_result_ordered(fetchAllowed,
forceSend);
@@ -466,6 +432,7 @@ int NdbScanOperation::nextResult(bool fetchAllowed, bool forceSend)
int retVal = 2;
Uint32 idx = m_current_api_receiver;
Uint32 last = m_api_receivers_count;
+ m_curr_row = 0;
if(DEBUG_NEXT_RESULT)
ndbout_c("nextResult(%d) idx=%d last=%d", fetchAllowed, idx, last);
@@ -476,7 +443,7 @@ int NdbScanOperation::nextResult(bool fetchAllowed, bool forceSend)
for(; idx < last; idx++){
NdbReceiver* tRec = m_api_receivers[idx];
if(tRec->nextResult()){
- tRec->copyout(theReceiver);
+ m_curr_row = tRec->copyout(theReceiver);
retVal = 0;
break;
}
@@ -552,7 +519,7 @@ int NdbScanOperation::nextResult(bool fetchAllowed, bool forceSend)
for(; idx < last; idx++){
NdbReceiver* tRec = m_api_receivers[idx];
if(tRec->nextResult()){
- tRec->copyout(theReceiver);
+ m_curr_row = tRec->copyout(theReceiver);
retVal = 0;
break;
}
@@ -674,11 +641,17 @@ NdbScanOperation::doSend(int ProcessorId)
return 0;
}
-void NdbScanOperation::closeScan(bool forceSend, bool releaseOp)
+void NdbScanOperation::close(bool forceSend, bool releaseOp)
{
+ DBUG_ENTER("NdbScanOperation::close");
+ DBUG_PRINT("enter", ("this=%x tcon=%x con=%x force=%d release=%d",
+ (UintPtr)this,
+ (UintPtr)m_transConnection, (UintPtr)theNdbCon,
+ forceSend, releaseOp));
+
if(m_transConnection){
if(DEBUG_NEXT_RESULT)
- ndbout_c("closeScan() theError.code = %d "
+ ndbout_c("close() theError.code = %d "
"m_api_receivers_count = %d "
"m_conf_receivers_count = %d "
"m_sent_receivers_count = %d",
@@ -705,6 +678,8 @@ void NdbScanOperation::closeScan(bool forceSend, bool releaseOp)
tCon->theScanningOp = 0;
theNdb->closeTransaction(tCon);
+ theNdb->theRemainingStartTransactions--;
+ DBUG_VOID_RETURN;
}
void
@@ -716,17 +691,19 @@ NdbScanOperation::execCLOSE_SCAN_REP(){
void NdbScanOperation::release()
{
if(theNdbCon != 0 || m_transConnection != 0){
- closeScan();
+ close();
}
for(Uint32 i = 0; i<m_allocated_receivers; i++){
m_receivers[i]->release();
}
+
+ NdbOperation::release();
+
if(theSCAN_TABREQ)
{
theNdb->releaseSignal(theSCAN_TABREQ);
theSCAN_TABREQ = 0;
}
- NdbOperation::release();
}
/***************************************************************************
@@ -793,7 +770,9 @@ int NdbScanOperation::prepareSendScan(Uint32 aTC_ConnectPtr,
req->requestInfo = reqInfo;
for(Uint32 i = 0; i<theParallelism; i++){
- m_receivers[i]->do_get_value(&theReceiver, batch_size, key_size);
+ m_receivers[i]->do_get_value(&theReceiver, batch_size,
+ key_size,
+ m_read_range_no);
}
return 0;
}
@@ -821,10 +800,6 @@ NdbScanOperation::doSendScan(int aProcessorId)
assert(theSCAN_TABREQ != NULL);
tSignal = theSCAN_TABREQ;
- if (tSignal->setSignal(GSN_SCAN_TABREQ) == -1) {
- setErrorCode(4001);
- return -1;
- }
Uint32 tupKeyLen = theTupKeyLen;
Uint32 len = theTotalNrOfKeyWordInSignal;
@@ -836,7 +811,12 @@ NdbScanOperation::doSendScan(int aProcessorId)
// we created the ATTRINFO signals after the SCAN_TABREQ signal.
ScanTabReq * const req = CAST_PTR(ScanTabReq, tSignal->getDataPtrSend());
req->attrLenKeyLen = (tupKeyLen << 16) | theTotalCurrAI_Len;
-
+ Uint32 tmp = req->requestInfo;
+ ScanTabReq::setDistributionKeyFlag(tmp, theDistrKeyIndicator_);
+ req->distributionKey = theDistributionKey;
+ req->requestInfo = tmp;
+ tSignal->setLength(ScanTabReq::StaticLength + theDistrKeyIndicator_);
+
TransporterFacade *tp = TransporterFacade::instance();
LinearSectionPtr ptr[3];
ptr[0].p = m_prepared_receivers;
@@ -852,8 +832,8 @@ NdbScanOperation::doSendScan(int aProcessorId)
tSignal = theLastKEYINFO;
tSignal->setLength(KeyInfo::HeaderLength + theTotalNrOfKeyWordInSignal);
- assert(theFirstKEYINFO != NULL);
- tSignal = theFirstKEYINFO;
+ assert(theSCAN_TABREQ->next() != NULL);
+ tSignal = theSCAN_TABREQ->next();
NdbApiSignal* last;
do {
@@ -889,6 +869,7 @@ NdbScanOperation::doSendScan(int aProcessorId)
}
theStatus = WaitResponse;
+ m_curr_row = 0;
m_sent_receivers_count = theParallelism;
if(m_ordered)
{
@@ -900,9 +881,9 @@ NdbScanOperation::doSendScan(int aProcessorId)
}//NdbOperation::doSendScan()
/*****************************************************************************
- * NdbOperation* takeOverScanOp(NdbConnection* updateTrans);
+ * NdbOperation* takeOverScanOp(NdbTransaction* updateTrans);
*
- * Parameters: The update transactions NdbConnection pointer.
+ * Parameters: The update transactions NdbTransaction pointer.
* Return Value: A reference to the transferred operation object
* or NULL if no success.
* Remark: Take over the scanning transactions NdbOperation
@@ -912,8 +893,8 @@ NdbScanOperation::doSendScan(int aProcessorId)
*
* FUTURE IMPLEMENTATION: (This note was moved from header file.)
* In the future, it will even be possible to transfer
- * to a NdbConnection on another Ndb-object.
- * In this case the receiving NdbConnection-object must call
+ * to a NdbTransaction on another Ndb-object.
+ * In this case the receiving NdbTransaction-object must call
* a method receiveOpFromScan to actually receive the information.
* This means that the updating transactions can be placed
* in separate threads and thus increasing the parallelism during
@@ -922,16 +903,9 @@ NdbScanOperation::doSendScan(int aProcessorId)
int
NdbScanOperation::getKeyFromKEYINFO20(Uint32* data, unsigned size)
{
- Uint32 idx = m_current_api_receiver;
- Uint32 last = m_api_receivers_count;
-
- Uint32 row;
- NdbReceiver * tRec;
- NdbRecAttr * tRecAttr;
- if(idx < last && (tRec = m_api_receivers[idx])
- && ((row = tRec->m_current_row) <= tRec->m_defined_rows)
- && (tRecAttr = tRec->m_rows[row-1])){
-
+ NdbRecAttr * tRecAttr = m_curr_row;
+ if(tRecAttr)
+ {
const Uint32 * src = (Uint32*)tRecAttr->aRef();
memcpy(data, src, 4*size);
return 0;
@@ -940,18 +914,12 @@ NdbScanOperation::getKeyFromKEYINFO20(Uint32* data, unsigned size)
}
NdbOperation*
-NdbScanOperation::takeOverScanOp(OperationType opType, NdbConnection* pTrans){
+NdbScanOperation::takeOverScanOp(OperationType opType, NdbTransaction* pTrans)
+{
- Uint32 idx = m_current_api_receiver;
- Uint32 last = m_api_receivers_count;
-
- Uint32 row;
- NdbReceiver * tRec;
- NdbRecAttr * tRecAttr;
- if(idx < last && (tRec = m_api_receivers[idx])
- && ((row = tRec->m_current_row) <= tRec->m_defined_rows)
- && (tRecAttr = tRec->m_rows[row-1])){
-
+ NdbRecAttr * tRecAttr = m_curr_row;
+ if(tRecAttr)
+ {
NdbOperation * newOp = pTrans->getNdbOperation(m_currentTable);
if (newOp == NULL){
return NULL;
@@ -970,13 +938,15 @@ NdbScanOperation::takeOverScanOp(OperationType opType, NdbConnection* pTrans){
const Uint32 * src = (Uint32*)tRecAttr->aRef();
const Uint32 tScanInfo = src[len] & 0x3FFFF;
- const Uint32 tTakeOverNode = src[len] >> 20;
+ const Uint32 tTakeOverFragment = src[len] >> 20;
{
UintR scanInfo = 0;
TcKeyReq::setTakeOverScanFlag(scanInfo, 1);
- TcKeyReq::setTakeOverScanNode(scanInfo, tTakeOverNode);
+ TcKeyReq::setTakeOverScanFragment(scanInfo, tTakeOverFragment);
TcKeyReq::setTakeOverScanInfo(scanInfo, tScanInfo);
newOp->theScanInfo = scanInfo;
+ newOp->theDistrKeyIndicator_ = 1;
+ newOp->theDistributionKey = tTakeOverFragment;
}
// Copy the first 8 words of key info from KEYINF20 into TCKEYREQ
@@ -988,7 +958,7 @@ NdbScanOperation::takeOverScanOp(OperationType opType, NdbConnection* pTrans){
if(i < len){
NdbApiSignal* tSignal = theNdb->getSignal();
- newOp->theFirstKEYINFO = tSignal;
+ newOp->theTCREQ->next(tSignal);
Uint32 left = len - i;
while(tSignal && left > KeyInfo::DataLength){
@@ -1123,37 +1093,28 @@ NdbIndexScanOperation::setBound(const NdbColumnImpl* tAttrInfo,
Uint32 currLen = theTotalNrOfKeyWordInSignal;
Uint32 remaining = KeyInfo::DataLength - currLen;
Uint32 sizeInBytes = tAttrInfo->m_attrSize * tAttrInfo->m_arraySize;
+ bool tDistrKey = tAttrInfo->m_distributionKey;
- // normalize char bound
- CHARSET_INFO* cs = tAttrInfo->m_cs;
- Uint32 xfrmData[2000];
- if (cs != NULL && aValue != NULL) {
- // current limitation: strxfrm does not increase length
- assert(cs->strxfrm_multiply <= 1);
- unsigned n =
- (*cs->coll->strnxfrm)(cs,
- (uchar*)xfrmData, sizeof(xfrmData),
- (const uchar*)aValue, sizeInBytes);
- while (n < sizeInBytes)
- ((uchar*)xfrmData)[n++] = 0x20;
- aValue = (char*)xfrmData;
- }
+ len = aValue != NULL ? sizeInBytes : 0;
if (len != sizeInBytes && (len != 0)) {
setErrorCodeAbort(4209);
return -1;
}
+
// insert attribute header
- len = aValue != NULL ? sizeInBytes : 0;
Uint32 tIndexAttrId = tAttrInfo->m_attrId;
Uint32 sizeInWords = (len + 3) / 4;
AttributeHeader ah(tIndexAttrId, sizeInWords);
const Uint32 ahValue = ah.m_value;
- const bool aligned = (UintPtr(aValue) & 3) == 0;
+ const Uint32 align = (UintPtr(aValue) & 7);
+ const bool aligned = (tDistrKey && type == BoundEQ) ?
+ (align == 0) : (align & 3) == 0;
+
const bool nobytes = (len & 0x3) == 0;
const Uint32 totalLen = 2 + sizeInWords;
Uint32 tupKeyLen = theTupKeyLen;
- if(remaining > totalLen && aligned && nobytes){
+ if(remaining > totalLen && aligned && nobytes){
Uint32 * dst = theKEYINFOptr + currLen;
* dst ++ = type;
* dst ++ = ahValue;
@@ -1161,12 +1122,12 @@ NdbIndexScanOperation::setBound(const NdbColumnImpl* tAttrInfo,
theTotalNrOfKeyWordInSignal = currLen + totalLen;
} else {
if(!aligned || !nobytes){
- Uint32 tempData[2002];
+ Uint32 tempData[2000];
tempData[0] = type;
tempData[1] = ahValue;
+ tempData[2 + (len >> 2)] = 0;
memcpy(tempData+2, aValue, len);
- while ((len & 0x3) != 0)
- ((char*)&tempData[2])[len++] = 0;
+
insertBOUNDS(tempData, 2+sizeInWords);
} else {
Uint32 buf[2] = { type, ahValue };
@@ -1185,11 +1146,11 @@ NdbIndexScanOperation::setBound(const NdbColumnImpl* tAttrInfo,
* so it's safe to use [tIndexAttrId]
* (instead of looping as is NdbOperation::equal_impl)
*/
- if(type == BoundEQ && !theTupleKeyDefined[tIndexAttrId][0]){
- theNoOfTupKeyDefined++;
- theTupleKeyDefined[tIndexAttrId][0] = SETBOUND_EQ;
+ if(type == BoundEQ && tDistrKey)
+ {
+ theNoOfTupKeyLeft--;
+ return handle_distribution_key((Uint64*)aValue, sizeInWords);
}
-
return 0;
} else {
setErrorCodeAbort(4228); // XXX wrong code
@@ -1237,14 +1198,31 @@ error:
return -1;
}
-NdbResultSet*
+int
NdbIndexScanOperation::readTuples(LockMode lm,
- Uint32 batch,
- Uint32 parallel,
- bool order_by){
- NdbResultSet * rs = NdbScanOperation::readTuples(lm, batch, 0);
- if(rs && order_by){
- m_ordered = 1;
+ Uint32 scan_flags,
+ Uint32 parallel)
+{
+ const bool order_by = scan_flags & SF_OrderBy;
+ const bool order_desc = scan_flags & SF_Descending;
+ const bool read_range_no = scan_flags & SF_ReadRangeNo;
+
+ int res = NdbScanOperation::readTuples(lm, scan_flags, 0);
+ if(!res && read_range_no)
+ {
+ m_read_range_no = 1;
+ Uint32 word = 0;
+ AttributeHeader::init(&word, AttributeHeader::RANGE_NO, 0);
+ if(insertATTRINFO(word) == -1)
+ res = -1;
+ }
+ if(!res && order_by){
+ m_ordered = true;
+ if (order_desc) {
+ m_descending = true;
+ ScanTabReq * req = CAST_PTR(ScanTabReq, theSCAN_TABREQ->getDataPtrSend());
+ ScanTabReq::setDescendingFlag(req->requestInfo, true);
+ }
Uint32 cnt = m_accessTable->getNoOfColumns() - 1;
m_sort_columns = cnt; // -1 for NDB$NODE
m_current_api_receiver = m_sent_receivers_count;
@@ -1263,7 +1241,10 @@ NdbIndexScanOperation::readTuples(LockMode lm,
#endif
}
}
- return rs;
+ m_this_bound_start = 0;
+ m_first_bound_word = theKEYINFOptr;
+
+ return res;
}
void
@@ -1305,22 +1286,23 @@ NdbIndexScanOperation::compare(Uint32 skip, Uint32 cols,
r1 = (skip ? r1->next() : r1);
r2 = (skip ? r2->next() : r2);
-
+ const int jdir = 1 - 2 * (int)m_descending;
+ assert(jdir == 1 || jdir == -1);
while(cols > 0){
Uint32 * d1 = (Uint32*)r1->aRef();
Uint32 * d2 = (Uint32*)r2->aRef();
unsigned r1_null = r1->isNULL();
if((r1_null ^ (unsigned)r2->isNULL())){
- return (r1_null ? -1 : 1);
+ return (r1_null ? -1 : 1) * jdir;
}
const NdbColumnImpl & col = NdbColumnImpl::getImpl(* r1->m_column);
- Uint32 size = (r1->theAttrSize * r1->theArraySize + 3) / 4;
+ Uint32 len = r1->theAttrSize * r1->theArraySize;
if(!r1_null){
- const NdbSqlUtil::Type& sqlType = NdbSqlUtil::getType(col.m_extType);
- int r = (*sqlType.m_cmp)(col.m_cs, d1, d2, size, size);
+ const NdbSqlUtil::Type& sqlType = NdbSqlUtil::getType(col.m_type);
+ int r = (*sqlType.m_cmp)(col.m_cs, d1, len, d2, len, true);
if(r){
assert(r != NdbSqlUtil::CmpUnknown);
- return r;
+ return r * jdir;
}
}
cols--;
@@ -1334,6 +1316,7 @@ int
NdbIndexScanOperation::next_result_ordered(bool fetchAllowed,
bool forceSend){
+ m_curr_row = 0;
Uint32 u_idx = 0, u_last = 0;
Uint32 s_idx = m_current_api_receiver; // first sorted
Uint32 s_last = theParallelism; // last sorted
@@ -1407,7 +1390,7 @@ NdbIndexScanOperation::next_result_ordered(bool fetchAllowed,
s_idx, s_last);
- Uint32 cols = m_sort_columns;
+ Uint32 cols = m_sort_columns + m_read_range_no;
Uint32 skip = m_keyInfo;
while(u_idx < u_last){
u_last--;
@@ -1444,7 +1427,7 @@ NdbIndexScanOperation::next_result_ordered(bool fetchAllowed,
tRec = m_api_receivers[s_idx];
if(s_idx < s_last && tRec->nextResult()){
- tRec->copyout(theReceiver);
+ m_curr_row = tRec->copyout(theReceiver);
if(DEBUG_NEXT_RESULT) ndbout_c("return 0");
return 0;
}
@@ -1663,10 +1646,14 @@ NdbIndexScanOperation::reset_bounds(bool forceSend){
theError.code = 0;
reset_receivers(theParallelism, m_ordered);
- theLastKEYINFO = theFirstKEYINFO;
- theKEYINFOptr = ((KeyInfo*)theFirstKEYINFO->getDataPtrSend())->keyData;
+ theLastKEYINFO = theSCAN_TABREQ->next();
+ theKEYINFOptr = ((KeyInfo*)theLastKEYINFO->getDataPtrSend())->keyData;
theTupKeyLen = 0;
theTotalNrOfKeyWordInSignal = 0;
+ theNoOfTupKeyLeft = m_accessTable->m_noOfDistributionKeys;
+ theDistrKeyIndicator_ = 0;
+ m_this_bound_start = 0;
+ m_first_bound_word = theKEYINFOptr;
m_transConnection
->remove_list((NdbOperation*&)m_transConnection->m_firstExecutedScanOp,
this);
@@ -1675,3 +1662,33 @@ NdbIndexScanOperation::reset_bounds(bool forceSend){
}
return res;
}
+
+int
+NdbIndexScanOperation::end_of_bound(Uint32 no)
+{
+ if(no < (1 << 13)) // Only 12-bits no of ranges
+ {
+ Uint32 bound_head = * m_first_bound_word;
+ bound_head |= (theTupKeyLen - m_this_bound_start) << 16 | (no << 4);
+ * m_first_bound_word = bound_head;
+
+ m_first_bound_word = theKEYINFOptr + theTotalNrOfKeyWordInSignal;
+ m_this_bound_start = theTupKeyLen;
+ return 0;
+ }
+ return -1;
+}
+
+int
+NdbIndexScanOperation::get_range_no()
+{
+ NdbRecAttr* tRecAttr = m_curr_row;
+ if(m_read_range_no && tRecAttr)
+ {
+ if(m_keyInfo)
+ tRecAttr = tRecAttr->next();
+ Uint32 ret = *(Uint32*)tRecAttr->aRef();
+ return ret;
+ }
+ return -1;
+}
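
Taken together, the scan changes above add descending ordered scans (SF_Descending), tuple scans (SF_TupScan), and multi-range index scans in which each range is closed with end_of_bound() and identified on return with get_range_no() when SF_ReadRangeNo is set. A sketch of a two-range ordered scan follows; the index, table and column names are placeholders, and note that in this API BoundLE/BoundLT denote inclusive/strict lower bounds while BoundGE/BoundGT denote upper bounds.

    #include <cstdio>
    #include <NdbApi.hpp>

    int scan_two_ranges(NdbTransaction *trans)
    {
      NdbIndexScanOperation *op =
        trans->getNdbIndexScanOperation("MY_ORDERED_INDEX", "MY_TABLE");
      if (op == NULL) return -1;

      Uint32 flags = NdbScanOperation::SF_OrderBy | NdbScanOperation::SF_ReadRangeNo;
      if (op->readTuples(NdbOperation::LM_Read, flags) != 0) return -1;

      Uint32 lo = 10, hi = 20, lo2 = 100;
      op->setBound("KEY_COL", NdbIndexScanOperation::BoundLE, &lo);  // key >= 10
      op->setBound("KEY_COL", NdbIndexScanOperation::BoundGT, &hi);  // key <  20
      op->end_of_bound(0);                                           // close range 0
      op->setBound("KEY_COL", NdbIndexScanOperation::BoundLE, &lo2); // key >= 100
      op->end_of_bound(1);                                           // close range 1

      NdbRecAttr *val = op->getValue("KEY_COL");
      if (val == NULL || trans->execute(NoCommit) != 0) return -1;

      while (op->nextResult(true) == 0)
        printf("range %d: %u\n", op->get_range_no(), (unsigned)val->u_32_value());
      return 0;
    }
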
diff --git a/ndb/src/ndbapi/NdbConnection.cpp b/ndb/src/ndbapi/NdbTransaction.cpp
index c9e26f8ccaf..294012d780c 100644
--- a/ndb/src/ndbapi/NdbConnection.cpp
+++ b/ndb/src/ndbapi/NdbTransaction.cpp
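
NdbConnection.cpp is renamed to NdbTransaction.cpp and the class with it; besides the rename, the hunks below also tighten error reporting in the getNdb*Operation factory methods. Caller impact of the rename itself is limited to the type name, roughly as sketched here; the Commit enum value, table and column names are illustrative.

    #include <NdbApi.hpp>

    int insert_row(Ndb *ndb)
    {
      NdbTransaction *trans = ndb->startTransaction();   // was NdbConnection*
      if (trans == NULL) return -1;
      NdbOperation *op = trans->getNdbOperation("MY_TABLE");
      int res = -1;
      if (op != NULL &&
          op->insertTuple() == 0 &&
          op->equal("PK", (Uint32)1) == 0 &&
          op->setValue("VAL", (Uint32)42) == 0)
        res = trans->execute(Commit);
      ndb->closeTransaction(trans);
      return res;
    }
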
@@ -16,7 +16,7 @@
#include <ndb_global.h>
#include <NdbOut.hpp>
-#include <NdbConnection.hpp>
+#include <NdbTransaction.hpp>
#include <NdbOperation.hpp>
#include <NdbScanOperation.hpp>
#include <NdbIndexScanOperation.hpp>
@@ -25,7 +25,6 @@
#include "TransporterFacade.hpp"
#include "API.hpp"
#include "NdbBlob.hpp"
-#include <ndb_limits.h>
#include <signaldata/TcKeyConf.hpp>
#include <signaldata/TcIndx.hpp>
@@ -34,13 +33,13 @@
#include <signaldata/TcHbRep.hpp>
/*****************************************************************************
-NdbConnection( Ndb* aNdb );
+NdbTransaction( Ndb* aNdb );
Return Value: None
Parameters: aNdb: Pointers to the Ndb object
Remark: Creates a connection object.
*****************************************************************************/
-NdbConnection::NdbConnection( Ndb* aNdb ) :
+NdbTransaction::NdbTransaction( Ndb* aNdb ) :
theSendStatus(NotInit),
theCallbackFunction(NULL),
theCallbackObject(NULL),
@@ -89,19 +88,19 @@ NdbConnection::NdbConnection( Ndb* aNdb ) :
CHECK_SZ(m_db_nodes, NdbNodeBitmask::Size);
CHECK_SZ(m_failed_db_nodes, NdbNodeBitmask::Size);
-}//NdbConnection::NdbConnection()
+}//NdbTransaction::NdbTransaction()
/*****************************************************************************
-~NdbConnection();
+~NdbTransaction();
Remark: Deletes the connection object.
*****************************************************************************/
-NdbConnection::~NdbConnection()
+NdbTransaction::~NdbTransaction()
{
- DBUG_ENTER("NdbConnection::~NdbConnection");
+ DBUG_ENTER("NdbTransaction::~NdbTransaction");
theNdb->theImpl->theNdbObjectIdMap.unmap(theId, this);
DBUG_VOID_RETURN;
-}//NdbConnection::~NdbConnection()
+}//NdbTransaction::~NdbTransaction()
/*****************************************************************************
void init();
@@ -109,7 +108,7 @@ void init();
Remark: Initialise connection object for new transaction.
*****************************************************************************/
void
-NdbConnection::init()
+NdbTransaction::init()
{
theListState = NotInList;
theInUseState = true;
@@ -149,7 +148,7 @@ NdbConnection::init()
//
theBlobFlag = false;
thePendingBlobOps = 0;
-}//NdbConnection::init()
+}//NdbTransaction::init()
/*****************************************************************************
setOperationErrorCode(int error);
@@ -158,9 +157,9 @@ Remark: Sets an error code on the connection object from an
operation object.
*****************************************************************************/
void
-NdbConnection::setOperationErrorCode(int error)
+NdbTransaction::setOperationErrorCode(int error)
{
- DBUG_ENTER("NdbConnection::setOperationErrorCode");
+ DBUG_ENTER("NdbTransaction::setOperationErrorCode");
setErrorCode(error);
DBUG_VOID_RETURN;
}
@@ -172,9 +171,9 @@ Remark: Sets an error code on the connection object from an
operation object.
*****************************************************************************/
void
-NdbConnection::setOperationErrorCodeAbort(int error, int abortOption)
+NdbTransaction::setOperationErrorCodeAbort(int error, int abortOption)
{
- DBUG_ENTER("NdbConnection::setOperationErrorCodeAbort");
+ DBUG_ENTER("NdbTransaction::setOperationErrorCodeAbort");
if (abortOption == -1)
abortOption = m_abortOption;
if (theTransactionIsStarted == false) {
@@ -194,20 +193,20 @@ setErrorCode(int anErrorCode);
Remark: Sets an error indication on the connection object.
*****************************************************************************/
void
-NdbConnection::setErrorCode(int error)
+NdbTransaction::setErrorCode(int error)
{
- DBUG_ENTER("NdbConnection::setErrorCode");
+ DBUG_ENTER("NdbTransaction::setErrorCode");
DBUG_PRINT("enter", ("error: %d, theError.code: %d", error, theError.code));
if (theError.code == 0)
theError.code = error;
DBUG_VOID_RETURN;
-}//NdbConnection::setErrorCode()
+}//NdbTransaction::setErrorCode()
int
-NdbConnection::restart(){
- DBUG_ENTER("NdbConnection::restart");
+NdbTransaction::restart(){
+ DBUG_ENTER("NdbTransaction::restart");
if(theCompletionStatus == CompletedSuccess){
releaseCompletedOperations();
Uint64 tTransid = theNdb->theFirstTransId;
@@ -232,23 +231,8 @@ void handleExecuteCompletion(void);
Remark: Handle time-out on a transaction object.
*****************************************************************************/
void
-NdbConnection::handleExecuteCompletion()
+NdbTransaction::handleExecuteCompletion()
{
-
- if (theCompletionStatus == CompletedFailure) {
- NdbOperation* tOpTemp = theFirstExecOpInList;
- while (tOpTemp != NULL) {
-/*****************************************************************************
- * Ensure that all executing operations report failed for each
- * read attribute when failure occurs.
- * We do not want any operations to report both failure and
- * success on different read attributes.
- ****************************************************************************/
- tOpTemp->handleFailedAI_ElemLen();
- tOpTemp = tOpTemp->next();
- }//while
- theReturnStatus = ReturnFailure;
- }//if
/***************************************************************************
* Move the NdbOperation objects from the list of executing
* operations to list of completed
@@ -265,7 +249,7 @@ NdbConnection::handleExecuteCompletion()
}//if
theSendStatus = InitState;
return;
-}//NdbConnection::handleExecuteCompletion()
+}//NdbTransaction::handleExecuteCompletion()
/*****************************************************************************
int execute(ExecType aTypeOfExec, CommitType aTypeOfCommit, int forceSend);
@@ -276,12 +260,12 @@ Parameters : aTypeOfExec: Type of execute.
Remark: Initialise connection object for new transaction.
*****************************************************************************/
int
-NdbConnection::execute(ExecType aTypeOfExec,
+NdbTransaction::execute(ExecType aTypeOfExec,
AbortOption abortOption,
int forceSend)
{
NdbError savedError= theError;
- DBUG_ENTER("NdbConnection::execute");
+ DBUG_ENTER("NdbTransaction::execute");
DBUG_PRINT("enter", ("aTypeOfExec: %d, abortOption: %d",
aTypeOfExec, abortOption));
@@ -432,11 +416,11 @@ NdbConnection::execute(ExecType aTypeOfExec,
}
int
-NdbConnection::executeNoBlobs(ExecType aTypeOfExec,
+NdbTransaction::executeNoBlobs(ExecType aTypeOfExec,
AbortOption abortOption,
int forceSend)
{
- DBUG_ENTER("NdbConnection::executeNoBlobs");
+ DBUG_ENTER("NdbTransaction::executeNoBlobs");
DBUG_PRINT("enter", ("aTypeOfExec: %d, abortOption: %d",
aTypeOfExec, abortOption));
@@ -445,7 +429,7 @@ NdbConnection::executeNoBlobs(ExecType aTypeOfExec,
// since last execute or since beginning. If this works ok we will continue
// by calling the poll with wait method. This method will return when
// the NDB kernel has completed its task or when 10 seconds have passed.
-// The NdbConnectionCallBack-method will receive the return code of the
+// The NdbTransactionCallBack-method will receive the return code of the
// transaction. The normal methods of reading error codes still apply.
//------------------------------------------------------------------------
Ndb* tNdb = theNdb;
@@ -493,7 +477,7 @@ NdbConnection::executeNoBlobs(ExecType aTypeOfExec,
}
thePendingBlobOps = 0;
DBUG_RETURN(0);
-}//NdbConnection::execute()
+}//NdbTransaction::execute()
/*****************************************************************************
void executeAsynchPrepare(ExecType aTypeOfExec,
@@ -511,12 +495,12 @@ Parameters : aTypeOfExec: Type of execute.
Remark: Prepare a part of a transaction in an asynchronous manner.
*****************************************************************************/
void
-NdbConnection::executeAsynchPrepare( ExecType aTypeOfExec,
+NdbTransaction::executeAsynchPrepare( ExecType aTypeOfExec,
NdbAsynchCallback aCallback,
void* anyObject,
AbortOption abortOption)
{
- DBUG_ENTER("NdbConnection::executeAsynchPrepare");
+ DBUG_ENTER("NdbTransaction::executeAsynchPrepare");
DBUG_PRINT("enter", ("aTypeOfExec: %d, aCallback: %x, anyObject: %x",
aTypeOfExec, aCallback, anyObject));
@@ -679,14 +663,14 @@ NdbConnection::executeAsynchPrepare( ExecType aTypeOfExec,
NdbNodeBitmask::clear(m_db_nodes);
NdbNodeBitmask::clear(m_failed_db_nodes);
DBUG_VOID_RETURN;
-}//NdbConnection::executeAsynchPrepare()
+}//NdbTransaction::executeAsynchPrepare()
-void NdbConnection::close()
+void NdbTransaction::close()
{
theNdb->closeTransaction(this);
}
-int NdbConnection::refresh(){
+int NdbTransaction::refresh(){
return sendTC_HBREP();
}
@@ -698,7 +682,7 @@ Parameters : None.
Remark: Order NDB to refresh the timeout counter of the transaction.
******************************************************************************/
int
-NdbConnection::sendTC_HBREP() // Send a TC_HBREP signal;
+NdbTransaction::sendTC_HBREP() // Send a TC_HBREP signal;
{
NdbApiSignal* tSignal;
Ndb* tNdb = theNdb;
@@ -733,7 +717,7 @@ NdbConnection::sendTC_HBREP() // Send a TC_HBREP signal;
}
return 0;
-}//NdbConnection::sendTC_HBREP()
+}//NdbTransaction::sendTC_HBREP()
/*****************************************************************************
int doSend();
@@ -745,9 +729,9 @@ Remark: Send all operations belonging to this connection.
object from the prepared transactions array on the Ndb-object.
*****************************************************************************/
int
-NdbConnection::doSend()
+NdbTransaction::doSend()
{
- DBUG_ENTER("NdbConnection::doSend");
+ DBUG_ENTER("NdbTransaction::doSend");
/*
This method assumes that at least one operation have been defined. This
@@ -806,7 +790,7 @@ NdbConnection::doSend()
theTransactionIsStarted = false;
theCommitStatus = Aborted;
DBUG_RETURN(-1);
-}//NdbConnection::doSend()
+}//NdbTransaction::doSend()
/**************************************************************************
int sendROLLBACK();
@@ -816,7 +800,7 @@ Parameters : None.
Remark: Order NDB to rollback the transaction.
**************************************************************************/
int
-NdbConnection::sendROLLBACK() // Send a TCROLLBACKREQ signal;
+NdbTransaction::sendROLLBACK() // Send a TCROLLBACKREQ signal;
{
Ndb* tNdb = theNdb;
if ((theTransactionIsStarted == true) &&
@@ -860,7 +844,7 @@ NdbConnection::sendROLLBACK() // Send a TCROLLBACKREQ signal;
return 0;
;
}//if
-}//NdbConnection::sendROLLBACK()
+}//NdbTransaction::sendROLLBACK()
/***************************************************************************
int sendCOMMIT();
@@ -871,7 +855,7 @@ Parameters : None.
Remark: Order NDB to commit the transaction.
***************************************************************************/
int
-NdbConnection::sendCOMMIT() // Send a TC_COMMITREQ signal;
+NdbTransaction::sendCOMMIT() // Send a TC_COMMITREQ signal;
{
NdbApiSignal tSignal(theNdb->theMyRef);
Uint32 tTransId1, tTransId2;
@@ -893,7 +877,7 @@ NdbConnection::sendCOMMIT() // Send a TC_COMMITREQ signal;
} else {
return -1;
}//if
-}//NdbConnection::sendCOMMIT()
+}//NdbTransaction::sendCOMMIT()
/******************************************************************************
void release();
@@ -901,7 +885,7 @@ void release();
Remark: Release all operations.
******************************************************************************/
void
-NdbConnection::release(){
+NdbTransaction::release(){
releaseOperations();
if ( (theTransactionIsStarted == true) &&
((theCommitStatus != Committed) &&
@@ -920,10 +904,10 @@ NdbConnection::release(){
abort();
}
#endif
-}//NdbConnection::release()
+}//NdbTransaction::release()
void
-NdbConnection::releaseOps(NdbOperation* tOp){
+NdbTransaction::releaseOps(NdbOperation* tOp){
while (tOp != NULL) {
NdbOperation* tmp = tOp;
tOp->release();
@@ -938,7 +922,7 @@ void releaseOperations();
Remark: Release all operations.
******************************************************************************/
void
-NdbConnection::releaseOperations()
+NdbTransaction::releaseOperations()
{
// Release any open scans
releaseScanOperations(m_theFirstScanOperation);
@@ -958,15 +942,15 @@ NdbConnection::releaseOperations()
m_theFirstScanOperation = NULL;
m_theLastScanOperation = NULL;
m_firstExecutedScanOp = NULL;
-}//NdbConnection::releaseOperations()
+}//NdbTransaction::releaseOperations()
void
-NdbConnection::releaseCompletedOperations()
+NdbTransaction::releaseCompletedOperations()
{
releaseOps(theCompletedFirstOp);
theCompletedFirstOp = NULL;
theCompletedLastOp = NULL;
-}//NdbConnection::releaseOperations()
+}//NdbTransaction::releaseOperations()
/******************************************************************************
void releaseScanOperations();
@@ -975,7 +959,7 @@ Remark: Release all cursor operations.
(NdbScanOperation and NdbIndexOperation)
******************************************************************************/
void
-NdbConnection::releaseScanOperations(NdbIndexScanOperation* cursorOp)
+NdbTransaction::releaseScanOperations(NdbIndexScanOperation* cursorOp)
{
while(cursorOp != 0){
NdbIndexScanOperation* next = (NdbIndexScanOperation*)cursorOp->next();
@@ -983,7 +967,7 @@ NdbConnection::releaseScanOperations(NdbIndexScanOperation* cursorOp)
theNdb->releaseScanOperation(cursorOp);
cursorOp = next;
}
-}//NdbConnection::releaseScanOperations()
+}//NdbTransaction::releaseScanOperations()
/*****************************************************************************
void releaseExecutedScanOperation();
@@ -991,9 +975,9 @@ void releaseExecutedScanOperation();
Remark: Release scan op when hupp'ed trans closed (save memory)
******************************************************************************/
void
-NdbConnection::releaseExecutedScanOperation(NdbIndexScanOperation* cursorOp)
+NdbTransaction::releaseExecutedScanOperation(NdbIndexScanOperation* cursorOp)
{
- DBUG_ENTER("NdbConnection::releaseExecutedScanOperation");
+ DBUG_ENTER("NdbTransaction::releaseExecutedScanOperation");
DBUG_PRINT("enter", ("this=0x%x op=0x%x", (UintPtr)this, (UintPtr)cursorOp))
// here is one reason to make op lists doubly linked
@@ -1014,7 +998,7 @@ NdbConnection::releaseExecutedScanOperation(NdbIndexScanOperation* cursorOp)
}
}
DBUG_VOID_RETURN;
-}//NdbConnection::releaseExecutedScanOperation()
+}//NdbTransaction::releaseExecutedScanOperation()
/*****************************************************************************
NdbOperation* getNdbOperation(const char* aTableName);
@@ -1024,13 +1008,13 @@ Return Value Return a pointer to a NdbOperation object if getNdbOperation
Return NULL : In all other case.
Parameters: aTableName : Name of the database table.
Remark: Get an operation from NdbOperation idlelist and get the
- NdbConnection object
+ NdbTransaction object
who was fetch by startTransaction pointing to this operation
getOperation will set the theTableId in the NdbOperation object.
synchronous
******************************************************************************/
NdbOperation*
-NdbConnection::getNdbOperation(const char* aTableName)
+NdbTransaction::getNdbOperation(const char* aTableName)
{
if (theCommitStatus == Started){
NdbTableImpl* table = theNdb->theDictionary->getTable(aTableName);
@@ -1045,7 +1029,7 @@ NdbConnection::getNdbOperation(const char* aTableName)
setOperationErrorCodeAbort(4114);
return NULL;
-}//NdbConnection::getNdbOperation()
+}//NdbTransaction::getNdbOperation()
/*****************************************************************************
NdbOperation* getNdbOperation(int aTableId);
@@ -1055,13 +1039,13 @@ Return Value Return a pointer to a NdbOperation object if getNdbOperation
Return NULL: In all other case.
Parameters: tableId : Id of the database table beeing deleted.
Remark: Get an operation from NdbOperation object idlelist and
- get the NdbConnection object who was fetch by
+ get the NdbTransaction object who was fetch by
startTransaction pointing to this operation
getOperation will set the theTableId in the NdbOperation
object, synchronous.
*****************************************************************************/
NdbOperation*
-NdbConnection::getNdbOperation(const NdbTableImpl * tab, NdbOperation* aNextOp)
+NdbTransaction::getNdbOperation(const NdbTableImpl * tab, NdbOperation* aNextOp)
{
NdbOperation* tOp;
@@ -1105,15 +1089,15 @@ NdbConnection::getNdbOperation(const NdbTableImpl * tab, NdbOperation* aNextOp)
getNdbOp_error1:
setOperationErrorCodeAbort(4000);
return NULL;
-}//NdbConnection::getNdbOperation()
+}//NdbTransaction::getNdbOperation()
-NdbOperation* NdbConnection::getNdbOperation(const NdbDictionary::Table * table)
+NdbOperation* NdbTransaction::getNdbOperation(const NdbDictionary::Table * table)
{
if (table)
return getNdbOperation(& NdbTableImpl::getImpl(*table));
else
return NULL;
-}//NdbConnection::getNdbOperation()
+}//NdbTransaction::getNdbOperation()
// NdbScanOperation
/*****************************************************************************
@@ -1122,12 +1106,12 @@ NdbScanOperation* getNdbScanOperation(const char* aTableName);
Return Value Return a pointer to a NdbScanOperation object if getNdbScanOperation was succesful.
Return NULL : In all other case.
Parameters: aTableName : Name of the database table.
-Remark: Get an operation from NdbScanOperation idlelist and get the NdbConnection object
+Remark: Get an operation from NdbScanOperation idlelist and get the NdbTransaction object
who was fetch by startTransaction pointing to this operation
getOperation will set the theTableId in the NdbOperation object.synchronous
******************************************************************************/
NdbScanOperation*
-NdbConnection::getNdbScanOperation(const char* aTableName)
+NdbTransaction::getNdbScanOperation(const char* aTableName)
{
if (theCommitStatus == Started){
NdbTableImpl* tab = theNdb->theDictionary->getTable(aTableName);
@@ -1141,78 +1125,104 @@ NdbConnection::getNdbScanOperation(const char* aTableName)
setOperationErrorCodeAbort(4114);
return NULL;
-}//NdbConnection::getNdbScanOperation()
+}//NdbTransaction::getNdbScanOperation()
/*****************************************************************************
-NdbScanOperation* getNdbScanOperation(const char* anIndexName, const char* aTableName);
+NdbScanOperation* getNdbIndexScanOperation(const char* anIndexName, const char* aTableName);
-Return Value Return a pointer to a NdbScanOperation object if getNdbScanOperation was succesful.
+Return Value Return a pointer to a NdbIndexScanOperation object if getNdbIndexScanOperation was succesful.
Return NULL : In all other case.
Parameters: anIndexName : Name of the index to use.
aTableName : Name of the database table.
-Remark: Get an operation from NdbScanOperation idlelist and get the NdbConnection object
+Remark: Get an operation from NdbIndexScanOperation idlelist and get the NdbTransaction object
which was fetched by startTransaction pointing to this operation
- getOperation will set the theTableId in the NdbOperation object.synchronous
+               getOperation will set theTableId in the NdbIndexScanOperation object, synchronous.
******************************************************************************/
NdbIndexScanOperation*
-NdbConnection::getNdbIndexScanOperation(const char* anIndexName,
+NdbTransaction::getNdbIndexScanOperation(const char* anIndexName,
const char* aTableName)
{
NdbIndexImpl* index =
theNdb->theDictionary->getIndex(anIndexName, aTableName);
+ if (index == 0)
+ {
+ setOperationErrorCodeAbort(theNdb->theDictionary->getNdbError().code);
+ return 0;
+ }
NdbTableImpl* table = theNdb->theDictionary->getTable(aTableName);
+ if (table == 0)
+ {
+ setOperationErrorCodeAbort(theNdb->theDictionary->getNdbError().code);
+ return 0;
+ }
return getNdbIndexScanOperation(index, table);
}
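The index variant adds bounds on the ordered index before executing. In the sketch, MY_INDEX on MY_TABLE(VAL) is an assumed ordered index, and setBound() with BoundLE — which in this API's naming is the inclusive lower bound, i.e. VAL >= low — is the era's bound interface.

#include <stdio.h>
#include <NdbApi.hpp>

// Range scan over the hypothetical ordered index MY_INDEX, returning rows with VAL >= low.
int scan_range(Ndb* myNdb, int low)
{
  NdbTransaction* trans = myNdb->startTransaction();
  if (trans == NULL)
    return -1;

  NdbIndexScanOperation* scan =
    trans->getNdbIndexScanOperation("MY_INDEX", "MY_TABLE");
  if (scan == NULL ||
      scan->readTuples() == -1 ||
      scan->setBound("VAL", NdbIndexScanOperation::BoundLE, &low) == -1) {
    myNdb->closeTransaction(trans);
    return -1;
  }
  NdbRecAttr* val = scan->getValue("VAL");

  if (trans->execute(NoCommit) == -1) {
    myNdb->closeTransaction(trans);
    return -1;
  }
  while (scan->nextResult(true) == 0)
    printf("VAL = %d\n", val->int32_value());

  myNdb->closeTransaction(trans);
  return 0;
}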
NdbIndexScanOperation*
-NdbConnection::getNdbIndexScanOperation(const NdbIndexImpl* index,
+NdbTransaction::getNdbIndexScanOperation(const NdbIndexImpl* index,
const NdbTableImpl* table)
{
if (theCommitStatus == Started){
const NdbTableImpl * indexTable = index->getIndexTable();
if (indexTable != 0){
- NdbIndexScanOperation* tOp =
- getNdbScanOperation((NdbTableImpl *) indexTable);
+ NdbIndexScanOperation* tOp = getNdbScanOperation(indexTable);
if(tOp)
{
tOp->m_currentTable = table;
- tOp->m_cursor_type = NdbScanOperation::IndexCursor;
}
return tOp;
} else {
- setOperationErrorCodeAbort(theNdb->theError.code);
+ setOperationErrorCodeAbort(4271);
return NULL;
}//if
}
setOperationErrorCodeAbort(4114);
return NULL;
-}//NdbConnection::getNdbIndexScanOperation()
+}//NdbTransaction::getNdbIndexScanOperation()
NdbIndexScanOperation*
-NdbConnection::getNdbIndexScanOperation(const NdbDictionary::Index * index,
+NdbTransaction::getNdbIndexScanOperation(const NdbDictionary::Index * index)
+{
+ if (index)
+ {
+ const NdbDictionary::Table *table=
+ theNdb->theDictionary->getTable(index->getTable());
+
+ if (table)
+ return getNdbIndexScanOperation(index, table);
+
+ setOperationErrorCodeAbort(theNdb->theDictionary->getNdbError().code);
+ return NULL;
+ }
+ setOperationErrorCodeAbort(4271);
+ return NULL;
+}
+
+NdbIndexScanOperation*
+NdbTransaction::getNdbIndexScanOperation(const NdbDictionary::Index * index,
const NdbDictionary::Table * table)
{
if (index && table)
return getNdbIndexScanOperation(& NdbIndexImpl::getImpl(*index),
& NdbTableImpl::getImpl(*table));
- else
- return NULL;
-}//NdbConnection::getNdbIndexScanOperation()
+ setOperationErrorCodeAbort(4271);
+ return NULL;
+}//NdbTransaction::getNdbIndexScanOperation()
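The new single-argument overloads added above let the caller hand over just the NdbDictionary::Index; the underlying table is resolved internally through index->getTable(), with 4271 reported when that fails. A small hedged sketch (index and table names are placeholders):

#include <stdio.h>
#include <NdbApi.hpp>

// Open an index scan from a dictionary Index object alone; the table lookup is left to the API.
NdbIndexScanOperation* open_index_scan(Ndb* myNdb, NdbTransaction* trans,
                                       const char* indexName, const char* tableName)
{
  const NdbDictionary::Index* index =
    myNdb->getDictionary()->getIndex(indexName, tableName);
  if (index == NULL)
    return NULL;                                   // dictionary error, see getNdbError()

  NdbIndexScanOperation* scan = trans->getNdbIndexScanOperation(index);
  if (scan == NULL)
    printf("getNdbIndexScanOperation failed: %d\n", trans->getNdbError().code);
  return scan;
}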
/*****************************************************************************
NdbScanOperation* getNdbScanOperation(int aTableId);
-Return Value Return a pointer to a NdbOperation object if getNdbOperation was succesful.
+Return Value Return a pointer to a NdbScanOperation object if getNdbScanOperation was successful.
Return NULL: In all other case.
Parameters: tableId : Id of the database table being deleted.
-Remark: Get an operation from NdbScanOperation object idlelist and get the NdbConnection
+Remark: Get an operation from NdbScanOperation object idlelist and get the NdbTransaction
object which was fetched by startTransaction pointing to this operation
- getOperation will set the theTableId in the NdbOperation object, synchronous.
+               getOperation will set theTableId in the NdbScanOperation object, synchronous.
*****************************************************************************/
NdbIndexScanOperation*
-NdbConnection::getNdbScanOperation(const NdbTableImpl * tab)
+NdbTransaction::getNdbScanOperation(const NdbTableImpl * tab)
{
NdbIndexScanOperation* tOp;
@@ -1231,10 +1241,10 @@ NdbConnection::getNdbScanOperation(const NdbTableImpl * tab)
getNdbOp_error1:
setOperationErrorCodeAbort(4000);
return NULL;
-}//NdbConnection::getNdbScanOperation()
+}//NdbTransaction::getNdbScanOperation()
void
-NdbConnection::remove_list(NdbOperation*& list, NdbOperation* op){
+NdbTransaction::remove_list(NdbOperation*& list, NdbOperation* op){
NdbOperation* tmp= list;
if(tmp == op)
list = op->next();
@@ -1247,7 +1257,7 @@ NdbConnection::remove_list(NdbOperation*& list, NdbOperation* op){
}
void
-NdbConnection::define_scan_op(NdbIndexScanOperation * tOp){
+NdbTransaction::define_scan_op(NdbIndexScanOperation * tOp){
// Link scan operation into list of cursor operations
if (m_theLastScanOperation == NULL)
m_theFirstScanOperation = m_theLastScanOperation = tOp;
@@ -1259,13 +1269,13 @@ NdbConnection::define_scan_op(NdbIndexScanOperation * tOp){
}
NdbScanOperation*
-NdbConnection::getNdbScanOperation(const NdbDictionary::Table * table)
+NdbTransaction::getNdbScanOperation(const NdbDictionary::Table * table)
{
if (table)
return getNdbScanOperation(& NdbTableImpl::getImpl(*table));
else
return NULL;
-}//NdbConnection::getNdbScanOperation()
+}//NdbTransaction::getNdbScanOperation()
// IndexOperation
@@ -1273,21 +1283,39 @@ NdbConnection::getNdbScanOperation(const NdbDictionary::Table * table)
NdbIndexOperation* getNdbIndexOperation(const char* anIndexName,
const char* aTableName);
-Return Value Return a pointer to a NdbOperation object if getNdbScanOperation was succesful.
+Return Value Return a pointer to an NdbIndexOperation object if getNdbIndexOperation was successful.
Return NULL : In all other case.
Parameters: aTableName : Name of the database table.
-Remark: Get an operation from NdbScanOperation idlelist and get the NdbConnection object
- who was fetch by startTransaction pointing to this operation
- getOperation will set the theTableId in the NdbScanOperation object.synchronous
+Remark: Get an operation from NdbIndexOperation idlelist and get the NdbTransaction object
+              which was fetched by startTransaction pointing to this operation
+              getOperation will set theTableId in the NdbIndexOperation object, synchronous.
******************************************************************************/
NdbIndexOperation*
-NdbConnection::getNdbIndexOperation(const char* anIndexName,
+NdbTransaction::getNdbIndexOperation(const char* anIndexName,
const char* aTableName)
{
if (theCommitStatus == Started) {
NdbTableImpl * table = theNdb->theDictionary->getTable(aTableName);
- NdbIndexImpl * index = theNdb->theDictionary->getIndex(anIndexName,
- aTableName);
+ NdbIndexImpl * index;
+
+ if (table == 0)
+ {
+ setOperationErrorCodeAbort(theNdb->theDictionary->getNdbError().code);
+ return NULL;
+ }
+
+ if (table->m_frm.get_data())
+ {
+ // This unique index is defined from SQL level
+ static const char* uniqueSuffix= "$unique";
+ BaseString uniqueIndexName(anIndexName);
+ uniqueIndexName.append(uniqueSuffix);
+ index = theNdb->theDictionary->getIndex(uniqueIndexName.c_str(),
+ aTableName);
+ }
+ else
+ index = theNdb->theDictionary->getIndex(anIndexName,
+ aTableName);
if(table != 0 && index != 0){
return getNdbIndexOperation(index, table);
}
@@ -1297,14 +1325,13 @@ NdbConnection::getNdbIndexOperation(const char* anIndexName,
return NULL;
}
- // table == 0
- setOperationErrorCodeAbort(theNdb->theError.code);
+ setOperationErrorCodeAbort(4243);
return NULL;
}
setOperationErrorCodeAbort(4114);
return 0;
-}//NdbConnection::getNdbIndexOperation()
+}//NdbTransaction::getNdbIndexOperation()
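The "$unique" handling above means an application keeps passing the plain index name; when the table carries an .frm (i.e. it was defined from the SQL level) the suffix is appended internally before the dictionary lookup. A hedged sketch of a unique-index read, with MY_UNIQUE, MY_TABLE, UKEY and VAL as assumed names:

#include <NdbApi.hpp>

// Read VAL through the hypothetical unique index MY_UNIQUE on MY_TABLE(UKEY).
int read_by_unique_key(Ndb* myNdb, int ukey, int* out_val)
{
  NdbTransaction* trans = myNdb->startTransaction();
  if (trans == NULL)
    return -1;

  NdbIndexOperation* op = trans->getNdbIndexOperation("MY_UNIQUE", "MY_TABLE");
  if (op == NULL) {                                // 4243 when the index cannot be found
    myNdb->closeTransaction(trans);
    return -1;
  }
  op->readTuple();
  op->equal("UKEY", ukey);                         // key column of the unique index
  NdbRecAttr* val = op->getValue("VAL");

  int rc = trans->execute(Commit);
  if (rc == 0)
    *out_val = val->int32_value();
  myNdb->closeTransaction(trans);
  return rc;
}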
/*****************************************************************************
NdbIndexOperation* getNdbIndexOperation(int anIndexId, int aTableId);
@@ -1312,12 +1339,12 @@ NdbIndexOperation* getNdbIndexOperation(int anIndexId, int aTableId);
Return Value Return a pointer to a NdbIndexOperation object if getNdbIndexOperation was successful.
Return NULL: In all other case.
Parameters: tableId : Id of the database table being deleted.
-Remark: Get an operation from NdbIndexOperation object idlelist and get the NdbConnection
+Remark: Get an operation from NdbIndexOperation object idlelist and get the NdbTransaction
object which was fetched by startTransaction pointing to this operation
getOperation will set the theTableId in the NdbIndexOperation object, synchronous.
*****************************************************************************/
NdbIndexOperation*
-NdbConnection::getNdbIndexOperation(const NdbIndexImpl * anIndex,
+NdbTransaction::getNdbIndexOperation(const NdbIndexImpl * anIndex,
const NdbTableImpl * aTable,
NdbOperation* aNextOp)
{
@@ -1358,18 +1385,37 @@ NdbConnection::getNdbIndexOperation(const NdbIndexImpl * anIndex,
getNdbOp_error1:
setOperationErrorCodeAbort(4000);
return NULL;
-}//NdbConnection::getNdbIndexOperation()
+}//NdbTransaction::getNdbIndexOperation()
NdbIndexOperation*
-NdbConnection::getNdbIndexOperation(const NdbDictionary::Index * index,
+NdbTransaction::getNdbIndexOperation(const NdbDictionary::Index * index)
+{
+ if (index)
+ {
+ const NdbDictionary::Table *table=
+ theNdb->theDictionary->getTable(index->getTable());
+
+ if (table)
+ return getNdbIndexOperation(index, table);
+
+ setOperationErrorCodeAbort(theNdb->theDictionary->getNdbError().code);
+ return NULL;
+ }
+ setOperationErrorCodeAbort(4271);
+ return NULL;
+}
+
+NdbIndexOperation*
+NdbTransaction::getNdbIndexOperation(const NdbDictionary::Index * index,
const NdbDictionary::Table * table)
{
if (index && table)
return getNdbIndexOperation(& NdbIndexImpl::getImpl(*index),
& NdbTableImpl::getImpl(*table));
- else
- return NULL;
-}//NdbConnection::getNdbIndexOperation()
+
+ setOperationErrorCodeAbort(4271);
+ return NULL;
+}//NdbTransaction::getNdbIndexOperation()
/*******************************************************************************
@@ -1381,7 +1427,7 @@ Parameters: aSignal: The signal object pointer.
Remark: Sets theRestartGCI in the NDB object.
*******************************************************************************/
int
-NdbConnection::receiveDIHNDBTAMPER(NdbApiSignal* aSignal)
+NdbTransaction::receiveDIHNDBTAMPER(NdbApiSignal* aSignal)
{
if (theStatus != Connecting) {
return -1;
@@ -1390,7 +1436,7 @@ NdbConnection::receiveDIHNDBTAMPER(NdbApiSignal* aSignal)
theStatus = Connected;
}//if
return 0;
-}//NdbConnection::receiveDIHNDBTAMPER()
+}//NdbTransaction::receiveDIHNDBTAMPER()
/*******************************************************************************
int receiveTCSEIZECONF(NdbApiSignal* aSignal);
@@ -1401,7 +1447,7 @@ Parameters: aSignal: The signal object pointer.
Remark: Sets TC Connect pointer at reception of TCSEIZECONF.
*******************************************************************************/
int
-NdbConnection::receiveTCSEIZECONF(NdbApiSignal* aSignal)
+NdbTransaction::receiveTCSEIZECONF(NdbApiSignal* aSignal)
{
if (theStatus != Connecting)
{
@@ -1412,7 +1458,7 @@ NdbConnection::receiveTCSEIZECONF(NdbApiSignal* aSignal)
theStatus = Connected;
}
return 0;
-}//NdbConnection::receiveTCSEIZECONF()
+}//NdbTransaction::receiveTCSEIZECONF()
/*******************************************************************************
int receiveTCSEIZEREF(NdbApiSignal* aSignal);
@@ -1423,18 +1469,22 @@ Parameters: aSignal: The signal object pointer.
Remark: Sets TC Connect pointer.
*******************************************************************************/
int
-NdbConnection::receiveTCSEIZEREF(NdbApiSignal* aSignal)
+NdbTransaction::receiveTCSEIZEREF(NdbApiSignal* aSignal)
{
+ DBUG_ENTER("NdbTransaction::receiveTCSEIZEREF");
if (theStatus != Connecting)
{
- return -1;
+ DBUG_RETURN(-1);
} else
{
theStatus = ConnectFailure;
theNdb->theError.code = aSignal->readData(2);
- return 0;
+ DBUG_PRINT("info",("error code %d, %s",
+ theNdb->getNdbError().code,
+ theNdb->getNdbError().message));
+ DBUG_RETURN(0);
}
-}//NdbConnection::receiveTCSEIZEREF()
+}//NdbTransaction::receiveTCSEIZEREF()
/*******************************************************************************
int receiveTCRELEASECONF(NdbApiSignal* aSignal);
@@ -1445,7 +1495,7 @@ Parameters: aSignal: The signal object pointer.
Remark: DisConnect TC Connect pointer to NDBAPI.
*******************************************************************************/
int
-NdbConnection::receiveTCRELEASECONF(NdbApiSignal* aSignal)
+NdbTransaction::receiveTCRELEASECONF(NdbApiSignal* aSignal)
{
if (theStatus != DisConnecting)
{
@@ -1455,7 +1505,7 @@ NdbConnection::receiveTCRELEASECONF(NdbApiSignal* aSignal)
theStatus = NotConnected;
}
return 0;
-}//NdbConnection::receiveTCRELEASECONF()
+}//NdbTransaction::receiveTCRELEASECONF()
/*******************************************************************************
int receiveTCRELEASEREF(NdbApiSignal* aSignal);
@@ -1466,7 +1516,7 @@ Parameters: aSignal: The signal object pointer.
Remark: DisConnect TC Connect pointer to NDBAPI Failure.
*******************************************************************************/
int
-NdbConnection::receiveTCRELEASEREF(NdbApiSignal* aSignal)
+NdbTransaction::receiveTCRELEASEREF(NdbApiSignal* aSignal)
{
if (theStatus != DisConnecting) {
return -1;
@@ -1475,7 +1525,7 @@ NdbConnection::receiveTCRELEASEREF(NdbApiSignal* aSignal)
theNdb->theError.code = aSignal->readData(2);
return 0;
}//if
-}//NdbConnection::receiveTCRELEASEREF()
+}//NdbTransaction::receiveTCRELEASEREF()
/******************************************************************************
int receiveTC_COMMITCONF(NdbApiSignal* aSignal);
@@ -1486,11 +1536,12 @@ Parameters: aSignal: The signal object pointer.
Remark:
******************************************************************************/
int
-NdbConnection::receiveTC_COMMITCONF(const TcCommitConf * commitConf)
+NdbTransaction::receiveTC_COMMITCONF(const TcCommitConf * commitConf)
{
if(checkState_TransId(&commitConf->transId1)){
theCommitStatus = Committed;
theCompletionStatus = CompletedSuccess;
+ theGlobalCheckpointId = commitConf->gci;
return 0;
} else {
#ifdef NDB_NO_DROPPED_SIGNAL
@@ -1498,7 +1549,7 @@ NdbConnection::receiveTC_COMMITCONF(const TcCommitConf * commitConf)
#endif
}
return -1;
-}//NdbConnection::receiveTC_COMMITCONF()
+}//NdbTransaction::receiveTC_COMMITCONF()
/******************************************************************************
int receiveTC_COMMITREF(NdbApiSignal* aSignal);
@@ -1509,13 +1560,14 @@ Parameters: aSignal: The signal object pointer.
Remark:
******************************************************************************/
int
-NdbConnection::receiveTC_COMMITREF(NdbApiSignal* aSignal)
+NdbTransaction::receiveTC_COMMITREF(NdbApiSignal* aSignal)
{
const TcCommitRef * ref = CAST_CONSTPTR(TcCommitRef, aSignal->getDataPtr());
if(checkState_TransId(&ref->transId1)){
setOperationErrorCodeAbort(ref->errorCode);
theCommitStatus = Aborted;
theCompletionStatus = CompletedFailure;
+ theReturnStatus = ReturnFailure;
return 0;
} else {
#ifdef NDB_NO_DROPPED_SIGNAL
@@ -1524,7 +1576,7 @@ NdbConnection::receiveTC_COMMITREF(NdbApiSignal* aSignal)
}
return -1;
-}//NdbConnection::receiveTC_COMMITREF()
+}//NdbTransaction::receiveTC_COMMITREF()
/******************************************************************************
int receiveTCROLLBACKCONF(NdbApiSignal* aSignal);
@@ -1535,7 +1587,7 @@ Parameters: aSignal: The signal object pointer.
Remark:
******************************************************************************/
int
-NdbConnection::receiveTCROLLBACKCONF(NdbApiSignal* aSignal)
+NdbTransaction::receiveTCROLLBACKCONF(NdbApiSignal* aSignal)
{
if(checkState_TransId(aSignal->getDataPtr() + 1)){
theCommitStatus = Aborted;
@@ -1548,7 +1600,7 @@ NdbConnection::receiveTCROLLBACKCONF(NdbApiSignal* aSignal)
}
return -1;
-}//NdbConnection::receiveTCROLLBACKCONF()
+}//NdbTransaction::receiveTCROLLBACKCONF()
/*******************************************************************************
int receiveTCROLLBACKREF(NdbApiSignal* aSignal);
@@ -1559,12 +1611,13 @@ Parameters: aSignal: The signal object pointer.
Remark:
*******************************************************************************/
int
-NdbConnection::receiveTCROLLBACKREF(NdbApiSignal* aSignal)
+NdbTransaction::receiveTCROLLBACKREF(NdbApiSignal* aSignal)
{
if(checkState_TransId(aSignal->getDataPtr() + 1)){
setOperationErrorCodeAbort(aSignal->readData(4));
theCommitStatus = Aborted;
theCompletionStatus = CompletedFailure;
+ theReturnStatus = ReturnFailure;
return 0;
} else {
#ifdef NDB_NO_DROPPED_SIGNAL
@@ -1573,7 +1626,7 @@ NdbConnection::receiveTCROLLBACKREF(NdbApiSignal* aSignal)
}
return -1;
-}//NdbConnection::receiveTCROLLBACKREF()
+}//NdbTransaction::receiveTCROLLBACKREF()
/*****************************************************************************
int receiveTCROLLBACKREP( NdbApiSignal* aSignal)
@@ -1585,7 +1638,7 @@ Parameters: aSignal: the signal object that contains the
Remark: Handles the reception of the ROLLBACKREP signal.
*****************************************************************************/
int
-NdbConnection::receiveTCROLLBACKREP( NdbApiSignal* aSignal)
+NdbTransaction::receiveTCROLLBACKREP( NdbApiSignal* aSignal)
{
/****************************************************************************
Check that we are expecting signals from this transaction and that it doesn't
@@ -1604,6 +1657,7 @@ transactions.
/**********************************************************************/
theCompletionStatus = CompletedFailure;
theCommitStatus = Aborted;
+ theReturnStatus = ReturnFailure;
return 0;
} else {
#ifdef NDB_NO_DROPPED_SIGNAL
@@ -1612,7 +1666,7 @@ transactions.
}
return -1;
-}//NdbConnection::receiveTCROLLBACKREP()
+}//NdbTransaction::receiveTCROLLBACKREP()
/*******************************************************************************
int receiveTCKEYCONF(NdbApiSignal* aSignal, Uint32 long_short_ind);
@@ -1623,7 +1677,7 @@ Parameters: aSignal: The signal object pointer.
Remark:
*******************************************************************************/
int
-NdbConnection::receiveTCKEYCONF(const TcKeyConf * keyConf, Uint32 aDataLength)
+NdbTransaction::receiveTCKEYCONF(const TcKeyConf * keyConf, Uint32 aDataLength)
{
NdbReceiver* tOp;
const Uint32 tTemp = keyConf->confInfo;
@@ -1652,6 +1706,7 @@ from other transactions.
done = 1;
tOp->setErrorCode(4119);
theCompletionStatus = CompletedFailure;
+ theReturnStatus = NdbTransaction::ReturnFailure;
}
}
tNoComp += done;
@@ -1681,6 +1736,7 @@ from other transactions.
/**********************************************************************/
theError.code = 4011;
theCompletionStatus = CompletedFailure;
+ theReturnStatus = NdbTransaction::ReturnFailure;
theCommitStatus = Aborted;
return 0;
}//if
@@ -1695,7 +1751,7 @@ from other transactions.
}
return -1;
-}//NdbConnection::receiveTCKEYCONF()
+}//NdbTransaction::receiveTCKEYCONF()
/*****************************************************************************
int receiveTCKEY_FAILCONF( NdbApiSignal* aSignal)
@@ -1707,7 +1763,7 @@ Parameters: aSignal: the signal object that contains the
Remark: Handles the reception of the TCKEY_FAILCONF signal.
*****************************************************************************/
int
-NdbConnection::receiveTCKEY_FAILCONF(const TcKeyFailConf * failConf)
+NdbTransaction::receiveTCKEY_FAILCONF(const TcKeyFailConf * failConf)
{
NdbOperation* tOp;
/*
@@ -1740,6 +1796,7 @@ NdbConnection::receiveTCKEY_FAILCONF(const TcKeyFailConf * failConf)
case NdbOperation::OpenScanRequest:
case NdbOperation::OpenRangeScanRequest:
theCompletionStatus = CompletedFailure;
+ theReturnStatus = NdbTransaction::ReturnFailure;
setOperationErrorCodeAbort(4115);
tOp = NULL;
break;
@@ -1757,7 +1814,7 @@ NdbConnection::receiveTCKEY_FAILCONF(const TcKeyFailConf * failConf)
#endif
}
return -1;
-}//NdbConnection::receiveTCKEY_FAILCONF()
+}//NdbTransaction::receiveTCKEY_FAILCONF()
/*************************************************************************
int receiveTCKEY_FAILREF( NdbApiSignal* aSignal)
@@ -1769,7 +1826,7 @@ Parameters: aSignal: the signal object that contains the
Remark: Handles the reception of the TCKEY_FAILREF signal.
**************************************************************************/
int
-NdbConnection::receiveTCKEY_FAILREF(NdbApiSignal* aSignal)
+NdbTransaction::receiveTCKEY_FAILREF(NdbApiSignal* aSignal)
{
/*
Check that we are expecting signals from this transaction and
@@ -1781,18 +1838,19 @@ NdbConnection::receiveTCKEY_FAILREF(NdbApiSignal* aSignal)
We received an indication of that this transaction was aborted due to a
node failure.
*/
- if (theSendStatus == NdbConnection::sendTC_ROLLBACK) {
+ if (theSendStatus == NdbTransaction::sendTC_ROLLBACK) {
/*
We were in the process of sending a rollback anyways. We will
report it as a success.
*/
- theCompletionStatus = NdbConnection::CompletedSuccess;
+ theCompletionStatus = NdbTransaction::CompletedSuccess;
} else {
- theCompletionStatus = NdbConnection::CompletedFailure;
+ theReturnStatus = NdbTransaction::ReturnFailure;
+ theCompletionStatus = NdbTransaction::CompletedFailure;
theError.code = 4031;
}//if
theReleaseOnClose = true;
- theCommitStatus = NdbConnection::Aborted;
+ theCommitStatus = NdbTransaction::Aborted;
return 0;
} else {
#ifdef VM_TRACE
@@ -1800,7 +1858,7 @@ NdbConnection::receiveTCKEY_FAILREF(NdbApiSignal* aSignal)
#endif
}
return -1;
-}//NdbConnection::receiveTCKEY_FAILREF()
+}//NdbTransaction::receiveTCKEY_FAILREF()
/******************************************************************************
int receiveTCINDXCONF(NdbApiSignal* aSignal, Uint32 long_short_ind);
@@ -1811,7 +1869,7 @@ Parameters: aSignal: The signal object pointer.
Remark:
******************************************************************************/
int
-NdbConnection::receiveTCINDXCONF(const TcIndxConf * indxConf,
+NdbTransaction::receiveTCINDXCONF(const TcIndxConf * indxConf,
Uint32 aDataLength)
{
if(checkState_TransId(&indxConf->transId1)){
@@ -1845,8 +1903,9 @@ NdbConnection::receiveTCINDXCONF(const TcIndxConf * indxConf,
// no Commit flag set. This is clearly an anomaly.
/**********************************************************************/
theError.code = 4011;
- theCompletionStatus = NdbConnection::CompletedFailure;
- theCommitStatus = NdbConnection::Aborted;
+ theCompletionStatus = NdbTransaction::CompletedFailure;
+ theCommitStatus = NdbTransaction::Aborted;
+ theReturnStatus = NdbTransaction::ReturnFailure;
return 0;
}//if
if (tNoComp >= tNoSent) {
@@ -1860,7 +1919,7 @@ NdbConnection::receiveTCINDXCONF(const TcIndxConf * indxConf,
}
return -1;
-}//NdbConnection::receiveTCINDXCONF()
+}//NdbTransaction::receiveTCINDXCONF()
/*****************************************************************************
int receiveTCINDXREF( NdbApiSignal* aSignal)
@@ -1872,7 +1931,7 @@ Parameters: aSignal: the signal object that contains the
Remark: Handles the reception of the TCINDXREF signal.
*****************************************************************************/
int
-NdbConnection::receiveTCINDXREF( NdbApiSignal* aSignal)
+NdbTransaction::receiveTCINDXREF( NdbApiSignal* aSignal)
{
if(checkState_TransId(aSignal->getDataPtr()+1)){
theError.code = aSignal->readData(4); // Override any previous errors
@@ -1884,8 +1943,9 @@ NdbConnection::receiveTCINDXREF( NdbApiSignal* aSignal)
/* and we only need to report completion and return with the */
/* error code to the application. */
/**********************************************************************/
- theCompletionStatus = NdbConnection::CompletedFailure;
- theCommitStatus = NdbConnection::Aborted;
+ theCompletionStatus = NdbTransaction::CompletedFailure;
+ theCommitStatus = NdbTransaction::Aborted;
+ theReturnStatus = NdbTransaction::ReturnFailure;
return 0;
} else {
#ifdef NDB_NO_DROPPED_SIGNAL
@@ -1894,7 +1954,7 @@ NdbConnection::receiveTCINDXREF( NdbApiSignal* aSignal)
}
return -1;
-}//NdbConnection::receiveTCINDXREF()
+}//NdbTransaction::receiveTCINDXREF()
/*******************************************************************************
int OpCompletedFailure();
@@ -1905,12 +1965,12 @@ Parameters: aErrorCode: The error code.
Remark: An operation was completed with failure.
*******************************************************************************/
int
-NdbConnection::OpCompleteFailure(Uint8 abortOption, bool setFailure)
+NdbTransaction::OpCompleteFailure(Uint8 abortOption, bool setFailure)
{
Uint32 tNoComp = theNoOfOpCompleted;
Uint32 tNoSent = theNoOfOpSent;
if (setFailure)
- theCompletionStatus = NdbConnection::CompletedFailure;
+ theCompletionStatus = NdbTransaction::CompletedFailure;
tNoComp++;
theNoOfOpCompleted = tNoComp;
if (tNoComp == tNoSent) {
@@ -1935,7 +1995,7 @@ NdbConnection::OpCompleteFailure(Uint8 abortOption, bool setFailure)
} else {
return -1; // Continue waiting for more signals
}//if
-}//NdbConnection::OpCompleteFailure()
+}//NdbTransaction::OpCompleteFailure()
/******************************************************************************
int OpCompleteSuccess();
@@ -1945,7 +2005,7 @@ Return Value: Return 0 : OpCompleteSuccess was successful.
Remark: An operation was completed with success.
*******************************************************************************/
int
-NdbConnection::OpCompleteSuccess()
+NdbTransaction::OpCompleteSuccess()
{
Uint32 tNoComp = theNoOfOpCompleted;
Uint32 tNoSent = theNoOfOpSent;
@@ -1958,10 +2018,11 @@ NdbConnection::OpCompleteSuccess()
} else {
setOperationErrorCodeAbort(4113); // Too many operations,
// stop waiting for more
- theCompletionStatus = NdbConnection::CompletedFailure;
+ theCompletionStatus = NdbTransaction::CompletedFailure;
+ theReturnStatus = NdbTransaction::ReturnFailure;
return 0;
}//if
-}//NdbConnection::OpCompleteSuccess()
+}//NdbTransaction::OpCompleteSuccess()
/******************************************************************************
int getGCI();
@@ -1969,13 +2030,13 @@ NdbConnection::OpCompleteSuccess()
Remark: Get global checkpoint identity of the transaction
*******************************************************************************/
int
-NdbConnection::getGCI()
+NdbTransaction::getGCI()
{
- if (theCommitStatus == NdbConnection::Committed) {
+ if (theCommitStatus == NdbTransaction::Committed) {
return theGlobalCheckpointId;
}//if
return 0;
-}//NdbConnection::getGCI()
+}//NdbTransaction::getGCI()
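Since receiveTC_COMMITCONF() now records commitConf->gci, getGCI() returns the global checkpoint a committed transaction belongs to. A minimal hedged sketch, assuming the transaction already has its operations defined and the era's bare Commit enum:

#include <stdio.h>
#include <NdbApi.hpp>

// Commit a prepared transaction and report which global checkpoint it landed in.
int commit_and_report_gci(NdbTransaction* trans)
{
  if (trans->execute(Commit) == -1)
    return -1;
  if (trans->commitStatus() == NdbTransaction::Committed)
    printf("committed in GCI %d\n", trans->getGCI());  // 0 unless actually committed
  return 0;
}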
/*******************************************************************************
Uint64 getTransactionId(void);
@@ -1983,31 +2044,31 @@ Uint64 getTransactionId(void);
Remark: Get the transaction identity.
*******************************************************************************/
Uint64
-NdbConnection::getTransactionId()
+NdbTransaction::getTransactionId()
{
return theTransactionId;
-}//NdbConnection::getTransactionId()
+}//NdbTransaction::getTransactionId()
-NdbConnection::CommitStatusType
-NdbConnection::commitStatus()
+NdbTransaction::CommitStatusType
+NdbTransaction::commitStatus()
{
return theCommitStatus;
-}//NdbConnection::commitStatus()
+}//NdbTransaction::commitStatus()
int
-NdbConnection::getNdbErrorLine()
+NdbTransaction::getNdbErrorLine()
{
return theErrorLine;
}
NdbOperation*
-NdbConnection::getNdbErrorOperation()
+NdbTransaction::getNdbErrorOperation()
{
return theErrorOperation;
-}//NdbConnection::getNdbErrorOperation()
+}//NdbTransaction::getNdbErrorOperation()
const NdbOperation *
-NdbConnection::getNextCompletedOperation(const NdbOperation * current) const {
+NdbTransaction::getNextCompletedOperation(const NdbOperation * current) const {
if(current == 0)
return theCompletedFirstOp;
return current->theNext;
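getNextCompletedOperation() gives applications a way to walk every operation after execute() and check them individually, alongside the transaction-level getNdbError()/getNdbErrorOperation()/getNdbErrorLine() accessors above. A hedged reporting sketch:

#include <stdio.h>
#include <NdbApi.hpp>

// Print the transaction-level error, then inspect each completed operation in turn.
void report_transaction_result(NdbTransaction* trans)
{
  const NdbError& err = trans->getNdbError();
  if (err.code != 0)
    printf("transaction error %d: %s (error line %d)\n",
           err.code, err.message, trans->getNdbErrorLine());

  const NdbOperation* op = trans->getNextCompletedOperation(NULL);
  while (op != NULL) {
    if (op->getNdbError().code != 0)
      printf("operation error %d: %s\n",
             op->getNdbError().code, op->getNdbError().message);
    op = trans->getNextCompletedOperation(op);
  }
}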
@@ -2016,7 +2077,7 @@ NdbConnection::getNextCompletedOperation(const NdbOperation * current) const {
#ifdef VM_TRACE
#define CASE(x) case x: ndbout << " " << #x; break
void
-NdbConnection::printState()
+NdbTransaction::printState()
{
ndbout << "con=" << hex << this << dec;
ndbout << " node=" << getConnectedNodeId();
@@ -2069,7 +2130,7 @@ NdbConnection::printState()
#endif
int
-NdbConnection::report_node_failure(Uint32 id){
+NdbTransaction::report_node_failure(Uint32 id){
NdbNodeBitmask::set(m_failed_db_nodes, id);
if(!NdbNodeBitmask::get(m_db_nodes, id))
{
@@ -2088,22 +2149,28 @@ NdbConnection::report_node_failure(Uint32 id){
const Uint32 len = TcKeyConf::SimpleReadBit | id;
Uint32 tNoComp = theNoOfOpCompleted;
Uint32 tNoSent = theNoOfOpSent;
+ Uint32 count = 0;
while(tmp != 0)
{
if(tmp->theReceiver.m_expected_result_length == len &&
tmp->theReceiver.m_received_result_length == 0)
{
- tNoComp++;
+ count++;
tmp->theError.code = 4119;
}
tmp = tmp->next();
}
+ tNoComp += count;
theNoOfOpCompleted = tNoComp;
- if(tNoComp == tNoSent)
+ if(count)
{
- theError.code = 4119;
- theCompletionStatus = NdbConnection::CompletedFailure;
- return 1;
+ theReturnStatus = NdbTransaction::ReturnFailure;
+ if(tNoComp == tNoSent)
+ {
+ theError.code = 4119;
+ theCompletionStatus = NdbTransaction::CompletedFailure;
+ return 1;
+ }
}
return 0;
}
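Node-failure aborts like the 4119/4010/4031 cases above are surfaced to the application as errors it will normally retry. A hedged retry-loop sketch, assuming NdbError's status field distinguishes temporary from permanent failures, with apply_fn as a hypothetical callback that defines and executes the operations:

#include <NdbApi.hpp>

// Retry a unit of work on temporary errors; permanent errors are returned to the caller.
int run_with_retry(Ndb* myNdb, int (*apply_fn)(NdbTransaction*), int retries)
{
  while (retries-- > 0) {
    NdbTransaction* trans = myNdb->startTransaction();
    if (trans == NULL)
      return -1;
    int rc = apply_fn(trans);                      // defines operations and calls execute()
    NdbError err = trans->getNdbError();
    myNdb->closeTransaction(trans);
    if (rc == 0)
      return 0;
    if (err.status != NdbError::TemporaryError)
      return -1;                                   // permanent failure, give up
    // back off here (e.g. a short sleep) before retrying
  }
  return -1;
}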
diff --git a/ndb/src/ndbapi/NdbConnectionScan.cpp b/ndb/src/ndbapi/NdbTransactionScan.cpp
index b0c546c512a..4c507f6ab8c 100644
--- a/ndb/src/ndbapi/NdbConnectionScan.cpp
+++ b/ndb/src/ndbapi/NdbTransactionScan.cpp
@@ -15,22 +15,10 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-/*****************************************************************************
- * Name: NdbConnectionScan.cpp
- * Include:
- * Link:
- * Author: UABRONM MikaelRonström UAB/M/MT
- * QABJKAM Jonas Kamf UAB/M/MT
- * Date: 2000-06-12
- * Version: 0.1
- * Description: Interface between Application and NDB
- * Documentation:
- * Adjust: 2000-06-12 UABRONM First version.
- ****************************************************************************/
#include <ndb_global.h>
#include <Ndb.hpp>
-#include <NdbConnection.hpp>
+#include <NdbTransaction.hpp>
#include <NdbOperation.hpp>
#include <NdbScanOperation.hpp>
#include "NdbApiSignal.hpp"
@@ -52,7 +40,7 @@
*
****************************************************************************/
int
-NdbConnection::receiveSCAN_TABREF(NdbApiSignal* aSignal){
+NdbTransaction::receiveSCAN_TABREF(NdbApiSignal* aSignal){
const ScanTabRef * ref = CAST_CONSTPTR(ScanTabRef, aSignal->getDataPtr());
if(checkState_TransId(&ref->transId1)){
@@ -93,7 +81,7 @@ NdbConnection::receiveSCAN_TABREF(NdbApiSignal* aSignal){
*
*****************************************************************************/
int
-NdbConnection::receiveSCAN_TABCONF(NdbApiSignal* aSignal,
+NdbTransaction::receiveSCAN_TABCONF(NdbApiSignal* aSignal,
const Uint32 * ops, Uint32 len)
{
const ScanTabConf * conf = CAST_CONSTPTR(ScanTabConf, aSignal->getDataPtr());
diff --git a/ndb/src/ndbapi/Ndberr.cpp b/ndb/src/ndbapi/Ndberr.cpp
index a8b968da03f..b05818de6f1 100644
--- a/ndb/src/ndbapi/Ndberr.cpp
+++ b/ndb/src/ndbapi/Ndberr.cpp
@@ -19,9 +19,9 @@
#include "NdbImpl.hpp"
#include "NdbDictionaryImpl.hpp"
#include <NdbOperation.hpp>
-#include <NdbConnection.hpp>
+#include <NdbTransaction.hpp>
#include <NdbBlob.hpp>
-
+#include "NdbEventOperationImpl.hpp"
static void
update(const NdbError & _err){
@@ -55,7 +55,7 @@ NdbDictionaryImpl::getNdbError() const {
const
NdbError &
-NdbConnection::getNdbError() const {
+NdbTransaction::getNdbError() const {
update(theError);
return theError;
}
@@ -73,3 +73,10 @@ NdbBlob::getNdbError() const {
update(theError);
return theError;
}
+
+const
+NdbError &
+NdbEventOperationImpl::getNdbError() const {
+ update(m_error);
+ return m_error;
+}
diff --git a/ndb/src/ndbapi/Ndbif.cpp b/ndb/src/ndbapi/Ndbif.cpp
index 3ebba7e1c4a..bfbf98d1b3a 100644
--- a/ndb/src/ndbapi/Ndbif.cpp
+++ b/ndb/src/ndbapi/Ndbif.cpp
@@ -19,12 +19,12 @@
#include "NdbApiSignal.hpp"
#include "NdbImpl.hpp"
-#include "NdbOperation.hpp"
-#include "NdbIndexOperation.hpp"
-#include "NdbScanOperation.hpp"
-#include "NdbConnection.hpp"
-#include "NdbRecAttr.hpp"
-#include "NdbReceiver.hpp"
+#include <NdbTransaction.hpp>
+#include <NdbOperation.hpp>
+#include <NdbIndexOperation.hpp>
+#include <NdbScanOperation.hpp>
+#include <NdbRecAttr.hpp>
+#include <NdbReceiver.hpp>
#include "API.hpp"
#include <signaldata/TcCommit.hpp>
@@ -107,15 +107,13 @@ Ndb::init(int aMaxNoOfTransactions)
goto error_handler;
}
- tMaxNoOfTransactions = aMaxNoOfTransactions * 3;
- if (tMaxNoOfTransactions > 1024) {
- tMaxNoOfTransactions = 1024;
- }//if
+
+ tMaxNoOfTransactions = aMaxNoOfTransactions;
theMaxNoOfTransactions = tMaxNoOfTransactions;
-
- thePreparedTransactionsArray = new NdbConnection* [tMaxNoOfTransactions];
- theSentTransactionsArray = new NdbConnection* [tMaxNoOfTransactions];
- theCompletedTransactionsArray = new NdbConnection* [tMaxNoOfTransactions];
+ theRemainingStartTransactions= tMaxNoOfTransactions;
+ thePreparedTransactionsArray = new NdbTransaction* [tMaxNoOfTransactions];
+ theSentTransactionsArray = new NdbTransaction* [tMaxNoOfTransactions];
+ theCompletedTransactionsArray = new NdbTransaction* [tMaxNoOfTransactions];
if ((thePreparedTransactionsArray == NULL) ||
(theSentTransactionsArray == NULL) ||
@@ -263,11 +261,11 @@ Ndb::abortTransactionsAfterNodeFailure(Uint16 aNodeId)
{
Uint32 tNoSentTransactions = theNoOfSentTransactions;
for (int i = tNoSentTransactions - 1; i >= 0; i--) {
- NdbConnection* localCon = theSentTransactionsArray[i];
+ NdbTransaction* localCon = theSentTransactionsArray[i];
if (localCon->getConnectedNodeId() == aNodeId) {
- const NdbConnection::SendStatusType sendStatus = localCon->theSendStatus;
- if (sendStatus == NdbConnection::sendTC_OP ||
- sendStatus == NdbConnection::sendTC_COMMIT) {
+ const NdbTransaction::SendStatusType sendStatus = localCon->theSendStatus;
+ if (sendStatus == NdbTransaction::sendTC_OP ||
+ sendStatus == NdbTransaction::sendTC_COMMIT) {
/*
A transaction was interrupted in the prepare phase by a node
failure. Since the transaction was not found in the phase
@@ -275,13 +273,13 @@ Ndb::abortTransactionsAfterNodeFailure(Uint16 aNodeId)
we report a normal node failure abort.
*/
localCon->setOperationErrorCodeAbort(4010);
- localCon->theCompletionStatus = NdbConnection::CompletedFailure;
- } else if (sendStatus == NdbConnection::sendTC_ROLLBACK) {
+ localCon->theCompletionStatus = NdbTransaction::CompletedFailure;
+ } else if (sendStatus == NdbTransaction::sendTC_ROLLBACK) {
/*
We aimed for abort and abort we got even if it was by a node
failure. We will thus report it as a success.
*/
- localCon->theCompletionStatus = NdbConnection::CompletedSuccess;
+ localCon->theCompletionStatus = NdbTransaction::CompletedSuccess;
} else {
#ifdef VM_TRACE
printState("abortTransactionsAfterNodeFailure %x", this);
@@ -293,7 +291,8 @@ Ndb::abortTransactionsAfterNodeFailure(Uint16 aNodeId)
intact since the node was failing and they were aborted. Thus we
set commit state to Aborted and set state to release on close.
*/
- localCon->theCommitStatus = NdbConnection::Aborted;
+ localCon->theReturnStatus = NdbTransaction::ReturnFailure;
+ localCon->theCommitStatus = NdbTransaction::Aborted;
localCon->theReleaseOnClose = true;
completedTransaction(localCon);
}
@@ -316,7 +315,7 @@ Ndb::handleReceivedSignal(NdbApiSignal* aSignal, LinearSectionPtr ptr[3])
{
NdbOperation* tOp;
NdbIndexOperation* tIndexOp;
- NdbConnection* tCon;
+ NdbTransaction* tCon;
int tReturnCode = -1;
const Uint32* tDataPtr = aSignal->getDataPtr();
const Uint32 tWaitState = theImpl->theWaiter.m_state;
@@ -346,14 +345,14 @@ Ndb::handleReceivedSignal(NdbApiSignal* aSignal, LinearSectionPtr ptr[3])
tCon = void2con(tFirstDataPtr);
if ((tCon->checkMagicNumber() == 0) &&
- (tCon->theSendStatus == NdbConnection::sendTC_OP)) {
+ (tCon->theSendStatus == NdbTransaction::sendTC_OP)) {
tReturnCode = tCon->receiveTCKEYCONF(keyConf, tLen);
if (tReturnCode != -1) {
completedTransaction(tCon);
}//if
if(TcKeyConf::getMarkerFlag(keyConf->confInfo)){
- NdbConnection::sendTC_COMMIT_ACK(theCommitAckSignal,
+ NdbTransaction::sendTC_COMMIT_ACK(theCommitAckSignal,
keyConf->transId1,
keyConf->transId2,
aTCRef);
@@ -378,24 +377,24 @@ Ndb::handleReceivedSignal(NdbApiSignal* aSignal, LinearSectionPtr ptr[3])
com = tRec->execTRANSID_AI(tDataPtr + TransIdAI::HeaderLength,
tLen - TransIdAI::HeaderLength);
}
+
+ if(com == 0)
+ return;
- if(com == 1){
- switch(tRec->getType()){
- case NdbReceiver::NDB_OPERATION:
- case NdbReceiver::NDB_INDEX_OPERATION:
- if(tCon->OpCompleteSuccess() != -1){
- completedTransaction(tCon);
- return;
- }
- break;
- case NdbReceiver::NDB_SCANRECEIVER:
- tCon->theScanningOp->receiver_delivered(tRec);
- theImpl->theWaiter.m_state = (((WaitSignalType) tWaitState) == WAIT_SCAN ?
- (Uint32) NO_WAIT : tWaitState);
- break;
- default:
- goto InvalidSignal;
+ switch(tRec->getType()){
+ case NdbReceiver::NDB_OPERATION:
+ case NdbReceiver::NDB_INDEX_OPERATION:
+ if(tCon->OpCompleteSuccess() != -1){
+ completedTransaction(tCon);
}
+ return;
+ case NdbReceiver::NDB_SCANRECEIVER:
+ tCon->theScanningOp->receiver_delivered(tRec);
+ theImpl->theWaiter.m_state = (((WaitSignalType) tWaitState) == WAIT_SCAN ?
+ (Uint32) NO_WAIT : tWaitState);
+ break;
+ default:
+ goto InvalidSignal;
}
break;
} else {
@@ -417,8 +416,8 @@ Ndb::handleReceivedSignal(NdbApiSignal* aSignal, LinearSectionPtr ptr[3])
if (tOp->checkMagicNumber(false) == 0) {
tCon = tOp->theNdbCon;
if (tCon != NULL) {
- if ((tCon->theSendStatus == NdbConnection::sendTC_OP) ||
- (tCon->theSendStatus == NdbConnection::sendTC_COMMIT)) {
+ if ((tCon->theSendStatus == NdbTransaction::sendTC_OP) ||
+ (tCon->theSendStatus == NdbTransaction::sendTC_COMMIT)) {
tReturnCode = tCon->receiveTCKEY_FAILCONF(failConf);
if (tReturnCode != -1) {
completedTransaction(tCon);
@@ -432,7 +431,7 @@ Ndb::handleReceivedSignal(NdbApiSignal* aSignal, LinearSectionPtr ptr[3])
#endif
}
if(tFirstData & 1){
- NdbConnection::sendTC_COMMIT_ACK(theCommitAckSignal,
+ NdbTransaction::sendTC_COMMIT_ACK(theCommitAckSignal,
failConf->transId1,
failConf->transId2,
aTCRef);
@@ -447,8 +446,8 @@ Ndb::handleReceivedSignal(NdbApiSignal* aSignal, LinearSectionPtr ptr[3])
if (tOp->checkMagicNumber(false) == 0) {
tCon = tOp->theNdbCon;
if (tCon != NULL) {
- if ((tCon->theSendStatus == NdbConnection::sendTC_OP) ||
- (tCon->theSendStatus == NdbConnection::sendTC_ROLLBACK)) {
+ if ((tCon->theSendStatus == NdbTransaction::sendTC_OP) ||
+ (tCon->theSendStatus == NdbTransaction::sendTC_ROLLBACK)) {
tReturnCode = tCon->receiveTCKEY_FAILREF(aSignal);
if (tReturnCode != -1) {
completedTransaction(tCon);
@@ -473,7 +472,7 @@ Ndb::handleReceivedSignal(NdbApiSignal* aSignal, LinearSectionPtr ptr[3])
if (tOp->checkMagicNumber() == 0) {
tCon = tOp->theNdbCon;
if (tCon != NULL) {
- if (tCon->theSendStatus == NdbConnection::sendTC_OP) {
+ if (tCon->theSendStatus == NdbTransaction::sendTC_OP) {
tReturnCode = tOp->receiveTCKEYREF(aSignal);
if (tReturnCode != -1) {
completedTransaction(tCon);
@@ -496,14 +495,14 @@ Ndb::handleReceivedSignal(NdbApiSignal* aSignal, LinearSectionPtr ptr[3])
tCon = void2con(tFirstDataPtr);
if ((tCon->checkMagicNumber() == 0) &&
- (tCon->theSendStatus == NdbConnection::sendTC_COMMIT)) {
+ (tCon->theSendStatus == NdbTransaction::sendTC_COMMIT)) {
tReturnCode = tCon->receiveTC_COMMITCONF(commitConf);
if (tReturnCode != -1) {
completedTransaction(tCon);
}//if
if(tFirstData & 1){
- NdbConnection::sendTC_COMMIT_ACK(theCommitAckSignal,
+ NdbTransaction::sendTC_COMMIT_ACK(theCommitAckSignal,
commitConf->transId1,
commitConf->transId2,
aTCRef);
@@ -521,7 +520,7 @@ Ndb::handleReceivedSignal(NdbApiSignal* aSignal, LinearSectionPtr ptr[3])
tCon = void2con(tFirstDataPtr);
if ((tCon->checkMagicNumber() == 0) &&
- (tCon->theSendStatus == NdbConnection::sendTC_COMMIT)) {
+ (tCon->theSendStatus == NdbTransaction::sendTC_COMMIT)) {
tReturnCode = tCon->receiveTC_COMMITREF(aSignal);
if (tReturnCode != -1) {
completedTransaction(tCon);
@@ -536,7 +535,7 @@ Ndb::handleReceivedSignal(NdbApiSignal* aSignal, LinearSectionPtr ptr[3])
tCon = void2con(tFirstDataPtr);
if ((tCon->checkMagicNumber() == 0) &&
- (tCon->theSendStatus == NdbConnection::sendTC_ROLLBACK)) {
+ (tCon->theSendStatus == NdbTransaction::sendTC_ROLLBACK)) {
tReturnCode = tCon->receiveTCROLLBACKCONF(aSignal);
if (tReturnCode != -1) {
completedTransaction(tCon);
@@ -551,7 +550,7 @@ Ndb::handleReceivedSignal(NdbApiSignal* aSignal, LinearSectionPtr ptr[3])
tCon = void2con(tFirstDataPtr);
if ((tCon->checkMagicNumber() == 0) &&
- (tCon->theSendStatus == NdbConnection::sendTC_ROLLBACK)) {
+ (tCon->theSendStatus == NdbTransaction::sendTC_ROLLBACK)) {
tReturnCode = tCon->receiveTCROLLBACKREF(aSignal);
if (tReturnCode != -1) {
completedTransaction(tCon);
@@ -788,7 +787,7 @@ Ndb::handleReceivedSignal(NdbApiSignal* aSignal, LinearSectionPtr ptr[3])
const BlockReference aTCRef = aSignal->theSendersBlockRef;
tCon = void2con(tFirstDataPtr);
if ((tCon->checkMagicNumber() == 0) &&
- (tCon->theSendStatus == NdbConnection::sendTC_OP)) {
+ (tCon->theSendStatus == NdbTransaction::sendTC_OP)) {
tReturnCode = tCon->receiveTCINDXCONF(indxConf, tLen);
if (tReturnCode != -1) {
completedTransaction(tCon);
@@ -796,7 +795,7 @@ Ndb::handleReceivedSignal(NdbApiSignal* aSignal, LinearSectionPtr ptr[3])
}//if
if(TcIndxConf::getMarkerFlag(indxConf->confInfo)){
- NdbConnection::sendTC_COMMIT_ACK(theCommitAckSignal,
+ NdbTransaction::sendTC_COMMIT_ACK(theCommitAckSignal,
indxConf->transId1,
indxConf->transId2,
aTCRef);
@@ -811,7 +810,7 @@ Ndb::handleReceivedSignal(NdbApiSignal* aSignal, LinearSectionPtr ptr[3])
if (tIndexOp->checkMagicNumber() == 0) {
tCon = tIndexOp->theNdbCon;
if (tCon != NULL) {
- if (tCon->theSendStatus == NdbConnection::sendTC_OP) {
+ if (tCon->theSendStatus == NdbTransaction::sendTC_OP) {
tReturnCode = tIndexOp->receiveTCINDXREF(aSignal);
if (tReturnCode != -1) {
completedTransaction(tCon);
@@ -852,7 +851,7 @@ Ndb::handleReceivedSignal(NdbApiSignal* aSignal, LinearSectionPtr ptr[3])
/*****************************************************************************
-void completedTransaction(NdbConnection* aCon);
+void completedTransaction(NdbTransaction* aCon);
Remark: One transaction has been completed.
Remove it from send array and put it into the completed
@@ -860,14 +859,14 @@ Remark: One transaction has been completed.
up a poller.
******************************************************************************/
void
-Ndb::completedTransaction(NdbConnection* aCon)
+Ndb::completedTransaction(NdbTransaction* aCon)
{
Uint32 tTransArrayIndex = aCon->theTransArrayIndex;
Uint32 tNoSentTransactions = theNoOfSentTransactions;
Uint32 tNoCompletedTransactions = theNoOfCompletedTransactions;
- if ((tNoSentTransactions > 0) && (aCon->theListState == NdbConnection::InSendList) &&
+ if ((tNoSentTransactions > 0) && (aCon->theListState == NdbTransaction::InSendList) &&
(tTransArrayIndex < tNoSentTransactions)) {
- NdbConnection* tMoveCon = theSentTransactionsArray[tNoSentTransactions - 1];
+ NdbTransaction* tMoveCon = theSentTransactionsArray[tNoSentTransactions - 1];
theCompletedTransactionsArray[tNoCompletedTransactions] = aCon;
aCon->theTransArrayIndex = tNoCompletedTransactions;
@@ -879,7 +878,7 @@ Ndb::completedTransaction(NdbConnection* aCon)
theNoOfCompletedTransactions = tNoCompletedTransactions + 1;
theNoOfSentTransactions = tNoSentTransactions - 1;
- aCon->theListState = NdbConnection::InCompletedList;
+ aCon->theListState = NdbTransaction::InCompletedList;
aCon->handleExecuteCompletion();
if ((theMinNoOfEventsToWakeUp != 0) &&
(theNoOfCompletedTransactions >= theMinNoOfEventsToWakeUp)) {
@@ -900,12 +899,12 @@ Ndb::completedTransaction(NdbConnection* aCon)
}//Ndb::completedTransaction()
/*****************************************************************************
-void reportCallback(NdbConnection** aCopyArray, Uint32 aNoOfCompletedTrans);
+void reportCallback(NdbTransaction** aCopyArray, Uint32 aNoOfCompletedTrans);
Remark: Call the callback methods of the completed transactions.
******************************************************************************/
void
-Ndb::reportCallback(NdbConnection** aCopyArray, Uint32 aNoOfCompletedTrans)
+Ndb::reportCallback(NdbTransaction** aCopyArray, Uint32 aNoOfCompletedTrans)
{
Uint32 i;
if (aNoOfCompletedTrans > 0) {
@@ -914,7 +913,7 @@ Ndb::reportCallback(NdbConnection** aCopyArray, Uint32 aNoOfCompletedTrans)
NdbAsynchCallback aCallback = aCopyArray[i]->theCallbackFunction;
int tResult = 0;
if (aCallback != NULL) {
- if (aCopyArray[i]->theReturnStatus == NdbConnection::ReturnFailure) {
+ if (aCopyArray[i]->theReturnStatus == NdbTransaction::ReturnFailure) {
tResult = -1;
}//if
(*aCallback)(tResult, aCopyArray[i], anyObject);
@@ -924,13 +923,13 @@ Ndb::reportCallback(NdbConnection** aCopyArray, Uint32 aNoOfCompletedTrans)
}//Ndb::reportCallback()
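With the rename, the asynchronous callback now receives an NdbTransaction*; reportCallback() above passes -1 exactly when theReturnStatus is ReturnFailure. A hedged sketch of the asynchronous path (table and column names are assumptions, single transaction for brevity):

#include <stdio.h>
#include <NdbApi.hpp>

// Callback invoked from sendPollNdb()/pollNdb(); result is 0 or -1 as set in reportCallback().
static void insert_done(int result, NdbTransaction* trans, void* /*arg*/)
{
  if (result == -1)
    printf("async insert failed: %d\n", trans->getNdbError().code);
}

int insert_async(Ndb* myNdb, int key, int val)
{
  NdbTransaction* trans = myNdb->startTransaction();
  if (trans == NULL)
    return -1;

  NdbOperation* op = trans->getNdbOperation("MY_TABLE");
  if (op == NULL) {
    myNdb->closeTransaction(trans);
    return -1;
  }
  op->insertTuple();
  op->equal("KEY", key);
  op->setValue("VAL", val);

  trans->executeAsynchPrepare(Commit, insert_done, NULL);
  myNdb->sendPollNdb(3000, 1, 0);                  // send prepared transactions, wait for completion
  myNdb->closeTransaction(trans);
  return 0;
}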
/*****************************************************************************
-Uint32 pollCompleted(NdbConnection** aCopyArray);
+Uint32 pollCompleted(NdbTransaction** aCopyArray);
Remark: Transfer the data from the completed transaction to a local array.
This support is used by a number of the poll-methods.
******************************************************************************/
Uint32
-Ndb::pollCompleted(NdbConnection** aCopyArray)
+Ndb::pollCompleted(NdbTransaction** aCopyArray)
{
check_send_timeout();
Uint32 i;
@@ -938,13 +937,13 @@ Ndb::pollCompleted(NdbConnection** aCopyArray)
if (tNoCompletedTransactions > 0) {
for (i = 0; i < tNoCompletedTransactions; i++) {
aCopyArray[i] = theCompletedTransactionsArray[i];
- if (aCopyArray[i]->theListState != NdbConnection::InCompletedList) {
+ if (aCopyArray[i]->theListState != NdbTransaction::InCompletedList) {
ndbout << "pollCompleted error ";
ndbout << (int) aCopyArray[i]->theListState << endl;
abort();
}//if
theCompletedTransactionsArray[i] = NULL;
- aCopyArray[i]->theListState = NdbConnection::NotInList;
+ aCopyArray[i]->theListState = NdbTransaction::NotInList;
}//for
}//if
theNoOfCompletedTransactions = 0;
@@ -959,7 +958,7 @@ Ndb::check_send_timeout()
the_last_check_time = current_time;
Uint32 no_of_sent = theNoOfSentTransactions;
for (Uint32 i = 0; i < no_of_sent; i++) {
- NdbConnection* a_con = theSentTransactionsArray[i];
+ NdbTransaction* a_con = theSentTransactionsArray[i];
if ((current_time - a_con->theStartTransTime) >
WAITFOR_RESPONSE_TIMEOUT) {
#ifdef VM_TRACE
@@ -970,8 +969,8 @@ Ndb::check_send_timeout()
abort();
#endif
a_con->setOperationErrorCodeAbort(4012);
- a_con->theCommitStatus = NdbConnection::Aborted;
- a_con->theCompletionStatus = NdbConnection::CompletedFailure;
+ a_con->theCommitStatus = NdbTransaction::Aborted;
+ a_con->theCompletionStatus = NdbTransaction::CompletedFailure;
a_con->handleExecuteCompletion();
remove_sent_list(i);
insert_completed_list(a_con);
@@ -987,7 +986,7 @@ Ndb::remove_sent_list(Uint32 list_index)
{
Uint32 last_index = theNoOfSentTransactions - 1;
if (list_index < last_index) {
- NdbConnection* t_con = theSentTransactionsArray[last_index];
+ NdbTransaction* t_con = theSentTransactionsArray[last_index];
theSentTransactionsArray[list_index] = t_con;
}//if
theNoOfSentTransactions = last_index;
@@ -995,23 +994,23 @@ Ndb::remove_sent_list(Uint32 list_index)
}
Uint32
-Ndb::insert_completed_list(NdbConnection* a_con)
+Ndb::insert_completed_list(NdbTransaction* a_con)
{
Uint32 no_of_comp = theNoOfCompletedTransactions;
theCompletedTransactionsArray[no_of_comp] = a_con;
theNoOfCompletedTransactions = no_of_comp + 1;
- a_con->theListState = NdbConnection::InCompletedList;
+ a_con->theListState = NdbTransaction::InCompletedList;
a_con->theTransArrayIndex = no_of_comp;
return no_of_comp;
}
Uint32
-Ndb::insert_sent_list(NdbConnection* a_con)
+Ndb::insert_sent_list(NdbTransaction* a_con)
{
Uint32 no_of_sent = theNoOfSentTransactions;
theSentTransactionsArray[no_of_sent] = a_con;
theNoOfSentTransactions = no_of_sent + 1;
- a_con->theListState = NdbConnection::InSendList;
+ a_con->theListState = NdbTransaction::InSendList;
a_con->theTransArrayIndex = no_of_sent;
return no_of_sent;
}
@@ -1043,16 +1042,16 @@ Ndb::sendPrepTrans(int forceSend)
TransporterFacade* tp = TransporterFacade::instance();
Uint32 no_of_prep_trans = theNoOfPreparedTransactions;
for (i = 0; i < no_of_prep_trans; i++) {
- NdbConnection * a_con = thePreparedTransactionsArray[i];
+ NdbTransaction * a_con = thePreparedTransactionsArray[i];
thePreparedTransactionsArray[i] = NULL;
Uint32 node_id = a_con->getConnectedNodeId();
if ((tp->getNodeSequence(node_id) == a_con->theNodeSequence) &&
tp->get_node_alive(node_id) ||
(tp->get_node_stopping(node_id) &&
- ((a_con->theSendStatus == NdbConnection::sendABORT) ||
- (a_con->theSendStatus == NdbConnection::sendABORTfail) ||
- (a_con->theSendStatus == NdbConnection::sendCOMMITstate) ||
- (a_con->theSendStatus == NdbConnection::sendCompleted)))) {
+ ((a_con->theSendStatus == NdbTransaction::sendABORT) ||
+ (a_con->theSendStatus == NdbTransaction::sendABORTfail) ||
+ (a_con->theSendStatus == NdbTransaction::sendCOMMITstate) ||
+ (a_con->theSendStatus == NdbTransaction::sendCompleted)))) {
/*
We will send if
1) Node is alive and sequences are correct OR
@@ -1084,13 +1083,13 @@ Ndb::sendPrepTrans(int forceSend)
again and will thus set the state to Aborted to avoid a more or
less eternal loop of tries.
*/
- if (a_con->theSendStatus == NdbConnection::sendOperations) {
+ if (a_con->theSendStatus == NdbTransaction::sendOperations) {
a_con->setOperationErrorCodeAbort(4021);
- a_con->theCommitStatus = NdbConnection::NeedAbort;
+ a_con->theCommitStatus = NdbTransaction::NeedAbort;
TRACE_DEBUG("Send buffer full and sendOperations");
} else {
a_con->setOperationErrorCodeAbort(4026);
- a_con->theCommitStatus = NdbConnection::Aborted;
+ a_con->theCommitStatus = NdbTransaction::Aborted;
TRACE_DEBUG("Send buffer full, set state to Aborted");
}//if
}//if
@@ -1107,7 +1106,7 @@ Ndb::sendPrepTrans(int forceSend)
*/
TRACE_DEBUG("Abort a transaction when stopping a node");
a_con->setOperationErrorCodeAbort(4023);
- a_con->theCommitStatus = NdbConnection::NeedAbort;
+ a_con->theCommitStatus = NdbTransaction::NeedAbort;
} else {
/*
The node is hard dead and we cannot continue. We will also release
@@ -1117,10 +1116,11 @@ Ndb::sendPrepTrans(int forceSend)
a_con->setOperationErrorCodeAbort(4025);
a_con->theReleaseOnClose = true;
a_con->theTransactionIsStarted = false;
- a_con->theCommitStatus = NdbConnection::Aborted;
+ a_con->theCommitStatus = NdbTransaction::Aborted;
}//if
}//if
- a_con->theCompletionStatus = NdbConnection::CompletedFailure;
+ a_con->theReturnStatus = NdbTransaction::ReturnFailure;
+ a_con->theCompletionStatus = NdbTransaction::CompletedFailure;
a_con->handleExecuteCompletion();
insert_completed_list(a_con);
}//for
@@ -1194,7 +1194,7 @@ Remark: First send all prepared operations and then check if there are any
int
Ndb::sendPollNdb(int aMillisecondNumber, int minNoOfEventsToWakeup, int forceSend)
{
- NdbConnection* tConArray[1024];
+ NdbTransaction* tConArray[1024];
Uint32 tNoCompletedTransactions;
//theCurrentConnectCounter = 0;
@@ -1227,7 +1227,7 @@ Remark: Check if there are any transactions already completed. Wait for not
int
Ndb::pollNdb(int aMillisecondNumber, int minNoOfEventsToWakeup)
{
- NdbConnection* tConArray[1024];
+ NdbTransaction* tConArray[1024];
Uint32 tNoCompletedTransactions;
//theCurrentConnectCounter = 0;
@@ -1334,7 +1334,7 @@ Ndb::sendRecSignal(Uint16 node_id,
}//Ndb::sendRecSignal()
void
-NdbConnection::sendTC_COMMIT_ACK(NdbApiSignal * aSignal,
+NdbTransaction::sendTC_COMMIT_ACK(NdbApiSignal * aSignal,
Uint32 transId1, Uint32 transId2,
Uint32 aTCRef){
#ifdef MARKER_TRACE
diff --git a/ndb/src/ndbapi/Ndbinit.cpp b/ndb/src/ndbapi/Ndbinit.cpp
index 59a6a825be4..d4ab30aec4e 100644
--- a/ndb/src/ndbapi/Ndbinit.cpp
+++ b/ndb/src/ndbapi/Ndbinit.cpp
@@ -19,12 +19,12 @@
#include "NdbApiSignal.hpp"
#include "NdbImpl.hpp"
-#include "NdbOperation.hpp"
-#include "NdbConnection.hpp"
-#include "NdbRecAttr.hpp"
-#include "IPCConfig.hpp"
+#include <NdbOperation.hpp>
+#include <NdbTransaction.hpp>
+#include <NdbRecAttr.hpp>
+#include <IPCConfig.hpp>
#include "TransporterFacade.hpp"
-#include "ConfigRetriever.hpp"
+#include <ConfigRetriever.hpp>
#include <ndb_limits.h>
#include <NdbOut.hpp>
#include <NdbSleep.h>
@@ -38,48 +38,12 @@ class NdbGlobalEventBufferHandle;
NdbGlobalEventBufferHandle *NdbGlobalEventBuffer_init(int);
void NdbGlobalEventBuffer_drop(NdbGlobalEventBufferHandle *);
-/**
- * Static object for NDB
- */
-
-// only needed for backwards compatability, before ndb_cluster_connection
-static char *ndbConnectString = 0;
-static int theNoOfNdbObjects = 0;
-static Ndb_cluster_connection *global_ndb_cluster_connection= 0;
-
-
-/***************************************************************************
-Ndb(const char* aDataBase);
-
-Parameters: aDataBase : Name of the database.
-Remark: Connect to the database.
-***************************************************************************/
-Ndb::Ndb( const char* aDataBase , const char* aSchema)
- : theImpl(NULL)
-{
- DBUG_ENTER("Ndb::Ndb()");
- DBUG_PRINT("enter",("(old)Ndb::Ndb this=0x%x", this));
- if (theNoOfNdbObjects < 0)
- abort(); // old and new Ndb constructor used mixed
- theNoOfNdbObjects++;
- if (global_ndb_cluster_connection == 0) {
- global_ndb_cluster_connection= new Ndb_cluster_connection(ndbConnectString);
- global_ndb_cluster_connection->connect(12,5,1);
- }
- setup(global_ndb_cluster_connection, aDataBase, aSchema);
- DBUG_VOID_RETURN;
-}
-
Ndb::Ndb( Ndb_cluster_connection *ndb_cluster_connection,
const char* aDataBase , const char* aSchema)
: theImpl(NULL)
{
DBUG_ENTER("Ndb::Ndb()");
DBUG_PRINT("enter",("Ndb::Ndb this=0x%x", this));
- if (global_ndb_cluster_connection != 0 &&
- global_ndb_cluster_connection != ndb_cluster_connection)
- abort(); // old and new Ndb constructor used mixed
- theNoOfNdbObjects= -1;
setup(ndb_cluster_connection, aDataBase, aSchema);
DBUG_VOID_RETURN;
}
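With the old Ndb(const char*) constructor and the implicit global Ndb_cluster_connection gone, the application owns the connection object and passes it to every Ndb it creates; init() now takes the requested transaction count at face value instead of capping it at 1024. The connect string, database name and counts below are assumptions.

#include <NdbApi.hpp>

// Create the cluster connection, wait for the data nodes, then create and init an Ndb object.
int setup(Ndb_cluster_connection*& out_conn, Ndb*& out_ndb)
{
  out_conn = new Ndb_cluster_connection("localhost:1186");
  if (out_conn->connect(4 /* retries */, 5 /* delay in s */, 1 /* verbose */) != 0)
    return -1;
  if (out_conn->wait_until_ready(30, 0) < 0)
    return -1;

  out_ndb = new Ndb(out_conn, "TEST_DB");
  if (out_ndb->init(1024) == -1)                   // no longer silently clamped to 1024
    return -1;
  return 0;
}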
@@ -99,10 +63,9 @@ void Ndb::setup(Ndb_cluster_connection *ndb_cluster_connection,
theNoOfPreparedTransactions= 0;
theNoOfSentTransactions= 0;
theNoOfCompletedTransactions= 0;
- theNoOfAllocatedTransactions= 0;
+ theRemainingStartTransactions= 0;
theMaxNoOfTransactions= 0;
theMinNoOfEventsToWakeUp= 0;
- prefixEnd= NULL;
theTransactionList= NULL;
theConnectionArray= NULL;
the_last_check_time= 0;
@@ -137,17 +100,10 @@ void Ndb::setup(Ndb_cluster_connection *ndb_cluster_connection,
theFirstTupleId[i] = 0;
theLastTupleId[i] = 0;
}//for
-
- BaseString::snprintf(theDataBase, sizeof(theDataBase), "%s",
- aDataBase ? aDataBase : "");
- BaseString::snprintf(theDataBaseSchema, sizeof(theDataBaseSchema), "%s",
- aSchema ? aSchema : "");
- int len = BaseString::snprintf(prefixName, sizeof(prefixName), "%s%c%s%c",
- theDataBase, table_name_separator,
- theDataBaseSchema, table_name_separator);
- prefixEnd = prefixName + (len < (int) sizeof(prefixName) ? len :
- sizeof(prefixName) - 1);
+ theImpl->m_dbname.assign(aDataBase);
+ theImpl->m_schemaname.assign(aSchema);
+ theImpl->update_prefix();
theImpl->theWaiter.m_mutex = TransporterFacade::instance()->theMutexPtr;
@@ -169,16 +125,6 @@ void Ndb::setup(Ndb_cluster_connection *ndb_cluster_connection,
}
-void Ndb::setConnectString(const char * connectString)
-{
- if (ndbConnectString != 0) {
- free(ndbConnectString);
- ndbConnectString = 0;
- }
- if (connectString)
- ndbConnectString = strdup(connectString);
-}
-
/*****************************************************************************
* ~Ndb();
*
@@ -206,19 +152,6 @@ Ndb::~Ndb()
delete theImpl;
- /**
- * This needs to be put after delete theImpl
- * as TransporterFacade::instance is delete by global_ndb_cluster_connection
- * and used by theImpl
- */
- if (global_ndb_cluster_connection != 0) {
- theNoOfNdbObjects--;
- if(theNoOfNdbObjects == 0){
- delete global_ndb_cluster_connection;
- global_ndb_cluster_connection= 0;
- }
- }//if
-
/**
* This sleep is to make sure that the transporter
* send thread will come in and send any
diff --git a/ndb/src/ndbapi/Ndblist.cpp b/ndb/src/ndbapi/Ndblist.cpp
index 3001561a73a..f82348fc91d 100644
--- a/ndb/src/ndbapi/Ndblist.cpp
+++ b/ndb/src/ndbapi/Ndblist.cpp
@@ -19,7 +19,6 @@
#include <NdbOperation.hpp>
#include <NdbIndexOperation.hpp>
#include <NdbIndexScanOperation.hpp>
-#include <NdbConnection.hpp>
#include "NdbApiSignal.hpp"
#include <NdbRecAttr.hpp>
#include "NdbUtil.hpp"
@@ -51,10 +50,10 @@ Ndb::checkFailedNode()
/**
* Release all connections in idle list (for node)
*/
- NdbConnection * tNdbCon = theConnectionArray[node_id];
+ NdbTransaction * tNdbCon = theConnectionArray[node_id];
theConnectionArray[node_id] = NULL;
while (tNdbCon != NULL) {
- NdbConnection* tempNdbCon = tNdbCon;
+ NdbTransaction* tempNdbCon = tNdbCon;
tNdbCon = tNdbCon->next();
releaseNdbCon(tempNdbCon);
}
@@ -71,7 +70,7 @@ Ndb::checkFailedNode()
* if createConIdleList was successful
* Return -1: In all other case.
* Parameters: aNrOfCon : Number of connections offered to the application.
- * Remark: Create connection idlelist with NdbConnection objects.
+ * Remark: Create connection idlelist with NdbTransaction objects.
***************************************************************************/
int
Ndb::createConIdleList(int aNrOfCon)
@@ -123,23 +122,16 @@ Ndb::getNdbCall()
}
/***************************************************************************
- * NdbConnection* getNdbCon();
+ * NdbTransaction* getNdbCon();
*
* Return Value: Return a connection if the getNdbCon was successful.
* Return NULL : In all other case.
 * Remark:         Get a connection from theConIdleList and return the object.
***************************************************************************/
-NdbConnection*
+NdbTransaction*
Ndb::getNdbCon()
{
- NdbConnection* tNdbCon = theImpl->theConIdleList.seize(this);
- if (unlikely(theImpl->theConIdleList.m_alloc_cnt > theMaxNoOfTransactions))
- {
- theImpl->theConIdleList.release(tNdbCon);
- ndbout << "theNoOfAllocatedTransactions = " << theNoOfAllocatedTransactions << " theMaxNoOfTransactions = " << theMaxNoOfTransactions << endl;
- return NULL;
- }//if
-
+ NdbTransaction* tNdbCon = theImpl->theConIdleList.seize(this);
tNdbCon->theMagicNumber = 0x37412619;
return tNdbCon;
}
@@ -290,13 +282,13 @@ Ndb::releaseNdbCall(NdbCall* aNdbCall)
}
/***************************************************************************
-void releaseNdbCon(NdbConnection* aNdbCon);
+void releaseNdbCon(NdbTransaction* aNdbCon);
-Parameters: aNdbCon: The NdbConnection object.
+Parameters: aNdbCon: The NdbTransaction object.
Remark:         Add a connection object into the connection idlelist.
***************************************************************************/
void
-Ndb::releaseNdbCon(NdbConnection* aNdbCon)
+Ndb::releaseNdbCon(NdbTransaction* aNdbCon)
{
aNdbCon->theMagicNumber = 0xFE11DD;
theImpl->theConIdleList.release(aNdbCon);
@@ -368,9 +360,20 @@ Remark: Add a NdbScanOperation object into the signal idlelist.
void
Ndb::releaseScanOperation(NdbIndexScanOperation* aScanOperation)
{
+ DBUG_ENTER("Ndb::releaseScanOperation");
+ DBUG_PRINT("enter", ("op=%x", (UintPtr)aScanOperation));
+#ifdef ndb_release_check_dup
+ { NdbIndexScanOperation* tOp = theScanOpIdleList;
+ while (tOp != NULL) {
+ assert(tOp != aScanOperation);
+ tOp = (NdbIndexScanOperation*)tOp->theNext;
+ }
+ }
+#endif
aScanOperation->theNdbCon = NULL;
aScanOperation->theMagicNumber = 0xFE11D2;
theImpl->theScanOpIdleList.release(aScanOperation);
+ DBUG_VOID_RETURN;
}
/***************************************************************************
@@ -426,18 +429,19 @@ Ndb::releaseSignalsInList(NdbApiSignal** pList){
void
Ndb::releaseNdbBlob(NdbBlob* aBlob)
{
+ aBlob->release();
theImpl->theNdbBlobIdleList.release(aBlob);
}
/****************************************************************************
-int releaseConnectToNdb(NdbConnection* aConnectConnection);
+int releaseConnectToNdb(NdbTransaction* aConnectConnection);
Return Value: -1 if error
Parameters: aConnectConnection : Seized schema connection to DBTC
Remark:         Release a connection, disconnect it from DBTC, and return it to theConIdleList.
*****************************************************************************/
void
-Ndb::releaseConnectToNdb(NdbConnection* a_con)
+Ndb::releaseConnectToNdb(NdbTransaction* a_con)
{
DBUG_ENTER("Ndb::releaseConnectToNdb");
NdbApiSignal tSignal(theMyRef);
@@ -455,7 +459,7 @@ Ndb::releaseConnectToNdb(NdbConnection* a_con)
tSignal.setData((tConPtr = a_con->getTC_ConnectPtr()), 1);
tSignal.setData(theMyRef, 2);
tSignal.setData(a_con->ptr2int(), 3);
- a_con->Status(NdbConnection::DisConnecting);
+ a_con->Status(NdbTransaction::DisConnecting);
a_con->theMagicNumber = 0x37412619;
int ret_code = sendRecSignal(node_id,
WAIT_TC_RELEASE,
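The NdbConnection to NdbTransaction rename in Ndblist.cpp is purely mechanical. A short, hedged sketch of the renamed class in ordinary use (table and column names here are made up for illustration):

    void insert_one_row(Ndb* ndb)
    {
      NdbTransaction* trans = ndb->startTransaction();       // was NdbConnection*
      if (trans == NULL)
        return;                                              // see ndb->getNdbError()

      NdbOperation* op = trans->getNdbOperation("MY_TABLE");  // hypothetical table
      if (op == NULL ||
          op->insertTuple() != 0 ||
          op->equal("PK", (Uint32)1) != 0 ||                  // hypothetical key column
          op->setValue("VAL", (Uint32)42) != 0 ||             // hypothetical value column
          trans->execute(NdbTransaction::Commit) != 0)
      {
        // inspect trans->getNdbError() here
      }
      ndb->closeTransaction(trans);
    }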
diff --git a/ndb/src/ndbapi/TransporterFacade.cpp b/ndb/src/ndbapi/TransporterFacade.cpp
index b6fb2d6cded..802e0785988 100644
--- a/ndb/src/ndbapi/TransporterFacade.cpp
+++ b/ndb/src/ndbapi/TransporterFacade.cpp
@@ -127,6 +127,10 @@ reportDisconnect(void * callbackObj, NodeId nodeId, Uint32 error){
//TransporterFacade::instance()->reportDisconnected(nodeId);
}
+void
+transporter_recv_from(void * callbackObj, NodeId nodeId){
+ ((TransporterFacade*)(callbackObj))->hb_received(nodeId);
+}
/****************************************************************************
*
@@ -529,43 +533,32 @@ TransporterFacade::init(Uint32 nodeId, const ndb_mgm_configuration* props)
iter.first();
theClusterMgr->init(iter);
- /**
- * Unless there is a "Name", the initiated transporter is within
- * an NDB Cluster. (If "Name" is defined, then the transporter
- * is used to connect to a different system, i.e. NDB Cluster.)
- */
-#if 0
- if (!props->contains("Name")) {
-#endif
- iter.first();
- if(iter.find(CFG_NODE_ID, nodeId)){
- TRP_DEBUG( "Node info missing from config." );
- DBUG_RETURN(false);
- }
-
- Uint32 rank = 0;
- if(!iter.get(CFG_NODE_ARBIT_RANK, &rank) && rank>0){
- theArbitMgr = new ArbitMgr(* this);
- theArbitMgr->setRank(rank);
- Uint32 delay = 0;
- iter.get(CFG_NODE_ARBIT_DELAY, &delay);
- theArbitMgr->setDelay(delay);
- }
- Uint32 scan_batch_size= 0;
- if (!iter.get(CFG_MAX_SCAN_BATCH_SIZE, &scan_batch_size)) {
- m_scan_batch_size= scan_batch_size;
- }
- Uint32 batch_byte_size= 0;
- if (!iter.get(CFG_BATCH_BYTE_SIZE, &batch_byte_size)) {
- m_batch_byte_size= batch_byte_size;
- }
- Uint32 batch_size= 0;
- if (!iter.get(CFG_BATCH_SIZE, &batch_size)) {
- m_batch_size= batch_size;
- }
-#if 0
+ iter.first();
+ if(iter.find(CFG_NODE_ID, nodeId)){
+ TRP_DEBUG( "Node info missing from config." );
+ DBUG_RETURN(false);
+ }
+
+ Uint32 rank = 0;
+ if(!iter.get(CFG_NODE_ARBIT_RANK, &rank) && rank>0){
+ theArbitMgr = new ArbitMgr(* this);
+ theArbitMgr->setRank(rank);
+ Uint32 delay = 0;
+ iter.get(CFG_NODE_ARBIT_DELAY, &delay);
+ theArbitMgr->setDelay(delay);
+ }
+ Uint32 scan_batch_size= 0;
+ if (!iter.get(CFG_MAX_SCAN_BATCH_SIZE, &scan_batch_size)) {
+ m_scan_batch_size= scan_batch_size;
+ }
+ Uint32 batch_byte_size= 0;
+ if (!iter.get(CFG_BATCH_BYTE_SIZE, &batch_byte_size)) {
+ m_batch_byte_size= batch_byte_size;
+ }
+ Uint32 batch_size= 0;
+ if (!iter.get(CFG_BATCH_SIZE, &batch_size)) {
+ m_batch_size= batch_size;
}
-#endif
if (!theTransporterRegistry->start_service(m_socket_server)){
ndbout_c("Unable to start theTransporterRegistry->start_service");
diff --git a/ndb/src/ndbapi/TransporterFacade.hpp b/ndb/src/ndbapi/TransporterFacade.hpp
index 99edea846c1..fa070889dd9 100644
--- a/ndb/src/ndbapi/TransporterFacade.hpp
+++ b/ndb/src/ndbapi/TransporterFacade.hpp
@@ -24,6 +24,7 @@
#include <NdbMutex.h>
#include "DictCache.hpp"
#include <BlockNumbers.h>
+#include <mgmapi.h>
class ClusterMgr;
class ArbitMgr;
@@ -43,10 +44,6 @@ extern "C" {
void atexit_stop_instance();
}
-/**
- * Max number of Ndb objects in different threads.
- * (Ndb objects should not be shared by different threads.)
- */
class TransporterFacade
{
public:
@@ -115,6 +112,11 @@ public:
Uint32 get_batch_byte_size();
Uint32 get_batch_size();
+ TransporterRegistry* get_registry() { return theTransporterRegistry;};
+
+ // heart beat received from a node (e.g. a signal came)
+ void hb_received(NodeId n);
+
private:
/**
* Send a signal unconditional of node status (used by ClusterMgr)
@@ -133,7 +135,7 @@ private:
bool isConnected(NodeId aNodeId);
void doStop();
-
+
TransporterRegistry* theTransporterRegistry;
SocketServer m_socket_server;
int sendPerformedLastInterval;
@@ -171,6 +173,10 @@ private:
* Block number handling
*/
public:
+ /**
+ * Max number of Ndb objects.
+ * (Ndb objects should not be shared by different threads.)
+ */
STATIC_CONST( MAX_NO_THREADS = 4711 );
private:
@@ -293,6 +299,12 @@ TransporterFacade::get_node_alive(NodeId n) const {
}
inline
+void
+TransporterFacade::hb_received(NodeId n) {
+ theClusterMgr->hb_received(n);
+}
+
+inline
bool
TransporterFacade::get_node_stopping(NodeId n) const {
const ClusterMgr::Node & node = theClusterMgr->getNodeInfo(n);
diff --git a/ndb/src/ndbapi/ndb_cluster_connection.cpp b/ndb/src/ndbapi/ndb_cluster_connection.cpp
index 4fcf4d08396..7625da609b0 100644
--- a/ndb/src/ndbapi/ndb_cluster_connection.cpp
+++ b/ndb/src/ndbapi/ndb_cluster_connection.cpp
@@ -28,7 +28,8 @@
#include <ndb_limits.h>
#include <ConfigRetriever.hpp>
#include <ndb_version.h>
-#include <Vector.hpp>
+#include <mgmapi_debug.h>
+#include <mgmapi_internal.h>
#include <md5_hash.hpp>
#include <EventLogger.hpp>
@@ -180,6 +181,12 @@ Ndb_cluster_connection::no_db_nodes()
return m_impl.m_all_nodes.size();
}
+unsigned
+Ndb_cluster_connection::node_id()
+{
+ return m_impl.m_transporter_facade->ownId();
+}
+
int
Ndb_cluster_connection::wait_until_ready(int timeout,
@@ -253,22 +260,16 @@ Ndb_cluster_connection_impl::Ndb_cluster_connection_impl(const char *
g_eventLogger.createConsoleHandler();
g_eventLogger.setCategory("NdbApi");
g_eventLogger.enable(Logger::LL_ON, Logger::LL_ERROR);
-
- m_transporter_facade=
- TransporterFacade::theFacadeInstance= new TransporterFacade();
m_connect_thread= 0;
m_connect_callback= 0;
if (ndb_global_event_buffer_mutex == NULL)
- {
ndb_global_event_buffer_mutex= NdbMutex_Create();
- }
+
#ifdef VM_TRACE
if (ndb_print_state_mutex == NULL)
- {
ndb_print_state_mutex= NdbMutex_Create();
- }
#endif
m_config_retriever=
new ConfigRetriever(connect_string, NDB_VERSION, NODE_TYPE_API);
@@ -280,13 +281,16 @@ Ndb_cluster_connection_impl::Ndb_cluster_connection_impl(const char *
m_config_retriever= 0;
}
+ m_transporter_facade=
+ TransporterFacade::theFacadeInstance=
+ new TransporterFacade();
+
DBUG_VOID_RETURN;
}
Ndb_cluster_connection_impl::~Ndb_cluster_connection_impl()
{
DBUG_ENTER("~Ndb_cluster_connection");
- DBUG_PRINT("enter",("~Ndb_cluster_connection this=0x%x", this));
TransporterFacade::stop_instance();
if (m_connect_thread)
{
@@ -304,10 +308,22 @@ Ndb_cluster_connection_impl::~Ndb_cluster_connection_impl()
TransporterFacade::theFacadeInstance= 0;
}
if (m_config_retriever)
+ {
delete m_config_retriever;
-
- // fragmentToNodeMap.release();
-
+ m_config_retriever= NULL;
+ }
+ if (ndb_global_event_buffer_mutex != NULL)
+ {
+ NdbMutex_Destroy(ndb_global_event_buffer_mutex);
+ ndb_global_event_buffer_mutex= NULL;
+ }
+#ifdef VM_TRACE
+ if (ndb_print_state_mutex != NULL)
+ {
+ NdbMutex_Destroy(ndb_print_state_mutex);
+ ndb_print_state_mutex= NULL;
+ }
+#endif
DBUG_VOID_RETURN;
}
@@ -465,6 +481,8 @@ Ndb_cluster_connection_impl::do_test()
int Ndb_cluster_connection::connect(int no_retries, int retry_delay_in_seconds,
int verbose)
{
+ struct ndb_mgm_reply mgm_reply;
+
DBUG_ENTER("Ndb_cluster_connection::connect");
const char* error = 0;
do {
@@ -482,10 +500,24 @@ int Ndb_cluster_connection::connect(int no_retries, int retry_delay_in_seconds,
ndb_mgm_configuration * props = m_impl.m_config_retriever->getConfig();
if(props == 0)
break;
- m_impl.m_transporter_facade->start_instance(nodeId, props);
+ m_impl.m_transporter_facade->start_instance(nodeId, props);
m_impl.init_nodes_vector(nodeId, *props);
+ for(unsigned i=0;
+ i<m_impl.m_transporter_facade->get_registry()->m_transporter_interface.size();
+ i++)
+ ndb_mgm_set_connection_int_parameter(m_impl.m_config_retriever->get_mgmHandle(),
+ nodeId,
+ m_impl.m_transporter_facade->get_registry()
+ ->m_transporter_interface[i]
+ .m_remote_nodeId,
+ CFG_CONNECTION_SERVER_PORT,
+ m_impl.m_transporter_facade->get_registry()
+ ->m_transporter_interface[i]
+ .m_s_service_port,
+ &mgm_reply);
+
ndb_mgm_destroy_configuration(props);
m_impl.m_transporter_facade->connected();
DBUG_RETURN(0);
@@ -522,110 +554,5 @@ void Ndb_cluster_connection_impl::connect_thread()
DBUG_VOID_RETURN;
}
-/*
- * Hint handling to select node
- * ToDo: fix this
- */
-
-void
-Ndb_cluster_connection_impl::FragmentToNodeMap::init(Uint32 noOfNodes,
- Uint8 nodeIds[])
-{
- kValue = 6;
- noOfFragments = 2 * noOfNodes;
-
- /**
- * Compute hashValueMask and hashpointerValue
- */
- {
- Uint32 topBit = (1 << 31);
- for(int i = 31; i>=0; i--){
- if((noOfFragments & topBit) != 0)
- break;
- topBit >>= 1;
- }
- hashValueMask = topBit - 1;
- hashpointerValue = noOfFragments - (hashValueMask + 1);
- }
-
- /**
- * This initialization depends on
- * the fact that:
- * primary node for fragment i = i % noOfNodes
- *
- * This algorithm should be implemented in Dbdih
- */
- {
- if (fragment2PrimaryNodeMap != 0)
- abort();
-
- fragment2PrimaryNodeMap = new Uint32[noOfFragments];
- Uint32 i;
- for(i = 0; i<noOfNodes; i++){
- fragment2PrimaryNodeMap[i] = nodeIds[i];
- }
-
- // Sort them (bubble sort)
- for(i = 0; i<noOfNodes-1; i++)
- for(Uint32 j = i+1; j<noOfNodes; j++)
- if(fragment2PrimaryNodeMap[i] > fragment2PrimaryNodeMap[j]){
- Uint32 tmp = fragment2PrimaryNodeMap[i];
- fragment2PrimaryNodeMap[i] = fragment2PrimaryNodeMap[j];
- fragment2PrimaryNodeMap[j] = tmp;
- }
-
- for(i = 0; i<noOfNodes; i++){
- fragment2PrimaryNodeMap[i+noOfNodes] = fragment2PrimaryNodeMap[i];
- }
- }
-}
-
-void
-Ndb_cluster_connection_impl::FragmentToNodeMap::release(){
- delete [] fragment2PrimaryNodeMap;
- fragment2PrimaryNodeMap = 0;
-}
-
-static const Uint32 MAX_KEY_LEN_64_WORDS = 4;
-Uint32
-Ndb_cluster_connection_impl::guess_primary_node(const char *keyData,
- Uint32 keyLen)
-{
- Uint64 tempData[MAX_KEY_LEN_64_WORDS];
-
- const Uint32 usedKeyLen = (keyLen + 3) >> 2; // In words
- const char * usedKeyData = 0;
-
- /**
- * If key data buffer is not aligned (on 64 bit boundary)
- * or key len is not a multiple of 4
- * Use temp data
- */
- if(((((UintPtr)keyData) & 7) == 0) && ((keyLen & 3) == 0)) {
- usedKeyData = keyData;
- } else {
- memcpy(&tempData[0], keyData, keyLen);
- const int slack = keyLen & 3;
- if(slack > 0) {
- memset(&((char *)&tempData[0])[keyLen], 0, (4 - slack));
- }//if
- usedKeyData = (char *)&tempData[0];
- }//if
-
- Uint32 hashValue = md5_hash((Uint64 *)usedKeyData, usedKeyLen);
-
- hashValue >>= fragmentToNodeMap.kValue;
-
- Uint32 fragmentId = hashValue &
- fragmentToNodeMap.hashValueMask;
-
- if(fragmentId < fragmentToNodeMap.hashpointerValue) {
- fragmentId = hashValue &
- ((fragmentToNodeMap.hashValueMask << 1) + 1);
- }//if
- return fragmentId;
-}
-
-
template class Vector<Ndb_cluster_connection_impl::Node>;
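The new Ndb_cluster_connection::node_id() accessor complements the existing no_db_nodes(); a small sketch of querying both once connect() has returned 0 (the printf formatting is only illustrative):

    Ndb_cluster_connection con;
    if (con.connect(12, 5, 1) == 0)
      printf("connected as API node %u, cluster reports %d data nodes\n",
             con.node_id(), (int)con.no_db_nodes());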
diff --git a/ndb/src/ndbapi/ndb_cluster_connection_impl.hpp b/ndb/src/ndbapi/ndb_cluster_connection_impl.hpp
index 620eac296a3..05652f3316a 100644
--- a/ndb/src/ndbapi/ndb_cluster_connection_impl.hpp
+++ b/ndb/src/ndbapi/ndb_cluster_connection_impl.hpp
@@ -54,22 +54,6 @@ private:
friend class NdbImpl;
friend void* run_ndb_cluster_connection_connect_thread(void*);
friend class Ndb_cluster_connection;
-
- /**
- * Structure containing values for guessing primary node
- */
- struct FragmentToNodeMap {
- FragmentToNodeMap():
- fragment2PrimaryNodeMap(0) {};
- Uint32 kValue;
- Uint32 hashValueMask;
- Uint32 hashpointerValue;
- Uint32 noOfFragments;
- Uint32 *fragment2PrimaryNodeMap;
-
- void init(Uint32 noOfNodes, Uint8 nodeIds[]);
- void release();
- } fragmentToNodeMap;
struct Node
{
@@ -85,8 +69,6 @@ private:
Vector<Node> m_all_nodes;
void init_nodes_vector(Uint32 nodeid, const ndb_mgm_configuration &config);
- Uint32 guess_primary_node(const char * keyData, Uint32 keyLen);
-
void connect_thread();
TransporterFacade *m_transporter_facade;
diff --git a/ndb/src/ndbapi/ndberror.c b/ndb/src/ndbapi/ndberror.c
index 69fc47ff70c..3c3893c38ae 100644
--- a/ndb/src/ndbapi/ndberror.c
+++ b/ndb/src/ndbapi/ndberror.c
@@ -55,6 +55,8 @@ typedef struct ErrorBundle {
#define NI ndberror_cl_function_not_implemented
#define UE ndberror_cl_unknown_error_code
+#define OE ndberror_cl_schema_object_already_exists
+
static const char* empty_string = "";
/*
@@ -66,6 +68,7 @@ static const char* empty_string = "";
* 600 - ACC
* 700 - DICT
* 800 - TUP
+ * 900 - TUX
* 1200 - LQH
* 1300 - BACKUP
* 4000 - API
@@ -75,6 +78,7 @@ static const char* empty_string = "";
* 4400 - ""
* 4500 - ""
* 4600 - ""
+ * 4700 - "" Event
* 5000 - Management server
*/
@@ -174,11 +178,13 @@ ErrorBundle ErrorCodes[] = {
{ 623, IS, "623" },
{ 624, IS, "624" },
{ 625, IS, "Out of memory in Ndb Kernel, hash index part (increase IndexMemory)" },
- { 800, IS, "Too many ordered indexes (increase MaxNoOfOrderedIndexes)" },
+ { 640, IS, "Too many hash indexes (should not happen)" },
{ 826, IS, "Too many tables and attributes (increase MaxNoOfAttributes or MaxNoOfTables)" },
{ 827, IS, "Out of memory in Ndb Kernel, table data (increase DataMemory)" },
{ 902, IS, "Out of memory in Ndb Kernel, ordered index data (increase DataMemory)" },
- { 832, IS, "832" },
+ { 903, IS, "Too many ordered indexes (increase MaxNoOfOrderedIndexes)" },
+ { 904, IS, "Out of fragment records (increase MaxNoOfOrderedIndexes)" },
+ { 905, IS, "Out of attribute records (increase MaxNoOfAttributes)" },
/**
* TimeoutExpired
@@ -204,6 +210,7 @@ ErrorBundle ErrorCodes[] = {
* Internal errors
*/
{ 892, IE, "Inconsistent hash index. The index needs to be dropped and recreated" },
+ { 896, IE, "Tuple corrupted - wrong checksum or column data in invalid format" },
{ 901, IE, "Inconsistent ordered index. The index needs to be dropped and recreated" },
{ 202, IE, "202" },
{ 203, IE, "203" },
@@ -259,6 +266,7 @@ ErrorBundle ErrorCodes[] = {
* Application error
*/
{ 823, AE, "Too much attrinfo from application in tuple manager" },
+ { 831, AE, "Too many nullable/bitfields in table definition" },
{ 876, AE, "876" },
{ 877, AE, "877" },
{ 878, AE, "878" },
@@ -270,7 +278,7 @@ ErrorBundle ErrorCodes[] = {
{ 897, AE, "Update attempt of primary key via ndbcluster internal api (if this occurs via the MySQL server it is a bug, please report)" },
{ 4256, AE, "Must call Ndb::init() before this function" },
{ 4257, AE, "Tried to read too much - too many getValue calls" },
-
+
/**
* Scan application errors
*/
@@ -289,6 +297,30 @@ ErrorBundle ErrorCodes[] = {
{ 4232, AE, "Parallelism can only be between 1 and 240" },
{ 290, AE, "Scan not started or has been closed by kernel due to timeout" },
+ /**
+ * Event schema errors
+ */
+
+ { 4713, SE, "Column defined in event does not exist in table"},
+
+ /**
+ * Event application errors
+ */
+
+  { 4707, AE, "Too many events have been defined"},
+ { 4708, AE, "Event name is too long"},
+ { 4709, AE, "Can't accept more subscribers"},
+ { 746, OE, "Event name already exists"},
+ { 4710, AE, "Event not found"},
+ { 4711, AE, "Creation of event failed"},
+ { 4712, AE, "Stopped event operation does not exist. Already stopped?"},
+
+ /**
+ * Event internal errors
+ */
+
+ { 4731, IE, "Event not found"},
+
/**
* SchemaError
*/
@@ -299,17 +331,17 @@ ErrorBundle ErrorCodes[] = {
{ 707, SE, "No more table metadata records (increase MaxNoOfTables)" },
{ 708, SE, "No more attribute metadata records (increase MaxNoOfAttributes)" },
{ 709, SE, "No such table existed" },
- { 721, SE, "Table or index with given name already exists" },
+ { 721, OE, "Table or index with given name already exists" },
{ 723, SE, "No such table existed" },
- { 736, SE, "Wrong attribute size" },
+ { 736, SE, "Unsupported array size" },
{ 737, SE, "Attribute array size too big" },
{ 738, SE, "Record too big" },
{ 739, SE, "Unsupported primary key length" },
{ 740, SE, "Nullable primary key not supported" },
{ 741, SE, "Unsupported alter table" },
- { 742, SE, "Unsupported attribute type in index" },
{ 743, SE, "Unsupported character set in table or index" },
{ 744, SE, "Character string is invalid for given character set" },
+ { 745, SE, "Distribution key not supported for char attribute (use binary attribute)" },
{ 761, SE, "Unable to drop table as backup is in progress" },
{ 762, SE, "Unable to alter table as backup is in progress" },
{ 241, SE, "Invalid schema object version" },
@@ -317,6 +349,9 @@ ErrorBundle ErrorCodes[] = {
{ 284, SE, "Table not defined in transaction coordinator" },
{ 285, SE, "Unknown table error in transaction coordinator" },
{ 881, SE, "Unable to create table, out of data pages (increase DataMemory) " },
+ { 906, SE, "Unsupported attribute type in index" },
+ { 907, SE, "Unsupported character set in table or index" },
+ { 908, IS, "Invalid ordered index tree node size" },
{ 1225, SE, "Table not defined in local query handler" },
{ 1226, SE, "Table is being dropped" },
{ 1228, SE, "Cannot use drop table for drop index" },
@@ -461,8 +496,7 @@ ErrorBundle ErrorCodes[] = {
{ 4241, AE, "Index name too long" },
{ 4242, AE, "Too many indexes" },
{ 4243, AE, "Index not found" },
- { 4244, AE, "Index or table with given name already exists" },
- { 4245, AE, "Index attribute must be defined as stored, i.e. the StorageAttributeType must be defined as NormalStorageAttribute"},
+ { 4244, OE, "Index or table with given name already exists" },
{ 4247, AE, "Illegal index/trigger create/drop/alter request" },
{ 4248, AE, "Trigger/index name invalid" },
{ 4249, AE, "Invalid table" },
@@ -483,9 +517,10 @@ ErrorBundle ErrorCodes[] = {
{ 4266, AE, "Invalid blob seek position" },
{ 4267, IE, "Corrupted blob value" },
{ 4268, IE, "Error in blob head update forced rollback of transaction" },
- { 4268, IE, "Unknown blob error" },
{ 4269, IE, "No connection to ndb management server" },
- { 4335, AE, "Only one autoincrement column allowed per table. Having a table without primary key uses an autoincremented hidden key, i.e. a table without a primary key can not have an autoincremented column" }
+ { 4270, IE, "Unknown blob error" },
+ { 4335, AE, "Only one autoincrement column allowed per table. Having a table without primary key uses an autoincremented hidden key, i.e. a table without a primary key can not have an autoincremented column" },
+ { 4271, AE, "Invalid index object, not retrieved via getIndex()" }
};
static
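Codes 721 and 4244 are now classified as "schema object already exists" (OE) rather than as generic schema or application errors. A hedged sketch of how an application might treat that case as benign when creating a table idempotently (the dictionary and table objects are assumed to be set up already):

    NdbDictionary::Dictionary* dict = ndb->getDictionary();
    if (dict->createTable(tab) != 0)
    {
      const NdbError& err = dict->getNdbError();
      if (err.code == 721 || err.code == 4244)     // "already exists": ignore for idempotent setup
        ;                                          // treat as success
      else
        fprintf(stderr, "create failed %d: %s\n", err.code, err.message);
    }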
diff --git a/ndb/test/include/HugoCalculator.hpp b/ndb/test/include/HugoCalculator.hpp
index b782eb003a3..03de46cd7ea 100644
--- a/ndb/test/include/HugoCalculator.hpp
+++ b/ndb/test/include/HugoCalculator.hpp
@@ -31,14 +31,7 @@ class HugoCalculator {
public:
HugoCalculator(const NdbDictionary::Table& tab);
Int32 calcValue(int record, int attrib, int updates) const;
-#if 0
- U_Int32 calcValue(int record, int attrib, int updates) const;
- U_Int64 calcValue(int record, int attrib, int updates) const;
- Int64 calcValue(int record, int attrib, int updates) const;
- float calcValue(int record, int attrib, int updates) const;
- double calcValue(int record, int attrib, int updates) const;
-#endif
- const char* calcValue(int record, int attrib, int updates, char* buf) const;
+ const char* calcValue(int record, int attrib, int updates, char* buf, int len) const;
int verifyRowValues(NDBT_ResultRow* const pRow) const;
int getIdValue(NDBT_ResultRow* const pRow) const;
diff --git a/ndb/test/include/HugoOperations.hpp b/ndb/test/include/HugoOperations.hpp
index 34b2edc2ae8..82fd5529fa2 100644
--- a/ndb/test/include/HugoOperations.hpp
+++ b/ndb/test/include/HugoOperations.hpp
@@ -24,11 +24,14 @@
class HugoOperations : public UtilTransactions {
public:
- HugoOperations(const NdbDictionary::Table&);
+ HugoOperations(const NdbDictionary::Table&,
+ const NdbDictionary::Index* idx = 0);
+
~HugoOperations();
int startTransaction(Ndb*);
+ int setTransaction(NdbTransaction*);
int closeTransaction(Ndb*);
- NdbConnection* getTransaction();
+ NdbTransaction* getTransaction();
void refresh();
void setTransactionId(Uint64);
@@ -42,6 +45,10 @@ public:
int recordNo,
int numRecords = 1,
int updatesValue = 0);
+
+ int pkWritePartialRecord(Ndb*,
+ int recordNo,
+ int numRecords = 1);
int pkReadRecord(Ndb*,
int recordNo,
@@ -73,10 +80,13 @@ public:
int attrId,
int rowId,
int updateId);
+
int equalForAttr(NdbOperation*,
int attrId,
int rowId);
-
+
+ int setValues(NdbOperation*, int rowId, int updateId);
+
int verifyUpdatesValue(int updatesValue, int _numRows = 0);
int indexReadRecords(Ndb*, const char * idxName, int recordNo,
@@ -93,8 +103,11 @@ public:
NdbScanOperation::LM_CommittedRead,
int numRecords = 1);
+ NdbIndexScanOperation* pIndexScanOp;
- int execute_async(Ndb*, ExecType, AbortOption = AbortOnError);
+ NDBT_ResultRow& get_row(Uint32 idx) { return *rows[idx];}
+
+ int execute_async(Ndb*, NdbTransaction::ExecType, NdbTransaction::AbortOption = NdbTransaction::AbortOnError);
int wait_async(Ndb*, int timeout = -1);
protected:
@@ -106,16 +119,14 @@ protected:
Vector<BaseString> savedRecords;
- struct RsPair { NdbResultSet* m_result_set; int records; };
+ struct RsPair { NdbScanOperation* m_result_set; int records; };
Vector<RsPair> m_result_sets;
Vector<RsPair> m_executed_result_sets;
- NdbConnection* pTrans;
-
int m_async_reply;
int m_async_return;
- friend void HugoOperations_async_callback(int, NdbConnection*, void*);
- void callback(int res, NdbConnection*);
+ friend void HugoOperations_async_callback(int, NdbTransaction*, void*);
+ void callback(int res, NdbTransaction*);
};
#endif
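execute_async() now takes the scoped NdbTransaction::ExecType and NdbTransaction::AbortOption, and the callback carries an NdbTransaction*. Underneath, the usual asynchronous pair is executeAsynchPrepare() plus Ndb::sendPollNdb(); a hedged sketch of that pattern with the new names (the argument values are illustrative):

    static void my_callback(int result, NdbTransaction* trans, void* /*arg*/)
    {
      if (result != 0)
        fprintf(stderr, "async execute failed: %s\n", trans->getNdbError().message);
    }

    void commit_async(Ndb* ndb, NdbTransaction* trans)
    {
      trans->executeAsynchPrepare(NdbTransaction::Commit, my_callback, 0,
                                  NdbTransaction::AbortOnError);
      ndb->sendPollNdb(3000, 1, 0);   // wait up to 3 s for at least one completion
      ndb->closeTransaction(trans);
    }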
diff --git a/ndb/test/include/HugoTransactions.hpp b/ndb/test/include/HugoTransactions.hpp
index b833f2ac629..5795bbc94c9 100644
--- a/ndb/test/include/HugoTransactions.hpp
+++ b/ndb/test/include/HugoTransactions.hpp
@@ -25,7 +25,8 @@
class HugoTransactions : public HugoOperations {
public:
- HugoTransactions(const NdbDictionary::Table&);
+ HugoTransactions(const NdbDictionary::Table&,
+ const NdbDictionary::Index* idx = 0);
~HugoTransactions();
int createEvent(Ndb*);
int eventOperation(Ndb*, void* stats,
@@ -41,7 +42,8 @@ public:
int records,
int abort = 0,
int parallelism = 0,
- NdbOperation::LockMode = NdbOperation::LM_Read);
+ NdbOperation::LockMode = NdbOperation::LM_Read,
+ int scan_flags = 0);
int scanReadRecords(Ndb*,
const NdbDictionary::Index*,
@@ -49,7 +51,7 @@ public:
int abort = 0,
int parallelism = 0,
NdbOperation::LockMode = NdbOperation::LM_Read,
- bool sorted = false);
+ int scan_flags = 0);
int pkReadRecords(Ndb*,
int records,
diff --git a/ndb/test/include/NDBT_Error.hpp b/ndb/test/include/NDBT_Error.hpp
index ef107072465..6775a107196 100644
--- a/ndb/test/include/NDBT_Error.hpp
+++ b/ndb/test/include/NDBT_Error.hpp
@@ -91,7 +91,11 @@ private:
; \
}
-#define ERR(error) ERR_OUT(g_err, error)
+#define ERR(error) \
+{ \
+ const NdbError &_error= (error); \
+ ERR_OUT(g_err, _error); \
+}
#define ERR_INFO(error) ERR_OUT(g_info, error)
#endif
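The reworked ERR macro binds its argument to a local const NdbError reference, so an expression with side effects is evaluated exactly once before being printed, e.g.:

    ERR(pTrans->getNdbError());   // getNdbError() is called once; the result goes to ERR_OUT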
diff --git a/ndb/test/include/NDBT_ResultRow.hpp b/ndb/test/include/NDBT_ResultRow.hpp
index 6072d0ea510..cbb5d7f6c6a 100644
--- a/ndb/test/include/NDBT_ResultRow.hpp
+++ b/ndb/test/include/NDBT_ResultRow.hpp
@@ -27,7 +27,7 @@ public:
const NdbRecAttr * attributeStore(int i) const ;
const NdbRecAttr * attributeStore(const char* name) const ;
- BaseString c_str();
+ BaseString c_str() const ;
NdbOut & header (NdbOut &) const;
friend NdbOut & operator << (NdbOut&, const NDBT_ResultRow &);
@@ -36,6 +36,11 @@ public:
* Make copy of NDBT_ResultRow
*/
NDBT_ResultRow * clone() const;
+
+ bool operator==(const NDBT_ResultRow&) const ;
+ bool operator!=(const NDBT_ResultRow& other) const {
+ return ! (*this == other);
+ }
private:
int cols;
diff --git a/ndb/test/include/NDBT_Tables.hpp b/ndb/test/include/NDBT_Tables.hpp
index aa78f7d4e2c..fb0df8aa35b 100644
--- a/ndb/test/include/NDBT_Tables.hpp
+++ b/ndb/test/include/NDBT_Tables.hpp
@@ -23,11 +23,13 @@
#include <NdbDictionary.hpp>
#include <NDBT_Table.hpp>
+typedef int (* NDBT_CreateTableHook)(Ndb*, NdbDictionary::Table&, int when);
+
class NDBT_Tables {
public:
-
+
static int createTable(Ndb* pNdb, const char* _name, bool _temp = false,
- bool existsOK = false);
+ bool existsOK = false, NDBT_CreateTableHook = 0);
static int createAllTables(Ndb* pNdb, bool _temp, bool existsOK = false);
static int createAllTables(Ndb* pNdb);
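createTable() can now be given an NDBT_CreateTableHook, presumably so a test can adjust the table definition as part of the create. A stub matching the typedef (the meaning of the "when" argument is an assumption of this sketch and is defined by NDBT_Tables itself):

    static int my_create_hook(Ndb* ndb, NdbDictionary::Table& tab, int when)
    {
      (void)ndb; (void)when;       // "when" semantics assumed to distinguish the call points
      tab.setLogging(false);       // e.g. make the test table non-logging
      return 0;                    // returning 0 is assumed to let the create proceed
    }

    // NDBT_Tables::createTable(pNdb, "T1", false, true, my_create_hook);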
diff --git a/ndb/test/include/NDBT_Test.hpp b/ndb/test/include/NDBT_Test.hpp
index 44eb24cd87e..027ac356e0c 100644
--- a/ndb/test/include/NDBT_Test.hpp
+++ b/ndb/test/include/NDBT_Test.hpp
@@ -25,6 +25,7 @@
#include <NdbCondition.h>
#include <NdbTimer.hpp>
#include <Vector.hpp>
+#include <NdbApi.hpp>
#include <NdbDictionary.hpp>
class NDBT_Step;
@@ -34,7 +35,9 @@ class NDBT_TestCaseImpl1;
class NDBT_Context {
public:
- NDBT_Context();
+ Ndb_cluster_connection& m_cluster_connection;
+
+ NDBT_Context(Ndb_cluster_connection&);
~NDBT_Context();
const NdbDictionary::Table* getTab();
NDBT_TestSuite* getSuite();
@@ -121,7 +124,7 @@ public:
NDBT_TESTFUNC* pfunc);
virtual ~NDBT_Step() {}
int execute(NDBT_Context*);
- virtual int setUp() = 0;
+ virtual int setUp(Ndb_cluster_connection&) = 0;
virtual void tearDown() = 0;
void setContext(NDBT_Context*);
NDBT_Context* getContext();
@@ -143,7 +146,7 @@ public:
const char* pname,
NDBT_TESTFUNC* pfunc);
virtual ~NDBT_NdbApiStep() {}
- virtual int setUp();
+ virtual int setUp(Ndb_cluster_connection&);
virtual void tearDown();
Ndb* getNdb();
@@ -350,10 +353,13 @@ public:
int addTest(NDBT_TestCase* pTest);
private:
- int executeOne(const char* _tabname, const char* testname = NULL);
- int executeAll(const char* testname = NULL);
-
- void execute(Ndb*, const NdbDictionary::Table*, const char* testname = NULL);
+ int executeOne(Ndb_cluster_connection&,
+ const char* _tabname, const char* testname = NULL);
+ int executeAll(Ndb_cluster_connection&,
+ const char* testname = NULL);
+ void execute(Ndb_cluster_connection&,
+ Ndb*, const NdbDictionary::Table*, const char* testname = NULL);
+
int report(const char* _tcname = NULL);
int reportAllTables(const char* );
const char* name;
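Test steps now reach the cluster through the context's m_cluster_connection rather than a process-global connection. A minimal sketch of a step written against the new setUp(Ndb_cluster_connection&) interface (the record count and database name are illustrative):

    int runMyStep(NDBT_Context* ctx, NDBT_Step* step)
    {
      Ndb ndb(&ctx->m_cluster_connection, "TEST_DB");
      if (ndb.init() != 0 || ndb.waitUntilReady() != 0)
        return NDBT_FAILED;

      HugoTransactions hugoTrans(*ctx->getTab());
      if (hugoTrans.pkReadRecords(&ndb, 100) != 0)   // 100 records: illustrative
        return NDBT_FAILED;
      return NDBT_OK;
    }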
diff --git a/ndb/test/include/NdbSchemaOp.hpp b/ndb/test/include/NdbSchemaOp.hpp
index e8ab542b00a..1edbc155643 100644
--- a/ndb/test/include/NdbSchemaOp.hpp
+++ b/ndb/test/include/NdbSchemaOp.hpp
@@ -79,29 +79,6 @@
};
/**
- * Where attribute is stored.
- *
- * This is used to indicate whether a primary key
- * should only be stored in the index storage and not in the data storage
- * or if it should be stored in both places.
- * The first alternative makes the attribute take less space,
- * but makes it impossible to scan using attribute.
- *
- * @note Use NormalStorageAttribute for most cases.
- * (IndexStorageAttribute should only be used on primary key
- * attributes and only if you do not want to scan using the attribute.)
- */
- enum StorageAttributeType {
- NoStorageAttributeTypeDefined = -1, ///< <i>Missing explanation</i>
- IndexStorageAttribute, ///< Attribute is only stored in
- ///< index storage (ACC)
- NormalStorageAttribute ///< Attribute values are stored
- ///< both in the index (ACC) and
- ///< in the data storage (TUP)
- };
-
-
- /**
* Type of fragmentation used for a table
*/
enum FragmentType {
@@ -405,27 +382,7 @@ public:
* the attribute.
* <br>
* Legal values: true, false
- * @param aStType Stored in both index and data storage or
- * only store in index data storage.
- * <br>
- * This parameter is only of interest for tuple
- * key attributes.
- * All tuple key attributes values are always stored
- * in the index storage part.
- * If this parameter is set to
- * IndexStorageAttribute, then the attribute values
- * will <em>only</em> be stored in the index
- * storage part and <em>not</em> in the data
- * storage part.
- * <br>
- * If there will be no scans using the primary
- * key attribute and if the size of the attribute
- * is large, then this might be of interest.
- * A typical example is a table where
- * http-addresses are used as primary key.
- * <br>
- * Legal values: NormalStorageAttribute,
- * IndexStorageAttribute
+ * @param aStType Obsolete since wl-2066
* @param aDistributionKey Sometimes it is preferable to use a subset
* of the primary key as the distribution key.
* An example is TPC-C where it might be
@@ -474,7 +431,7 @@ public:
AttrType aAttrType = UnSigned,
StorageMode aStorageMode = MMBased,
bool nullable = false,
- StorageAttributeType aStType= NormalStorageAttribute,
+ int aStType= 0, // obsolete
int aDistributionKey = 0,
int aDistributionGroup = 0,
int aDistributionGroupNoOfBits = 16,
@@ -491,7 +448,7 @@ public:
AttrType aAttrType,
StorageMode aStorageMode,
NullAttributeType aNullAttr,
- StorageAttributeType aStType = NormalStorageAttribute,
+ int aStType, // obsolete
int aDistributionKey = 0,
int aDistributionGroup = 0,
int aDistributionGroupNoOfBits = 16){
@@ -569,6 +526,8 @@ convertColumnTypeToAttrType(NdbDictionary::Column::Type _type)
case NdbDictionary::Column::Float:
case NdbDictionary::Column::Olddecimal:
case NdbDictionary::Column::Olddecimalunsigned:
+ case NdbDictionary::Column::Decimal:
+ case NdbDictionary::Column::Decimalunsigned:
case NdbDictionary::Column::Double:
return Float;
case NdbDictionary::Column::Char:
diff --git a/ndb/test/include/UtilTransactions.hpp b/ndb/test/include/UtilTransactions.hpp
index 23902f3b317..333f5d98328 100644
--- a/ndb/test/include/UtilTransactions.hpp
+++ b/ndb/test/include/UtilTransactions.hpp
@@ -23,15 +23,13 @@ typedef int (ReadCallBackFn)(NDBT_ResultRow*);
class UtilTransactions {
public:
- enum ScanLock {
- SL_Read = 0,
- SL_ReadHold = 1,
- SL_Exclusive = 2
- };
-
- UtilTransactions(const NdbDictionary::Table&);
- UtilTransactions(Ndb* ndb, const char * tableName);
-
+ UtilTransactions(const NdbDictionary::Table&,
+ const NdbDictionary::Index* idx = 0);
+ UtilTransactions(Ndb* ndb,
+ const char * tableName, const char * indexName = 0);
+
+ int closeTransaction(Ndb*);
+
int clearTable(Ndb*,
int records = 0,
int parallelism = 0);
@@ -70,6 +68,14 @@ public:
int copyTableData(Ndb*,
const char* destName);
+ /**
+ * Compare this table with other_table
+ *
+ * return 0 - on equality
+ * -1 - on error
+ * >0 - otherwise
+ */
+ int compare(Ndb*, const char * other_table, int flags);
private:
static int takeOverAndDeleteRecord(Ndb*,
@@ -114,6 +120,12 @@ private:
protected:
int m_defaultClearMethod;
const NdbDictionary::Table& tab;
+ const NdbDictionary::Index* idx;
+ NdbConnection* pTrans;
+
+ NdbOperation* getOperation(NdbConnection*,
+ NdbOperation::OperationType);
+ NdbScanOperation* getScanOperation(NdbConnection*);
};
#endif
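The new compare() member pairs naturally with copyTableData(): copy the rows aside, run the operation under test, then compare the two tables. A hedged sketch (the destination table name and flags value are assumptions; compare() returns 0 on equality, -1 on error, >0 otherwise):

    UtilTransactions utilTrans(*pTab);                    // pTab: table under test
    if (utilTrans.copyTableData(pNdb, "T1_COPY") != 0 ||  // hypothetical copy target
        utilTrans.compare(pNdb, "T1_COPY", 0) != 0)       // flags=0 assumed to mean plain compare
      return NDBT_FAILED;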
diff --git a/ndb/test/ndbapi/Makefile.am b/ndb/test/ndbapi/Makefile.am
index 7b4a96f5890..d83e9614eb5 100644
--- a/ndb/test/ndbapi/Makefile.am
+++ b/ndb/test/ndbapi/Makefile.am
@@ -31,7 +31,10 @@ testTimeout \
testTransactions \
testDeadlock \
test_event ndbapi_slow_select testReadPerf testLcp \
+testPartitioning \
+testBitfield \
DbCreate DbAsyncGenerator \
+test_event_multi_table \
testSRBank
#flexTimedAsynch
@@ -71,8 +74,11 @@ test_event_SOURCES = test_event.cpp
ndbapi_slow_select_SOURCES = slow_select.cpp
testReadPerf_SOURCES = testReadPerf.cpp
testLcp_SOURCES = testLcp.cpp
-DbCreate_SOURCES= bench/mainPopulate.cpp bench/dbPopulate.cpp bench/userInterface.cpp bench/dbPopulate.h bench/userInterface.h bench/testData.h bench/testDefinitions.h bench/ndb_schema.hpp bench/ndb_error.hpp
-DbAsyncGenerator_SOURCES= bench/mainAsyncGenerator.cpp bench/asyncGenerator.cpp bench/ndb_async2.cpp bench/dbGenerator.h bench/macros.h bench/userInterface.h bench/testData.h bench/testDefinitions.h bench/ndb_schema.hpp bench/ndb_error.hpp
+testPartitioning_SOURCES = testPartitioning.cpp
+testBitfield_SOURCES = testBitfield.cpp
+DbCreate_SOURCES = bench/mainPopulate.cpp bench/dbPopulate.cpp bench/userInterface.cpp bench/dbPopulate.h bench/userInterface.h bench/testData.h bench/testDefinitions.h bench/ndb_schema.hpp bench/ndb_error.hpp
+DbAsyncGenerator_SOURCES = bench/mainAsyncGenerator.cpp bench/asyncGenerator.cpp bench/ndb_async2.cpp bench/dbGenerator.h bench/macros.h bench/userInterface.h bench/testData.h bench/testDefinitions.h bench/ndb_schema.hpp bench/ndb_error.hpp
+test_event_multi_table_SOURCES = test_event_multi_table.cpp
testSRBank_SOURCES = testSRBank.cpp
INCLUDES_LOC = -I$(top_srcdir)/ndb/include/kernel
@@ -90,6 +96,8 @@ testSRBank_LDADD = bank/libbank.a $(LDADD)
# Don't update the files from bitkeeper
%::SCCS/s.%
+
+
windoze-dsp: flexBench.dsp testBasic.dsp testBlobs.dsp \
testScan.dsp
diff --git a/ndb/test/ndbapi/ScanFunctions.hpp b/ndb/test/ndbapi/ScanFunctions.hpp
index 6964f8c73a8..37389d9b7de 100644
--- a/ndb/test/ndbapi/ScanFunctions.hpp
+++ b/ndb/test/ndbapi/ScanFunctions.hpp
@@ -81,7 +81,6 @@ ScanFunctions::scanReadFunctions(Ndb* pNdb,
int check;
NdbConnection *pTrans = 0;
NdbScanOperation *pOp = 0;
- NdbResultSet *rs = 0;
while (true){
if (retryAttempt >= retryMax){
@@ -111,12 +110,9 @@ ScanFunctions::scanReadFunctions(Ndb* pNdb,
return NDBT_FAILED;
}
-
- rs = pOp->readTuples(exclusive ?
- NdbScanOperation::LM_Exclusive :
- NdbScanOperation::LM_Read);
-
- if( rs == 0 ) {
+ if( pOp->readTuples(exclusive ?
+ NdbScanOperation::LM_Exclusive :
+ NdbScanOperation::LM_Read) ) {
ERR(pTrans->getNdbError());
pNdb->closeTransaction(pTrans);
return NDBT_FAILED;
@@ -125,8 +121,7 @@ ScanFunctions::scanReadFunctions(Ndb* pNdb,
if (action == OnlyOpenScanOnce){
// Call openScan one more time when it's already defined
- NdbResultSet* rs2 = pOp->readTuples(NdbScanOperation::LM_Read);
- if( rs2 == 0 ) {
+ if( pOp->readTuples(NdbScanOperation::LM_Read) ) {
ERR(pTrans->getNdbError());
pNdb->closeTransaction(pTrans);
return NDBT_FAILED;
@@ -168,7 +163,7 @@ ScanFunctions::scanReadFunctions(Ndb* pNdb,
bool abortTrans = (action==CloseWithoutStop);
int eof;
int rows = 0;
- eof = rs->nextResult();
+ eof = pOp->nextResult();
while(eof == 0){
rows++;
@@ -178,7 +173,7 @@ ScanFunctions::scanReadFunctions(Ndb* pNdb,
if (action != CloseWithoutStop){
// Test that we can closeTrans without stopScan
- rs->close();
+ pOp->close();
if( check == -1 ) {
ERR(pTrans->getNdbError());
pNdb->closeTransaction(pTrans);
@@ -201,7 +196,7 @@ ScanFunctions::scanReadFunctions(Ndb* pNdb,
}
}
- eof = rs->nextResult();
+ eof = pOp->nextResult();
}
if (eof == -1) {
const NdbError err = pTrans->getNdbError();
@@ -211,7 +206,7 @@ ScanFunctions::scanReadFunctions(Ndb* pNdb,
// Be cruel, call nextScanResult after error
for(int i=0; i<10; i++){
- eof = rs->nextResult();
+ eof = pOp->nextResult();
if(eof == 0){
g_err << "nextScanResult returned eof = " << eof << endl
<< " That is an error when there are no more records" << endl;
@@ -241,7 +236,7 @@ ScanFunctions::scanReadFunctions(Ndb* pNdb,
if (action == NextScanWhenNoMore){
g_info << "Calling nextScanresult when there are no more records" << endl;
for(int i=0; i<10; i++){
- eof = rs->nextResult();
+ eof = pOp->nextResult();
if(eof == 0){
g_err << "nextScanResult returned eof = " << eof << endl
<< " That is an error when there are no more records" << endl;
diff --git a/ndb/test/ndbapi/ScanInterpretTest.hpp b/ndb/test/ndbapi/ScanInterpretTest.hpp
index e8a0d4b6dca..d4e9bbecc81 100644
--- a/ndb/test/ndbapi/ScanInterpretTest.hpp
+++ b/ndb/test/ndbapi/ScanInterpretTest.hpp
@@ -227,10 +227,7 @@ ScanInterpretTest::scanRead(Ndb* pNdb,
return NDBT_FAILED;
}
- NdbResultSet * rs = pOp->readTuples(NdbScanOperation::LM_Read,
- 0, parallelism);
-
- if( rs == 0 ) {
+ if( pOp->readTuples(NdbScanOperation::LM_Read, 0, parallelism) ) {
ERR(pTrans->getNdbError());
pNdb->closeTransaction(pTrans);
return NDBT_FAILED;
@@ -262,14 +259,14 @@ ScanInterpretTest::scanRead(Ndb* pNdb,
int rows = 0;
NdbConnection* pInsTrans;
- while((eof = rs->nextResult(true)) == 0){
+ while((eof = pOp->nextResult(true)) == 0){
do {
rows++;
if (addRowToInsert(pNdb, pTrans) != 0){
pNdb->closeTransaction(pTrans);
return NDBT_FAILED;
}
- } while((eof = rs->nextResult(false)) == 0);
+ } while((eof = pOp->nextResult(false)) == 0);
check = pTrans->execute(Commit);
if( check == -1 ) {
@@ -349,9 +346,7 @@ ScanInterpretTest::scanReadVerify(Ndb* pNdb,
return NDBT_FAILED;
}
- NdbResultSet * rs = pOp->readTuples(NdbScanOperation::LM_Read,
- 0, parallelism);
- if( rs == 0 ) {
+ if( pOp->readTuples(NdbScanOperation::LM_Read, 0, parallelism) ) {
ERR(pTrans->getNdbError());
pNdb->closeTransaction(pTrans);
return NDBT_FAILED;
@@ -392,7 +387,7 @@ ScanInterpretTest::scanReadVerify(Ndb* pNdb,
NdbConnection* pExistTrans;
NdbConnection* pNoExistTrans;
- while((eof = rs->nextResult(true)) == 0){
+ while((eof = pOp->nextResult(true)) == 0){
pExistTrans = pNdb->startTransaction();
if (pExistTrans == NULL) {
const NdbError err = pNdb->getNdbError();
@@ -424,7 +419,7 @@ ScanInterpretTest::scanReadVerify(Ndb* pNdb,
return NDBT_FAILED;
}
}
- } while((eof = rs->nextResult(false)) == 0);
+ } while((eof = pOp->nextResult(false)) == 0);
// Execute the transaction containing reads of
diff --git a/ndb/test/ndbapi/bank/Bank.cpp b/ndb/test/ndbapi/bank/Bank.cpp
index 346442367fc..37224fdd055 100644
--- a/ndb/test/ndbapi/bank/Bank.cpp
+++ b/ndb/test/ndbapi/bank/Bank.cpp
@@ -19,8 +19,8 @@
#include <NdbSleep.h>
#include <UtilTransactions.hpp>
-Bank::Bank(bool _init):
- m_ndb("BANK"),
+Bank::Bank(Ndb_cluster_connection& con, bool _init):
+ m_ndb(&con, "BANK"),
m_maxAccount(-1),
m_initialized(false)
{
@@ -661,8 +661,7 @@ int Bank::findLastGL(Uint64 &lastTime){
return NDBT_FAILED;
}
- NdbResultSet * rs = pOp->readTuples();
- if( rs == 0 ) {
+ if( pOp->readTuples() ) {
ERR(pScanTrans->getNdbError());
m_ndb.closeTransaction(pScanTrans);
return NDBT_FAILED;
@@ -691,7 +690,7 @@ int Bank::findLastGL(Uint64 &lastTime){
int eof;
int rows = 0;
- eof = rs->nextResult();
+ eof = pOp->nextResult();
lastTime = 0;
while(eof == 0){
@@ -701,7 +700,7 @@ int Bank::findLastGL(Uint64 &lastTime){
if (t > lastTime)
lastTime = t;
- eof = rs->nextResult();
+ eof = pOp->nextResult();
}
if (eof == -1) {
ERR(pScanTrans->getNdbError());
@@ -993,8 +992,7 @@ int Bank::sumTransactionsForGL(const Uint64 glTime,
return NDBT_FAILED;
}
- NdbResultSet * rs = pOp->readTuplesExclusive();
- if( rs == 0 ) {
+ if( pOp->readTuplesExclusive()) {
ERR(pScanTrans->getNdbError());
m_ndb.closeTransaction(pScanTrans);
return NDBT_FAILED;
@@ -1045,7 +1043,7 @@ int Bank::sumTransactionsForGL(const Uint64 glTime,
int eof;
int rows = 0;
int rowsFound = 0;
- eof = rs->nextResult();
+ eof = pOp->nextResult();
while(eof == 0){
rows++;
@@ -1069,7 +1067,7 @@ int Bank::sumTransactionsForGL(const Uint64 glTime,
}
}
- eof = rs->nextResult();
+ eof = pOp->nextResult();
if ((rows % 100) == 0){
// "refresh" ownner transaction every 100th row
@@ -1153,8 +1151,7 @@ int Bank::performValidateGL(Uint64 glTime){
return NDBT_FAILED;
}
- NdbResultSet * rs = pOp->readTuples();
- if( rs == 0 ) {
+ if( pOp->readTuples() ) {
ERR(pScanTrans->getNdbError());
m_ndb.closeTransaction(pScanTrans);
return NDBT_FAILED;
@@ -1233,7 +1230,7 @@ int Bank::performValidateGL(Uint64 glTime){
int rows = 0;
int countGlRecords = 0;
int result = NDBT_OK;
- eof = rs->nextResult();
+ eof = pOp->nextResult();
while(eof == 0){
rows++;
@@ -1320,7 +1317,7 @@ int Bank::performValidateGL(Uint64 glTime){
}
}
- eof = rs->nextResult();
+ eof = pOp->nextResult();
}
if (eof == -1) {
ERR(pScanTrans->getNdbError());
@@ -1417,8 +1414,7 @@ int Bank::getOldestPurgedGL(const Uint32 accountType,
return NDBT_FAILED;
}
- NdbResultSet * rs = pOp->readTuples();
- if( rs == 0 ) {
+ if( pOp->readTuples() ) {
ERR(pScanTrans->getNdbError());
m_ndb.closeTransaction(pScanTrans);
return NDBT_FAILED;
@@ -1461,7 +1457,7 @@ int Bank::getOldestPurgedGL(const Uint32 accountType,
int eof;
int rows = 0;
- eof = rs->nextResult();
+ eof = pOp->nextResult();
oldest = 0;
while(eof == 0){
@@ -1475,7 +1471,7 @@ int Bank::getOldestPurgedGL(const Uint32 accountType,
if (t > oldest)
oldest = t;
}
- eof = rs->nextResult();
+ eof = pOp->nextResult();
}
if (eof == -1) {
ERR(pScanTrans->getNdbError());
@@ -1509,8 +1505,7 @@ int Bank::getOldestNotPurgedGL(Uint64 &oldest,
return NDBT_FAILED;
}
- NdbResultSet * rs = pOp->readTuples();
- if( rs == 0 ) {
+ if( pOp->readTuples() ) {
ERR(pScanTrans->getNdbError());
m_ndb.closeTransaction(pScanTrans);
return NDBT_FAILED;
@@ -1553,7 +1548,7 @@ int Bank::getOldestNotPurgedGL(Uint64 &oldest,
int eof;
int rows = 0;
- eof = rs->nextResult();
+ eof = pOp->nextResult();
oldest = (Uint64)-1;
found = false;
@@ -1570,7 +1565,7 @@ int Bank::getOldestNotPurgedGL(Uint64 &oldest,
accountTypeId = a;
}
}
- eof = rs->nextResult();
+ eof = pOp->nextResult();
}
if (eof == -1) {
ERR(pScanTrans->getNdbError());
@@ -1606,8 +1601,7 @@ int Bank::checkNoTransactionsOlderThan(const Uint32 accountType,
return NDBT_FAILED;
}
- NdbResultSet * rs = pOp->readTuples();
- if( rs == 0 ) {
+ if( pOp->readTuples() ) {
ERR(pScanTrans->getNdbError());
m_ndb.closeTransaction(pScanTrans);
return NDBT_FAILED;
@@ -1651,7 +1645,7 @@ int Bank::checkNoTransactionsOlderThan(const Uint32 accountType,
int eof;
int rows = 0;
int found = 0;
- eof = rs->nextResult();
+ eof = pOp->nextResult();
while(eof == 0){
rows++;
@@ -1667,7 +1661,7 @@ int Bank::checkNoTransactionsOlderThan(const Uint32 accountType,
<< " ti = " << ti << endl;
found++;
}
- eof = rs->nextResult();
+ eof = pOp->nextResult();
}
if (eof == -1) {
ERR(pScanTrans->getNdbError());
@@ -1850,8 +1844,7 @@ int Bank::findTransactionsToPurge(const Uint64 glTime,
return NDBT_FAILED;
}
- NdbResultSet * rs = pOp->readTuplesExclusive();
- if( rs == 0 ) {
+ if( pOp->readTuplesExclusive() ) {
ERR(pScanTrans->getNdbError());
m_ndb.closeTransaction(pScanTrans);
return NDBT_FAILED;
@@ -1888,7 +1881,7 @@ int Bank::findTransactionsToPurge(const Uint64 glTime,
int eof;
int rows = 0;
int rowsFound = 0;
- eof = rs->nextResult();
+ eof = pOp->nextResult();
while(eof == 0){
rows++;
@@ -1898,7 +1891,7 @@ int Bank::findTransactionsToPurge(const Uint64 glTime,
if (a == accountType && t == glTime){
rowsFound++;
// One record found
- check = rs->deleteTuple(pTrans);
+ check = pOp->deleteCurrentTuple(pTrans);
if (check == -1){
ERR(m_ndb.getNdbError());
m_ndb.closeTransaction(pScanTrans);
@@ -1913,7 +1906,7 @@ int Bank::findTransactionsToPurge(const Uint64 glTime,
return NDBT_FAILED;
}
}
- eof = rs->nextResult();
+ eof = pOp->nextResult();
}
if (eof == -1) {
ERR(pScanTrans->getNdbError());
@@ -2371,8 +2364,7 @@ int Bank::getSumAccounts(Uint32 &sumAccounts,
return NDBT_FAILED;
}
- NdbResultSet * rs = pOp->readTuplesExclusive();
- if( rs == 0 ) {
+ if( pOp->readTuplesExclusive() ) {
ERR(pScanTrans->getNdbError());
m_ndb.closeTransaction(pScanTrans);
return NDBT_FAILED;
@@ -2407,7 +2399,7 @@ int Bank::getSumAccounts(Uint32 &sumAccounts,
}
int eof;
- eof = rs->nextResult();
+ eof = pOp->nextResult();
while(eof == 0){
Uint32 b = balanceRec->u_32_value();
@@ -2419,7 +2411,7 @@ int Bank::getSumAccounts(Uint32 &sumAccounts,
// << ", sum="<< sumAccounts << endl;
// Take over the operation so that the lock is kept in db
- NdbOperation* pLockOp = rs->updateTuple(pTrans);
+ NdbOperation* pLockOp = pOp->updateCurrentTuple(pTrans);
if (pLockOp == NULL){
ERR(m_ndb.getNdbError());
m_ndb.closeTransaction(pScanTrans);
@@ -2445,7 +2437,7 @@ int Bank::getSumAccounts(Uint32 &sumAccounts,
return NDBT_FAILED;
}
- eof = rs->nextResult();
+ eof = pOp->nextResult();
}
if (eof == -1) {
ERR(pScanTrans->getNdbError());
diff --git a/ndb/test/ndbapi/bank/Bank.hpp b/ndb/test/ndbapi/bank/Bank.hpp
index 14e01df29d5..b80f02dae97 100644
--- a/ndb/test/ndbapi/bank/Bank.hpp
+++ b/ndb/test/ndbapi/bank/Bank.hpp
@@ -27,7 +27,7 @@
class Bank {
public:
- Bank(bool init = true);
+ Bank(Ndb_cluster_connection&, bool init = true);
int createAndLoadBank(bool overWrite, int num_accounts=10);
int dropBank();
diff --git a/ndb/test/ndbapi/bank/BankLoad.cpp b/ndb/test/ndbapi/bank/BankLoad.cpp
index 2cc42240234..34947019a51 100644
--- a/ndb/test/ndbapi/bank/BankLoad.cpp
+++ b/ndb/test/ndbapi/bank/BankLoad.cpp
@@ -342,8 +342,7 @@ int Bank::getBalanceForAccountType(const Uint32 accountType,
return NDBT_FAILED;
}
- NdbResultSet* rs = pOp->readTuples();
- if( rs == 0 ) {
+ if( pOp->readTuples() ) {
ERR(pScanTrans->getNdbError());
m_ndb.closeTransaction(pScanTrans);
return NDBT_FAILED;
@@ -379,7 +378,7 @@ int Bank::getBalanceForAccountType(const Uint32 accountType,
int eof;
int rows = 0;
- eof = rs->nextResult();
+ eof = pOp->nextResult();
while(eof == 0){
rows++;
@@ -391,7 +390,7 @@ int Bank::getBalanceForAccountType(const Uint32 accountType,
balance += b;
}
- eof = rs->nextResult();
+ eof = pOp->nextResult();
}
if (eof == -1) {
ERR(pScanTrans->getNdbError());
diff --git a/ndb/test/ndbapi/bank/bankCreator.cpp b/ndb/test/ndbapi/bank/bankCreator.cpp
index 301d8bda6d2..257255babc8 100644
--- a/ndb/test/ndbapi/bank/bankCreator.cpp
+++ b/ndb/test/ndbapi/bank/bankCreator.cpp
@@ -43,7 +43,13 @@ int main(int argc, const char** argv){
return NDBT_ProgramExit(NDBT_WRONGARGS);
}
- Bank bank;
+ Ndb_cluster_connection con;
+ if(con.connect(12, 5, 1) != 0)
+ {
+ return NDBT_ProgramExit(NDBT_FAILED);
+ }
+
+ Bank bank(con);
int overWriteExisting = true;
if (bank.createAndLoadBank(overWriteExisting) != NDBT_OK)
return NDBT_ProgramExit(NDBT_FAILED);
diff --git a/ndb/test/ndbapi/bank/bankMakeGL.cpp b/ndb/test/ndbapi/bank/bankMakeGL.cpp
index 9e2762ed8ae..cf373481e3e 100644
--- a/ndb/test/ndbapi/bank/bankMakeGL.cpp
+++ b/ndb/test/ndbapi/bank/bankMakeGL.cpp
@@ -43,7 +43,13 @@ int main(int argc, const char** argv){
return NDBT_ProgramExit(NDBT_WRONGARGS);
}
- Bank bank;
+ Ndb_cluster_connection con;
+ if(con.connect(12, 5, 1) != 0)
+ {
+ return NDBT_ProgramExit(NDBT_FAILED);
+ }
+
+ Bank bank(con);
if (bank.performMakeGLs() != 0)
return NDBT_ProgramExit(NDBT_FAILED);
diff --git a/ndb/test/ndbapi/bank/bankSumAccounts.cpp b/ndb/test/ndbapi/bank/bankSumAccounts.cpp
index b576161b27b..034f70f8f95 100644
--- a/ndb/test/ndbapi/bank/bankSumAccounts.cpp
+++ b/ndb/test/ndbapi/bank/bankSumAccounts.cpp
@@ -43,7 +43,13 @@ int main(int argc, const char** argv){
return NDBT_ProgramExit(NDBT_WRONGARGS);
}
- Bank bank;
+ Ndb_cluster_connection con;
+ if(con.connect(12, 5, 1) != 0)
+ {
+ return NDBT_ProgramExit(NDBT_FAILED);
+ }
+
+ Bank bank(con);
if (bank.performSumAccounts() != 0)
return NDBT_ProgramExit(NDBT_FAILED);
diff --git a/ndb/test/ndbapi/bank/bankTimer.cpp b/ndb/test/ndbapi/bank/bankTimer.cpp
index 874afd9c21e..298f85e1e43 100644
--- a/ndb/test/ndbapi/bank/bankTimer.cpp
+++ b/ndb/test/ndbapi/bank/bankTimer.cpp
@@ -46,7 +46,13 @@ int main(int argc, const char** argv){
return NDBT_ProgramExit(NDBT_WRONGARGS);
}
- Bank bank;
+ Ndb_cluster_connection con;
+ if(con.connect(12, 5, 1) != 0)
+ {
+ return NDBT_ProgramExit(NDBT_FAILED);
+ }
+
+ Bank bank(con);
if (bank.performIncreaseTime(_wait) != 0)
return NDBT_ProgramExit(NDBT_FAILED);
diff --git a/ndb/test/ndbapi/bank/bankTransactionMaker.cpp b/ndb/test/ndbapi/bank/bankTransactionMaker.cpp
index e5ff9aeb918..f8e646b6553 100644
--- a/ndb/test/ndbapi/bank/bankTransactionMaker.cpp
+++ b/ndb/test/ndbapi/bank/bankTransactionMaker.cpp
@@ -46,7 +46,13 @@ int main(int argc, const char** argv){
return NDBT_ProgramExit(NDBT_WRONGARGS);
}
- Bank bank;
+ Ndb_cluster_connection con;
+ if(con.connect(12, 5, 1) != 0)
+ {
+ return NDBT_ProgramExit(NDBT_FAILED);
+ }
+
+ Bank bank(con);
if (bank.performTransactions(_wait) != 0)
return NDBT_ProgramExit(NDBT_FAILED);
diff --git a/ndb/test/ndbapi/bank/bankValidateAllGLs.cpp b/ndb/test/ndbapi/bank/bankValidateAllGLs.cpp
index cf298ecc8e3..0c268121d8a 100644
--- a/ndb/test/ndbapi/bank/bankValidateAllGLs.cpp
+++ b/ndb/test/ndbapi/bank/bankValidateAllGLs.cpp
@@ -44,7 +44,13 @@ int main(int argc, const char** argv){
return NDBT_ProgramExit(NDBT_WRONGARGS);
}
- Bank bank;
+ Ndb_cluster_connection con;
+ if(con.connect(12, 5, 1) != 0)
+ {
+ return NDBT_ProgramExit(NDBT_FAILED);
+ }
+
+ Bank bank(con);
if (bank.performValidateAllGLs() != 0)
return NDBT_ProgramExit(NDBT_FAILED);
diff --git a/ndb/test/ndbapi/bank/testBank.cpp b/ndb/test/ndbapi/bank/testBank.cpp
index 3ef2799cd3c..6be66d528b1 100644
--- a/ndb/test/ndbapi/bank/testBank.cpp
+++ b/ndb/test/ndbapi/bank/testBank.cpp
@@ -32,7 +32,7 @@
#include "Bank.hpp"
int runCreateBank(NDBT_Context* ctx, NDBT_Step* step){
- Bank bank;
+ Bank bank(ctx->m_cluster_connection);
int overWriteExisting = true;
if (bank.createAndLoadBank(overWriteExisting) != NDBT_OK)
return NDBT_FAILED;
@@ -40,7 +40,7 @@ int runCreateBank(NDBT_Context* ctx, NDBT_Step* step){
}
int runBankTimer(NDBT_Context* ctx, NDBT_Step* step){
- Bank bank;
+ Bank bank(ctx->m_cluster_connection);
int wait = 30; // Max seconds between each "day"
int yield = 1; // Loops before bank returns
@@ -51,7 +51,7 @@ int runBankTimer(NDBT_Context* ctx, NDBT_Step* step){
}
int runBankTransactions(NDBT_Context* ctx, NDBT_Step* step){
- Bank bank;
+ Bank bank(ctx->m_cluster_connection);
int wait = 10; // Max ms between each transaction
int yield = 100; // Loops before bank returns
@@ -62,7 +62,7 @@ int runBankTransactions(NDBT_Context* ctx, NDBT_Step* step){
}
int runBankGL(NDBT_Context* ctx, NDBT_Step* step){
- Bank bank;
+ Bank bank(ctx->m_cluster_connection);
int yield = 20; // Loops before bank returns
int result = NDBT_OK;
@@ -76,7 +76,7 @@ int runBankGL(NDBT_Context* ctx, NDBT_Step* step){
}
int runBankSum(NDBT_Context* ctx, NDBT_Step* step){
- Bank bank;
+ Bank bank(ctx->m_cluster_connection);
int wait = 2000; // Max ms between each sum of accounts
int yield = 1; // Loops before bank returns
int result = NDBT_OK;
@@ -91,7 +91,7 @@ int runBankSum(NDBT_Context* ctx, NDBT_Step* step){
}
int runDropBank(NDBT_Context* ctx, NDBT_Step* step){
- Bank bank;
+ Bank bank(ctx->m_cluster_connection);
if (bank.dropBank() != NDBT_OK)
return NDBT_FAILED;
return NDBT_OK;
diff --git a/ndb/test/ndbapi/bench/userInterface.cpp b/ndb/test/ndbapi/bench/userInterface.cpp
index 683552c3133..35e88183230 100644
--- a/ndb/test/ndbapi/bench/userInterface.cpp
+++ b/ndb/test/ndbapi/bench/userInterface.cpp
@@ -173,7 +173,7 @@ create_table_server(Ndb * pNdb){
String,
MMBased,
NotNullAttribute,
- NormalStorageAttribute,
+ 0,
0,
1,
16);
@@ -376,7 +376,7 @@ create_table_subscriber(Ndb * pNdb){
String,
MMBased,
NotNullAttribute,
- NormalStorageAttribute,
+ 0,
0,
1,
16);
@@ -494,7 +494,7 @@ create_table_session(Ndb * pNdb){
String,
MMBased,
NotNullAttribute,
- NormalStorageAttribute,
+ 0,
0,
1,
16);
diff --git a/ndb/test/ndbapi/bench/userInterface.h b/ndb/test/ndbapi/bench/userInterface.h
index 9e3b6f8f2a5..bad61fcf171 100644
--- a/ndb/test/ndbapi/bench/userInterface.h
+++ b/ndb/test/ndbapi/bench/userInterface.h
@@ -101,7 +101,7 @@ extern "C" {
typedef struct {
struct Ndb_cluster_connection* pNCC;
struct Ndb * pNDB;
- struct NdbConnection * pCurrTrans;
+ struct NdbTransaction * pCurrTrans;
} UserHandle;
/***************************************************************
diff --git a/ndb/test/ndbapi/create_all_tabs.cpp b/ndb/test/ndbapi/create_all_tabs.cpp
index 97236b98b36..f06078d67a2 100644
--- a/ndb/test/ndbapi/create_all_tabs.cpp
+++ b/ndb/test/ndbapi/create_all_tabs.cpp
@@ -47,7 +47,12 @@ int main(int argc, const char** argv){
}
// Connect to Ndb
- Ndb MyNdb( "TEST_DB" );
+ Ndb_cluster_connection con;
+ if(con.connect(12, 5, 1) != 0)
+ {
+ return NDBT_ProgramExit(NDBT_FAILED);
+ }
+ Ndb MyNdb(&con, "TEST_DB" );
if(MyNdb.init() != 0){
ERR(MyNdb.getNdbError());
diff --git a/ndb/test/ndbapi/create_tab.cpp b/ndb/test/ndbapi/create_tab.cpp
index 283c83d30e0..b35c8655236 100644
--- a/ndb/test/ndbapi/create_tab.cpp
+++ b/ndb/test/ndbapi/create_tab.cpp
@@ -77,8 +77,12 @@ int main(int argc, const char** argv){
*/
// Connect to Ndb
- Ndb::setConnectString(_connectstr);
- Ndb MyNdb( "TEST_DB" );
+ Ndb_cluster_connection con(_connectstr);
+ if(con.connect(12, 5, 1) != 0)
+ {
+ return NDBT_ProgramExit(NDBT_FAILED);
+ }
+ Ndb MyNdb(&con, "TEST_DB" );
if(MyNdb.init() != 0){
ERR(MyNdb.getNdbError());
diff --git a/ndb/test/ndbapi/drop_all_tabs.cpp b/ndb/test/ndbapi/drop_all_tabs.cpp
index c024a81a5e6..f12d750916e 100644
--- a/ndb/test/ndbapi/drop_all_tabs.cpp
+++ b/ndb/test/ndbapi/drop_all_tabs.cpp
@@ -40,7 +40,13 @@ int main(int argc, const char** argv){
}
// Connect to Ndb
- Ndb MyNdb( "TEST_DB" );
+ Ndb_cluster_connection con;
+ if(con.connect(12, 5, 1) != 0)
+ {
+ return NDBT_ProgramExit(NDBT_FAILED);
+ }
+
+ Ndb MyNdb(&con, "TEST_DB" );
if(MyNdb.init() != 0){
ERR(MyNdb.getNdbError());
diff --git a/ndb/test/ndbapi/flexAsynch.cpp b/ndb/test/ndbapi/flexAsynch.cpp
index 4b87b2c70ed..8a7dbec1561 100644
--- a/ndb/test/ndbapi/flexAsynch.cpp
+++ b/ndb/test/ndbapi/flexAsynch.cpp
@@ -16,6 +16,7 @@
+#include <ndb_global.h>
#include "NdbApi.hpp"
#include <NdbSchemaCon.hpp>
#include <NdbMain.h>
@@ -143,6 +144,8 @@ tellThreads(StartType what)
ThreadStart[i] = what;
}
+static Ndb_cluster_connection *g_cluster_connection= 0;
+
NDB_COMMAND(flexAsynch, "flexAsynch", "flexAsynch", "flexAsynch", 65535)
{
ndb_init();
@@ -200,7 +203,14 @@ NDB_COMMAND(flexAsynch, "flexAsynch", "flexAsynch", "flexAsynch", 65535)
setAttrNames();
setTableNames();
- Ndb * pNdb = new Ndb("TEST_DB");
+ Ndb_cluster_connection con;
+ if(con.connect(12, 5, 1) != 0)
+ {
+ return NDBT_ProgramExit(NDBT_FAILED);
+ }
+ g_cluster_connection= &con;
+
+ Ndb * pNdb = new Ndb(g_cluster_connection, "TEST_DB");
pNdb->init();
tNodeId = pNdb->getNodeId();
@@ -225,7 +235,7 @@ NDB_COMMAND(flexAsynch, "flexAsynch", "flexAsynch", "flexAsynch", 65535)
* Create NDB objects. *
****************************************************************/
resetThreads();
- for (int i = 0; i < tNoOfThreads ; i++) {
+ for (i = 0; i < tNoOfThreads ; i++) {
pThreadData[i].ThreadNo = i
;
threadLife[i] = NdbThread_Create(threadLoop,
@@ -468,7 +478,7 @@ threadLoop(void* ThreadData)
StartType tType;
ThreadNdb* tabThread = (ThreadNdb*)ThreadData;
int threadNo = tabThread->ThreadNo;
- localNdb = new Ndb("TEST_DB");
+ localNdb = new Ndb(g_cluster_connection, "TEST_DB");
localNdb->init(1024);
localNdb->waitUntilReady(10000);
unsigned int threadBase = (threadNo << 16) + tNodeId ;
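flexAsynch now creates one Ndb object per worker thread on top of the single shared Ndb_cluster_connection, matching the rule that Ndb objects should not be shared between threads. A trimmed sketch of that per-thread pattern (the thread body and counts are illustrative):

    static void* worker_thread(void* arg)
    {
      Ndb* ndb = new Ndb(g_cluster_connection, "TEST_DB");  // one Ndb per thread
      ndb->init(1024);                                      // up to 1024 parallel transactions
      ndb->waitUntilReady(10000);
      /* ... run this thread's transactions ... */
      delete ndb;
      return NULL;
    }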
diff --git a/ndb/test/ndbapi/flexBench.cpp b/ndb/test/ndbapi/flexBench.cpp
index cc2bfb391da..abddecfdc40 100644
--- a/ndb/test/ndbapi/flexBench.cpp
+++ b/ndb/test/ndbapi/flexBench.cpp
@@ -49,6 +49,7 @@ Arguments:
* *************************************************** */
+#include <ndb_global.h>
#include "NdbApi.hpp"
#include <NdbMain.h>
@@ -279,6 +280,8 @@ tellThreads(ThreadData* pt, StartType what)
pt[i].threadStart = what;
}
+static Ndb_cluster_connection *g_cluster_connection= 0;
+
NDB_COMMAND(flexBench, "flexBench", "flexBench", "flexbench", 65535)
{
ndb_init();
@@ -326,8 +329,16 @@ NDB_COMMAND(flexBench, "flexBench", "flexBench", "flexbench", 65535)
NdbThread_SetConcurrencyLevel(tNoOfThreads + 2);
+ Ndb_cluster_connection con;
+ if(con.connect(12, 5, 1) != 0)
+ {
+ return NDBT_ProgramExit(NDBT_FAILED);
+ }
+
+ g_cluster_connection= &con;
+
Ndb* pNdb;
- pNdb = new Ndb( "TEST_DB" );
+ pNdb = new Ndb(&con, "TEST_DB" );
pNdb->init();
tNodeId = pNdb->getNodeId();
@@ -605,7 +616,7 @@ static void* flexBenchThread(void* pArg)
attrValue = (int*)malloc(nReadBuffSize) ;
attrRefValue = (int*)malloc(nRefBuffSize) ;
pOps = (NdbOperation**)malloc(tNoOfTables*sizeof(NdbOperation*)) ;
- pNdb = new Ndb( "TEST_DB" );
+ pNdb = new Ndb(g_cluster_connection, "TEST_DB" );
if(!attrValue || !attrRefValue || !pOps || !pNdb){
// Check allocations to make sure we got all the memory we asked for
diff --git a/ndb/test/ndbapi/flexHammer.cpp b/ndb/test/ndbapi/flexHammer.cpp
index 13cd2d5e561..f254b1e5ccf 100644
--- a/ndb/test/ndbapi/flexHammer.cpp
+++ b/ndb/test/ndbapi/flexHammer.cpp
@@ -47,6 +47,7 @@ Revision history:
* *************************************************** */
+#include <ndb_global.h>
#include <NdbApi.hpp>
#include <NdbMain.h>
@@ -174,6 +175,8 @@ tellThreads(ThreadNdb* threadArrayP, const StartType what)
threadArrayP[i].threadStart = what;
} // for
} // tellThreads
+
+static Ndb_cluster_connection *g_cluster_connection= 0;
NDB_COMMAND(flexHammer, "flexHammer", "flexHammer", "flexHammer", 65535)
//main(int argc, const char** argv)
@@ -213,7 +216,13 @@ NDB_COMMAND(flexHammer, "flexHammer", "flexHammer", "flexHammer", 65535)
// NdbThread_SetConcurrencyLevel(tNoOfThreads + 2);
// Create and init Ndb object
- pMyNdb = new Ndb("TEST_DB");
+ Ndb_cluster_connection con;
+ if(con.connect(12, 5, 1) != 0)
+ {
+ return NDBT_ProgramExit(NDBT_FAILED);
+ }
+ g_cluster_connection= &con;
+ pMyNdb = new Ndb(g_cluster_connection, "TEST_DB");
pMyNdb->init();
// Wait for Ndb to become ready
@@ -345,7 +354,7 @@ flexHammerThread(void* pArg)
for (i = 0; i < MAXATTRSIZE; i++)
attrValue[i] = 0;
// Ndb object for each thread
- pMyNdb = new Ndb( "TEST_DB" );
+ pMyNdb = new Ndb(g_cluster_connection, "TEST_DB" );
pMyNdb->init();
if (pMyNdb->waitUntilReady(10000) != 0) {
// Error, NDB is not ready
diff --git a/ndb/test/ndbapi/flexTT.cpp b/ndb/test/ndbapi/flexTT.cpp
index 8d5be2bb399..7cd5ac8e3b4 100644
--- a/ndb/test/ndbapi/flexTT.cpp
+++ b/ndb/test/ndbapi/flexTT.cpp
@@ -169,6 +169,8 @@ tellThreads(StartType what)
ThreadStart[i] = what;
}
+static Ndb_cluster_connection *g_cluster_connection= 0;
+
NDB_COMMAND(flexTT, "flexTT", "flexTT", "flexTT", 65535)
{
ndb_init();
@@ -226,7 +228,14 @@ NDB_COMMAND(flexTT, "flexTT", "flexTT", "flexTT", 65535)
setAttrNames();
setTableNames();
- Ndb * pNdb = new Ndb("TEST_DB");
+ Ndb_cluster_connection con;
+ if(con.connect(12, 5, 1) != 0)
+ {
+ return NDBT_ProgramExit(NDBT_FAILED);
+ }
+ g_cluster_connection= &con;
+
+ Ndb * pNdb = new Ndb(g_cluster_connection, "TEST_DB");
pNdb->init();
tNodeId = pNdb->getNodeId();
@@ -334,7 +343,7 @@ threadLoop(void* ThreadData)
void * mem = malloc(sizeof(TransNdb)*tNoOfParallelTrans);
TransNdb* pTransData = (TransNdb*)mem;
- localNdb = new Ndb("TEST_DB");
+ localNdb = new Ndb(g_cluster_connection, "TEST_DB");
localNdb->init(1024);
localNdb->waitUntilReady();
diff --git a/ndb/test/ndbapi/flex_bench_mysql.cpp b/ndb/test/ndbapi/flex_bench_mysql.cpp
index c15175bfb00..3efb7ee2094 100644
--- a/ndb/test/ndbapi/flex_bench_mysql.cpp
+++ b/ndb/test/ndbapi/flex_bench_mysql.cpp
@@ -397,6 +397,7 @@ NDB_COMMAND(flexBench, "flexBench", "flexBench", "flexbench", 65535)
ndbout << "Connect failed" <<endl;
returnValue = NDBT_FAILED;
}
+ mysql.reconnect= 1;
}
if(returnValue == NDBT_OK){
mysql_set_server_option(&mysql, MYSQL_OPTION_MULTI_STATEMENTS_ON);
@@ -712,6 +713,7 @@ static void* flexBenchThread(void* pArg)
ndbout << "failed" << endl;
return 0;
}
+ mysql.reconnect= 1;
ndbout << "ok" << endl;
int r;
diff --git a/ndb/test/ndbapi/slow_select.cpp b/ndb/test/ndbapi/slow_select.cpp
index 625dbc34457..8d615fa5771 100644
--- a/ndb/test/ndbapi/slow_select.cpp
+++ b/ndb/test/ndbapi/slow_select.cpp
@@ -1,4 +1,5 @@
+#include <ndb_global.h>
#include <NdbApi.hpp>
#include <NdbOut.hpp>
#include <NdbTick.h>
@@ -8,18 +9,17 @@ S_Scan {
const char * m_table;
const char * m_index;
NdbIndexScanOperation * m_scan;
- NdbResultSet * m_result;
Uint32 metaid;
Uint32 match_count;
Uint32 row_count;
};
static S_Scan g_scans[] = {
- { "affiliatestometa", "ind_affiliatestometa", 0, 0, 0, 0, 0 },
- { "media", "metaid", 0, 0, 0, 0, 0 },
- { "meta", "PRIMARY", 0, 0, 0, 0, 0 },
- { "artiststometamap", "PRIMARY", 0, 0, 0, 0, 0 },
- { "subgenrestometamap", "metaid", 0, 0, 0, 0, 0 }
+ { "affiliatestometa", "ind_affiliatestometa", 0, 0, 0, 0 },
+ { "media", "metaid", 0, 0, 0, 0 },
+ { "meta", "PRIMARY", 0, 0, 0, 0 },
+ { "artiststometamap", "PRIMARY", 0, 0, 0, 0 },
+ { "subgenrestometamap", "metaid", 0, 0, 0, 0 }
};
#define require(x) if(!(x)) { ndbout << "LINE: " << __LINE__ << endl;abort(); }
@@ -37,7 +37,14 @@ static void lookup();
int
main(void){
ndb_init();
- Ndb g_ndb("test");
+
+ Ndb_cluster_connection con;
+ if(con.connect(12, 5, 1) != 0)
+ {
+ return 1;
+ }
+
+ Ndb g_ndb(&con, "test");
g_ndb.init(1024);
require(g_ndb.waitUntilReady() == 0);
@@ -58,9 +65,8 @@ main(void){
g_scans[i].m_table);
NdbIndexScanOperation* scan = g_scans[i].m_scan;
require(scan);
- g_scans[i].m_result = scan->readTuples(NdbScanOperation::LM_CommittedRead,
- 0, 0, true);
- require(g_scans[i].m_result);
+ require(scan->readTuples(NdbScanOperation::LM_CommittedRead,
+ 0, 0, true) == 0);
}
require(!g_scans[0].m_scan->setBound((Uint32)0,
@@ -125,7 +131,7 @@ main(void){
//ndbout_c("%s - %d", g_scans[i].m_table, g_scans[i].metaid);
for(i = 0; i<prev_F_sz; i++){
- int res = F[i]->m_result->nextResult();
+ int res = F[i]->m_scan->nextResult();
if(res == -1)
abort();
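The slow_select changes show the other API shift in this patch: NdbScanOperation::readTuples() now returns an int status (0 on success) instead of an NdbResultSet*, and rows are fetched directly from the scan operation with nextResult(). A condensed sketch of the updated scan loop, using the NdbTransaction name introduced in the first hunk; the index, table, and column names are illustrative:

    int scan_media(Ndb& ndb)
    {
      NdbTransaction* trans = ndb.startTransaction();
      NdbIndexScanOperation* scan =
        trans->getNdbIndexScanOperation("metaid", "media");          // index name, table name
      if (scan == 0 ||
          scan->readTuples(NdbScanOperation::LM_CommittedRead, 0, 0, true) != 0)
        return -1;                                                   // 0 now means success
      NdbRecAttr* metaid = scan->getValue("metaid");
      if (trans->execute(NdbTransaction::NoCommit) != 0)
        return -1;
      int res;
      while ((res = scan->nextResult(true)) == 0) {
        // ... use metaid->u_32_value() here ...
      }
      ndb.closeTransaction(trans);
      return res == 1 ? 0 : -1;                                      // 1 == end of scan
    }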
diff --git a/ndb/test/ndbapi/testBackup.cpp b/ndb/test/ndbapi/testBackup.cpp
index 14198c250c7..1ab348e735f 100644
--- a/ndb/test/ndbapi/testBackup.cpp
+++ b/ndb/test/ndbapi/testBackup.cpp
@@ -271,7 +271,7 @@ int runDropTable(NDBT_Context* ctx, NDBT_Step* step){
#include "bank/Bank.hpp"
int runCreateBank(NDBT_Context* ctx, NDBT_Step* step){
- Bank bank;
+ Bank bank(ctx->m_cluster_connection);
int overWriteExisting = true;
if (bank.createAndLoadBank(overWriteExisting, 10) != NDBT_OK)
return NDBT_FAILED;
@@ -279,7 +279,7 @@ int runCreateBank(NDBT_Context* ctx, NDBT_Step* step){
}
int runBankTimer(NDBT_Context* ctx, NDBT_Step* step){
- Bank bank;
+ Bank bank(ctx->m_cluster_connection);
int wait = 30; // Max seconds between each "day"
int yield = 1; // Loops before bank returns
@@ -290,7 +290,7 @@ int runBankTimer(NDBT_Context* ctx, NDBT_Step* step){
}
int runBankTransactions(NDBT_Context* ctx, NDBT_Step* step){
- Bank bank;
+ Bank bank(ctx->m_cluster_connection);
int wait = 10; // Max ms between each transaction
int yield = 100; // Loops before bank returns
@@ -301,7 +301,7 @@ int runBankTransactions(NDBT_Context* ctx, NDBT_Step* step){
}
int runBankGL(NDBT_Context* ctx, NDBT_Step* step){
- Bank bank;
+ Bank bank(ctx->m_cluster_connection);
int yield = 20; // Loops before bank returns
int result = NDBT_OK;
@@ -315,7 +315,7 @@ int runBankGL(NDBT_Context* ctx, NDBT_Step* step){
}
int runBankSum(NDBT_Context* ctx, NDBT_Step* step){
- Bank bank;
+ Bank bank(ctx->m_cluster_connection);
int wait = 2000; // Max ms between each sum of accounts
int yield = 1; // Loops before bank returns
int result = NDBT_OK;
@@ -330,7 +330,7 @@ int runBankSum(NDBT_Context* ctx, NDBT_Step* step){
}
int runDropBank(NDBT_Context* ctx, NDBT_Step* step){
- Bank bank;
+ Bank bank(ctx->m_cluster_connection);
if (bank.dropBank() != NDBT_OK)
return NDBT_FAILED;
return NDBT_OK;
@@ -404,7 +404,7 @@ int runRestoreBankAndVerify(NDBT_Context* ctx, NDBT_Step* step){
// To erase all tables from cache(s)
// To be removed, maybe replaced by ndb.invalidate();
{
- Bank bank;
+ Bank bank(ctx->m_cluster_connection);
if (bank.dropBank() != NDBT_OK){
result = NDBT_FAILED;
@@ -427,7 +427,7 @@ int runRestoreBankAndVerify(NDBT_Context* ctx, NDBT_Step* step){
ndbout << "Backup " << backupId << " restored" << endl;
// Let bank verify
- Bank bank;
+ Bank bank(ctx->m_cluster_connection);
int wait = 0;
int yield = 1;
diff --git a/ndb/test/ndbapi/testBitfield.cpp b/ndb/test/ndbapi/testBitfield.cpp
new file mode 100644
index 00000000000..e26f495f5a4
--- /dev/null
+++ b/ndb/test/ndbapi/testBitfield.cpp
@@ -0,0 +1,198 @@
+
+#include <ndb_global.h>
+#include <ndb_opts.h>
+#include <NDBT.hpp>
+#include <NdbApi.hpp>
+#include <HugoTransactions.hpp>
+
+static const char* _dbname = "TEST_DB";
+static int g_loops = 7;
+
+static void usage()
+{
+ ndb_std_print_version();
+}
+#if 0
+static my_bool
+get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
+ const char *argument)
+{
+ return ndb_std_get_one_option(optid, opt, argument ? argument :
+ "d:t:O,/tmp/testBitfield.trace");
+}
+#endif
+
+static const NdbDictionary::Table* create_random_table(Ndb*);
+static int transactions(Ndb*, const NdbDictionary::Table* tab);
+static int unique_indexes(Ndb*, const NdbDictionary::Table* tab);
+static int ordered_indexes(Ndb*, const NdbDictionary::Table* tab);
+static int node_restart(Ndb*, const NdbDictionary::Table* tab);
+static int system_restart(Ndb*, const NdbDictionary::Table* tab);
+
+int
+main(int argc, char** argv){
+ NDB_INIT(argv[0]);
+ const char *load_default_groups[]= { "mysql_cluster",0 };
+ load_defaults("my",load_default_groups,&argc,&argv);
+ int ho_error;
+
+ argc--;
+ argv++;
+
+ Ndb_cluster_connection con(opt_connect_str);
+ if(con.connect(12, 5, 1))
+ {
+ return NDBT_ProgramExit(NDBT_FAILED);
+ }
+
+
+ Ndb* pNdb;
+ pNdb = new Ndb(&con, _dbname);
+ pNdb->init();
+ while (pNdb->waitUntilReady() != 0);
+ int res = NDBT_FAILED;
+
+ NdbDictionary::Dictionary * dict = pNdb->getDictionary();
+
+ const NdbDictionary::Table* pTab = 0;
+ for (int i = 0; i < (argc ? argc : g_loops) ; i++)
+ {
+ res = NDBT_FAILED;
+ if(argc == 0)
+ {
+ pTab = create_random_table(pNdb);
+ }
+ else
+ {
+ dict->dropTable(argv[i]);
+ NDBT_Tables::createTable(pNdb, argv[i]);
+ pTab = dict->getTable(argv[i]);
+ }
+
+ if (pTab == 0)
+ {
+ ndbout << "Failed to create table" << endl;
+ ndbout << dict->getNdbError() << endl;
+ break;
+ }
+
+ if(transactions(pNdb, pTab))
+ break;
+
+ if(unique_indexes(pNdb, pTab))
+ break;
+
+ if(ordered_indexes(pNdb, pTab))
+ break;
+
+ if(node_restart(pNdb, pTab))
+ break;
+
+ if(system_restart(pNdb, pTab))
+ break;
+
+ dict->dropTable(pTab->getName());
+ res = NDBT_OK;
+ }
+
+ if(res != NDBT_OK && pTab)
+ {
+ dict->dropTable(pTab->getName());
+ }
+
+ delete pNdb;
+ return NDBT_ProgramExit(res);
+}
+
+static
+const NdbDictionary::Table*
+create_random_table(Ndb* pNdb)
+{
+ do {
+ NdbDictionary::Table tab;
+ Uint32 cols = 1 + (rand() % (NDB_MAX_ATTRIBUTES_IN_TABLE - 1));
+ Uint32 keys = NDB_MAX_NO_OF_ATTRIBUTES_IN_KEY;
+ Uint32 length = 4090;
+ Uint32 key_size = NDB_MAX_KEYSIZE_IN_WORDS;
+
+ BaseString name;
+ name.assfmt("TAB_%d", rand() & 65535);
+ tab.setName(name.c_str());
+ for(int i = 0; i<cols && length > 2; i++)
+ {
+ NdbDictionary::Column col;
+ name.assfmt("COL_%d", i);
+ col.setName(name.c_str());
+ if(i == 0 || i == 1)
+ {
+ col.setType(NdbDictionary::Column::Unsigned);
+ col.setLength(1);
+ col.setNullable(false);
+ col.setPrimaryKey(i == 0);
+ tab.addColumn(col);
+ continue;
+ }
+
+ col.setType(NdbDictionary::Column::Bit);
+
+ Uint32 len = 1 + (rand() % (length - 1));
+ col.setLength(len); length -= len;
+ int nullable = (rand() >> 16) & 1;
+ col.setNullable(nullable); length -= nullable;
+ col.setPrimaryKey(false);
+ tab.addColumn(col);
+ }
+
+ pNdb->getDictionary()->dropTable(tab.getName());
+ if(pNdb->getDictionary()->createTable(tab) == 0)
+ {
+ ndbout << (NDBT_Table&)tab << endl;
+ return pNdb->getDictionary()->getTable(tab.getName());
+ }
+ } while(0);
+ return 0;
+}
+
+static
+int
+transactions(Ndb* pNdb, const NdbDictionary::Table* tab)
+{
+ int i = 0;
+ HugoTransactions trans(* tab);
+ i |= trans.loadTable(pNdb, 1000);
+ i |= trans.pkReadRecords(pNdb, 1000, 13);
+ i |= trans.scanReadRecords(pNdb, 1000, 25);
+ i |= trans.pkUpdateRecords(pNdb, 1000, 37);
+ i |= trans.scanUpdateRecords(pNdb, 1000, 25);
+ i |= trans.pkDelRecords(pNdb, 500, 23);
+ i |= trans.clearTable(pNdb);
+ return i;
+}
+
+static
+int
+unique_indexes(Ndb* pNdb, const NdbDictionary::Table* tab)
+{
+ return 0;
+}
+
+static
+int
+ordered_indexes(Ndb* pNdb, const NdbDictionary::Table* tab)
+{
+ return 0;
+}
+
+static
+int
+node_restart(Ndb* pNdb, const NdbDictionary::Table* tab)
+{
+ return 0;
+}
+
+static
+int
+system_restart(Ndb* pNdb, const NdbDictionary::Table* tab)
+{
+ return 0;
+}
diff --git a/ndb/test/ndbapi/testBlobs.cpp b/ndb/test/ndbapi/testBlobs.cpp
index efbdceac5a6..a88d7d21820 100644
--- a/ndb/test/ndbapi/testBlobs.cpp
+++ b/ndb/test/ndbapi/testBlobs.cpp
@@ -23,14 +23,13 @@
#include <NdbOut.hpp>
#include <NdbTest.hpp>
#include <NdbTick.h>
-#include <ndb/src/ndbapi/NdbBlobImpl.hpp>
struct Bcol {
bool m_nullable;
unsigned m_inline;
unsigned m_partsize;
unsigned m_stripe;
- char m_btname[NdbBlobImpl::BlobTableNameSize];
+ char m_btname[200];
Bcol(bool a, unsigned b, unsigned c, unsigned d) :
m_nullable(a),
m_inline(b),
@@ -741,10 +740,9 @@ verifyBlobTable(const Bcol& b, const Bval& v, Uint32 pk1, bool exists)
NdbRecAttr* ra_pk;
NdbRecAttr* ra_part;
NdbRecAttr* ra_data;
- NdbResultSet* rs;
CHK((g_con = g_ndb->startTransaction()) != 0);
CHK((g_ops = g_con->getNdbScanOperation(b.m_btname)) != 0);
- CHK((rs = g_ops->readTuples()) != 0);
+ CHK(g_ops->readTuples() == 0);
CHK((ra_pk = g_ops->getValue("PK")) != 0);
CHK((ra_part = g_ops->getValue("PART")) != 0);
CHK((ra_data = g_ops->getValue("DATA")) != 0);
@@ -758,7 +756,7 @@ verifyBlobTable(const Bcol& b, const Bval& v, Uint32 pk1, bool exists)
memset(seen, 0, partcount);
while (1) {
int ret;
- CHK((ret = rs->nextResult()) == 0 || ret == 1);
+ CHK((ret = g_ops->nextResult()) == 0 || ret == 1);
if (ret == 1)
break;
if (pk1 != ra_pk->u_32_value())
@@ -1106,14 +1104,13 @@ readScan(int style, bool idx)
DBG("--- " << "readScan" << (idx ? "Idx" : "") << " " << stylename[style] << " ---");
Tup tup;
tup.alloc(); // allocate buffers
- NdbResultSet* rs;
CHK((g_con = g_ndb->startTransaction()) != 0);
if (! idx) {
CHK((g_ops = g_con->getNdbScanOperation(g_opt.m_tname)) != 0);
} else {
CHK((g_ops = g_con->getNdbIndexScanOperation(g_opt.m_x2name, g_opt.m_tname)) != 0);
}
- CHK((rs = g_ops->readTuples(NdbScanOperation::LM_Read)) != 0);
+ CHK(g_ops->readTuples(NdbScanOperation::LM_Read) == 0);
CHK(g_ops->getValue("PK1", (char*)&tup.m_pk1) != 0);
if (g_opt.m_pk2len != 0)
CHK(g_ops->getValue("PK2", tup.m_pk2) != 0);
@@ -1129,7 +1126,7 @@ readScan(int style, bool idx)
int ret;
tup.m_pk1 = (Uint32)-1;
memset(tup.m_pk2, 'x', g_opt.m_pk2len);
- CHK((ret = rs->nextResult(true)) == 0 || ret == 1);
+ CHK((ret = g_ops->nextResult(true)) == 0 || ret == 1);
if (ret == 1)
break;
DBG("readScan" << (idx ? "Idx" : "") << " pk1=" << hex << tup.m_pk1);
@@ -1159,14 +1156,13 @@ updateScan(int style, bool idx)
DBG("--- " << "updateScan" << (idx ? "Idx" : "") << " " << stylename[style] << " ---");
Tup tup;
tup.alloc(); // allocate buffers
- NdbResultSet* rs;
CHK((g_con = g_ndb->startTransaction()) != 0);
if (! idx) {
CHK((g_ops = g_con->getNdbScanOperation(g_opt.m_tname)) != 0);
} else {
CHK((g_ops = g_con->getNdbIndexScanOperation(g_opt.m_x2name, g_opt.m_tname)) != 0);
}
- CHK((rs = g_ops->readTuples(NdbScanOperation::LM_Exclusive)) != 0);
+ CHK(g_ops->readTuples(NdbScanOperation::LM_Exclusive) == 0);
CHK(g_ops->getValue("PK1", (char*)&tup.m_pk1) != 0);
if (g_opt.m_pk2len != 0)
CHK(g_ops->getValue("PK2", tup.m_pk2) != 0);
@@ -1176,7 +1172,7 @@ updateScan(int style, bool idx)
int ret;
tup.m_pk1 = (Uint32)-1;
memset(tup.m_pk2, 'x', g_opt.m_pk2len);
- CHK((ret = rs->nextResult(true)) == 0 || ret == 1);
+ CHK((ret = g_ops->nextResult(true)) == 0 || ret == 1);
if (ret == 1)
break;
DBG("updateScan" << (idx ? "Idx" : "") << " pk1=" << hex << tup.m_pk1);
@@ -1185,7 +1181,7 @@ updateScan(int style, bool idx)
// calculate new blob values
calcBval(g_tups[k], false);
tup.copyfrom(g_tups[k]);
- CHK((g_opr = rs->updateTuple()) != 0);
+ CHK((g_opr = g_ops->updateCurrentTuple()) != 0);
CHK(getBlobHandles(g_opr) == 0);
if (style == 0) {
CHK(setBlobValue(tup) == 0);
@@ -1212,14 +1208,13 @@ deleteScan(bool idx)
{
DBG("--- " << "deleteScan" << (idx ? "Idx" : "") << " ---");
Tup tup;
- NdbResultSet* rs;
CHK((g_con = g_ndb->startTransaction()) != 0);
if (! idx) {
CHK((g_ops = g_con->getNdbScanOperation(g_opt.m_tname)) != 0);
} else {
CHK((g_ops = g_con->getNdbIndexScanOperation(g_opt.m_x2name, g_opt.m_tname)) != 0);
}
- CHK((rs = g_ops->readTuples(NdbScanOperation::LM_Exclusive)) != 0);
+ CHK(g_ops->readTuples(NdbScanOperation::LM_Exclusive) == 0);
CHK(g_ops->getValue("PK1", (char*)&tup.m_pk1) != 0);
if (g_opt.m_pk2len != 0)
CHK(g_ops->getValue("PK2", tup.m_pk2) != 0);
@@ -1229,11 +1224,11 @@ deleteScan(bool idx)
int ret;
tup.m_pk1 = (Uint32)-1;
memset(tup.m_pk2, 'x', g_opt.m_pk2len);
- CHK((ret = rs->nextResult()) == 0 || ret == 1);
+ CHK((ret = g_ops->nextResult()) == 0 || ret == 1);
if (ret == 1)
break;
DBG("deleteScan" << (idx ? "Idx" : "") << " pk1=" << hex << tup.m_pk1);
- CHK(rs->deleteTuple() == 0);
+ CHK(g_ops->deleteCurrentTuple() == 0);
CHK(g_con->execute(NoCommit) == 0);
Uint32 k = tup.m_pk1 - g_opt.m_pk1off;
CHK(k < g_opt.m_rows && g_tups[k].m_exists);
@@ -1611,12 +1606,11 @@ testperf()
// scan read char
{
DBG("--- scan read char ---");
- NdbResultSet* rs;
Uint32 a;
char b[20];
CHK((g_con = g_ndb->startTransaction()) != 0);
CHK((g_ops = g_con->getNdbScanOperation(tab.getName())) != 0);
- CHK((rs = g_ops->readTuples(NdbScanOperation::LM_Read)) != 0);
+ CHK(g_ops->readTuples(NdbScanOperation::LM_Read) == 0);
CHK(g_ops->getValue(cA, (char*)&a) != 0);
CHK(g_ops->getValue(cB, b) != 0);
CHK(g_con->execute(NoCommit) == 0);
@@ -1626,7 +1620,7 @@ testperf()
a = (Uint32)-1;
b[0] = 0;
int ret;
- CHK((ret = rs->nextResult(true)) == 0 || ret == 1);
+ CHK((ret = g_ops->nextResult(true)) == 0 || ret == 1);
if (ret == 1)
break;
CHK(a < g_opt.m_rowsperf && strcmp(b, "b") == 0);
@@ -1641,12 +1635,11 @@ testperf()
// scan read text
{
DBG("--- read text ---");
- NdbResultSet* rs;
Uint32 a;
char c[20];
CHK((g_con = g_ndb->startTransaction()) != 0);
CHK((g_ops = g_con->getNdbScanOperation(tab.getName())) != 0);
- CHK((rs = g_ops->readTuples(NdbScanOperation::LM_Read)) != 0);
+ CHK(g_ops->readTuples(NdbScanOperation::LM_Read) == 0);
CHK(g_ops->getValue(cA, (char*)&a) != 0);
CHK((g_bh1 = g_ops->getBlobHandle(cC)) != 0);
CHK(g_con->execute(NoCommit) == 0);
@@ -1656,7 +1649,7 @@ testperf()
a = (Uint32)-1;
c[0] = 0;
int ret;
- CHK((ret = rs->nextResult(true)) == 0 || ret == 1);
+ CHK((ret = g_ops->nextResult(true)) == 0 || ret == 1);
if (ret == 1)
break;
Uint32 m = 20;
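testBlobs also moves the scan take-over calls from the result set to the scan operation: rs->updateTuple() becomes updateCurrentTuple() and rs->deleteTuple() becomes deleteCurrentTuple(). A short sketch of an exclusive scan that updates each fetched row in place; the table and column names are illustrative:

    int touch_all_rows(Ndb& ndb)
    {
      NdbTransaction* trans = ndb.startTransaction();
      NdbScanOperation* scan = trans->getNdbScanOperation("T1");
      if (scan == 0 || scan->readTuples(NdbScanOperation::LM_Exclusive) != 0)
        return -1;
      if (trans->execute(NdbTransaction::NoCommit) != 0)
        return -1;
      while (scan->nextResult(true) == 0) {
        // Take over the current row as an update in the same transaction.
        NdbOperation* upd = scan->updateCurrentTuple();
        if (upd == 0 || upd->setValue("B", 42) != 0)
          return -1;
        if (trans->execute(NdbTransaction::NoCommit) != 0)   // apply before fetching more rows
          return -1;
      }
      int ret = trans->execute(NdbTransaction::Commit);
      ndb.closeTransaction(trans);
      return ret;
    }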
diff --git a/ndb/test/ndbapi/testDataBuffers.cpp b/ndb/test/ndbapi/testDataBuffers.cpp
index 03d52252334..aaecb6ee61e 100644
--- a/ndb/test/ndbapi/testDataBuffers.cpp
+++ b/ndb/test/ndbapi/testDataBuffers.cpp
@@ -209,7 +209,7 @@ makeOff(int k)
}
static int
-testcase(int flag)
+testcase(Ndb_cluster_connection&cc, int flag)
{
ndbout << "--- case " << flag << " ---" << endl;
sprintf(tab, "TB%02d", flag);
@@ -254,7 +254,7 @@ testcase(int flag)
ndbout << "tab=" << tab << " cols=" << attrcnt
<< " size max=" << smax << " tot=" << stot << endl;
- ndb = new Ndb("TEST_DB");
+ ndb = new Ndb(&cc, "TEST_DB");
if (ndb->init() != 0)
return ndberror("init");
if (ndb->waitUntilReady(30) < 0)
@@ -443,9 +443,9 @@ testcase(int flag)
if ((con = ndb->startTransaction()) == 0)
return ndberror("startTransaction key=%d", key);
if ((op = sop = con->getNdbScanOperation(tab)) == 0)
- return ndberror("getNdbOperation key=%d", key);
- if ((rs = sop->readTuples(1)) == 0)
- return ndberror("openScanRead key=%d", key);
+ return ndberror("getNdbOperation key=%d", key);
+ if (sop->readTuples(1))
+ return ndberror("openScanRead key=%d", key);
{
col& c = ccol[0];
if (op->load_const_u32(1, key) < 0)
@@ -488,7 +488,7 @@ testcase(int flag)
if (con->execute(NoCommit) < 0)
return ndberror("executeScan key=%d", key);
int ret, cnt = 0;
- while ((ret = rs->nextResult()) == 0) {
+ while ((ret = sop->nextResult()) == 0) {
if (key != newkey)
return ndberror("unexpected key=%d newkey=%d", key, newkey);
for (i = 1; i < attrcnt; i++) {
@@ -606,10 +606,17 @@ NDB_COMMAND(testDataBuffers, "testDataBuffers", "testDataBuffers", "testDataBuff
}
}
unsigned ok = true;
+
+ Ndb_cluster_connection con;
+ if(con.connect(12, 5, 1))
+ {
+ return NDBT_ProgramExit(NDBT_FAILED);
+ }
+
for (i = 1; 0 == loopcnt || i <= loopcnt; i++) {
ndbout << "=== loop " << i << " ===" << endl;
for (int flag = 0; flag < (1<<testbits); flag++) {
- if (testcase(flag) < 0) {
+ if (testcase(con, flag) < 0) {
ok = false;
if (! kontinue)
goto out;
diff --git a/ndb/test/ndbapi/testDeadlock.cpp b/ndb/test/ndbapi/testDeadlock.cpp
index eb985e815ac..0070a7ecc83 100644
--- a/ndb/test/ndbapi/testDeadlock.cpp
+++ b/ndb/test/ndbapi/testDeadlock.cpp
@@ -49,7 +49,7 @@ printusage()
static Opt g_opt;
static NdbMutex *ndbout_mutex= NULL;
-
+static Ndb_cluster_connection *g_cluster_connection= 0;
#define DBG(x) \
do { \
if (! g_opt.m_dbg) break; \
@@ -91,7 +91,6 @@ struct Thr {
NdbConnection* m_con;
NdbScanOperation* m_scanop;
NdbIndexScanOperation* m_indexscanop;
- NdbResultSet* m_rs;
//
Thr(int no);
~Thr();
@@ -136,7 +135,6 @@ Thr::Thr(int no)
m_con = 0;
m_scanop = 0;
m_indexscanop = 0;
- m_rs = 0;
}
Thr::~Thr()
@@ -219,7 +217,7 @@ Thr::exit()
static int
runstep_connect(Thr& thr)
{
- Ndb* ndb = thr.m_ndb = new Ndb("TEST_DB");
+ Ndb* ndb = thr.m_ndb = new Ndb(g_cluster_connection, "TEST_DB");
CHN(ndb, ndb->init() == 0);
CHN(ndb, ndb->waitUntilReady() == 0);
DBG(thr << " connected");
@@ -374,7 +372,7 @@ wl1822_tx2_scanXY(Thr& thr)
CHN(con, (scanop = thr.m_scanop = indexscanop = thr.m_indexscanop = con->getNdbIndexScanOperation(g_opt.m_xname, g_opt.m_tname)) != 0);
DBG("tx2 scan exclusive " << g_opt.m_xname);
}
- CHN(scanop, (rs = thr.m_rs = scanop->readTuplesExclusive(16)) != 0);
+ CHN(scanop, scanop->readTuplesExclusive(16) == 0);
CHN(scanop, scanop->getValue("A", (char*)&wl1822_bufA) != 0);
CHN(scanop, scanop->getValue("B", (char*)&wl1822_bufB) != 0);
CHN(con, con->execute(NoCommit) == 0);
@@ -383,7 +381,7 @@ wl1822_tx2_scanXY(Thr& thr)
DBG("before row " << row);
int ret;
wl1822_bufA = wl1822_bufB = ~0;
- CHN(con, (ret = rs->nextResult(true)) == 0);
+ CHN(con, (ret = scanop->nextResult(true)) == 0);
DBG("got row " << row << " a=" << wl1822_bufA << " b=" << wl1822_bufB);
CHK(wl1822_bufA == wl1822_valA[wl1822_r2k[row]]);
CHK(wl1822_bufB == wl1822_valB[wl1822_r2k[row]]);
@@ -419,14 +417,13 @@ wl1822_tx2_scanZ_close(Thr& thr)
Ndb* ndb = thr.m_ndb;
NdbConnection* con = thr.m_con;
NdbScanOperation* scanop = thr.m_scanop;
- NdbResultSet* rs = thr.m_rs;
- assert(ndb != 0 && con != 0 && scanop != 0 && rs != 0);
+ assert(ndb != 0 && con != 0 && scanop != 0);
unsigned row = 2;
while (true) {
DBG("before row " << row);
int ret;
wl1822_bufA = wl1822_bufB = ~0;
- CHN(con, (ret = rs->nextResult(true)) == 0 || ret == 1);
+ CHN(con, (ret = scanop->nextResult(true)) == 0 || ret == 1);
if (ret == 1)
break;
DBG("got row " << row << " a=" << wl1822_bufA << " b=" << wl1822_bufB);
@@ -467,7 +464,7 @@ wl1822_main(char scantx)
// run the steps
for (unsigned i = 0; i < wl1822_stepcount; i++) {
DBG("step " << i << " start");
- for (int n = 0; n < thrcount; n++) {
+ for (n = 0; n < thrcount; n++) {
Thr& thr = *thrlist[n];
Runstep runstep = wl1822_step[i][n];
if (runstep != 0)
@@ -506,10 +503,18 @@ NDB_COMMAND(testOdbcDriver, "testDeadlock", "testDeadlock", "testDeadlock", 6553
printusage();
return NDBT_ProgramExit(NDBT_WRONGARGS);
}
+
+ Ndb_cluster_connection con;
+ if(con.connect(12, 5, 1) != 0)
+ {
+ return NDBT_ProgramExit(NDBT_FAILED);
+ }
+ g_cluster_connection= &con;
+
if (
strchr(g_opt.m_scan, 't') != 0 && wl1822_main('t') == -1 ||
strchr(g_opt.m_scan, 'x') != 0 && wl1822_main('x') == -1
- ) {
+ ) {
return NDBT_ProgramExit(NDBT_FAILED);
}
return NDBT_ProgramExit(NDBT_OK);
diff --git a/ndb/test/ndbapi/testDict.cpp b/ndb/test/ndbapi/testDict.cpp
index 5f88342705a..dd5846f0d62 100644
--- a/ndb/test/ndbapi/testDict.cpp
+++ b/ndb/test/ndbapi/testDict.cpp
@@ -125,6 +125,16 @@ int runCreateTheTable(NDBT_Context* ctx, NDBT_Step* step){
return NDBT_OK;
}
+int runDropTheTable(NDBT_Context* ctx, NDBT_Step* step){
+ Ndb* pNdb = GETNDB(step);
+ const NdbDictionary::Table* pTab = ctx->getTab();
+
+ // Try to create table in db
+ pNdb->getDictionary()->dropTable(pTab->getName());
+
+ return NDBT_OK;
+}
+
int runCreateTableWhenDbIsFull(NDBT_Context* ctx, NDBT_Step* step){
Ndb* pNdb = GETNDB(step);
int result = NDBT_OK;
@@ -418,103 +428,99 @@ int runUseTableUntilStopped(NDBT_Context* ctx, NDBT_Step* step){
}
-int runCreateMaxTables(NDBT_Context* ctx, NDBT_Step* step){
- int failures = 0;
+int
+runCreateMaxTables(NDBT_Context* ctx, NDBT_Step* step)
+{
char tabName[256];
int numTables = ctx->getProperty("tables", 1000);
Ndb* pNdb = GETNDB(step);
-
- for (int i = 0; i < numTables && failures < 5; i++){
+ NdbDictionary::Dictionary* pDic = pNdb->getDictionary();
+ int i = 0;
+ for (i = 0; i < numTables; i++) {
BaseString::snprintf(tabName, 256, "MAXTAB%d", i);
-
- if (pNdb->waitUntilReady(30) != 0){
+ if (pNdb->waitUntilReady(30) != 0) {
// Db is not ready, return with failure
return NDBT_FAILED;
}
-
const NdbDictionary::Table* pTab = ctx->getTab();
- ndbout << "|- " << tabName << endl;
-
+ //ndbout << "|- " << tabName << endl;
// Set new name for T1
NdbDictionary::Table newTab(* pTab);
newTab.setName(tabName);
-
+ // Drop any old (or try to)
+ (void)pDic->dropTable(newTab.getName());
// Try to create table in db
- if (newTab.createTableInDb(pNdb) != 0){
- ndbout << tabName << " coult not be created"<< endl;
- failures++;
- continue;
+ if (newTab.createTableInDb(pNdb) != 0) {
+ ndbout << tabName << " could not be created: "
+ << pDic->getNdbError() << endl;
+ if (pDic->getNdbError().code == 707 ||
+ pDic->getNdbError().code == 708 ||
+ pDic->getNdbError().code == 826 ||
+ pDic->getNdbError().code == 827)
+ break;
+ return NDBT_FAILED;
}
-
// Verify that table exists in db
const NdbDictionary::Table* pTab3 =
NDBT_Table::discoverTableFromDb(pNdb, tabName) ;
if (pTab3 == NULL){
- ndbout << tabName << " was not found in DB"<< endl;
- failures++;
- continue;
+ ndbout << tabName << " was not found in DB: "
+ << pDic->getNdbError() << endl;
+ return NDBT_FAILED;
}
-
- if (pTab->equal(*pTab3) == false){
- ndbout << "It was not equal" << endl;
- failures++;
+ if (! newTab.equal(*pTab3)) {
+ ndbout << "It was not equal" << endl; abort();
+ return NDBT_FAILED;
}
-
- int records = 1000;
+ int records = ctx->getNumRecords();
HugoTransactions hugoTrans(*pTab3);
- if (hugoTrans.loadTable(pNdb, records) != 0){
+ if (hugoTrans.loadTable(pNdb, records) != 0) {
ndbout << "It can NOT be loaded" << endl;
- } else{
- ndbout << "It can be loaded" << endl;
-
- UtilTransactions utilTrans(*pTab3);
- if (utilTrans.clearTable(pNdb, records, 64) != 0){
- ndbout << "It can NOT be cleared" << endl;
- } else{
- ndbout << "It can be cleared" << endl;
- }
+ return NDBT_FAILED;
+ }
+ UtilTransactions utilTrans(*pTab3);
+ if (utilTrans.clearTable(pNdb, records, 64) != 0) {
+ ndbout << "It can NOT be cleared" << endl;
+ return NDBT_FAILED;
}
-
}
- if (pNdb->waitUntilReady(30) != 0){
+ if (pNdb->waitUntilReady(30) != 0) {
// Db is not ready, return with failure
return NDBT_FAILED;
}
+ ctx->setProperty("maxtables", i);
// HURRAAA!
return NDBT_OK;
}
-int runDropMaxTables(NDBT_Context* ctx, NDBT_Step* step){
- int result = NDBT_OK;
+int runDropMaxTables(NDBT_Context* ctx, NDBT_Step* step)
+{
char tabName[256];
- int numTables = ctx->getProperty("tables", 1000);
+ int numTables = ctx->getProperty("maxtables", (Uint32)0);
Ndb* pNdb = GETNDB(step);
-
- for (int i = 0; i < numTables; i++){
+ NdbDictionary::Dictionary* pDic = pNdb->getDictionary();
+ for (int i = 0; i < numTables; i++) {
BaseString::snprintf(tabName, 256, "MAXTAB%d", i);
-
- if (pNdb->waitUntilReady(30) != 0){
+ if (pNdb->waitUntilReady(30) != 0) {
// Db is not ready, return with failure
return NDBT_FAILED;
}
-
// Verify that table exists in db
const NdbDictionary::Table* pTab3 =
NDBT_Table::discoverTableFromDb(pNdb, tabName) ;
- if (pTab3 == NULL){
- ndbout << tabName << " was not found in DB"<< endl;
- continue;
+ if (pTab3 == NULL) {
+ ndbout << tabName << " was not found in DB: "
+ << pDic->getNdbError() << endl;
+ return NDBT_FAILED;
}
-
-
// Try to drop table in db
- if (pNdb->getDictionary()->dropTable(pTab3->getName()) != 0){
- ndbout << tabName << " coult not be dropped"<< endl;
- result = NDBT_FAILED;
+ if (pDic->dropTable(pTab3->getName()) != 0) {
+ ndbout << tabName << " could not be dropped: "
+ << pDic->getNdbError() << endl;
+ return NDBT_FAILED;
}
-
}
- return result;
+ return NDBT_OK;
}
int runTestFragmentTypes(NDBT_Context* ctx, NDBT_Step* step){
@@ -524,13 +530,6 @@ int runTestFragmentTypes(NDBT_Context* ctx, NDBT_Step* step){
int result = NDBT_OK;
NdbRestarter restarter;
- // enum FragmentType {
- // Unknown = 0,
- // Single = 1, ///< Only one fragment
- // All = 2, ///< Default value. One fragment per node group
- // AllLarge = 3 ///< Sixten fragments per node group.
- // };
-
if (pNdb->waitUntilReady(30) != 0){
// Db is not ready, return with failure
return NDBT_FAILED;
@@ -547,6 +546,7 @@ int runTestFragmentTypes(NDBT_Context* ctx, NDBT_Step* step){
if (newTab.createTableInDb(pNdb) != 0){
ndbout << newTab.getName() << " could not be created"
<< ", fragmentType = "<<fragTtype <<endl;
+ ndbout << pNdb->getDictionary()->getNdbError() << endl;
return NDBT_FAILED;
}
@@ -564,13 +564,17 @@ int runTestFragmentTypes(NDBT_Context* ctx, NDBT_Step* step){
result = NDBT_FAILED;
goto drop_the_tab;
}
-
+/**
+ This test does not work since fragmentation is
+ decided by the kernel, hence the fragmentation
+ attribute on the column will differ
+
if (newTab.equal(*pTab3) == false){
ndbout << "It was not equal" << endl;
result = NDBT_FAILED;
goto drop_the_tab;
}
-
+*/
do {
HugoTransactions hugoTrans(*pTab3);
@@ -1005,10 +1009,10 @@ int runGetPrimaryKey(NDBT_Context* ctx, NDBT_Step* step){
struct ErrorCodes { int error_id; bool crash;};
ErrorCodes
NF_codes[] = {
- {6003, true},
- {6004, true},
+ {6003, true}
+ ,{6004, true}
//,6005, true,
- {7173, false}
+ //{7173, false}
};
int
@@ -1027,17 +1031,6 @@ runNF1(NDBT_Context* ctx, NDBT_Step* step){
int result = NDBT_OK;
- /**
- * Need to run LCP at high rate otherwise
- * packed replicas become "to many"
- */
- int val = DumpStateOrd::DihMinTimeBetweenLCP;
- if(restarter.dumpStateAllNodes(&val, 1) != 0){
- do { CHECK(0); } while(0);
- g_err << "Failed to set LCP to min value" << endl;
- return NDBT_FAILED;
- }
-
const int loops = ctx->getNumLoops();
for (int l = 0; l < loops && result == NDBT_OK ; l++){
const int sz = sizeof(NF_codes)/sizeof(NF_codes[0]);
@@ -1060,7 +1053,7 @@ runNF1(NDBT_Context* ctx, NDBT_Step* step){
CHECK2(dict->createTable(* pTab) == 0,
"failed to create table");
-
+
if (crash) {
CHECK2(restarter.waitNodesNoStart(&nodeId, 1) == 0,
"waitNodesNoStart failed");
@@ -1084,9 +1077,6 @@ runNF1(NDBT_Context* ctx, NDBT_Step* step){
CHECK2(restarter.waitClusterStarted() == 0,
"waitClusterStarted failed");
}
-
- CHECK2(restarter.dumpStateOneNode(nodeId, &val, 1) == 0,
- "Failed to set LCP to min value");
}
}
}
@@ -1583,21 +1573,26 @@ TESTCASE("CreateTableWhenDbIsFull",
INITIALIZER(runFillTable);
INITIALIZER(runCreateTableWhenDbIsFull);
INITIALIZER(runDropTableWhenDbIsFull);
- FINALIZER(runClearTable);
+ FINALIZER(runDropTheTable);
}
TESTCASE("FragmentTypeSingle",
"Create the table with fragment type Single\n"){
- TC_PROPERTY("FragmentType", 1);
+ TC_PROPERTY("FragmentType", NdbDictionary::Table::FragSingle);
+ INITIALIZER(runTestFragmentTypes);
+}
+TESTCASE("FragmentTypeAllSmall",
+ "Create the table with fragment type AllSmall\n"){
+ TC_PROPERTY("FragmentType", NdbDictionary::Table::FragAllSmall);
INITIALIZER(runTestFragmentTypes);
}
-TESTCASE("FragmentTypeAll",
- "Create the table with fragment type All\n"){
- TC_PROPERTY("FragmentType", 2);
+TESTCASE("FragmentTypeAllMedium",
+ "Create the table with fragment type AllMedium\n"){
+ TC_PROPERTY("FragmentType", NdbDictionary::Table::FragAllMedium);
INITIALIZER(runTestFragmentTypes);
}
TESTCASE("FragmentTypeAllLarge",
"Create the table with fragment type AllLarge\n"){
- TC_PROPERTY("FragmentType", 3);
+ TC_PROPERTY("FragmentType", NdbDictionary::Table::FragAllLarge);
INITIALIZER(runTestFragmentTypes);
}
TESTCASE("TemporaryTables",
@@ -1609,7 +1604,7 @@ TESTCASE("CreateMaxTables",
"Create tables until db says that it can't create any more\n"){
TC_PROPERTY("tables", 1000);
INITIALIZER(runCreateMaxTables);
- FINALIZER(runDropMaxTables);
+ INITIALIZER(runDropMaxTables);
}
TESTCASE("PkSizes",
"Create tables with all different primary key sizes.\n"\
diff --git a/ndb/test/ndbapi/testIndex.cpp b/ndb/test/ndbapi/testIndex.cpp
index d359f83257f..5785db232c4 100644
--- a/ndb/test/ndbapi/testIndex.cpp
+++ b/ndb/test/ndbapi/testIndex.cpp
@@ -1145,20 +1145,18 @@ runUniqueNullTransactions(NDBT_Context* ctx, NDBT_Step* step){
pTrans = pNdb->startTransaction();
NdbScanOperation * sOp;
NdbOperation * uOp;
- NdbResultSet * rs;
int eof;
if(!pTrans) goto done;
sOp = pTrans->getNdbScanOperation(pTab->getName());
if(!sOp) goto done;
- rs = sOp->readTuples(NdbScanOperation::LM_Exclusive);
- if(!rs) goto done;
+ if(sOp->readTuples(NdbScanOperation::LM_Exclusive)) goto done;
if(pTrans->execute(NoCommit) == -1) goto done;
- while((eof = rs->nextResult(true)) == 0){
+ while((eof = sOp->nextResult(true)) == 0){
do {
- NdbOperation * uOp = rs->updateTuple();
+ NdbOperation * uOp = sOp->updateCurrentTuple();
if(uOp == 0) goto done;
uOp->setValue(colId, 0);
- } while((eof = rs->nextResult(false)) == 0);
+ } while((eof = sOp->nextResult(false)) == 0);
eof = pTrans->execute(Commit);
if(eof == -1) goto done;
}
@@ -1279,7 +1277,7 @@ TESTCASE("CreateLoadDrop_O",
TESTCASE("NFNR1",
"Test that indexes are correctly maintained during node fail and node restart"){
TC_PROPERTY("LoggedIndexes", (unsigned)0);
- //TC_PROPERTY("Threads", 2);
+ TC_PROPERTY("PauseThreads", 2);
INITIALIZER(runClearTable);
INITIALIZER(createRandomIndex);
INITIALIZER(runLoadTable);
@@ -1294,6 +1292,7 @@ TESTCASE("NFNR1_O",
"Test that indexes are correctly maintained during node fail and node restart"){
TC_PROPERTY("OrderedIndex", 1);
TC_PROPERTY("LoggedIndexes", (unsigned)0);
+ TC_PROPERTY("PauseThreads", 2);
INITIALIZER(runClearTable);
INITIALIZER(createRandomIndex);
INITIALIZER(runLoadTable);
@@ -1307,6 +1306,7 @@ TESTCASE("NFNR1_O",
TESTCASE("NFNR2",
"Test that indexes are correctly maintained during node fail and node restart"){
TC_PROPERTY("LoggedIndexes", (unsigned)0);
+ TC_PROPERTY("PauseThreads", 2);
INITIALIZER(runClearTable);
INITIALIZER(createRandomIndex);
INITIALIZER(createPkIndex);
@@ -1323,6 +1323,7 @@ TESTCASE("NFNR2_O",
"Test that indexes are correctly maintained during node fail and node restart"){
TC_PROPERTY("OrderedIndex", 1);
TC_PROPERTY("LoggedIndexes", (unsigned)0);
+ TC_PROPERTY("PauseThreads", 1);
INITIALIZER(runClearTable);
INITIALIZER(createRandomIndex);
INITIALIZER(createPkIndex);
@@ -1338,6 +1339,7 @@ TESTCASE("NFNR2_O",
TESTCASE("NFNR3",
"Test that indexes are correctly maintained during node fail and node restart"){
TC_PROPERTY("LoggedIndexes", (unsigned)0);
+ TC_PROPERTY("PauseThreads", 2);
INITIALIZER(runClearTable);
INITIALIZER(createRandomIndex);
INITIALIZER(createPkIndex);
@@ -1353,6 +1355,7 @@ TESTCASE("NFNR3_O",
"Test that indexes are correctly maintained during node fail and node restart"){
TC_PROPERTY("OrderedIndex", 1);
TC_PROPERTY("LoggedIndexes", (unsigned)0);
+ TC_PROPERTY("PauseThreads", 2);
INITIALIZER(runClearTable);
INITIALIZER(createRandomIndex);
INITIALIZER(createPkIndex);
@@ -1367,6 +1370,7 @@ TESTCASE("NFNR3_O",
TESTCASE("NFNR4",
"Test that indexes are correctly maintained during node fail and node restart"){
TC_PROPERTY("LoggedIndexes", (unsigned)0);
+ TC_PROPERTY("PauseThreads", 4);
INITIALIZER(runClearTable);
INITIALIZER(createRandomIndex);
INITIALIZER(createPkIndex);
@@ -1385,6 +1389,7 @@ TESTCASE("NFNR4_O",
"Test that indexes are correctly maintained during node fail and node restart"){
TC_PROPERTY("OrderedIndex", 1);
TC_PROPERTY("LoggedIndexes", (unsigned)0);
+ TC_PROPERTY("PauseThreads", 4);
INITIALIZER(runClearTable);
INITIALIZER(createRandomIndex);
INITIALIZER(createPkIndex);
diff --git a/ndb/test/ndbapi/testLcp.cpp b/ndb/test/ndbapi/testLcp.cpp
index d11692db761..8bfc7ccf9b9 100644
--- a/ndb/test/ndbapi/testLcp.cpp
+++ b/ndb/test/ndbapi/testLcp.cpp
@@ -30,6 +30,7 @@ static CASE g_ops[] =
const size_t OP_COUNT = (sizeof(g_ops)/sizeof(g_ops[0]));
static Ndb* g_ndb = 0;
+static Ndb_cluster_connection *g_cluster_connection= 0;
static CASE* g_cases;
static HugoOperations* g_hugo_ops;
@@ -122,6 +123,7 @@ main(int argc, char ** argv){
static int init_ndb(int argc, char** argv)
{
+ ndb_init();
return 0;
}
@@ -132,7 +134,13 @@ static int parse_args(int argc, char** argv)
static int connect_ndb()
{
- g_ndb = new Ndb("TEST_DB");
+ g_cluster_connection = new Ndb_cluster_connection();
+ if(g_cluster_connection->connect(12, 5, 1) != 0)
+ {
+ return 1;
+ }
+
+ g_ndb = new Ndb(g_cluster_connection, "TEST_DB");
g_ndb->init();
if(g_ndb->waitUntilReady(30) == 0){
int args[] = { DumpStateOrd::DihMaxTimeBetweenLCP };
@@ -144,8 +152,10 @@ static int connect_ndb()
static int disconnect_ndb()
{
delete g_ndb;
+ delete g_cluster_connection;
g_ndb = 0;
g_table = 0;
+ g_cluster_connection= 0;
return 0;
}
diff --git a/ndb/test/ndbapi/testNdbApi.cpp b/ndb/test/ndbapi/testNdbApi.cpp
index 3a06269f8dc..f456d852898 100644
--- a/ndb/test/ndbapi/testNdbApi.cpp
+++ b/ndb/test/ndbapi/testNdbApi.cpp
@@ -56,7 +56,7 @@ int runTestMaxNdb(NDBT_Context* ctx, NDBT_Step* step){
int init = 0;
do {
- Ndb* pNdb = new Ndb("TEST_DB");
+ Ndb* pNdb = new Ndb(&ctx->m_cluster_connection, "TEST_DB");
if (pNdb == NULL){
ndbout << "pNdb == NULL" << endl;
errors++;
@@ -108,7 +108,7 @@ int runTestMaxTransaction(NDBT_Context* ctx, NDBT_Step* step){
int oldi = 0;
int result = NDBT_OK;
- Ndb* pNdb = new Ndb("TEST_DB");
+ Ndb* pNdb = new Ndb(&ctx->m_cluster_connection, "TEST_DB");
if (pNdb == NULL){
ndbout << "pNdb == NULL" << endl;
return NDBT_FAILED;
@@ -119,6 +119,9 @@ int runTestMaxTransaction(NDBT_Context* ctx, NDBT_Step* step){
return NDBT_FAILED;
}
+ const NdbDictionary::Table* pTab = ctx->getTab();
+ if (pTab == 0) abort();
+
while (l < loops && result == NDBT_OK){
int errors = 0;
int maxErrors = 5;
@@ -131,39 +134,25 @@ int runTestMaxTransaction(NDBT_Context* ctx, NDBT_Step* step){
NdbConnection* pCon;
- int type = i%4;
+ int type = i%2;
switch (type){
case 0:
pCon = pNdb->startTransaction();
break;
case 1:
- pCon = pNdb->startTransaction(2,
- "DATA",
- 4);
- break;
- case 2:
- ndbout_c("startTransactionDGroup not supported");
- abort();
- /*
- pCon = pNdb->startTransactionDGroup(1,
- "TEST",
- 0);
- */
- break;
- case 3:
- ndbout_c("startTransactionDGroup not supported");
- abort();
- /*
- pCon = pNdb->startTransactionDGroup(2,
- "TEST",
- 1);
- */
- break;
-
+ {
+ BaseString key;
+ key.appfmt("DATA-%d", i);
+ ndbout_c("%s", key.c_str());
+ pCon = pNdb->startTransaction(pTab,
+ key.c_str(),
+ key.length());
+ }
+ break;
default:
abort();
}
-
+
if (pCon == NULL){
ERR(pNdb->getNdbError());
errors++;
@@ -209,7 +198,7 @@ int runTestMaxOperations(NDBT_Context* ctx, NDBT_Step* step){
int maxOpsLimit = 1;
const NdbDictionary::Table* pTab = ctx->getTab();
- Ndb* pNdb = new Ndb("TEST_DB");
+ Ndb* pNdb = new Ndb(&ctx->m_cluster_connection, "TEST_DB");
if (pNdb == NULL){
ndbout << "pNdb == NULL" << endl;
return NDBT_FAILED;
@@ -281,7 +270,7 @@ int runTestGetValue(NDBT_Context* ctx, NDBT_Step* step){
int result = NDBT_OK;
const NdbDictionary::Table* pTab = ctx->getTab();
- Ndb* pNdb = new Ndb("TEST_DB");
+ Ndb* pNdb = new Ndb(&ctx->m_cluster_connection, "TEST_DB");
if (pNdb == NULL){
ndbout << "pNdb == NULL" << endl;
return NDBT_FAILED;
@@ -381,7 +370,7 @@ int runTestEqual(NDBT_Context* ctx, NDBT_Step* step){
int result = NDBT_OK;
const NdbDictionary::Table* pTab = ctx->getTab();
- Ndb* pNdb = new Ndb("TEST_DB");
+ Ndb* pNdb = new Ndb(&ctx->m_cluster_connection, "TEST_DB");
if (pNdb == NULL){
ndbout << "pNdb == NULL" << endl;
return NDBT_FAILED;
@@ -502,7 +491,7 @@ int runTestDeleteNdb(NDBT_Context* ctx, NDBT_Step* step){
// Create 5 ndb objects
for( int i = 0; i < 5; i++){
- Ndb* pNdb = new Ndb("TEST_DB");
+ Ndb* pNdb = new Ndb(&ctx->m_cluster_connection, "TEST_DB");
if (pNdb == NULL){
ndbout << "pNdb == NULL" << endl;
result = NDBT_FAILED;
@@ -583,7 +572,7 @@ int runLoadTable(NDBT_Context* ctx, NDBT_Step* step){
int runTestWaitUntilReady(NDBT_Context* ctx, NDBT_Step* step){
- Ndb* pNdb = new Ndb("TEST_DB");
+ Ndb* pNdb = new Ndb(&ctx->m_cluster_connection, "TEST_DB");
// Forget about calling pNdb->init();
@@ -604,7 +593,7 @@ int runTestWaitUntilReady(NDBT_Context* ctx, NDBT_Step* step){
int runGetNdbOperationNoTab(NDBT_Context* ctx, NDBT_Step* step){
- Ndb* pNdb = new Ndb("TEST_DB");
+ Ndb* pNdb = new Ndb(&ctx->m_cluster_connection, "TEST_DB");
if (pNdb == NULL){
ndbout << "pNdb == NULL" << endl;
return NDBT_FAILED;
@@ -645,7 +634,7 @@ int runMissingOperation(NDBT_Context* ctx, NDBT_Step* step){
const NdbDictionary::Table* pTab = ctx->getTab();
- Ndb* pNdb = new Ndb("TEST_DB");
+ Ndb* pNdb = new Ndb(&ctx->m_cluster_connection, "TEST_DB");
if (pNdb == NULL){
ndbout << "pNdb == NULL" << endl;
return NDBT_FAILED;
@@ -695,7 +684,7 @@ int runMissingOperation(NDBT_Context* ctx, NDBT_Step* step){
int runGetValueInUpdate(NDBT_Context* ctx, NDBT_Step* step){
const NdbDictionary::Table* pTab = ctx->getTab();
- Ndb* pNdb = new Ndb("TEST_DB");
+ Ndb* pNdb = new Ndb(&ctx->m_cluster_connection, "TEST_DB");
if (pNdb == NULL){
ndbout << "pNdb == NULL" << endl;
return NDBT_FAILED;
@@ -761,7 +750,7 @@ int runUpdateWithoutValues(NDBT_Context* ctx, NDBT_Step* step){
HugoOperations hugoOps(*pTab);
- Ndb* pNdb = new Ndb("TEST_DB");
+ Ndb* pNdb = new Ndb(&ctx->m_cluster_connection, "TEST_DB");
if (pNdb == NULL){
ndbout << "pNdb == NULL" << endl;
return NDBT_FAILED;
@@ -807,13 +796,13 @@ int runUpdateWithoutValues(NDBT_Context* ctx, NDBT_Step* step){
// Dont' call any setValues
- // Execute should not work
+ // Execute should work
int check = pCon->execute(Commit);
if (check == 0){
ndbout << "execute worked" << endl;
- result = NDBT_FAILED;
} else {
ERR(pCon->getNdbError());
+ result = NDBT_FAILED;
}
pNdb->closeTransaction(pCon);
@@ -827,7 +816,7 @@ int runUpdateWithoutKeys(NDBT_Context* ctx, NDBT_Step* step){
const NdbDictionary::Table* pTab = ctx->getTab();
- Ndb* pNdb = new Ndb("TEST_DB");
+ Ndb* pNdb = new Ndb(&ctx->m_cluster_connection, "TEST_DB");
if (pNdb == NULL){
ndbout << "pNdb == NULL" << endl;
return NDBT_FAILED;
@@ -953,8 +942,7 @@ int runReadWithoutGetValue(NDBT_Context* ctx, NDBT_Step* step){
return NDBT_FAILED;
}
- NdbResultSet *rs;
- if ((rs = pOp->readTuples((NdbOperation::LockMode)lm)) == 0){
+ if ((pOp->readTuples((NdbOperation::LockMode)lm)) != 0){
pNdb->closeTransaction(pCon);
ERR(pOp->getNdbError());
return NDBT_FAILED;
@@ -973,7 +961,7 @@ int runReadWithoutGetValue(NDBT_Context* ctx, NDBT_Step* step){
}
int res;
- while((res = rs->nextResult()) == 0);
+ while((res = pOp->nextResult()) == 0);
pNdb->closeTransaction(pCon);
if(res != 1)
@@ -988,7 +976,7 @@ int runCheckGetNdbErrorOperation(NDBT_Context* ctx, NDBT_Step* step){
int result = NDBT_OK;
const NdbDictionary::Table* pTab = ctx->getTab();
- Ndb* pNdb = new Ndb("TEST_DB");
+ Ndb* pNdb = new Ndb(&ctx->m_cluster_connection, "TEST_DB");
if (pNdb == NULL){
ndbout << "pNdb == NULL" << endl;
return NDBT_FAILED;
@@ -1049,7 +1037,7 @@ int runCheckGetNdbErrorOperation(NDBT_Context* ctx, NDBT_Step* step){
return result;
}
-#define C2(x) { int _x= (x); if(_x == 0) return NDBT_FAILED; }
+#define C2(x) { int _x= (x); if(_x == 0){ ndbout << "line: " << __LINE__ << endl; return NDBT_FAILED;} }
int runBug_11133(NDBT_Context* ctx, NDBT_Step* step){
int result = NDBT_OK;
@@ -1058,7 +1046,6 @@ int runBug_11133(NDBT_Context* ctx, NDBT_Step* step){
HugoOperations hugoOps(*pTab);
Ndb* pNdb = GETNDB(step);
-
C2(hugoOps.startTransaction(pNdb) == 0);
C2(hugoOps.pkInsertRecord(pNdb, 0, 1) == 0);
C2(hugoOps.execute_NoCommit(pNdb) == 0);
@@ -1101,7 +1088,7 @@ int runBug_11133(NDBT_Context* ctx, NDBT_Step* step){
C2(hugoOps.execute_Commit(pNdb) == 0);
C2(hugoOps.closeTransaction(pNdb) == 0);
- Ndb ndb2("TEST_DB");
+ Ndb ndb2(&ctx->m_cluster_connection, "TEST_DB");
C2(ndb2.init() == 0);
C2(ndb2.waitUntilReady() == 0);
HugoOperations hugoOps2(*pTab);
@@ -1110,8 +1097,8 @@ int runBug_11133(NDBT_Context* ctx, NDBT_Step* step){
C2(hugoOps.pkInsertRecord(pNdb, 0, 1) == 0);
C2(hugoOps.execute_NoCommit(pNdb) == 0);
C2(hugoOps2.startTransaction(&ndb2) == 0);
- C2(hugoOps2.pkWriteRecord(&ndb2, 0, 1) == 0);
- C2(hugoOps2.execute_async(&ndb2, NoCommit) == 0);
+ C2(hugoOps2.pkWritePartialRecord(&ndb2, 0) == 0);
+ C2(hugoOps2.execute_async(&ndb2, NdbTransaction::NoCommit) == 0);
C2(hugoOps.execute_Commit(pNdb) == 0);
C2(hugoOps2.wait_async(&ndb2) == 0);
C2(hugoOps.closeTransaction(pNdb) == 0);
@@ -1122,15 +1109,133 @@ int runBug_11133(NDBT_Context* ctx, NDBT_Step* step){
C2(hugoOps.execute_NoCommit(pNdb) == 0);
C2(hugoOps2.startTransaction(&ndb2) == 0);
C2(hugoOps2.pkWriteRecord(&ndb2, 0, 1) == 0);
- C2(hugoOps2.execute_async(&ndb2, NoCommit) == 0);
+ C2(hugoOps2.execute_async(&ndb2, NdbTransaction::NoCommit) == 0);
C2(hugoOps.execute_Commit(pNdb) == 0);
C2(hugoOps2.wait_async(&ndb2) == 0);
+ C2(hugoOps2.execute_Commit(pNdb) == 0);
C2(hugoOps.closeTransaction(pNdb) == 0);
C2(hugoOps2.closeTransaction(&ndb2) == 0);
+ C2(hugoOps.startTransaction(pNdb) == 0);
+ C2(hugoOps.pkUpdateRecord(pNdb, 0, 1) == 0);
+ C2(hugoOps.execute_NoCommit(pNdb) == 0);
+ C2(hugoOps2.startTransaction(&ndb2) == 0);
+ C2(hugoOps2.pkWritePartialRecord(&ndb2, 0) == 0);
+ C2(hugoOps2.execute_async(&ndb2, NdbTransaction::NoCommit) == 0);
+ C2(hugoOps.execute_Commit(pNdb) == 0);
+ C2(hugoOps2.wait_async(&ndb2) == 0);
+ C2(hugoOps.closeTransaction(pNdb) == 0);
+ C2(hugoOps2.closeTransaction(&ndb2) == 0);
+
+ C2(hugoOps.startTransaction(pNdb) == 0);
+ C2(hugoOps.pkDeleteRecord(pNdb, 0, 1) == 0);
+ C2(hugoOps.execute_NoCommit(pNdb) == 0);
+ C2(hugoOps2.startTransaction(&ndb2) == 0);
+ C2(hugoOps2.pkWritePartialRecord(&ndb2, 0) == 0);
+ C2(hugoOps2.execute_async(&ndb2, NdbTransaction::NoCommit) == 0);
+ C2(hugoOps.execute_Commit(pNdb) == 0);
+ C2(hugoOps2.wait_async(&ndb2) != 0);
+ C2(hugoOps.closeTransaction(pNdb) == 0);
+ C2(hugoOps2.closeTransaction(&ndb2) == 0);
+
+ return result;
+}
+
+int runScan_4006(NDBT_Context* ctx, NDBT_Step* step){
+ int result = NDBT_OK;
+ const Uint32 max= 5;
+ const NdbDictionary::Table* pTab = ctx->getTab();
+
+ Ndb* pNdb = new Ndb(&ctx->m_cluster_connection, "TEST_DB");
+ if (pNdb == NULL){
+ ndbout << "pNdb == NULL" << endl;
+ return NDBT_FAILED;
+ }
+ if (pNdb->init(max)){
+ ERR(pNdb->getNdbError());
+ delete pNdb;
+ return NDBT_FAILED;
+ }
+
+ NdbConnection* pCon = pNdb->startTransaction();
+ if (pCon == NULL){
+ pNdb->closeTransaction(pCon);
+ delete pNdb;
+ return NDBT_FAILED;
+ }
+
+ Uint32 i;
+ Vector<NdbScanOperation*> scans;
+ for(i = 0; i<10*max; i++)
+ {
+ NdbScanOperation* pOp = pCon->getNdbScanOperation(pTab->getName());
+ if (pOp == NULL){
+ ERR(pCon->getNdbError());
+ pNdb->closeTransaction(pCon);
+ delete pNdb;
+ return NDBT_FAILED;
+ }
+
+ if (pOp->readTuples() != 0){
+ pNdb->closeTransaction(pCon);
+ ERR(pOp->getNdbError());
+ delete pNdb;
+ return NDBT_FAILED;
+ }
+ scans.push_back(pOp);
+ }
+
+ // Don't call any equal or setValues
+
+ // Execute should not work
+ int check = pCon->execute(NoCommit);
+ if (check == 0){
+ ndbout << "execute worked" << endl;
+ } else {
+ ERR(pCon->getNdbError());
+ }
+
+ for(i= 0; i<scans.size(); i++)
+ {
+ NdbScanOperation* pOp= scans[i];
+ while((check= pOp->nextResult()) == 0);
+ if(check != 1)
+ {
+ ERR(pOp->getNdbError());
+ pNdb->closeTransaction(pCon);
+ delete pNdb;
+ return NDBT_FAILED;
+ }
+ }
+
+ pNdb->closeTransaction(pCon);
+
+ Vector<NdbConnection*> cons;
+ for(i= 0; i<10*max; i++)
+ {
+ pCon= pNdb->startTransaction();
+ if(pCon)
+ cons.push_back(pCon);
+ else
+ break;
+ }
+
+ for(i= 0; i<cons.size(); i++)
+ {
+ cons[i]->close();
+ }
+
+ if(cons.size() != max)
+ {
+ result= NDBT_FAILED;
+ }
+
+ delete pNdb;
+
return result;
}
+template class Vector<NdbScanOperation*>;
NDBT_TESTSUITE(testNdbApi);
@@ -1212,6 +1317,12 @@ TESTCASE("Bug_11133",
INITIALIZER(runBug_11133);
FINALIZER(runClearTable);
}
+TESTCASE("Scan_4006",
+ "Check that getNdbScanOperation does not get 4006\n"){
+ INITIALIZER(runLoadTable);
+ INITIALIZER(runScan_4006);
+ FINALIZER(runClearTable);
+}
NDBT_TESTSUITE_END(testNdbApi);
int main(int argc, const char** argv){
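The rewritten runTestMaxTransaction above also exercises the new Ndb::startTransaction() overload that takes a table and a key value as a placement hint for the transaction coordinator. A minimal sketch, with an illustrative table and key:

    #include <string.h>

    NdbTransaction* hinted_transaction(Ndb& ndb)
    {
      const NdbDictionary::Table* tab = ndb.getDictionary()->getTable("T1");
      const char* key = "DATA-0";                  // illustrative key value
      // Hint: start the transaction close to the node group that stores this key.
      return tab ? ndb.startTransaction(tab, key, strlen(key)) : 0;
    }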
diff --git a/ndb/test/ndbapi/testNodeRestart.cpp b/ndb/test/ndbapi/testNodeRestart.cpp
index 9c25d715d07..6ef3da2d760 100644
--- a/ndb/test/ndbapi/testNodeRestart.cpp
+++ b/ndb/test/ndbapi/testNodeRestart.cpp
@@ -311,7 +311,7 @@ int runDirtyRead(NDBT_Context* ctx, NDBT_Step* step){
int id = i % restarter.getNumDbNodes();
int nodeId = restarter.getDbNodeId(id);
ndbout << "Restart node " << nodeId << endl;
- restarter.insertErrorInAllNodes(5041);
+ restarter.insertErrorInNode(nodeId, 5041);
restarter.insertErrorInAllNodes(8048 + (i & 1));
for(int j = 0; j<records; j++){
diff --git a/ndb/test/ndbapi/testOIBasic.cpp b/ndb/test/ndbapi/testOIBasic.cpp
index 30a76da306a..942ee2ec966 100644
--- a/ndb/test/ndbapi/testOIBasic.cpp
+++ b/ndb/test/ndbapi/testOIBasic.cpp
@@ -28,7 +28,10 @@
#include <NdbCondition.h>
#include <NdbThread.h>
#include <NdbTick.h>
+#include <NdbSleep.h>
#include <my_sys.h>
+#include <NdbSqlUtil.hpp>
+#include <ndb_version.h>
// options
@@ -37,6 +40,7 @@ struct Opt {
unsigned m_batch;
const char* m_bound;
const char* m_case;
+ bool m_collsp;
bool m_core;
const char* m_csname;
CHARSET_INFO* m_cs;
@@ -52,20 +56,20 @@ struct Opt {
unsigned m_pctnull;
unsigned m_rows;
unsigned m_samples;
- unsigned m_scanbat;
unsigned m_scanpar;
unsigned m_scanstop;
- unsigned m_seed;
+ int m_seed;
unsigned m_subloop;
const char* m_table;
unsigned m_threads;
- int m_v;
+ int m_v; // int for lint
Opt() :
m_batch(32),
m_bound("01234"),
m_case(0),
+ m_collsp(false),
m_core(false),
- m_csname("latin1_bin"),
+ m_csname("random"),
m_cs(0),
m_die(0),
m_dups(false),
@@ -79,13 +83,12 @@ struct Opt {
m_pctnull(10),
m_rows(1000),
m_samples(0),
- m_scanbat(0),
m_scanpar(0),
m_scanstop(0),
- m_seed(0),
+ m_seed(-1),
m_subloop(4),
m_table(0),
- m_threads(10),
+ m_threads(4),
m_v(1) {
}
};
@@ -104,23 +107,23 @@ printhelp()
<< " -batch N pk operations in batch [" << d.m_batch << "]" << endl
<< " -bound xyz use only these bound types 0-4 [" << d.m_bound << "]" << endl
<< " -case abc only given test cases (letters a-z)" << endl
+ << " -collsp use strnncollsp instead of strnxfrm" << endl
<< " -core core dump on error [" << d.m_core << "]" << endl
- << " -csname S charset (collation) of non-pk char column [" << d.m_csname << "]" << endl
+ << " -csname S charset or collation [" << d.m_csname << "]" << endl
<< " -die nnn exit immediately on NDB error code nnn" << endl
<< " -dups allow duplicate tuples from index scan [" << d.m_dups << "]" << endl
<< " -fragtype T fragment type single/small/medium/large" << endl
- << " -index xyz only given index numbers (digits 1-9)" << endl
+ << " -index xyz only given index numbers (digits 0-9)" << endl
<< " -loop N loop count full suite 0=forever [" << d.m_loop << "]" << endl
<< " -nologging create tables in no-logging mode" << endl
<< " -noverify skip index verifications" << endl
<< " -pctnull N pct NULL values in nullable column [" << d.m_pctnull << "]" << endl
<< " -rows N rows per thread [" << d.m_rows << "]" << endl
<< " -samples N samples for some timings (0=all) [" << d.m_samples << "]" << endl
- << " -scanbat N scan batch per fragment (ignored by ndb api) [" << d.m_scanbat << "]" << endl
<< " -scanpar N scan parallelism [" << d.m_scanpar << "]" << endl
- << " -seed N srandom seed 0=loop number[" << d.m_seed << "]" << endl
+ << " -seed N srandom seed 0=loop number -1=random [" << d.m_seed << "]" << endl
<< " -subloop N subtest loop count [" << d.m_subloop << "]" << endl
- << " -table xyz only given table numbers (digits 1-9)" << endl
+ << " -table xyz only given table numbers (digits 0-9)" << endl
<< " -threads N number of threads [" << d.m_threads << "]" << endl
<< " -vN verbosity [" << d.m_v << "]" << endl
<< " -h or -help print this help text" << endl
@@ -135,9 +138,84 @@ static const bool g_store_null_key = true;
// compare NULL like normal value (NULL < not NULL, NULL == NULL)
static const bool g_compare_null = true;
+static const char* hexstr = "0123456789abcdef";
+
+// random ints
+
+static unsigned
+urandom(unsigned n)
+{
+ if (n == 0)
+ return 0;
+ unsigned i = random() % n;
+ return i;
+}
+
+static int
+irandom(unsigned n)
+{
+ if (n == 0)
+ return 0;
+ int i = random() % n;
+ if (random() & 0x1)
+ i = -i;
+ return i;
+}
+
+static bool
+randompct(unsigned pct)
+{
+ if (pct == 0)
+ return false;
+ if (pct >= 100)
+ return true;
+ return urandom(100) < pct;
+}
+
+static unsigned
+random_coprime(unsigned n)
+{
+ unsigned prime[] = { 101, 211, 307, 401, 503, 601, 701, 809, 907 };
+ unsigned count = sizeof(prime) / sizeof(prime[0]);
+ if (n == 0)
+ return 0;
+ while (1) {
+ unsigned i = urandom(count);
+ if (n % prime[i] != 0)
+ return prime[i];
+ }
+}
+
+// random re-sequence of 0...(n-1)
+
+struct Rsq {
+ Rsq(unsigned n);
+ unsigned next();
+private:
+ unsigned m_n;
+ unsigned m_i;
+ unsigned m_start;
+ unsigned m_prime;
+};
+
+Rsq::Rsq(unsigned n)
+{
+ m_n = n;
+ m_i = 0;
+ m_start = urandom(n);
+ m_prime = random_coprime(n);
+}
+
+unsigned
+Rsq::next()
+{
+ assert(m_n != 0);
+ return (m_start + m_i++ * m_prime) % m_n;
+}
+
// log and error macros
-static NdbMutex *ndbout_mutex= NULL;
+static NdbMutex *ndbout_mutex = NULL;
static unsigned getthrno();
@@ -198,7 +276,7 @@ getthrstr()
return -1; \
} while (0)
-// method parameters base class
+// method parameters
class Thr;
class Con;
@@ -222,6 +300,8 @@ struct Par : public Opt {
// value calculation
unsigned m_range;
unsigned m_pctrange;
+ unsigned m_pctbrange;
+ int m_bdir;
// choice of key
bool m_randomkey;
// do verify after read
@@ -230,6 +310,11 @@ struct Par : public Opt {
bool m_deadlock;
// abort percentabge
unsigned m_abortpct;
+ NdbOperation::LockMode m_lockmode;
+ // scan options
+ bool m_tupscan;
+ bool m_ordered;
+ bool m_descending;
// timer location
Par(const Opt& opt) :
Opt(opt),
@@ -242,24 +327,30 @@ struct Par : public Opt {
m_slno(0),
m_totrows(m_threads * m_rows),
m_range(m_rows),
- m_pctrange(0),
+ m_pctrange(40),
+ m_pctbrange(80),
+ m_bdir(0),
m_randomkey(false),
m_verify(false),
m_deadlock(false),
- m_abortpct(0) {
+ m_abortpct(0),
+ m_lockmode(NdbOperation::LM_Read),
+ m_tupscan(false),
+ m_ordered(false),
+ m_descending(false) {
}
};
static bool
-usetable(unsigned i)
+usetable(Par par, unsigned i)
{
- return g_opt.m_table == 0 || strchr(g_opt.m_table, '1' + i) != 0;
+ return par.m_table == 0 || strchr(par.m_table, '0' + i) != 0;
}
static bool
-useindex(unsigned i)
+useindex(Par par, unsigned i)
{
- return g_opt.m_index == 0 || strchr(g_opt.m_index, '1' + i) != 0;
+ return par.m_index == 0 || strchr(par.m_index, '0' + i) != 0;
}
static unsigned
@@ -385,38 +476,285 @@ Lst::reset()
m_cnt = 0;
}
+// character sets
+
+static const unsigned maxcsnumber = 512;
+static const unsigned maxcharcount = 32;
+static const unsigned maxcharsize = 4;
+static const unsigned maxxmulsize = 8;
+
+// single mb char
+struct Chr {
+ unsigned char m_bytes[maxcharsize];
+ unsigned char m_xbytes[maxxmulsize * maxcharsize];
+ unsigned m_size;
+ Chr();
+};
+
+Chr::Chr()
+{
+ memset(m_bytes, 0, sizeof(m_bytes));
+ memset(m_xbytes, 0, sizeof(m_xbytes));
+ m_size = 0;
+}
+
+// charset and random valid chars to use
+struct Chs {
+ CHARSET_INFO* m_cs;
+ unsigned m_xmul;
+ Chr* m_chr;
+ Chs(CHARSET_INFO* cs);
+ ~Chs();
+};
+
+static NdbOut&
+operator<<(NdbOut& out, const Chs& chs);
+
+Chs::Chs(CHARSET_INFO* cs) :
+ m_cs(cs)
+{
+ m_xmul = m_cs->strxfrm_multiply;
+ if (m_xmul == 0)
+ m_xmul = 1;
+ assert(m_xmul <= maxxmulsize);
+ m_chr = new Chr [maxcharcount];
+ unsigned i = 0;
+ unsigned miss1 = 0;
+ unsigned miss2 = 0;
+ unsigned miss3 = 0;
+ unsigned miss4 = 0;
+ while (i < maxcharcount) {
+ unsigned char* bytes = m_chr[i].m_bytes;
+ unsigned char* xbytes = m_chr[i].m_xbytes;
+ unsigned& size = m_chr[i].m_size;
+ bool ok;
+ size = m_cs->mbminlen + urandom(m_cs->mbmaxlen - m_cs->mbminlen + 1);
+ assert(m_cs->mbminlen <= size && size <= m_cs->mbmaxlen);
+ // prefer longer chars
+ if (size == m_cs->mbminlen && m_cs->mbminlen < m_cs->mbmaxlen && urandom(5) != 0)
+ continue;
+ for (unsigned j = 0; j < size; j++) {
+ bytes[j] = urandom(256);
+ }
+ int not_used;
+ // check wellformed
+ const char* sbytes = (const char*)bytes;
+ if ((*cs->cset->well_formed_len)(cs, sbytes, sbytes + size, 1, &not_used) != size) {
+ miss1++;
+ continue;
+ }
+ // check no proper prefix wellformed
+ ok = true;
+ for (unsigned j = 1; j < size; j++) {
+ if ((*cs->cset->well_formed_len)(cs, sbytes, sbytes + j, 1, &not_used) == j) {
+ ok = false;
+ break;
+ }
+ }
+ if (! ok) {
+ miss2++;
+ continue;
+ }
+ // normalize
+ memset(xbytes, 0, sizeof(xbytes));
+ // strnxfrm currently always returns the full buffer size
+ int xlen = (*cs->coll->strnxfrm)(cs, xbytes, m_xmul * size, bytes, size);
+ // check we got something
+ ok = false;
+ for (unsigned j = 0; j < xlen; j++) {
+ if (xbytes[j] != 0) {
+ ok = true;
+ break;
+ }
+ }
+ if (! ok) {
+ miss3++;
+ continue;
+ }
+ // check for duplicate (before normalize)
+ ok = true;
+ for (unsigned j = 0; j < i; j++) {
+ const Chr& chr = m_chr[j];
+ if (chr.m_size == size && memcmp(chr.m_bytes, bytes, size) == 0) {
+ ok = false;
+ break;
+ }
+ }
+ if (! ok) {
+ miss4++;
+ continue;
+ }
+ i++;
+ }
+ bool disorder = true;
+ unsigned bubbles = 0;
+ while (disorder) {
+ disorder = false;
+ for (unsigned i = 1; i < maxcharcount; i++) {
+ unsigned len = sizeof(m_chr[i].m_xbytes);
+ if (memcmp(m_chr[i-1].m_xbytes, m_chr[i].m_xbytes, len) > 0) {
+ Chr chr = m_chr[i];
+ m_chr[i] = m_chr[i-1];
+ m_chr[i-1] = chr;
+ disorder = true;
+ bubbles++;
+ }
+ }
+ }
+ LL3("inited charset " << *this << " miss=" << miss1 << "," << miss2 << "," << miss3 << "," << miss4 << " bubbles=" << bubbles);
+}
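+
+// note: the bubble sort above orders the generated characters by their
+// strxfrm-normalized form, so a higher index in m_chr means a character
+// that collates higher under this charset's collation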
+
+Chs::~Chs()
+{
+ delete [] m_chr;
+}
+
+static NdbOut&
+operator<<(NdbOut& out, const Chs& chs)
+{
+ CHARSET_INFO* cs = chs.m_cs;
+ out << cs->name << "[" << cs->mbminlen << "-" << cs->mbmaxlen << "," << chs.m_xmul << "]";
+ return out;
+}
+
+static Chs* cslist[maxcsnumber];
+
+static void
+resetcslist()
+{
+ for (unsigned i = 0; i < maxcsnumber; i++) {
+ delete cslist[i];
+ cslist[i] = 0;
+ }
+}
+
+static Chs*
+getcs(Par par)
+{
+ CHARSET_INFO* cs;
+ if (par.m_cs != 0) {
+ cs = par.m_cs;
+ } else {
+ while (1) {
+ unsigned n = urandom(maxcsnumber);
+ cs = get_charset(n, MYF(0));
+ if (cs != 0) {
+ // prefer complex charsets
+ if (cs->mbmaxlen != 1 || urandom(5) == 0)
+ break;
+ }
+ }
+ }
+ if (cslist[cs->number] == 0)
+ cslist[cs->number] = new Chs(cs);
+ return cslist[cs->number];
+}
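+
+// note: Chs objects are cached per charset number in cslist, so the random
+// valid characters for a given charset are generated only once per run;
+// resetcslist() in makebuiltintables() drops the cache before the tables
+// are rebuilt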
+
// tables and indexes
// Col - table column
struct Col {
+ enum Type {
+ Unsigned = NdbDictionary::Column::Unsigned,
+ Char = NdbDictionary::Column::Char,
+ Varchar = NdbDictionary::Column::Varchar,
+ Longvarchar = NdbDictionary::Column::Longvarchar
+ };
+ const class Tab& m_tab;
unsigned m_num;
const char* m_name;
bool m_pk;
- NdbDictionary::Column::Type m_type;
+ Type m_type;
unsigned m_length;
+ unsigned m_bytelength; // length in bytes = length * charset mbmaxlen
+ unsigned m_attrsize; // size of one base type unit (4 for Unsigned, 1 for char types)
+ unsigned m_headsize; // number of length prefix bytes (0, 1, or 2)
+ unsigned m_bytesize; // total value size = headsize + attrsize * bytelength
bool m_nullable;
- void verify(const void* addr) const;
+ const Chs* m_chs;
+ Col(const class Tab& tab, unsigned num, const char* name, bool pk, Type type, unsigned length, bool nullable, const Chs* chs);
+ ~Col();
+ bool equal(const Col& col2) const;
+ void wellformed(const void* addr) const;
};
+Col::Col(const class Tab& tab, unsigned num, const char* name, bool pk, Type type, unsigned length, bool nullable, const Chs* chs) :
+ m_tab(tab),
+ m_num(num),
+ m_name(strcpy(new char [strlen(name) + 1], name)),
+ m_pk(pk),
+ m_type(type),
+ m_length(length),
+ m_bytelength(length * (chs == 0 ? 1 : chs->m_cs->mbmaxlen)),
+ m_attrsize(
+ type == Unsigned ? sizeof(Uint32) :
+ type == Char ? sizeof(char) :
+ type == Varchar ? sizeof(char) :
+ type == Longvarchar ? sizeof(char) : ~0),
+ m_headsize(
+ type == Unsigned ? 0 :
+ type == Char ? 0 :
+ type == Varchar ? 1 :
+ type == Longvarchar ? 2 : ~0),
+ m_bytesize(m_headsize + m_attrsize * m_bytelength),
+ m_nullable(nullable),
+ m_chs(chs)
+{
+ // fix long varchar
+ if (type == Varchar && m_bytelength > 255) {
+ m_type = Longvarchar;
+ m_headsize += 1;
+ m_bytesize += 1;
+ }
+}
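+
+// note on value layout: Char stores exactly m_bytelength data bytes,
+// Varchar has a 1-byte length prefix and Longvarchar a 2-byte little-endian
+// length prefix, each followed by at most m_bytelength data bytes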
+
+Col::~Col()
+{
+ delete [] m_name;
+}
+
+bool
+Col::equal(const Col& col2) const
+{
+ return m_type == col2.m_type && m_length == col2.m_length && m_chs == col2.m_chs;
+}
+
void
-Col::verify(const void* addr) const
+Col::wellformed(const void* addr) const
{
switch (m_type) {
- case NdbDictionary::Column::Unsigned:
+ case Col::Unsigned:
break;
- case NdbDictionary::Column::Varchar:
+ case Col::Char:
{
- const unsigned char* p = (const unsigned char*)addr;
- unsigned n = (p[0] << 8) | p[1];
- assert(n <= m_length);
- unsigned i;
- for (i = 0; i < n; i++) {
- assert(p[2 + i] != 0);
- }
- for (i = n; i < m_length; i++) {
- assert(p[2 + i] == 0);
- }
+ CHARSET_INFO* cs = m_chs->m_cs;
+ const char* src = (const char*)addr;
+ unsigned len = m_bytelength;
+ int not_used;
+ assert((*cs->cset->well_formed_len)(cs, src, src + len, 0xffff, &not_used) == len);
+ }
+ break;
+ case Col::Varchar:
+ {
+ CHARSET_INFO* cs = m_chs->m_cs;
+ const unsigned char* src = (const unsigned char*)addr;
+ const char* ssrc = (const char*)src;
+ unsigned len = src[0];
+ int not_used;
+ assert(len <= m_bytelength);
+ assert((*cs->cset->well_formed_len)(cs, ssrc + 1, ssrc + 1 + len, 0xffff, &not_used) == len);
+ }
+ break;
+ case Col::Longvarchar:
+ {
+ CHARSET_INFO* cs = m_chs->m_cs;
+ const unsigned char* src = (const unsigned char*)addr;
+ const char* ssrc = (const char*)src;
+ unsigned len = src[0] + (src[1] << 8);
+ int not_used;
+ assert(len <= m_bytelength);
+ assert((*cs->cset->well_formed_len)(cs, ssrc + 2, ssrc + 2 + len, 0xffff, &not_used) == len);
}
break;
default:
@@ -428,14 +766,28 @@ Col::verify(const void* addr) const
static NdbOut&
operator<<(NdbOut& out, const Col& col)
{
- out << "col " << col.m_num;
- out << " " << col.m_name;
+ out << "col[" << col.m_num << "] " << col.m_name;
switch (col.m_type) {
- case NdbDictionary::Column::Unsigned:
+ case Col::Unsigned:
out << " unsigned";
break;
- case NdbDictionary::Column::Varchar:
- out << " varchar(" << col.m_length << ")";
+ case Col::Char:
+ {
+ CHARSET_INFO* cs = col.m_chs->m_cs;
+ out << " char(" << col.m_length << "*" << cs->mbmaxlen << ";" << cs->name << ")";
+ }
+ break;
+ case Col::Varchar:
+ {
+ CHARSET_INFO* cs = col.m_chs->m_cs;
+ out << " varchar(" << col.m_length << "*" << cs->mbmaxlen << ";" << cs->name << ")";
+ }
+ break;
+ case Col::Longvarchar:
+ {
+ CHARSET_INFO* cs = col.m_chs->m_cs;
+ out << " longvarchar(" << col.m_length << "*" << cs->mbmaxlen << ";" << cs->name << ")";
+ }
break;
default:
out << "type" << (int)col.m_type;
@@ -450,25 +802,84 @@ operator<<(NdbOut& out, const Col& col)
// ICol - index column
struct ICol {
+ const class ITab& m_itab;
unsigned m_num;
- struct Col m_col;
+ const Col& m_col;
+ ICol(const class ITab& itab, unsigned num, const Col& col);
+ ~ICol();
};
+ICol::ICol(const class ITab& itab, unsigned num, const Col& col) :
+ m_itab(itab),
+ m_num(num),
+ m_col(col)
+{
+}
+
+ICol::~ICol()
+{
+}
+
+static NdbOut&
+operator<<(NdbOut& out, const ICol& icol)
+{
+ out << "icol[" << icol.m_num << "] " << icol.m_col;
+ return out;
+}
+
// ITab - index
struct ITab {
+ enum Type {
+ OrderedIndex = NdbDictionary::Index::OrderedIndex,
+ UniqueHashIndex = NdbDictionary::Index::UniqueHashIndex
+ };
+ const class Tab& m_tab;
const char* m_name;
+ Type m_type;
unsigned m_icols;
- const ICol* m_icol;
+ const ICol** m_icol;
+ unsigned m_colmask;
+ ITab(const class Tab& tab, const char* name, Type type, unsigned icols);
+ ~ITab();
+ void icoladd(unsigned k, const ICol* icolptr);
};
+ITab::ITab(const class Tab& tab, const char* name, Type type, unsigned icols) :
+ m_tab(tab),
+ m_name(strcpy(new char [strlen(name) + 1], name)),
+ m_type(type),
+ m_icols(icols),
+ m_icol(new const ICol* [icols + 1]),
+ m_colmask(0)
+{
+ for (unsigned k = 0; k <= m_icols; k++)
+ m_icol[k] = 0;
+}
+
+ITab::~ITab()
+{
+ delete [] m_name;
+ for (unsigned i = 0; i < m_icols; i++)
+ delete m_icol[i];
+ delete [] m_icol;
+}
+
+void
+ITab::icoladd(unsigned k, const ICol* icolptr)
+{
+ assert(k == icolptr->m_num && k < m_icols && m_icol[k] == 0);
+ m_icol[k] = icolptr;
+ m_colmask |= (1 << icolptr->m_col.m_num);
+}
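+
+// note: m_colmask records one bit per table column covered by this index;
+// Row::calc()/Set::calc() take a column mask in the same format, and the
+// expectation (not shown in this hunk) is that callers pass an index's
+// m_colmask to leave indexed columns unchanged during partial updates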
+
static NdbOut&
operator<<(NdbOut& out, const ITab& itab)
{
- out << "itab " << itab.m_name << " " << itab.m_icols;
+ out << "itab " << itab.m_name << " icols=" << itab.m_icols;
for (unsigned k = 0; k < itab.m_icols; k++) {
- out << endl;
- out << "icol " << k << " " << itab.m_icol[k].m_col;
+ const ICol& icol = *itab.m_icol[k];
+ out << endl << icol;
}
return out;
}
@@ -478,200 +889,298 @@ operator<<(NdbOut& out, const ITab& itab)
struct Tab {
const char* m_name;
unsigned m_cols;
- const Col* m_col;
+ const Col** m_col;
unsigned m_itabs;
- const ITab* m_itab;
+ const ITab** m_itab;
+ // pk must contain an Unsigned column
+ unsigned m_keycol;
+ void coladd(unsigned k, Col* colptr);
+ void itabadd(unsigned j, ITab* itab);
+ Tab(const char* name, unsigned cols, unsigned itabs, unsigned keycol);
+ ~Tab();
};
+Tab::Tab(const char* name, unsigned cols, unsigned itabs, unsigned keycol) :
+ m_name(strcpy(new char [strlen(name) + 1], name)),
+ m_cols(cols),
+ m_col(new const Col* [cols + 1]),
+ m_itabs(itabs),
+ m_itab(new const ITab* [itabs + 1]),
+ m_keycol(keycol)
+{
+ for (unsigned k = 0; k <= cols; k++)
+ m_col[k] = 0;
+ for (unsigned j = 0; j <= itabs; j++)
+ m_itab[j] = 0;
+}
+
+Tab::~Tab()
+{
+ delete [] m_name;
+ for (unsigned i = 0; i < m_cols; i++)
+ delete m_col[i];
+ delete [] m_col;
+ for (unsigned i = 0; i < m_itabs; i++)
+ delete m_itab[i];
+ delete [] m_itab;
+}
+
+void
+Tab::coladd(unsigned k, Col* colptr)
+{
+ assert(k == colptr->m_num && k < m_cols && m_col[k] == 0);
+ m_col[k] = colptr;
+}
+
+void
+Tab::itabadd(unsigned j, ITab* itabptr)
+{
+ assert(j < m_itabs && m_itab[j] == 0);
+ m_itab[j] = itabptr;
+}
+
static NdbOut&
operator<<(NdbOut& out, const Tab& tab)
{
- out << "tab " << tab.m_name << " " << tab.m_cols;
+ out << "tab " << tab.m_name << " cols=" << tab.m_cols;
for (unsigned k = 0; k < tab.m_cols; k++) {
- out << endl;
- out << tab.m_col[k];
+ const Col& col = *tab.m_col[k];
+ out << endl << col;
}
for (unsigned i = 0; i < tab.m_itabs; i++) {
- if (! useindex(i))
+ if (tab.m_itab[i] == 0)
continue;
- out << endl;
- out << tab.m_itab[i];
+ const ITab& itab = *tab.m_itab[i];
+ out << endl << itab;
}
return out;
}
-// tt1 + tt1x1 tt1x2 tt1x3 tt1x4 tt1x5
-
-static const Col
-tt1col[] = {
- { 0, "A", 1, NdbDictionary::Column::Unsigned, 1, 0 },
- { 1, "B", 0, NdbDictionary::Column::Unsigned, 1, 1 },
- { 2, "C", 0, NdbDictionary::Column::Unsigned, 1, 1 },
- { 3, "D", 0, NdbDictionary::Column::Unsigned, 1, 1 },
- { 4, "E", 0, NdbDictionary::Column::Unsigned, 1, 1 }
-};
-
-static const ICol
-tt1x1col[] = {
- { 0, tt1col[0] }
-};
-
-static const ICol
-tt1x2col[] = {
- { 0, tt1col[1] }
-};
-
-static const ICol
-tt1x3col[] = {
- { 0, tt1col[1] },
- { 1, tt1col[2] }
-};
-
-static const ICol
-tt1x4col[] = {
- { 0, tt1col[3] },
- { 1, tt1col[2] },
- { 2, tt1col[1] }
-};
-
-static const ICol
-tt1x5col[] = {
- { 0, tt1col[1] },
- { 1, tt1col[4] },
- { 2, tt1col[2] },
- { 3, tt1col[3] }
-};
-
-static const ITab
-tt1x1 = {
- "TT1X1", 1, tt1x1col
-};
-
-static const ITab
-tt1x2 = {
- "TT1X2", 1, tt1x2col
-};
-
-static const ITab
-tt1x3 = {
- "TT1X3", 2, tt1x3col
-};
-
-static const ITab
-tt1x4 = {
- "TT1X4", 3, tt1x4col
-};
-
-static const ITab
-tt1x5 = {
- "TT1X5", 4, tt1x5col
-};
-
-static const ITab
-tt1itab[] = {
- tt1x1,
- tt1x2,
- tt1x3,
- tt1x4,
- tt1x5
-};
-
-static const Tab
-tt1 = {
- "TT1", 5, tt1col, 5, tt1itab
-};
-
-// tt2 + tt2x1 tt2x2 tt2x3 tt2x4 tt2x5
-
-static const Col
-tt2col[] = {
- { 0, "A", 1, NdbDictionary::Column::Unsigned, 1, 0 },
- { 1, "B", 0, NdbDictionary::Column::Unsigned, 1, 1 },
- { 2, "C", 0, NdbDictionary::Column::Varchar, 20, 1 },
- { 3, "D", 0, NdbDictionary::Column::Varchar, 5, 1 },
- { 4, "E", 0, NdbDictionary::Column::Varchar, 5, 1 }
-};
-
-static const ICol
-tt2x1col[] = {
- { 0, tt2col[0] }
-};
-
-static const ICol
-tt2x2col[] = {
- { 0, tt2col[1] },
- { 1, tt2col[2] }
-};
-
-static const ICol
-tt2x3col[] = {
- { 0, tt2col[2] },
- { 1, tt2col[1] }
-};
-
-static const ICol
-tt2x4col[] = {
- { 0, tt2col[3] },
- { 1, tt2col[4] }
-};
-
-static const ICol
-tt2x5col[] = {
- { 0, tt2col[4] },
- { 1, tt2col[3] },
- { 2, tt2col[2] },
- { 3, tt2col[1] }
-};
-
-static const ITab
-tt2x1 = {
- "TT2X1", 1, tt2x1col
-};
-
-static const ITab
-tt2x2 = {
- "TT2X2", 2, tt2x2col
-};
-
-static const ITab
-tt2x3 = {
- "TT2X3", 2, tt2x3col
-};
-
-static const ITab
-tt2x4 = {
- "TT2X4", 2, tt2x4col
-};
-
-static const ITab
-tt2x5 = {
- "TT2X5", 4, tt2x5col
-};
-
-static const ITab
-tt2itab[] = {
- tt2x1,
- tt2x2,
- tt2x3,
- tt2x4,
- tt2x5
-};
-
-static const Tab
-tt2 = {
- "TT2", 5, tt2col, 5, tt2itab
-};
+// make table structs
-// all tables
+static const Tab** tablist = 0;
+static unsigned tabcount = 0;
-static const Tab
-tablist[] = {
- tt1,
- tt2
-};
+static void
+verifytables()
+{
+ for (unsigned j = 0; j < tabcount; j++) {
+ const Tab* t = tablist[j];
+ if (t == 0)
+ continue;
+ assert(t->m_cols != 0 && t->m_col != 0);
+ for (unsigned k = 0; k < t->m_cols; k++) {
+ const Col* c = t->m_col[k];
+ assert(c != 0 && c->m_num == k);
+ assert(! (c->m_pk && c->m_nullable));
+ }
+ assert(t->m_col[t->m_cols] == 0);
+ {
+ assert(t->m_keycol < t->m_cols);
+ const Col* c = t->m_col[t->m_keycol];
+ assert(c->m_pk && c->m_type == Col::Unsigned);
+ }
+ assert(t->m_itabs != 0 && t->m_itab != 0);
+ for (unsigned i = 0; i < t->m_itabs; i++) {
+ const ITab* x = t->m_itab[i];
+ if (x == 0)
+ continue;
+ assert(x != 0 && x->m_icols != 0 && x->m_icol != 0);
+ for (unsigned k = 0; k < x->m_icols; k++) {
+ const ICol* c = x->m_icol[k];
+ assert(c != 0 && c->m_num == k && c->m_col.m_num < t->m_cols);
+ if (x->m_type == ITab::UniqueHashIndex) {
+ assert(! c->m_col.m_nullable);
+ }
+ }
+ }
+ assert(t->m_itab[t->m_itabs] == 0);
+ }
+}
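+
+// note: verifytables() asserts the structural invariants of the built-in
+// tables: the column and index arrays are null-terminated, the key column
+// is a non-nullable Unsigned primary key, and unique hash indexes contain
+// no nullable columns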
-static const unsigned
-tabcount = sizeof(tablist) / sizeof(tablist[0]);
+static void
+makebuiltintables(Par par)
+{
+ LL2("makebuiltintables");
+ resetcslist();
+ tabcount = 3;
+ if (tablist == 0) {
+ tablist = new const Tab* [tabcount];
+ for (unsigned j = 0; j < tabcount; j++) {
+ tablist[j] = 0;
+ }
+ } else {
+ for (unsigned j = 0; j < tabcount; j++) {
+ delete tablist[j];
+ tablist[j] = 0;
+ }
+ }
+ // ti0 - basic
+ if (usetable(par, 0)) {
+ Tab* t = new Tab("ti0", 5, 7, 0);
+ // name - pk - type - length - nullable - cs
+ t->coladd(0, new Col(*t, 0, "a", 1, Col::Unsigned, 1, 0, 0));
+ t->coladd(1, new Col(*t, 1, "b", 0, Col::Unsigned, 1, 1, 0));
+ t->coladd(2, new Col(*t, 2, "c", 0, Col::Unsigned, 1, 0, 0));
+ t->coladd(3, new Col(*t, 3, "d", 0, Col::Unsigned, 1, 1, 0));
+ t->coladd(4, new Col(*t, 4, "e", 0, Col::Unsigned, 1, 0, 0));
+ if (useindex(par, 0)) {
+ // a
+ ITab* x = new ITab(*t, "ti0x0", ITab::OrderedIndex, 1);
+ x->icoladd(0, new ICol(*x, 0, *t->m_col[0]));
+ t->itabadd(0, x);
+ }
+ if (useindex(par, 1)) {
+ // b
+ ITab* x = new ITab(*t, "ti0x1", ITab::OrderedIndex, 1);
+ x->icoladd(0, new ICol(*x, 0, *t->m_col[1]));
+ t->itabadd(1, x);
+ }
+ if (useindex(par, 2)) {
+ // b, c
+ ITab* x = new ITab(*t, "ti0x2", ITab::OrderedIndex, 2);
+ x->icoladd(0, new ICol(*x, 0, *t->m_col[1]));
+ x->icoladd(1, new ICol(*x, 1, *t->m_col[2]));
+ t->itabadd(2, x);
+ }
+ if (useindex(par, 3)) {
+ // b, e, c, d
+ ITab* x = new ITab(*t, "ti0x3", ITab::OrderedIndex, 4);
+ x->icoladd(0, new ICol(*x, 0, *t->m_col[1]));
+ x->icoladd(1, new ICol(*x, 1, *t->m_col[4]));
+ x->icoladd(2, new ICol(*x, 2, *t->m_col[2]));
+ x->icoladd(3, new ICol(*x, 3, *t->m_col[3]));
+ t->itabadd(3, x);
+ }
+ if (useindex(par, 4)) {
+ // a, c
+ ITab* x = new ITab(*t, "ti0z4", ITab::UniqueHashIndex, 2);
+ x->icoladd(0, new ICol(*x, 0, *t->m_col[0]));
+ x->icoladd(1, new ICol(*x, 1, *t->m_col[2]));
+ t->itabadd(4, x);
+ }
+ if (useindex(par, 5)) {
+ // a, e
+ ITab* x = new ITab(*t, "ti0z5", ITab::UniqueHashIndex, 2);
+ x->icoladd(0, new ICol(*x, 0, *t->m_col[0]));
+ x->icoladd(1, new ICol(*x, 1, *t->m_col[4]));
+ t->itabadd(5, x);
+ }
+ tablist[0] = t;
+ }
+ // ti1 - simple char fields
+ if (usetable(par, 1)) {
+ Tab* t = new Tab("ti1", 5, 7, 1);
+ // name - pk - type - length - nullable - cs
+ t->coladd(0, new Col(*t, 0, "a", 0, Col::Unsigned, 1, 0, 0));
+ t->coladd(1, new Col(*t, 1, "b", 1, Col::Unsigned, 1, 0, 0));
+ t->coladd(2, new Col(*t, 2, "c", 0, Col::Varchar, 20, 0, getcs(par)));
+ t->coladd(3, new Col(*t, 3, "d", 0, Col::Char, 5, 0, getcs(par)));
+ t->coladd(4, new Col(*t, 4, "e", 0, Col::Longvarchar, 5, 1, getcs(par)));
+ if (useindex(par, 0)) {
+ // b
+ ITab* x = new ITab(*t, "ti1x0", ITab::OrderedIndex, 1);
+ x->icoladd(0, new ICol(*x, 0, *t->m_col[1]));
+ t->itabadd(0, x);
+ }
+ if (useindex(par, 1)) {
+ // c, a
+ ITab* x = new ITab(*t, "ti1x1", ITab::OrderedIndex, 2);
+ x->icoladd(0, new ICol(*x, 0, *t->m_col[2]));
+ x->icoladd(1, new ICol(*x, 1, *t->m_col[0]));
+ t->itabadd(1, x);
+ }
+ if (useindex(par, 2)) {
+ // d
+ ITab* x = new ITab(*t, "ti1x2", ITab::OrderedIndex, 1);
+ x->icoladd(0, new ICol(*x, 0, *t->m_col[3]));
+ t->itabadd(2, x);
+ }
+ if (useindex(par, 3)) {
+ // e, d, c, b
+ ITab* x = new ITab(*t, "ti1x3", ITab::OrderedIndex, 4);
+ x->icoladd(0, new ICol(*x, 0, *t->m_col[4]));
+ x->icoladd(1, new ICol(*x, 1, *t->m_col[3]));
+ x->icoladd(2, new ICol(*x, 2, *t->m_col[2]));
+ x->icoladd(3, new ICol(*x, 3, *t->m_col[1]));
+ t->itabadd(3, x);
+ }
+ if (useindex(par, 4)) {
+ // a, b
+ ITab* x = new ITab(*t, "ti1z4", ITab::UniqueHashIndex, 2);
+ x->icoladd(0, new ICol(*x, 0, *t->m_col[0]));
+ x->icoladd(1, new ICol(*x, 1, *t->m_col[1]));
+ t->itabadd(4, x);
+ }
+ if (useindex(par, 5)) {
+ // b, c, d
+ ITab* x = new ITab(*t, "ti1z5", ITab::UniqueHashIndex, 3);
+ x->icoladd(0, new ICol(*x, 0, *t->m_col[1]));
+ x->icoladd(1, new ICol(*x, 1, *t->m_col[2]));
+ x->icoladd(2, new ICol(*x, 2, *t->m_col[3]));
+ t->itabadd(5, x);
+ }
+ tablist[1] = t;
+ }
+ // ti2 - complex char fields
+ if (usetable(par, 2)) {
+ Tab* t = new Tab("ti2", 5, 7, 2);
+ // name - pk - type - length - nullable - cs
+ t->coladd(0, new Col(*t, 0, "a", 1, Col::Char, 31, 0, getcs(par)));
+ t->coladd(1, new Col(*t, 1, "b", 0, Col::Char, 4, 1, getcs(par)));
+ t->coladd(2, new Col(*t, 2, "c", 1, Col::Unsigned, 1, 0, 0));
+ t->coladd(3, new Col(*t, 3, "d", 1, Col::Varchar, 128, 0, getcs(par)));
+ t->coladd(4, new Col(*t, 4, "e", 0, Col::Varchar, 7, 0, getcs(par)));
+ if (useindex(par, 0)) {
+ // a, c, d
+ ITab* x = new ITab(*t, "ti2x0", ITab::OrderedIndex, 3);
+ x->icoladd(0, new ICol(*x, 0, *t->m_col[0]));
+ x->icoladd(1, new ICol(*x, 1, *t->m_col[2]));
+ x->icoladd(2, new ICol(*x, 2, *t->m_col[3]));
+ t->itabadd(0, x);
+ }
+ if (useindex(par, 1)) {
+ // e, d, c, b, a
+ ITab* x = new ITab(*t, "ti2x1", ITab::OrderedIndex, 5);
+ x->icoladd(0, new ICol(*x, 0, *t->m_col[4]));
+ x->icoladd(1, new ICol(*x, 1, *t->m_col[3]));
+ x->icoladd(2, new ICol(*x, 2, *t->m_col[2]));
+ x->icoladd(3, new ICol(*x, 3, *t->m_col[1]));
+ x->icoladd(4, new ICol(*x, 4, *t->m_col[0]));
+ t->itabadd(1, x);
+ }
+ if (useindex(par, 2)) {
+ // d
+ ITab* x = new ITab(*t, "ti2x2", ITab::OrderedIndex, 1);
+ x->icoladd(0, new ICol(*x, 0, *t->m_col[3]));
+ t->itabadd(2, x);
+ }
+ if (useindex(par, 3)) {
+ // b
+ ITab* x = new ITab(*t, "ti2x3", ITab::OrderedIndex, 1);
+ x->icoladd(0, new ICol(*x, 0, *t->m_col[1]));
+ t->itabadd(3, x);
+ }
+ if (useindex(par, 4)) {
+ // a, c
+ ITab* x = new ITab(*t, "ti2z4", ITab::UniqueHashIndex, 2);
+ x->icoladd(0, new ICol(*x, 0, *t->m_col[0]));
+ x->icoladd(1, new ICol(*x, 1, *t->m_col[2]));
+ t->itabadd(4, x);
+ }
+ if (useindex(par, 5)) {
+ // a, c, d, e
+ ITab* x = new ITab(*t, "ti2z5", ITab::UniqueHashIndex, 4);
+ x->icoladd(0, new ICol(*x, 0, *t->m_col[0]));
+ x->icoladd(1, new ICol(*x, 1, *t->m_col[2]));
+ x->icoladd(2, new ICol(*x, 2, *t->m_col[3]));
+ x->icoladd(3, new ICol(*x, 3, *t->m_col[4]));
+ t->itabadd(5, x);
+ }
+ tablist[2] = t;
+ }
+ verifytables();
+}
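+
+// note: ti0 exercises plain Unsigned columns, ti1 adds Char/Varchar/
+// Longvarchar non-key columns, and ti2 puts character columns inside the
+// primary key; a table or index is built only if usetable()/useindex()
+// selects its digit (all of them when no selection string is given)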
// connections
@@ -682,16 +1191,18 @@ struct Con {
NdbDictionary::Dictionary* m_dic;
NdbConnection* m_tx;
NdbOperation* m_op;
+ NdbIndexOperation* m_indexop;
NdbScanOperation* m_scanop;
NdbIndexScanOperation* m_indexscanop;
- NdbResultSet* m_resultset;
+ NdbScanFilter* m_scanfilter;
enum ScanMode { ScanNo = 0, Committed, Latest, Exclusive };
ScanMode m_scanmode;
enum ErrType { ErrNone = 0, ErrDeadlock, ErrNospace, ErrOther };
ErrType m_errtype;
Con() :
- m_ndb(0), m_dic(0), m_tx(0), m_op(0),
- m_scanop(0), m_indexscanop(0), m_resultset(0), m_scanmode(ScanNo), m_errtype(ErrNone) {}
+ m_ndb(0), m_dic(0), m_tx(0), m_op(0), m_indexop(0),
+ m_scanop(0), m_indexscanop(0), m_scanfilter(0),
+ m_scanmode(ScanNo), m_errtype(ErrNone) {}
~Con() {
if (m_tx != 0)
closeTransaction();
@@ -701,16 +1212,23 @@ struct Con {
void disconnect();
int startTransaction();
int getNdbOperation(const Tab& tab);
+ int getNdbIndexOperation1(const ITab& itab, const Tab& tab);
+ int getNdbIndexOperation(const ITab& itab, const Tab& tab);
int getNdbScanOperation(const Tab& tab);
- int getNdbScanOperation(const ITab& itab, const Tab& tab);
+ int getNdbIndexScanOperation1(const ITab& itab, const Tab& tab);
+ int getNdbIndexScanOperation(const ITab& itab, const Tab& tab);
+ int getNdbScanFilter();
int equal(int num, const char* addr);
int getValue(int num, NdbRecAttr*& rec);
int setValue(int num, const char* addr);
int setBound(int num, int type, const void* value);
+ int beginFilter(int group);
+ int endFilter();
+ int setFilter(int num, int cond, const void* value, unsigned len);
int execute(ExecType t);
int execute(ExecType t, bool& deadlock, bool& nospace);
- int openScanRead(unsigned scanbat, unsigned scanpar);
- int openScanExclusive(unsigned scanbat, unsigned scanpar);
+ int readTuples(Par par);
+ int readIndexTuples(Par par);
int executeScan();
int nextScanResult(bool fetchAllowed);
int nextScanResult(bool fetchAllowed, bool& deadlock);
@@ -765,6 +1283,28 @@ Con::getNdbOperation(const Tab& tab)
}
int
+Con::getNdbIndexOperation1(const ITab& itab, const Tab& tab)
+{
+ assert(m_tx != 0);
+ CHKCON((m_op = m_indexop = m_tx->getNdbIndexOperation(itab.m_name, tab.m_name)) != 0, *this);
+ return 0;
+}
+
+int
+Con::getNdbIndexOperation(const ITab& itab, const Tab& tab)
+{
+ assert(m_tx != 0);
+ unsigned tries = 0;
+ while (1) {
+ if (getNdbIndexOperation1(itab, tab) == 0)
+ break;
+ CHK(++tries < 10);
+ NdbSleep_MilliSleep(100);
+ }
+ return 0;
+}
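+
+// note: the wrappers above retry getNdbIndexOperation up to 10 times with
+// a 100 ms pause, presumably to ride out transient dictionary or resource
+// errors reported through CHKCON in the "1" variants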
+
+int
Con::getNdbScanOperation(const Tab& tab)
{
assert(m_tx != 0);
@@ -773,7 +1313,7 @@ Con::getNdbScanOperation(const Tab& tab)
}
int
-Con::getNdbScanOperation(const ITab& itab, const Tab& tab)
+Con::getNdbIndexScanOperation1(const ITab& itab, const Tab& tab)
{
assert(m_tx != 0);
CHKCON((m_op = m_scanop = m_indexscanop = m_tx->getNdbIndexScanOperation(itab.m_name, tab.m_name)) != 0, *this);
@@ -781,6 +1321,29 @@ Con::getNdbScanOperation(const ITab& itab, const Tab& tab)
}
int
+Con::getNdbIndexScanOperation(const ITab& itab, const Tab& tab)
+{
+ assert(m_tx != 0);
+ unsigned tries = 0;
+ while (1) {
+ if (getNdbIndexScanOperation1(itab, tab) == 0)
+ break;
+ CHK(++tries < 10);
+ NdbSleep_MilliSleep(100);
+ }
+ return 0;
+}
+
+int
+Con::getNdbScanFilter()
+{
+ assert(m_tx != 0 && m_scanop != 0);
+ delete m_scanfilter;
+ m_scanfilter = new NdbScanFilter(m_scanop);
+ return 0;
+}
+
+int
Con::equal(int num, const char* addr)
{
assert(m_tx != 0 && m_op != 0);
@@ -807,12 +1370,36 @@ Con::setValue(int num, const char* addr)
int
Con::setBound(int num, int type, const void* value)
{
- assert(m_tx != 0 && m_op != 0);
+ assert(m_tx != 0 && m_indexscanop != 0);
CHKCON(m_indexscanop->setBound(num, type, value) == 0, *this);
return 0;
}
int
+Con::beginFilter(int group)
+{
+ assert(m_tx != 0 && m_scanfilter != 0);
+ CHKCON(m_scanfilter->begin((NdbScanFilter::Group)group) == 0, *this);
+ return 0;
+}
+
+int
+Con::endFilter()
+{
+ assert(m_tx != 0 && m_scanfilter != 0);
+ CHKCON(m_scanfilter->end() == 0, *this);
+ return 0;
+}
+
+int
+Con::setFilter(int num, int cond, const void* value, unsigned len)
+{
+ assert(m_tx != 0 && m_scanfilter != 0);
+ CHKCON(m_scanfilter->cmp((NdbScanFilter::BinaryCondition)cond, num, value, len) == 0, *this);
+ return 0;
+}
+
+int
Con::execute(ExecType t)
{
assert(m_tx != 0);
@@ -841,20 +1428,21 @@ Con::execute(ExecType t, bool& deadlock, bool& nospace)
}
int
-Con::openScanRead(unsigned scanbat, unsigned scanpar)
+Con::readTuples(Par par)
{
- assert(m_tx != 0 && m_op != 0);
- NdbOperation::LockMode lm = NdbOperation::LM_Read;
- CHKCON((m_resultset = m_scanop->readTuples(lm, scanbat, scanpar)) != 0, *this);
+ assert(m_tx != 0 && m_scanop != 0);
+ int scan_flags = 0;
+ if (par.m_tupscan)
+ scan_flags |= NdbScanOperation::SF_TupScan;
+ CHKCON(m_scanop->readTuples(par.m_lockmode, scan_flags, par.m_scanpar) == 0, *this);
return 0;
}
int
-Con::openScanExclusive(unsigned scanbat, unsigned scanpar)
+Con::readIndexTuples(Par par)
{
- assert(m_tx != 0 && m_op != 0);
- NdbOperation::LockMode lm = NdbOperation::LM_Exclusive;
- CHKCON((m_resultset = m_scanop->readTuples(lm, scanbat, scanpar)) != 0, *this);
+ assert(m_tx != 0 && m_indexscanop != 0);
+ CHKCON(m_indexscanop->readTuples(par.m_lockmode, 0, par.m_scanpar, par.m_ordered, par.m_descending) == 0, *this);
return 0;
}
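+
+// note: readTuples() adds SF_TupScan when par.m_tupscan is set, while
+// readIndexTuples() forwards the ordered/descending flags so that the scan
+// result order can later be checked against the index collation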
@@ -869,8 +1457,8 @@ int
Con::nextScanResult(bool fetchAllowed)
{
int ret;
- assert(m_resultset != 0);
- CHKCON((ret = m_resultset->nextResult(fetchAllowed)) != -1, *this);
+ assert(m_scanop != 0);
+ CHKCON((ret = m_scanop->nextResult(fetchAllowed)) != -1, *this);
assert(ret == 0 || ret == 1 || (! fetchAllowed && ret == 2));
return ret;
}
@@ -895,7 +1483,7 @@ int
Con::updateScanTuple(Con& con2)
{
assert(con2.m_tx != 0);
- CHKCON((con2.m_op = m_resultset->updateTuple(con2.m_tx)) != 0, *this);
+ CHKCON((con2.m_op = m_scanop->updateCurrentTuple(con2.m_tx)) != 0, *this);
return 0;
}
@@ -903,16 +1491,16 @@ int
Con::deleteScanTuple(Con& con2)
{
assert(con2.m_tx != 0);
- CHKCON(m_resultset->deleteTuple(con2.m_tx) == 0, *this);
+ CHKCON(m_scanop->deleteCurrentTuple(con2.m_tx) == 0, *this);
return 0;
}
void
Con::closeScan()
{
- assert(m_resultset != 0);
- m_resultset->close();
- m_scanop = 0, m_indexscanop = 0, m_resultset = 0;
+ assert(m_scanop != 0);
+ m_scanop->close();
+ m_scanop = 0, m_indexscanop = 0;
}
@@ -922,7 +1510,7 @@ Con::closeTransaction()
assert(m_ndb != 0 && m_tx != 0);
m_ndb->closeTransaction(m_tx);
m_tx = 0, m_op = 0;
- m_scanop = 0, m_indexscanop = 0, m_resultset = 0;
+ m_scanop = 0, m_indexscanop = 0;
}
void
@@ -945,7 +1533,8 @@ Con::printerror(NdbOut& out)
if ((code = m_tx->getNdbError().code) != 0) {
LL0(++any << " con: error " << m_tx->getNdbError());
die += (code == g_opt.m_die);
- if (code == 266 || code == 274 || code == 296 || code == 297 || code == 499)
+ // error 631 is new, occurs only with 4 db nodes, still needs to be investigated
+ if (code == 266 || code == 274 || code == 296 || code == 297 || code == 499 || code == 631)
m_errtype = ErrDeadlock;
if (code == 826 || code == 827 || code == 902)
m_errtype = ErrNospace;
@@ -983,9 +1572,9 @@ invalidateindex(Par par)
Con& con = par.con();
const Tab& tab = par.tab();
for (unsigned i = 0; i < tab.m_itabs; i++) {
- if (! useindex(i))
+ if (tab.m_itab[i] == 0)
continue;
- const ITab& itab = tab.m_itab[i];
+ const ITab& itab = *tab.m_itab[i];
invalidateindex(par, itab);
}
return 0;
@@ -1033,16 +1622,14 @@ createtable(Par par)
t.setLogging(false);
}
for (unsigned k = 0; k < tab.m_cols; k++) {
- const Col& col = tab.m_col[k];
+ const Col& col = *tab.m_col[k];
NdbDictionary::Column c(col.m_name);
- c.setType(col.m_type);
- c.setLength(col.m_length);
+ c.setType((NdbDictionary::Column::Type)col.m_type);
+ c.setLength(col.m_bytelength); // for char NDB API uses length in bytes
c.setPrimaryKey(col.m_pk);
c.setNullable(col.m_nullable);
- if (c.getCharset()) { // test if char type
- if (! col.m_pk)
- c.setCharset(par.m_cs);
- }
+ if (col.m_chs != 0)
+ c.setCharset(col.m_chs->m_cs);
t.addColumn(c);
}
con.m_dic = con.m_ndb->getDictionary();
@@ -1073,9 +1660,9 @@ dropindex(Par par)
{
const Tab& tab = par.tab();
for (unsigned i = 0; i < tab.m_itabs; i++) {
- if (! useindex(i))
+ if (tab.m_itab[i] == 0)
continue;
- const ITab& itab = tab.m_itab[i];
+ const ITab& itab = *tab.m_itab[i];
CHK(dropindex(par, itab) == 0);
}
return 0;
@@ -1090,10 +1677,13 @@ createindex(Par par, const ITab& itab)
LL4(itab);
NdbDictionary::Index x(itab.m_name);
x.setTable(tab.m_name);
- x.setType(NdbDictionary::Index::OrderedIndex);
- x.setLogging(false);
+ x.setType((NdbDictionary::Index::Type)itab.m_type);
+ if (par.m_nologging || itab.m_type == ITab::OrderedIndex) {
+ x.setLogging(false);
+ }
for (unsigned k = 0; k < itab.m_icols; k++) {
- const Col& col = itab.m_icol[k].m_col;
+ const ICol& icol = *itab.m_icol[k];
+ const Col& col = icol.m_col;
x.addColumnName(col.m_name);
}
con.m_dic = con.m_ndb->getDictionary();
@@ -1107,9 +1697,9 @@ createindex(Par par)
{
const Tab& tab = par.tab();
for (unsigned i = 0; i < tab.m_itabs; i++) {
- if (! useindex(i))
+ if (tab.m_itab[i] == 0)
continue;
- const ITab& itab = tab.m_itab[i];
+ const ITab& itab = *tab.m_itab[i];
CHK(createindex(par, itab) == 0);
}
return 0;
@@ -1117,43 +1707,15 @@ createindex(Par par)
// data sets
-static unsigned
-urandom(unsigned n)
-{
- if (n == 0)
- return 0;
- unsigned i = random() % n;
- return i;
-}
-
-static int
-irandom(unsigned n)
-{
- if (n == 0)
- return 0;
- int i = random() % n;
- if (random() & 0x1)
- i = -i;
- return i;
-}
-
-static bool
-randompct(unsigned pct)
-{
- if (pct == 0)
- return false;
- if (pct >= 100)
- return true;
- return urandom(100) < pct;
-}
-
// Val - typed column value
struct Val {
const Col& m_col;
union {
Uint32 m_uint32;
- char* m_varchar;
+ unsigned char* m_char;
+ unsigned char* m_varchar;
+ unsigned char* m_longvarchar;
};
Val(const Col& col);
~Val();
@@ -1161,10 +1723,17 @@ struct Val {
void copy(const void* addr);
const void* dataaddr() const;
bool m_null;
+ int equal(Par par) const;
+ int equal(Par par, const ICol& icol) const;
int setval(Par par) const;
void calc(Par par, unsigned i);
- int verify(const Val& val2) const;
- int cmp(const Val& val2) const;
+ void calckey(Par par, unsigned i);
+ void calckeychars(Par par, unsigned i, unsigned& n, unsigned char* buf);
+ void calcnokey(Par par);
+ void calcnokeychars(Par par, unsigned& n, unsigned char* buf);
+ int verify(Par par, const Val& val2) const;
+ int cmp(Par par, const Val& val2) const;
+ int cmpchars(Par par, const unsigned char* buf1, unsigned len1, const unsigned char* buf2, unsigned len2) const;
private:
Val& operator=(const Val& val2);
};
@@ -1176,10 +1745,16 @@ Val::Val(const Col& col) :
m_col(col)
{
switch (col.m_type) {
- case NdbDictionary::Column::Unsigned:
+ case Col::Unsigned:
+ break;
+ case Col::Char:
+ m_char = new unsigned char [col.m_bytelength];
+ break;
+ case Col::Varchar:
+ m_varchar = new unsigned char [1 + col.m_bytelength];
break;
- case NdbDictionary::Column::Varchar:
- m_varchar = new char [2 + col.m_length];
+ case Col::Longvarchar:
+ m_longvarchar = new unsigned char [2 + col.m_bytelength];
break;
default:
assert(false);
@@ -1191,11 +1766,17 @@ Val::~Val()
{
const Col& col = m_col;
switch (col.m_type) {
- case NdbDictionary::Column::Unsigned:
+ case Col::Unsigned:
break;
- case NdbDictionary::Column::Varchar:
+ case Col::Char:
+ delete [] m_char;
+ break;
+ case Col::Varchar:
delete [] m_varchar;
break;
+ case Col::Longvarchar:
+ delete [] m_longvarchar;
+ break;
default:
assert(false);
break;
@@ -1220,11 +1801,17 @@ Val::copy(const void* addr)
{
const Col& col = m_col;
switch (col.m_type) {
- case NdbDictionary::Column::Unsigned:
+ case Col::Unsigned:
m_uint32 = *(const Uint32*)addr;
break;
- case NdbDictionary::Column::Varchar:
- memcpy(m_varchar, addr, 2 + col.m_length);
+ case Col::Char:
+ memcpy(m_char, addr, col.m_bytelength);
+ break;
+ case Col::Varchar:
+ memcpy(m_varchar, addr, 1 + col.m_bytelength);
+ break;
+ case Col::Longvarchar:
+ memcpy(m_longvarchar, addr, 2 + col.m_bytelength);
break;
default:
assert(false);
@@ -1238,10 +1825,14 @@ Val::dataaddr() const
{
const Col& col = m_col;
switch (col.m_type) {
- case NdbDictionary::Column::Unsigned:
+ case Col::Unsigned:
return &m_uint32;
- case NdbDictionary::Column::Varchar:
+ case Col::Char:
+ return m_char;
+ case Col::Varchar:
return m_varchar;
+ case Col::Longvarchar:
+ return m_longvarchar;
default:
break;
}
@@ -1250,18 +1841,37 @@ Val::dataaddr() const
}
int
-Val::setval(Par par) const
+Val::equal(Par par) const
{
Con& con = par.con();
const Col& col = m_col;
+ assert(col.m_pk && ! m_null);
const char* addr = (const char*)dataaddr();
- if (m_null)
- addr = 0;
- if (col.m_pk)
- CHK(con.equal(col.m_num, addr) == 0);
- else
- CHK(con.setValue(col.m_num, addr) == 0);
- LL5("setval [" << m_col << "] " << *this);
+ LL5("equal [" << col << "] " << *this);
+ CHK(con.equal(col.m_num, addr) == 0);
+ return 0;
+}
+
+int
+Val::equal(Par par, const ICol& icol) const
+{
+ Con& con = par.con();
+ assert(! m_null);
+ const char* addr = (const char*)dataaddr();
+ LL5("equal [" << icol << "] " << *this);
+ CHK(con.equal(icol.m_num, addr) == 0);
+ return 0;
+}
+
+int
+Val::setval(Par par) const
+{
+ Con& con = par.con();
+ const Col& col = m_col;
+ assert(! col.m_pk);
+ const char* addr = ! m_null ? (const char*)dataaddr() : 0;
+ LL5("setval [" << col << "] " << *this);
+ CHK(con.setValue(col.m_num, addr) == 0);
return 0;
}
@@ -1269,58 +1879,170 @@ void
Val::calc(Par par, unsigned i)
{
const Col& col = m_col;
+ col.m_pk ? calckey(par, i) : calcnokey(par);
+ if (! m_null)
+ col.wellformed(dataaddr());
+}
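+
+// note: key columns get a value derived deterministically from the tuple
+// number i (calckey), non-key columns get a random value centred on
+// par.m_range and optionally skewed by m_bdir (calcnokey); every generated
+// value is checked to be well-formed for its charset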
+
+void
+Val::calckey(Par par, unsigned i)
+{
+ const Col& col = m_col;
m_null = false;
- if (col.m_pk) {
+ switch (col.m_type) {
+ case Col::Unsigned:
m_uint32 = i;
- return;
+ break;
+ case Col::Char:
+ {
+ const Chs* chs = col.m_chs;
+ CHARSET_INFO* cs = chs->m_cs;
+ unsigned n = 0;
+ calckeychars(par, i, n, m_char);
+ // pad to the fixed byte length with the charset space character
+ (*cs->cset->fill)(cs, (char*)&m_char[n], col.m_bytelength - n, 0x20);
+ }
+ break;
+ case Col::Varchar:
+ {
+ unsigned n = 0;
+ calckeychars(par, i, n, m_varchar + 1);
+ // set length and pad with nulls
+ m_varchar[0] = n;
+ memset(&m_varchar[1 + n], 0, col.m_bytelength - n);
+ }
+ break;
+ case Col::Longvarchar:
+ {
+ unsigned n = 0;
+ calckeychars(par, i, n, m_longvarchar + 2);
+ // set length and pad with nulls
+ m_longvarchar[0] = (n & 0xff);
+ m_longvarchar[1] = (n >> 8);
+ memset(&m_longvarchar[2 + n], 0, col.m_bytelength - n);
+ }
+ break;
+ default:
+ assert(false);
+ break;
}
+}
+
+void
+Val::calckeychars(Par par, unsigned i, unsigned& n, unsigned char* buf)
+{
+ const Col& col = m_col;
+ const Chs* chs = col.m_chs;
+ CHARSET_INFO* cs = chs->m_cs;
+ n = 0;
+ unsigned len = 0;
+ while (len < col.m_length) {
+ if (i % (1 + n) == 0) {
+ break;
+ }
+ const Chr& chr = chs->m_chr[i % maxcharcount];
+ assert(n + chr.m_size <= col.m_bytelength);
+ memcpy(buf + n, chr.m_bytes, chr.m_size);
+ n += chr.m_size;
+ len++;
+ }
+}
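+
+// note: within one run the same tuple number i always yields the same key
+// characters; uniqueness of the full primary key is still guaranteed by
+// the mandatory Unsigned key column (m_keycol), which calckey sets to i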
+
+void
+Val::calcnokey(Par par)
+{
+ const Col& col = m_col;
+ m_null = false;
if (col.m_nullable && urandom(100) < par.m_pctnull) {
m_null = true;
return;
}
- unsigned v = par.m_range + irandom((par.m_pctrange * par.m_range) / 100);
+ int r = irandom((par.m_pctrange * par.m_range) / 100);
+ if (par.m_bdir != 0 && urandom(10) != 0) {
+ if ((r < 0 && par.m_bdir > 0) || (r > 0 && par.m_bdir < 0))
+ r = -r;
+ }
+ unsigned v = par.m_range + r;
switch (col.m_type) {
- case NdbDictionary::Column::Unsigned:
+ case Col::Unsigned:
m_uint32 = v;
break;
- case NdbDictionary::Column::Varchar:
+ case Col::Char:
{
+ const Chs* chs = col.m_chs;
+ CHARSET_INFO* cs = chs->m_cs;
unsigned n = 0;
- while (n < col.m_length) {
- if (urandom(1 + col.m_length) == 0) {
- // nice distribution on lengths
- break;
- }
- m_varchar[2 + n++] = 'a' + urandom((par.m_pctrange * 10) / 100);
- }
- m_varchar[0] = (n >> 8);
- m_varchar[1] = (n & 0xff);
- while (n < col.m_length) {
- m_varchar[2 + n++] = 0;
- }
+ calcnokeychars(par, n, m_char);
+ // pad to the fixed byte length with the charset space character
+ (*cs->cset->fill)(cs, (char*)&m_char[n], col.m_bytelength - n, 0x20);
+ }
+ break;
+ case Col::Varchar:
+ {
+ unsigned n = 0;
+ calcnokeychars(par, n, m_varchar + 1);
+ // set length and pad with nulls
+ m_varchar[0] = n;
+ memset(&m_varchar[1 + n], 0, col.m_bytelength - n);
+ }
+ break;
+ case Col::Longvarchar:
+ {
+ unsigned n = 0;
+ calcnokeychars(par, n, m_longvarchar + 2);
+ // set length and pad with nulls
+ m_longvarchar[0] = (n & 0xff);
+ m_longvarchar[1] = (n >> 8);
+ memset(&m_longvarchar[2 + n], 0, col.m_bytelength - n);
}
break;
default:
assert(false);
break;
}
- // verify format
- col.verify(dataaddr());
+}
+
+void
+Val::calcnokeychars(Par par, unsigned& n, unsigned char* buf)
+{
+ const Col& col = m_col;
+ const Chs* chs = col.m_chs;
+ CHARSET_INFO* cs = chs->m_cs;
+ n = 0;
+ unsigned len = 0;
+ while (len < col.m_length) {
+ if (urandom(1 + col.m_bytelength) == 0) {
+ break;
+ }
+ unsigned half = maxcharcount / 2;
+ int r = irandom((par.m_pctrange * half) / 100);
+ if (par.m_bdir != 0 && urandom(10) != 0) {
+ if ((r < 0 && par.m_bdir > 0) || (r > 0 && par.m_bdir < 0))
+ r = -r;
+ }
+ unsigned i = half + r;
+ assert(i < maxcharcount);
+ const Chr& chr = chs->m_chr[i];
+ assert(n + chr.m_size <= col.m_bytelength);
+ memcpy(buf + n, chr.m_bytes, chr.m_size);
+ n += chr.m_size;
+ len++;
+ }
}
int
-Val::verify(const Val& val2) const
+Val::verify(Par par, const Val& val2) const
{
- CHK(cmp(val2) == 0);
+ CHK(cmp(par, val2) == 0);
return 0;
}
int
-Val::cmp(const Val& val2) const
+Val::cmp(Par par, const Val& val2) const
{
const Col& col = m_col;
const Col& col2 = val2.m_col;
- assert(col.m_type == col2.m_type && col.m_length == col2.m_length);
+ assert(col.equal(col2));
if (m_null || val2.m_null) {
if (! m_null)
return +1;
@@ -1329,18 +2051,39 @@ Val::cmp(const Val& val2) const
return 0;
}
// verify data formats
- col.verify(dataaddr());
- col.verify(val2.dataaddr());
+ col.wellformed(dataaddr());
+ col.wellformed(val2.dataaddr());
// compare
switch (col.m_type) {
- case NdbDictionary::Column::Unsigned:
- if (m_uint32 < val2.m_uint32)
- return -1;
- if (m_uint32 > val2.m_uint32)
- return +1;
- return 0;
- case NdbDictionary::Column::Varchar:
- return memcmp(&m_varchar[2], &val2.m_varchar[2], col.m_length);
+ case Col::Unsigned:
+ {
+ if (m_uint32 < val2.m_uint32)
+ return -1;
+ if (m_uint32 > val2.m_uint32)
+ return +1;
+ return 0;
+ }
+ break;
+ case Col::Char:
+ {
+ unsigned len = col.m_bytelength;
+ return cmpchars(par, m_char, len, val2.m_char, len);
+ }
+ break;
+ case Col::Varchar:
+ {
+ unsigned len1 = m_varchar[0];
+ unsigned len2 = val2.m_varchar[0];
+ return cmpchars(par, m_varchar + 1, len1, val2.m_varchar + 1, len2);
+ }
+ break;
+ case Col::Longvarchar:
+ {
+ unsigned len1 = m_longvarchar[0] + (m_longvarchar[1] << 8);
+ unsigned len2 = val2.m_longvarchar[0] + (val2.m_longvarchar[1] << 8);
+ return cmpchars(par, m_longvarchar + 2, len1, val2.m_longvarchar + 2, len2);
+ }
+ break;
default:
break;
}
@@ -1348,6 +2091,56 @@ Val::cmp(const Val& val2) const
return 0;
}
+int
+Val::cmpchars(Par par, const unsigned char* buf1, unsigned len1, const unsigned char* buf2, unsigned len2) const
+{
+ const Col& col = m_col;
+ const Chs* chs = col.m_chs;
+ CHARSET_INFO* cs = chs->m_cs;
+ int k;
+ if (! par.m_collsp) {
+ unsigned char x1[maxxmulsize * 8000];
+ unsigned char x2[maxxmulsize * 8000];
+ // make strxfrm pad both to same length
+ unsigned len = maxxmulsize * col.m_bytelength;
+ int n1 = NdbSqlUtil::strnxfrm_bug7284(cs, x1, chs->m_xmul * len, buf1, len1);
+ int n2 = NdbSqlUtil::strnxfrm_bug7284(cs, x2, chs->m_xmul * len, buf2, len2);
+ assert(n1 != -1 && n1 == n2);
+ k = memcmp(x1, x2, n1);
+ } else {
+ k = (*cs->coll->strnncollsp)(cs, buf1, len1, buf2, len2, false);
+ }
+ return k < 0 ? -1 : k > 0 ? +1 : 0;
+}
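+
+// note: when m_collsp is set the charset's strnncollsp() compares the raw
+// strings directly; otherwise both values are normalized with strnxfrm
+// (via the bug#7284 workaround) and compared with memcmp -- the two paths
+// should produce the same ordering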
+
+static void
+printstring(NdbOut& out, const unsigned char* str, unsigned len, bool showlen)
+{
+ char buf[4 * 8000];
+ char *p = buf;
+ *p++ = '[';
+ if (showlen) {
+ sprintf(p, "%u:", len);
+ p += strlen(p);
+ }
+ for (unsigned i = 0; i < len; i++) {
+ unsigned char c = str[i];
+ if (c == '\\') {
+ *p++ = '\\';
+ *p++ = c;
+ } else if (0x20 <= c && c < 0x7e) {
+ *p++ = c;
+ } else {
+ *p++ = '\\';
+ *p++ = hexstr[c >> 4];
+ *p++ = hexstr[c & 15];
+ }
+ }
+ *p++ = ']';
+ *p = 0;
+ out << buf;
+}
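+
+// note: printstring() escapes backslashes and non-printable bytes as \xx
+// hex so that log output stays unambiguous for any charset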
+
static NdbOut&
operator<<(NdbOut& out, const Val& val)
{
@@ -1357,16 +2150,25 @@ operator<<(NdbOut& out, const Val& val)
return out;
}
switch (col.m_type) {
- case NdbDictionary::Column::Unsigned:
+ case Col::Unsigned:
out << val.m_uint32;
break;
- case NdbDictionary::Column::Varchar:
+ case Col::Char:
{
- char buf[8000];
- unsigned n = (val.m_varchar[0] << 8) | val.m_varchar[1];
- assert(n <= col.m_length);
- sprintf(buf, "'%.*s'[%d]", n, &val.m_varchar[2], n);
- out << buf;
+ unsigned len = col.m_bytelength;
+ printstring(out, val.m_char, len, false);
+ }
+ break;
+ case Col::Varchar:
+ {
+ unsigned len = val.m_varchar[0];
+ printstring(out, val.m_varchar + 1, len, true);
+ }
+ break;
+ case Col::Longvarchar:
+ {
+ unsigned len = val.m_longvarchar[0] + (val.m_longvarchar[1] << 8);
+ printstring(out, val.m_longvarchar + 2, len, true);
}
break;
default:
@@ -1383,19 +2185,25 @@ struct Row {
const Tab& m_tab;
Val** m_val;
bool m_exist;
- enum Op { NoOp = 0, ReadOp, InsOp, UpdOp, DelOp };
+ enum Op { NoOp = 0, ReadOp = 1, InsOp = 2, UpdOp = 4, DelOp = 8, AnyOp = 15 };
Op m_pending;
+ Row* m_dbrow; // copy of db row before update
Row(const Tab& tab);
~Row();
void copy(const Row& row2);
- void calc(Par par, unsigned i);
- int verify(const Row& row2) const;
+ void calc(Par par, unsigned i, unsigned mask = 0);
+ const Row& dbrow() const;
+ int verify(Par par, const Row& row2) const;
int insrow(Par par);
int updrow(Par par);
+ int updrow(Par par, const ITab& itab);
int delrow(Par par);
+ int delrow(Par par, const ITab& itab);
int selrow(Par par);
+ int selrow(Par par, const ITab& itab);
int setrow(Par par);
- int cmp(const Row& row2) const;
+ int cmp(Par par, const Row& row2) const;
+ int cmp(Par par, const Row& row2, const ITab& itab) const;
private:
Row& operator=(const Row& row2);
};
@@ -1405,11 +2213,12 @@ Row::Row(const Tab& tab) :
{
m_val = new Val* [tab.m_cols];
for (unsigned k = 0; k < tab.m_cols; k++) {
- const Col& col = tab.m_col[k];
+ const Col& col = *tab.m_col[k];
m_val[k] = new Val(col);
}
m_exist = false;
m_pending = NoOp;
+ m_dbrow = 0;
}
Row::~Row()
@@ -1419,6 +2228,7 @@ Row::~Row()
delete m_val[k];
}
delete [] m_val;
+ delete m_dbrow;
}
void
@@ -1431,27 +2241,49 @@ Row::copy(const Row& row2)
const Val& val2 = *row2.m_val[k];
val.copy(val2);
}
+ m_exist = row2.m_exist;
+ m_pending = row2.m_pending;
+ if (row2.m_dbrow == 0) {
+ m_dbrow = 0;
+ } else {
+ assert(row2.m_dbrow->m_dbrow == 0);
+ if (m_dbrow == 0)
+ m_dbrow = new Row(tab);
+ m_dbrow->copy(*row2.m_dbrow);
+ }
}
void
-Row::calc(Par par, unsigned i)
+Row::calc(Par par, unsigned i, unsigned mask)
{
const Tab& tab = m_tab;
for (unsigned k = 0; k < tab.m_cols; k++) {
- Val& val = *m_val[k];
- val.calc(par, i);
+ if (! (mask & (1 << k))) {
+ Val& val = *m_val[k];
+ val.calc(par, i);
+ }
}
}
+const Row&
+Row::dbrow() const
+{
+ if (m_dbrow == 0)
+ return *this;
+ assert(m_pending == Row::UpdOp || m_pending == Row::DelOp);
+ return *m_dbrow;
+}
+
int
-Row::verify(const Row& row2) const
+Row::verify(Par par, const Row& row2) const
{
const Tab& tab = m_tab;
- assert(&tab == &row2.m_tab && m_exist && row2.m_exist);
+ const Row& row1 = *this;
+ assert(&row1.m_tab == &row2.m_tab && row1.m_exist && row2.m_exist);
for (unsigned k = 0; k < tab.m_cols; k++) {
- const Val& val = *m_val[k];
+ const Val& val1 = *row1.m_val[k];
const Val& val2 = *row2.m_val[k];
- CHK(val.verify(val2) == 0);
+ CHK(val1.verify(par, val2) == 0);
}
return 0;
}
@@ -1464,9 +2296,21 @@ Row::insrow(Par par)
assert(! m_exist);
CHK(con.getNdbOperation(tab) == 0);
CHKCON(con.m_op->insertTuple() == 0, con);
+ Rsq rsq1(tab.m_cols);
for (unsigned k = 0; k < tab.m_cols; k++) {
- const Val& val = *m_val[k];
- CHK(val.setval(par) == 0);
+ unsigned k2 = rsq1.next();
+ const Val& val = *m_val[k2];
+ const Col& col = val.m_col;
+ if (col.m_pk)
+ CHK(val.equal(par) == 0);
+ }
+ Rsq rsq2(tab.m_cols);
+ for (unsigned k = 0; k < tab.m_cols; k++) {
+ unsigned k2 = rsq2.next();
+ const Val& val = *m_val[k2];
+ const Col& col = val.m_col;
+ if (! col.m_pk)
+ CHK(val.setval(par) == 0);
}
m_pending = InsOp;
return 0;
@@ -1480,9 +2324,51 @@ Row::updrow(Par par)
assert(m_exist);
CHK(con.getNdbOperation(tab) == 0);
CHKCON(con.m_op->updateTuple() == 0, con);
+ Rsq rsq1(tab.m_cols);
for (unsigned k = 0; k < tab.m_cols; k++) {
- const Val& val = *m_val[k];
- CHK(val.setval(par) == 0);
+ unsigned k2 = rsq1.next();
+ const Val& val = *m_val[k2];
+ const Col& col = val.m_col;
+ if (col.m_pk)
+ CHK(val.equal(par) == 0);
+ }
+ Rsq rsq2(tab.m_cols);
+ for (unsigned k = 0; k < tab.m_cols; k++) {
+ unsigned k2 = rsq2.next();
+ const Val& val = *m_val[k2];
+ const Col& col = val.m_col;
+ if (! col.m_pk)
+ CHK(val.setval(par) == 0);
+ }
+ m_pending = UpdOp;
+ return 0;
+}
+
+int
+Row::updrow(Par par, const ITab& itab)
+{
+ Con& con = par.con();
+ const Tab& tab = m_tab;
+ assert(itab.m_type == ITab::UniqueHashIndex && &itab.m_tab == &tab);
+ assert(m_exist);
+ CHK(con.getNdbIndexOperation(itab, tab) == 0);
+ CHKCON(con.m_op->updateTuple() == 0, con);
+ Rsq rsq1(itab.m_icols);
+ for (unsigned k = 0; k < itab.m_icols; k++) {
+ unsigned k2 = rsq1.next();
+ const ICol& icol = *itab.m_icol[k2];
+ const Col& col = icol.m_col;
+ unsigned m = col.m_num;
+ const Val& val = *m_val[m];
+ CHK(val.equal(par, icol) == 0);
+ }
+ Rsq rsq2(tab.m_cols);
+ for (unsigned k = 0; k < tab.m_cols; k++) {
+ unsigned k2 = rsq2.next();
+ const Val& val = *m_val[k2];
+ const Col& col = val.m_col;
+ if (! col.m_pk)
+ CHK(val.setval(par) == 0);
}
m_pending = UpdOp;
return 0;
@@ -1496,11 +2382,35 @@ Row::delrow(Par par)
assert(m_exist);
CHK(con.getNdbOperation(m_tab) == 0);
CHKCON(con.m_op->deleteTuple() == 0, con);
+ Rsq rsq1(tab.m_cols);
for (unsigned k = 0; k < tab.m_cols; k++) {
- const Val& val = *m_val[k];
+ unsigned k2 = rsq1.next();
+ const Val& val = *m_val[k2];
const Col& col = val.m_col;
if (col.m_pk)
- CHK(val.setval(par) == 0);
+ CHK(val.equal(par) == 0);
+ }
+ m_pending = DelOp;
+ return 0;
+}
+
+int
+Row::delrow(Par par, const ITab& itab)
+{
+ Con& con = par.con();
+ const Tab& tab = m_tab;
+ assert(itab.m_type == ITab::UniqueHashIndex && &itab.m_tab == &tab);
+ assert(m_exist);
+ CHK(con.getNdbIndexOperation(itab, tab) == 0);
+ CHKCON(con.m_op->deleteTuple() == 0, con);
+ Rsq rsq1(itab.m_icols);
+ for (unsigned k = 0; k < itab.m_icols; k++) {
+ unsigned k2 = rsq1.next();
+ const ICol& icol = *itab.m_icol[k2];
+ const Col& col = icol.m_col;
+ unsigned m = col.m_num;
+ const Val& val = *m_val[m];
+ CHK(val.equal(par, icol) == 0);
}
m_pending = DelOp;
return 0;
@@ -1513,11 +2423,33 @@ Row::selrow(Par par)
const Tab& tab = m_tab;
CHK(con.getNdbOperation(m_tab) == 0);
CHKCON(con.m_op->readTuple() == 0, con);
+ Rsq rsq1(tab.m_cols);
for (unsigned k = 0; k < tab.m_cols; k++) {
- const Val& val = *m_val[k];
+ unsigned k2 = rsq1.next();
+ const Val& val = *m_val[k2];
const Col& col = val.m_col;
if (col.m_pk)
- CHK(val.setval(par) == 0);
+ CHK(val.equal(par) == 0);
+ }
+ return 0;
+}
+
+int
+Row::selrow(Par par, const ITab& itab)
+{
+ Con& con = par.con();
+ const Tab& tab = m_tab;
+ assert(itab.m_type == ITab::UniqueHashIndex && &itab.m_tab == &tab);
+ CHK(con.getNdbIndexOperation(itab, tab) == 0);
+ CHKCON(con.m_op->readTuple() == 0, con);
+ Rsq rsq1(itab.m_icols);
+ for (unsigned k = 0; k < itab.m_icols; k++) {
+ unsigned k2 = rsq1.next();
+ const ICol& icol = *itab.m_icol[k2];
+ const Col& col = icol.m_col;
+ unsigned m = col.m_num;
+ const Val& val = *m_val[m];
+ CHK(val.equal(par, icol) == 0);
}
return 0;
}
@@ -1527,8 +2459,10 @@ Row::setrow(Par par)
{
Con& con = par.con();
const Tab& tab = m_tab;
+ Rsq rsq1(tab.m_cols);
for (unsigned k = 0; k < tab.m_cols; k++) {
- const Val& val = *m_val[k];
+ unsigned k2 = rsq1.next();
+ const Val& val = *m_val[k2];
const Col& col = val.m_col;
if (! col.m_pk)
CHK(val.setval(par) == 0);
@@ -1538,7 +2472,7 @@ Row::setrow(Par par)
}
int
-Row::cmp(const Row& row2) const
+Row::cmp(Par par, const Row& row2) const
{
const Tab& tab = m_tab;
assert(&tab == &row2.m_tab);
@@ -1546,12 +2480,46 @@ Row::cmp(const Row& row2) const
for (unsigned k = 0; k < tab.m_cols; k++) {
const Val& val = *m_val[k];
const Val& val2 = *row2.m_val[k];
- if ((c = val.cmp(val2)) != 0)
+ if ((c = val.cmp(par, val2)) != 0)
break;
}
return c;
}
+int
+Row::cmp(Par par, const Row& row2, const ITab& itab) const
+{
+ const Tab& tab = m_tab;
+ int c = 0;
+ for (unsigned i = 0; i < itab.m_icols; i++) {
+ const ICol& icol = *itab.m_icol[i];
+ const Col& col = icol.m_col;
+ unsigned k = col.m_num;
+ assert(k < tab.m_cols);
+ const Val& val = *m_val[k];
+ const Val& val2 = *row2.m_val[k];
+ if ((c = val.cmp(par, val2)) != 0)
+ break;
+ }
+ return c;
+}
+
+static NdbOut&
+operator<<(NdbOut& out, const Row::Op op)
+{
+ if (op == Row::NoOp)
+ out << "NoOp";
+ else if (op == Row::InsOp)
+ out << "InsOp";
+ else if (op == Row::UpdOp)
+ out << "UpdOp";
+ else if (op == Row::DelOp)
+ out << "DelOp";
+ else
+ out << (int)op; // cast avoids recursing back into this operator for unnamed values
+ return out;
+}
+
static NdbOut&
operator<<(NdbOut& out, const Row& row)
{
@@ -1561,10 +2529,21 @@ operator<<(NdbOut& out, const Row& row)
out << " ";
out << *row.m_val[i];
}
- out << " [exist=" << row.m_exist;
+ out << " exist=" << row.m_exist;
if (row.m_pending)
out << " pending=" << row.m_pending;
- out << "]";
+ if (row.m_dbrow != 0)
+ out << " [dbrow=" << *row.m_dbrow << "]";
+ return out;
+}
+
+static NdbOut&
+operator<<(NdbOut& out, const Row* rowptr)
+{
+ if (rowptr == 0)
+ out << "null";
+ else
+ out << *rowptr;
return out;
}
@@ -1574,38 +2553,47 @@ struct Set {
const Tab& m_tab;
unsigned m_rows;
Row** m_row;
- Row** m_saverow;
+ unsigned* m_rowkey; // maps row number (from 0) in scan to tuple key
Row* m_keyrow;
NdbRecAttr** m_rec;
Set(const Tab& tab, unsigned rows);
~Set();
void reset();
unsigned count() const;
- // row methods
+ // old and new values
bool exist(unsigned i) const;
- Row::Op pending(unsigned i) const;
+ void dbsave(unsigned i);
+ void calc(Par par, unsigned i, unsigned mask = 0);
+ bool pending(unsigned i, unsigned mask) const;
void notpending(unsigned i, ExecType et = Commit);
void notpending(const Lst& lst, ExecType et = Commit);
- void calc(Par par, unsigned i);
+ void dbdiscard(unsigned i);
+ void dbdiscard(const Lst& lst);
+ const Row& dbrow(unsigned i) const;
+ // operations
int insrow(Par par, unsigned i);
int updrow(Par par, unsigned i);
+ int updrow(Par par, const ITab& itab, unsigned i);
int delrow(Par par, unsigned i);
- int selrow(Par par, unsigned i);
+ int delrow(Par par, const ITab& itab, unsigned i);
+ int selrow(Par par, const Row& keyrow);
+ int selrow(Par par, const ITab& itab, const Row& keyrow);
+ // set and get
+ void setkey(Par par, const Row& keyrow);
+ void setkey(Par par, const ITab& itab, const Row& keyrow);
int setrow(Par par, unsigned i);
int getval(Par par);
int getkey(Par par, unsigned* i);
- int putval(unsigned i, bool force);
- // set methods
- int verify(const Set& set2) const;
- void savepoint();
- void commit();
- void rollback();
+ int putval(unsigned i, bool force, unsigned n = ~0);
+ // verify
+ int verify(Par par, const Set& set2) const;
+ int verifyorder(Par par, const ITab& itab, bool descending) const;
// protect structure
NdbMutex* m_mutex;
- void lock() {
+ void lock() const {
NdbMutex_Lock(m_mutex);
}
- void unlock() {
+ void unlock() const {
NdbMutex_Unlock(m_mutex);
}
private:
@@ -1621,7 +2609,11 @@ Set::Set(const Tab& tab, unsigned rows) :
// allocate on need to save space
m_row[i] = 0;
}
- m_saverow = 0;
+ m_rowkey = new unsigned [m_rows];
+ for (unsigned n = 0; n < m_rows; n++) {
+ // initialize to null
+ m_rowkey[n] = ~0;
+ }
m_keyrow = new Row(tab);
m_rec = new NdbRecAttr* [tab.m_cols];
for (unsigned k = 0; k < tab.m_cols; k++) {
@@ -1635,11 +2627,9 @@ Set::~Set()
{
for (unsigned i = 0; i < m_rows; i++) {
delete m_row[i];
- if (m_saverow != 0)
- delete m_saverow[i];
}
delete [] m_row;
- delete [] m_saverow;
+ delete [] m_rowkey;
delete m_keyrow;
delete [] m_rec;
NdbMutex_Destroy(m_mutex);
@@ -1670,6 +2660,8 @@ Set::count() const
return count;
}
+// old and new values
+
bool
Set::exist(unsigned i) const
{
@@ -1679,27 +2671,97 @@ Set::exist(unsigned i) const
return m_row[i]->m_exist;
}
-Row::Op
-Set::pending(unsigned i) const
+void
+Set::dbsave(unsigned i)
{
- assert(i < m_rows);
- if (m_row[i] == 0) // not allocated => not pending
- return Row::NoOp;
- return m_row[i]->m_pending;
+ const Tab& tab = m_tab;
+ assert(i < m_rows && m_row[i] != 0);
+ Row& row = *m_row[i];
+ LL5("dbsave " << i << ": " << row);
+ assert(row.m_exist && ! row.m_pending && row.m_dbrow == 0);
+ // could swap pointers but making copy is safer
+ Row* rowptr = new Row(tab);
+ rowptr->copy(row);
+ row.m_dbrow = rowptr;
}
void
-Set::calc(Par par, unsigned i)
+Set::calc(Par par, unsigned i, unsigned mask)
{
const Tab& tab = m_tab;
if (m_row[i] == 0)
m_row[i] = new Row(tab);
Row& row = *m_row[i];
- // value generation parameters
- par.m_pctrange = 40;
- row.calc(par, i);
+ row.calc(par, i, mask);
+}
+
+bool
+Set::pending(unsigned i, unsigned mask) const
+{
+ assert(i < m_rows);
+ if (m_row[i] == 0) // not allocated => not pending
+ return false;
+ return m_row[i]->m_pending & mask;
+}
+
+void
+Set::notpending(unsigned i, ExecType et)
+{
+ assert(m_row[i] != 0);
+ Row& row = *m_row[i];
+ if (et == Commit) {
+ if (row.m_pending == Row::InsOp)
+ row.m_exist = true;
+ if (row.m_pending == Row::DelOp)
+ row.m_exist = false;
+ } else {
+ if (row.m_pending == Row::InsOp)
+ row.m_exist = false;
+ if (row.m_pending == Row::DelOp)
+ row.m_exist = true;
+ }
+ row.m_pending = Row::NoOp;
+}
+
+void
+Set::notpending(const Lst& lst, ExecType et)
+{
+ for (unsigned j = 0; j < lst.m_cnt; j++) {
+ unsigned i = lst.m_arr[j];
+ notpending(i, et);
+ }
+}
+
+void
+Set::dbdiscard(unsigned i)
+{
+ assert(m_row[i] != 0);
+ Row& row = *m_row[i];
+ LL5("dbdiscard " << i << ": " << row);
+ assert(row.m_dbrow != 0);
+ delete row.m_dbrow;
+ row.m_dbrow = 0;
+}
+
+const Row&
+Set::dbrow(unsigned i) const
+{
+ assert(m_row[i] != 0);
+ Row& row = *m_row[i];
+ return row.dbrow();
+}
+
+void
+Set::dbdiscard(const Lst& lst)
+{
+ for (unsigned j = 0; j < lst.m_cnt; j++) {
+ unsigned i = lst.m_arr[j];
+ dbdiscard(i);
+ }
}
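+
+// note: dbsave() snapshots the committed value of a row before an update
+// or delete is issued, Row::dbrow() returns that snapshot while the change
+// is pending, and dbdiscard() drops it once the transaction outcome is
+// known (see notpending above)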
+// operations
+
int
Set::insrow(Par par, unsigned i)
{
@@ -1719,6 +2781,15 @@ Set::updrow(Par par, unsigned i)
}
int
+Set::updrow(Par par, const ITab& itab, unsigned i)
+{
+ assert(m_row[i] != 0);
+ Row& row = *m_row[i];
+ CHK(row.updrow(par, itab) == 0);
+ return 0;
+}
+
+int
Set::delrow(Par par, unsigned i)
{
assert(m_row[i] != 0);
@@ -1728,16 +2799,68 @@ Set::delrow(Par par, unsigned i)
}
int
-Set::selrow(Par par, unsigned i)
+Set::delrow(Par par, const ITab& itab, unsigned i)
+{
+ assert(m_row[i] != 0);
+ Row& row = *m_row[i];
+ CHK(row.delrow(par, itab) == 0);
+ return 0;
+}
+
+int
+Set::selrow(Par par, const Row& keyrow)
{
Con& con = par.con();
- m_keyrow->calc(par, i);
+ const Tab& tab = par.tab();
+ setkey(par, keyrow);
+ LL5("selrow " << tab.m_name << ": keyrow: " << keyrow);
CHK(m_keyrow->selrow(par) == 0);
CHK(getval(par) == 0);
return 0;
}
int
+Set::selrow(Par par, const ITab& itab, const Row& keyrow)
+{
+ Con& con = par.con();
+ setkey(par, itab, keyrow);
+ LL5("selrow " << itab.m_name << ": keyrow: " << keyrow);
+ CHK(m_keyrow->selrow(par, itab) == 0);
+ CHK(getval(par) == 0);
+ return 0;
+}
+
+// set and get
+
+void
+Set::setkey(Par par, const Row& keyrow)
+{
+ const Tab& tab = m_tab;
+ for (unsigned k = 0; k < tab.m_cols; k++) {
+ const Col& col = *tab.m_col[k];
+ if (col.m_pk) {
+ Val& val1 = *m_keyrow->m_val[k];
+ const Val& val2 = *keyrow.dbrow().m_val[k];
+ val1.copy(val2);
+ }
+ }
+}
+
+void
+Set::setkey(Par par, const ITab& itab, const Row& keyrow)
+{
+ const Tab& tab = m_tab;
+ for (unsigned k = 0; k < itab.m_icols; k++) {
+ const ICol& icol = *itab.m_icol[k];
+ const Col& col = icol.m_col;
+ unsigned m = col.m_num;
+ Val& val1 = *m_keyrow->m_val[m];
+ const Val& val2 = *keyrow.dbrow().m_val[m];
+ val1.copy(val2);
+ }
+}
+
+int
Set::setrow(Par par, unsigned i)
{
Con& con = par.con();
@@ -1751,8 +2874,10 @@ Set::getval(Par par)
{
Con& con = par.con();
const Tab& tab = m_tab;
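+  // request the column values in a pseudo-random order instead of 0..m_cols-1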
+ Rsq rsq1(tab.m_cols);
for (unsigned k = 0; k < tab.m_cols; k++) {
- CHK(con.getValue(k, m_rec[k]) == 0);
+ unsigned k2 = rsq1.next();
+ CHK(con.getValue(k2, m_rec[k2]) == 0);
}
return 0;
}
@@ -1760,16 +2885,18 @@ Set::getval(Par par)
int
Set::getkey(Par par, unsigned* i)
{
- assert(m_rec[0] != 0);
- const char* aRef0 = m_rec[0]->aRef();
- Uint32 key = *(const Uint32*)aRef0;
+ const Tab& tab = m_tab;
+ unsigned k = tab.m_keycol;
+ assert(m_rec[k] != 0);
+ const char* aRef = m_rec[k]->aRef();
+ Uint32 key = *(const Uint32*)aRef;
CHK(key < m_rows);
*i = key;
return 0;
}
int
-Set::putval(unsigned i, bool force)
+Set::putval(unsigned i, bool force, unsigned n)
{
const Tab& tab = m_tab;
if (m_row[i] == 0)
@@ -1790,82 +2917,53 @@ Set::putval(unsigned i, bool force)
}
if (! row.m_exist)
row.m_exist = true;
+ if (n != ~0)
+ m_rowkey[n] = i;
return 0;
}
-void
-Set::notpending(unsigned i, ExecType et)
-{
- assert(m_row[i] != 0);
- Row& row = *m_row[i];
- if (et == Commit) {
- if (row.m_pending == Row::InsOp)
- row.m_exist = true;
- if (row.m_pending == Row::DelOp)
- row.m_exist = false;
- } else {
- if (row.m_pending == Row::InsOp)
- row.m_exist = false;
- if (row.m_pending == Row::DelOp)
- row.m_exist = true;
- }
- row.m_pending = Row::NoOp;
-}
-
-void
-Set::notpending(const Lst& lst, ExecType et)
-{
- for (unsigned j = 0; j < lst.m_cnt; j++) {
- unsigned i = lst.m_arr[j];
- notpending(i, et);
- }
-}
-
int
-Set::verify(const Set& set2) const
+Set::verify(Par par, const Set& set2) const
{
- const Tab& tab = m_tab;
- assert(&tab == &set2.m_tab && m_rows == set2.m_rows);
+ assert(&m_tab == &set2.m_tab && m_rows == set2.m_rows);
+ LL4("verify set1 count=" << count() << " vs set2 count=" << set2.count());
for (unsigned i = 0; i < m_rows; i++) {
- CHK(exist(i) == set2.exist(i));
- if (! exist(i))
- continue;
- Row& row = *m_row[i];
- Row& row2 = *set2.m_row[i];
- CHK(row.verify(row2) == 0);
+ bool ok = true;
+ if (exist(i) != set2.exist(i)) {
+ ok = false;
+ } else if (exist(i)) {
+ if (dbrow(i).verify(par, set2.dbrow(i)) != 0)
+ ok = false;
+ }
+ if (! ok) {
+ LL1("verify failed: key=" << i << " row1=" << m_row[i] << " row2=" << set2.m_row[i]);
+ CHK(0 == 1);
+ }
}
return 0;
}
-void
-Set::savepoint()
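+// check that an ordered index scan returned rows (recorded in m_rowkey) in index order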
+int
+Set::verifyorder(Par par, const ITab& itab, bool descending) const
{
const Tab& tab = m_tab;
- assert(m_saverow == 0);
- m_saverow = new Row* [m_rows];
- for (unsigned i = 0; i < m_rows; i++) {
- if (m_row[i] == 0)
- m_saverow[i] = 0;
- else {
- m_saverow[i] = new Row(tab);
- m_saverow[i]->copy(*m_row[i]);
- }
+ for (unsigned n = 0; n < m_rows; n++) {
+ unsigned i2 = m_rowkey[n];
+ if (i2 == ~0)
+ break;
+ if (n == 0)
+ continue;
+ unsigned i1 = m_rowkey[n - 1];
+ assert(i1 < m_rows && i2 < m_rows);
+ const Row& row1 = *m_row[i1];
+ const Row& row2 = *m_row[i2];
+ assert(row1.m_exist && row2.m_exist);
+ if (! descending)
+ CHK(row1.cmp(par, row2, itab) <= 0);
+ else
+ CHK(row1.cmp(par, row2, itab) >= 0);
}
-}
-
-void
-Set::commit()
-{
- delete [] m_saverow;
- m_saverow = 0;
-}
-
-void
-Set::rollback()
-{
- assert(m_saverow != 0);
- m_row = m_saverow;
- m_saverow = 0;
+ return 0;
}
static NdbOut&
@@ -1887,6 +2985,7 @@ struct BVal : public Val {
int m_type;
BVal(const ICol& icol);
int setbnd(Par par) const;
+ int setflt(Par par) const;
};
BVal::BVal(const ICol& icol) :
@@ -1906,16 +3005,37 @@ BVal::setbnd(Par par) const
return 0;
}
+int
+BVal::setflt(Par par) const
+{
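+  // map this bound's index bound type (0..4) to the equivalent scan filter condition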
+ static unsigned index_bound_to_filter_bound[5] = {
+ NdbScanFilter::COND_GE,
+ NdbScanFilter::COND_GT,
+ NdbScanFilter::COND_LE,
+ NdbScanFilter::COND_LT,
+ NdbScanFilter::COND_EQ
+ };
+ Con& con = par.con();
+ assert(g_compare_null || ! m_null);
+ const char* addr = ! m_null ? (const char*)dataaddr() : 0;
+ const ICol& icol = m_icol;
+ const Col& col = icol.m_col;
+ unsigned length = col.m_bytesize;
+ unsigned cond = index_bound_to_filter_bound[m_type];
+ CHK(con.setFilter(col.m_num, cond, addr, length) == 0);
+ return 0;
+}
+
static NdbOut&
operator<<(NdbOut& out, const BVal& bval)
{
const ICol& icol = bval.m_icol;
const Col& col = icol.m_col;
const Val& val = bval;
- out << "type " << bval.m_type;
- out << " icol " << icol.m_num;
- out << " col " << col.m_name << "(" << col.m_num << ")";
- out << " value " << val;
+ out << "type=" << bval.m_type;
+ out << " icol=" << icol.m_num;
+ out << " col=" << col.m_num << "," << col.m_name;
+ out << " value=" << val;
return out;
}
@@ -1933,7 +3053,8 @@ struct BSet {
void calc(Par par);
void calcpk(Par par, unsigned i);
int setbnd(Par par) const;
- void filter(const Set& set, Set& set2) const;
+ int setflt(Par par) const;
+ void filter(Par par, const Set& set, Set& set2) const;
};
BSet::BSet(const Tab& tab, const ITab& itab, unsigned rows) :
@@ -1967,12 +3088,15 @@ void
BSet::calc(Par par)
{
const ITab& itab = m_itab;
+ par.m_pctrange = par.m_pctbrange;
reset();
for (unsigned k = 0; k < itab.m_icols; k++) {
- const ICol& icol = itab.m_icol[k];
+ const ICol& icol = *itab.m_icol[k];
const Col& col = icol.m_col;
for (unsigned i = 0; i <= 1; i++) {
- if (urandom(10) == 0)
+ if (m_bvals == 0 && urandom(100) == 0)
+ return;
+ if (m_bvals != 0 && urandom(3) == 0)
return;
assert(m_bvals < m_alloc);
BVal& bval = *new BVal(icol);
@@ -1991,17 +3115,19 @@ BSet::calc(Par par)
bval.m_type = 4;
if (k + 1 < itab.m_icols)
bval.m_type = 4;
- // value generation parammeters
if (! g_compare_null)
par.m_pctnull = 0;
- par.m_pctrange = 50; // bit higher
+ if (bval.m_type == 0 || bval.m_type == 1)
+ par.m_bdir = -1;
+ if (bval.m_type == 2 || bval.m_type == 3)
+ par.m_bdir = +1;
do {
- bval.calc(par, 0);
+ bval.calcnokey(par);
if (i == 1) {
assert(m_bvals >= 2);
const BVal& bv1 = *m_bval[m_bvals - 2];
const BVal& bv2 = *m_bval[m_bvals - 1];
- if (bv1.cmp(bv2) > 0 && urandom(100) != 0)
+ if (bv1.cmp(par, bv2) > 0 && urandom(100) != 0)
continue;
}
} while (0);
@@ -2018,7 +3144,7 @@ BSet::calcpk(Par par, unsigned i)
const ITab& itab = m_itab;
reset();
for (unsigned k = 0; k < itab.m_icols; k++) {
- const ICol& icol = itab.m_icol[k];
+ const ICol& icol = *itab.m_icol[k];
const Col& col = icol.m_col;
assert(col.m_pk);
assert(m_bvals < m_alloc);
@@ -2033,26 +3159,48 @@ int
BSet::setbnd(Par par) const
{
if (m_bvals != 0) {
- unsigned p1 = urandom(m_bvals);
- unsigned p2 = 10009; // prime
- // random order
+ Rsq rsq1(m_bvals);
for (unsigned j = 0; j < m_bvals; j++) {
- unsigned k = p1 + p2 * j;
- const BVal& bval = *m_bval[k % m_bvals];
+ unsigned j2 = rsq1.next();
+ const BVal& bval = *m_bval[j2];
CHK(bval.setbnd(par) == 0);
}
// duplicate
if (urandom(5) == 0) {
- unsigned k = urandom(m_bvals);
- const BVal& bval = *m_bval[k];
+ unsigned j3 = urandom(m_bvals);
+ const BVal& bval = *m_bval[j3];
CHK(bval.setbnd(par) == 0);
}
}
return 0;
}
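+// apply the bounds as an AND-composed NdbScanFilter on a table scan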
+int
+BSet::setflt(Par par) const
+{
+ Con& con = par.con();
+ CHK(con.getNdbScanFilter() == 0);
+ CHK(con.beginFilter(NdbScanFilter::AND) == 0);
+ if (m_bvals != 0) {
+ Rsq rsq1(m_bvals);
+ for (unsigned j = 0; j < m_bvals; j++) {
+ unsigned j2 = rsq1.next();
+ const BVal& bval = *m_bval[j2];
+ CHK(bval.setflt(par) == 0);
+ }
+ // duplicate
+ if (urandom(5) == 0) {
+ unsigned j3 = urandom(m_bvals);
+ const BVal& bval = *m_bval[j3];
+ CHK(bval.setflt(par) == 0);
+ }
+ }
+ CHK(con.endFilter() == 0);
+ return 0;
+}
+
void
-BSet::filter(const Set& set, Set& set2) const
+BSet::filter(Par par, const Set& set, Set& set2) const
{
const Tab& tab = m_tab;
const ITab& itab = m_itab;
@@ -2061,11 +3209,13 @@ BSet::filter(const Set& set, Set& set2) const
for (unsigned i = 0; i < set.m_rows; i++) {
if (! set.exist(i))
continue;
- const Row& row = *set.m_row[i];
+ set.lock();
+ const Row& row = set.dbrow(i);
+ set.unlock();
if (! g_store_null_key) {
bool ok1 = false;
for (unsigned k = 0; k < itab.m_icols; k++) {
- const ICol& icol = itab.m_icol[k];
+ const ICol& icol = *itab.m_icol[k];
const Col& col = icol.m_col;
const Val& val = *row.m_val[col.m_num];
if (! val.m_null) {
@@ -2082,7 +3232,8 @@ BSet::filter(const Set& set, Set& set2) const
const ICol& icol = bval.m_icol;
const Col& col = icol.m_col;
const Val& val = *row.m_val[col.m_num];
- int ret = bval.cmp(val);
+ int ret = bval.cmp(par, val);
+ LL5("cmp: ret=" << ret << " " << bval << " vs " << val);
if (bval.m_type == 0)
ok2 = (ret <= 0);
else if (bval.m_type == 1)
@@ -2106,7 +3257,6 @@ BSet::filter(const Set& set, Set& set2) const
Row& row2 = *set2.m_row[i];
assert(! row2.m_exist);
row2.copy(row);
- row2.m_exist = true;
}
}
@@ -2115,9 +3265,8 @@ operator<<(NdbOut& out, const BSet& bset)
{
out << "bounds=" << bset.m_bvals;
for (unsigned j = 0; j < bset.m_bvals; j++) {
- out << endl;
const BVal& bval = *bset.m_bval[j];
- out << "bound " << j << ": " << bval;
+ out << " [bound " << j << ": " << bval << "]";
}
return out;
}
@@ -2128,15 +3277,16 @@ static int
pkinsert(Par par)
{
Con& con = par.con();
+ const Tab& tab = par.tab();
Set& set = par.set();
- LL3("pkinsert");
+ LL3("pkinsert " << tab.m_name);
CHK(con.startTransaction() == 0);
Lst lst;
for (unsigned j = 0; j < par.m_rows; j++) {
unsigned j2 = ! par.m_randomkey ? j : urandom(par.m_rows);
unsigned i = thrrow(par, j2);
set.lock();
- if (set.exist(i) || set.pending(i)) {
+ if (set.exist(i) || set.pending(i, Row::AnyOp)) {
set.unlock();
continue;
}
@@ -2152,7 +3302,7 @@ pkinsert(Par par)
CHK(con.execute(et, deadlock, nospace) == 0);
con.closeTransaction();
if (deadlock) {
- LL1("pkinsert: stop on deadlock");
+ LL1("pkinsert: stop on deadlock [at 1]");
return 0;
}
if (nospace) {
@@ -2173,7 +3323,7 @@ pkinsert(Par par)
CHK(con.execute(et, deadlock, nospace) == 0);
con.closeTransaction();
if (deadlock) {
- LL1("pkinsert: stop on deadlock");
+ LL1("pkinsert: stop on deadlock [at 2]");
return 0;
}
if (nospace) {
@@ -2193,8 +3343,9 @@ static int
pkupdate(Par par)
{
Con& con = par.con();
+ const Tab& tab = par.tab();
Set& set = par.set();
- LL3("pkupdate");
+ LL3("pkupdate " << tab.m_name);
CHK(con.startTransaction() == 0);
Lst lst;
bool deadlock = false;
@@ -2203,10 +3354,11 @@ pkupdate(Par par)
unsigned j2 = ! par.m_randomkey ? j : urandom(par.m_rows);
unsigned i = thrrow(par, j2);
set.lock();
- if (! set.exist(i) || set.pending(i)) {
+ if (! set.exist(i) || set.pending(i, Row::AnyOp)) {
set.unlock();
continue;
}
+ set.dbsave(i);
set.calc(par, i);
CHK(set.updrow(par, i) == 0);
set.unlock();
@@ -2218,16 +3370,17 @@ pkupdate(Par par)
ExecType et = randompct(par.m_abortpct) ? Rollback : Commit;
CHK(con.execute(et, deadlock, nospace) == 0);
if (deadlock) {
- LL1("pkupdate: stop on deadlock");
+ LL1("pkupdate: stop on deadlock [at 1]");
break;
}
if (nospace) {
- LL1("pkupdate: cnt=" << j << " stop on nospace");
+ LL1("pkupdate: cnt=" << j << " stop on nospace [at 1]");
break;
}
con.closeTransaction();
set.lock();
set.notpending(lst, et);
+ set.dbdiscard(lst);
set.unlock();
lst.reset();
CHK(con.startTransaction() == 0);
@@ -2239,12 +3392,13 @@ pkupdate(Par par)
ExecType et = randompct(par.m_abortpct) ? Rollback : Commit;
CHK(con.execute(et, deadlock, nospace) == 0);
if (deadlock) {
- LL1("pkupdate: stop on deadlock");
+ LL1("pkupdate: stop on deadlock [at 2]");
} else if (nospace) {
- LL1("pkupdate: end: stop on nospace");
+ LL1("pkupdate: end: stop on nospace [at 2]");
} else {
set.lock();
set.notpending(lst, et);
+ set.dbdiscard(lst);
set.unlock();
}
}
@@ -2256,8 +3410,9 @@ static int
pkdelete(Par par)
{
Con& con = par.con();
+ const Tab& tab = par.tab();
Set& set = par.set();
- LL3("pkdelete");
+ LL3("pkdelete " << tab.m_name);
CHK(con.startTransaction() == 0);
Lst lst;
bool deadlock = false;
@@ -2266,7 +3421,7 @@ pkdelete(Par par)
unsigned j2 = ! par.m_randomkey ? j : urandom(par.m_rows);
unsigned i = thrrow(par, j2);
set.lock();
- if (! set.exist(i) || set.pending(i)) {
+ if (! set.exist(i) || set.pending(i, Row::AnyOp)) {
set.unlock();
continue;
}
@@ -2280,7 +3435,7 @@ pkdelete(Par par)
ExecType et = randompct(par.m_abortpct) ? Rollback : Commit;
CHK(con.execute(et, deadlock, nospace) == 0);
if (deadlock) {
- LL1("pkdelete: stop on deadlock");
+ LL1("pkdelete: stop on deadlock [at 1]");
break;
}
con.closeTransaction();
@@ -2297,7 +3452,7 @@ pkdelete(Par par)
ExecType et = randompct(par.m_abortpct) ? Rollback : Commit;
CHK(con.execute(et, deadlock, nospace) == 0);
if (deadlock) {
- LL1("pkdelete: stop on deadlock");
+ LL1("pkdelete: stop on deadlock [at 2]");
} else {
set.lock();
set.notpending(lst, et);
@@ -2314,19 +3469,19 @@ pkread(Par par)
Con& con = par.con();
const Tab& tab = par.tab();
Set& set = par.set();
- LL3((par.m_verify ? "pkverify " : "pkread ") << tab.m_name);
+ LL3("pkread " << tab.m_name << " verify=" << par.m_verify);
// expected
const Set& set1 = set;
Set set2(tab, set.m_rows);
for (unsigned i = 0; i < set.m_rows; i++) {
set.lock();
- if (! set.exist(i) || set.pending(i)) {
+ if (! set.exist(i)) {
set.unlock();
continue;
}
set.unlock();
CHK(con.startTransaction() == 0);
- CHK(set2.selrow(par, i) == 0);
+ CHK(set2.selrow(par, *set1.m_row[i]) == 0);
CHK(con.execute(Commit) == 0);
unsigned i2 = (unsigned)-1;
CHK(set2.getkey(par, &i2) == 0 && i == i2);
@@ -2335,7 +3490,7 @@ pkread(Par par)
con.closeTransaction();
}
if (par.m_verify)
- CHK(set1.verify(set2) == 0);
+ CHK(set1.verify(par, set2) == 0);
return 0;
}
@@ -2364,6 +3519,148 @@ pkreadfast(Par par, unsigned count)
return 0;
}
+// hash index operations
+
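+// update rows through the unique hash index itab rather than the primary key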
+static int
+hashindexupdate(Par par, const ITab& itab)
+{
+ Con& con = par.con();
+ Set& set = par.set();
+ LL3("hashindexupdate " << itab.m_name);
+ CHK(con.startTransaction() == 0);
+ Lst lst;
+ bool deadlock = false;
+ bool nospace = false;
+ for (unsigned j = 0; j < par.m_rows; j++) {
+ unsigned j2 = ! par.m_randomkey ? j : urandom(par.m_rows);
+ unsigned i = thrrow(par, j2);
+ set.lock();
+ if (! set.exist(i) || set.pending(i, Row::AnyOp)) {
+ set.unlock();
+ continue;
+ }
+ set.dbsave(i);
+ // index key columns are not re-calculated
+ set.calc(par, i, itab.m_colmask);
+ CHK(set.updrow(par, itab, i) == 0);
+ set.unlock();
+ LL4("hashindexupdate " << i << ": " << *set.m_row[i]);
+ lst.push(i);
+ if (lst.cnt() == par.m_batch) {
+ deadlock = par.m_deadlock;
+ CHK(con.execute(Commit, deadlock, nospace) == 0);
+ if (deadlock) {
+ LL1("hashindexupdate: stop on deadlock [at 1]");
+ break;
+ }
+ con.closeTransaction();
+ set.lock();
+ set.notpending(lst);
+ set.dbdiscard(lst);
+ set.unlock();
+ lst.reset();
+ CHK(con.startTransaction() == 0);
+ }
+ }
+ if (! deadlock && lst.cnt() != 0) {
+ deadlock = par.m_deadlock;
+ CHK(con.execute(Commit, deadlock, nospace) == 0);
+ if (deadlock) {
+ LL1("hashindexupdate: stop on deadlock [at 2]");
+ } else {
+ set.lock();
+ set.notpending(lst);
+ set.dbdiscard(lst);
+ set.unlock();
+ }
+ }
+ con.closeTransaction();
+ return 0;
+}
+
+static int
+hashindexdelete(Par par, const ITab& itab)
+{
+ Con& con = par.con();
+ Set& set = par.set();
+ LL3("hashindexdelete " << itab.m_name);
+ CHK(con.startTransaction() == 0);
+ Lst lst;
+ bool deadlock = false;
+ bool nospace = false;
+ for (unsigned j = 0; j < par.m_rows; j++) {
+ unsigned j2 = ! par.m_randomkey ? j : urandom(par.m_rows);
+ unsigned i = thrrow(par, j2);
+ set.lock();
+ if (! set.exist(i) || set.pending(i, Row::AnyOp)) {
+ set.unlock();
+ continue;
+ }
+ CHK(set.delrow(par, itab, i) == 0);
+ set.unlock();
+ LL4("hashindexdelete " << i << ": " << *set.m_row[i]);
+ lst.push(i);
+ if (lst.cnt() == par.m_batch) {
+ deadlock = par.m_deadlock;
+ CHK(con.execute(Commit, deadlock, nospace) == 0);
+ if (deadlock) {
+ LL1("hashindexdelete: stop on deadlock [at 1]");
+ break;
+ }
+ con.closeTransaction();
+ set.lock();
+ set.notpending(lst);
+ set.unlock();
+ lst.reset();
+ CHK(con.startTransaction() == 0);
+ }
+ }
+ if (! deadlock && lst.cnt() != 0) {
+ deadlock = par.m_deadlock;
+ CHK(con.execute(Commit, deadlock, nospace) == 0);
+ if (deadlock) {
+ LL1("hashindexdelete: stop on deadlock [at 2]");
+ } else {
+ set.lock();
+ set.notpending(lst);
+ set.unlock();
+ }
+ }
+ con.closeTransaction();
+ return 0;
+}
+
+static int
+hashindexread(Par par, const ITab& itab)
+{
+ Con& con = par.con();
+ const Tab& tab = par.tab();
+ Set& set = par.set();
+ LL3("hashindexread " << itab.m_name << " verify=" << par.m_verify);
+ // expected
+ const Set& set1 = set;
+ Set set2(tab, set.m_rows);
+ for (unsigned i = 0; i < set.m_rows; i++) {
+ set.lock();
+ if (! set.exist(i)) {
+ set.unlock();
+ continue;
+ }
+ set.unlock();
+ CHK(con.startTransaction() == 0);
+ CHK(set2.selrow(par, itab, *set1.m_row[i]) == 0);
+ CHK(con.execute(Commit) == 0);
+ unsigned i2 = (unsigned)-1;
+ CHK(set2.getkey(par, &i2) == 0 && i == i2);
+ CHK(set2.putval(i, false) == 0);
+ LL4("row " << set2.count() << ": " << *set2.m_row[i]);
+ con.closeTransaction();
+ }
+ if (par.m_verify)
+ CHK(set1.verify(par, set2) == 0);
+ return 0;
+}
+
// scan read
static int
@@ -2374,26 +3671,35 @@ scanreadtable(Par par)
const Set& set = par.set();
// expected
const Set& set1 = set;
- LL3((par.m_verify ? "scanverify " : "scanread ") << tab.m_name);
+ LL3("scanread " << tab.m_name << " lockmode=" << par.m_lockmode << " tupscan=" << par.m_tupscan << " expect=" << set1.count() << " verify=" << par.m_verify);
Set set2(tab, set.m_rows);
CHK(con.startTransaction() == 0);
CHK(con.getNdbScanOperation(tab) == 0);
- CHK(con.openScanRead(par.m_scanbat, par.m_scanpar) == 0);
+ CHK(con.readTuples(par) == 0);
set2.getval(par);
CHK(con.executeScan() == 0);
+ unsigned n = 0;
+ bool deadlock = false;
while (1) {
int ret;
- CHK((ret = con.nextScanResult(true)) == 0 || ret == 1);
+ deadlock = par.m_deadlock;
+ CHK((ret = con.nextScanResult(true, deadlock)) == 0 || ret == 1);
if (ret == 1)
break;
+ if (deadlock) {
+ LL1("scanreadtable: stop on deadlock");
+ break;
+ }
unsigned i = (unsigned)-1;
CHK(set2.getkey(par, &i) == 0);
- CHK(set2.putval(i, false) == 0);
- LL4("row " << set2.count() << ": " << *set2.m_row[i]);
+ CHK(set2.putval(i, false, n) == 0);
+ LL4("row " << n << ": " << *set2.m_row[i]);
+ n++;
}
con.closeTransaction();
if (par.m_verify)
- CHK(set1.verify(set2) == 0);
+ CHK(set1.verify(par, set2) == 0);
+ LL3("scanread " << tab.m_name << " done rows=" << n);
return 0;
}
@@ -2406,7 +3712,7 @@ scanreadtablefast(Par par, unsigned countcheck)
LL3("scanfast " << tab.m_name);
CHK(con.startTransaction() == 0);
CHK(con.getNdbScanOperation(tab) == 0);
- CHK(con.openScanRead(par.m_scanbat, par.m_scanpar) == 0);
+ CHK(con.readTuples(par) == 0);
// get 1st column
NdbRecAttr* rec;
CHK(con.getValue((Uint32)0, rec) == 0);
@@ -2425,37 +3731,60 @@ scanreadtablefast(Par par, unsigned countcheck)
}
static int
-scanreadindex(Par par, const ITab& itab, const BSet& bset)
+scanreadindex(Par par, const ITab& itab, BSet& bset, bool calc)
{
Con& con = par.con();
const Tab& tab = par.tab();
const Set& set = par.set();
- // expected
Set set1(tab, set.m_rows);
- bset.filter(set, set1);
- LL3((par.m_verify ? "scanverify " : "scanread ") << itab.m_name << " bounds=" << bset.m_bvals);
- LL4(bset);
+ if (calc) {
+ while (true) {
+ bset.calc(par);
+ bset.filter(par, set, set1);
+ unsigned n = set1.count();
+ // prefer proper subset
+ if (0 < n && n < set.m_rows)
+ break;
+ if (urandom(3) == 0)
+ break;
+ set1.reset();
+ }
+ } else {
+ bset.filter(par, set, set1);
+ }
+ LL3("scanread " << itab.m_name << " " << bset << " lockmode=" << par.m_lockmode << " expect=" << set1.count() << " verify=" << par.m_verify << " ordered=" << par.m_ordered << " descending=" << par.m_descending);
Set set2(tab, set.m_rows);
CHK(con.startTransaction() == 0);
- CHK(con.getNdbScanOperation(itab, tab) == 0);
- CHK(con.openScanRead(par.m_scanbat, par.m_scanpar) == 0);
+ CHK(con.getNdbIndexScanOperation(itab, tab) == 0);
+ CHK(con.readIndexTuples(par) == 0);
CHK(bset.setbnd(par) == 0);
set2.getval(par);
CHK(con.executeScan() == 0);
+ unsigned n = 0;
+ bool deadlock = false;
while (1) {
int ret;
- CHK((ret = con.nextScanResult(true)) == 0 || ret == 1);
+ deadlock = par.m_deadlock;
+ CHK((ret = con.nextScanResult(true, deadlock)) == 0 || ret == 1);
if (ret == 1)
break;
+ if (deadlock) {
+ LL1("scanreadindex: stop on deadlock");
+ break;
+ }
unsigned i = (unsigned)-1;
CHK(set2.getkey(par, &i) == 0);
- LL4("key " << i);
- CHK(set2.putval(i, par.m_dups) == 0);
- LL4("row " << set2.count() << ": " << *set2.m_row[i]);
+ CHK(set2.putval(i, par.m_dups, n) == 0);
+ LL4("key " << i << " row " << n << ": " << *set2.m_row[i]);
+ n++;
}
con.closeTransaction();
- if (par.m_verify)
- CHK(set1.verify(set2) == 0);
+ if (par.m_verify) {
+ CHK(set1.verify(par, set2) == 0);
+ if (par.m_ordered)
+ CHK(set2.verifyorder(par, itab, par.m_descending) == 0);
+ }
+ LL3("scanread " << itab.m_name << " done rows=" << n);
return 0;
}
@@ -2465,11 +3794,11 @@ scanreadindexfast(Par par, const ITab& itab, const BSet& bset, unsigned countche
Con& con = par.con();
const Tab& tab = par.tab();
const Set& set = par.set();
- LL3("scanfast " << itab.m_name << " bounds=" << bset.m_bvals);
+ LL3("scanfast " << itab.m_name << " " << bset);
LL4(bset);
CHK(con.startTransaction() == 0);
- CHK(con.getNdbScanOperation(itab, tab) == 0);
- CHK(con.openScanRead(par.m_scanbat, par.m_scanpar) == 0);
+ CHK(con.getNdbIndexScanOperation(itab, tab) == 0);
+ CHK(con.readIndexTuples(par) == 0);
CHK(bset.setbnd(par) == 0);
// get 1st column
NdbRecAttr* rec;
@@ -2489,13 +3818,71 @@ scanreadindexfast(Par par, const ITab& itab, const BSet& bset, unsigned countche
}
static int
+scanreadfilter(Par par, const ITab& itab, BSet& bset, bool calc)
+{
+ Con& con = par.con();
+ const Tab& tab = par.tab();
+ const Set& set = par.set();
+ Set set1(tab, set.m_rows);
+ if (calc) {
+ while (true) {
+ bset.calc(par);
+ bset.filter(par, set, set1);
+ unsigned n = set1.count();
+ // prefer proper subset
+ if (0 < n && n < set.m_rows)
+ break;
+ if (urandom(3) == 0)
+ break;
+ set1.reset();
+ }
+ } else {
+ bset.filter(par, set, set1);
+ }
+ LL3("scanfilter " << itab.m_name << " " << bset << " lockmode=" << par.m_lockmode << " expect=" << set1.count() << " verify=" << par.m_verify);
+ Set set2(tab, set.m_rows);
+ CHK(con.startTransaction() == 0);
+ CHK(con.getNdbScanOperation(tab) == 0);
+ CHK(con.readTuples(par) == 0);
+ CHK(bset.setflt(par) == 0);
+ set2.getval(par);
+ CHK(con.executeScan() == 0);
+ unsigned n = 0;
+ bool deadlock = false;
+ while (1) {
+ int ret;
+ deadlock = par.m_deadlock;
+ CHK((ret = con.nextScanResult(true, deadlock)) == 0 || ret == 1);
+ if (ret == 1)
+ break;
+ if (deadlock) {
+ LL1("scanfilter: stop on deadlock");
+ break;
+ }
+ unsigned i = (unsigned)-1;
+ CHK(set2.getkey(par, &i) == 0);
+ CHK(set2.putval(i, par.m_dups, n) == 0);
+ LL4("key " << i << " row " << n << ": " << *set2.m_row[i]);
+ n++;
+ }
+ con.closeTransaction();
+ if (par.m_verify) {
+ CHK(set1.verify(par, set2) == 0);
+ }
+ LL3("scanfilter " << itab.m_name << " done rows=" << n);
+ return 0;
+}
+
+static int
scanreadindex(Par par, const ITab& itab)
{
const Tab& tab = par.tab();
for (unsigned i = 0; i < par.m_subsubloop; i++) {
- BSet bset(tab, itab, par.m_rows);
- bset.calc(par);
- CHK(scanreadindex(par, itab, bset) == 0);
+ if (itab.m_type == ITab::OrderedIndex) {
+ BSet bset(tab, itab, par.m_rows);
+ CHK(scanreadfilter(par, itab, bset, true) == 0);
+ CHK(scanreadindex(par, itab, bset, true) == 0);
+ }
}
return 0;
}
@@ -2505,10 +3892,14 @@ scanreadindex(Par par)
{
const Tab& tab = par.tab();
for (unsigned i = 0; i < tab.m_itabs; i++) {
- if (! useindex(i))
+ if (tab.m_itab[i] == 0)
continue;
- const ITab& itab = tab.m_itab[i];
- CHK(scanreadindex(par, itab) == 0);
+ const ITab& itab = *tab.m_itab[i];
+ if (itab.m_type == ITab::OrderedIndex) {
+ CHK(scanreadindex(par, itab) == 0);
+ } else {
+ CHK(hashindexread(par, itab) == 0);
+ }
}
return 0;
}
@@ -2516,8 +3907,7 @@ scanreadindex(Par par)
static int
scanreadall(Par par)
{
- if (par.m_no < 11)
- CHK(scanreadtable(par) == 0);
+ CHK(scanreadtable(par) == 0);
CHK(scanreadindex(par) == 0);
return 0;
}
@@ -2537,7 +3927,7 @@ static int
timescanpkindex(Par par)
{
const Tab& tab = par.tab();
- const ITab& itab = tab.m_itab[0]; // 1st index is on PK
+ const ITab& itab = *tab.m_itab[0]; // 1st index is on PK
BSet bset(tab, itab, par.m_rows);
par.tmr().on();
CHK(scanreadindexfast(par, itab, bset, par.m_totrows) == 0);
@@ -2561,7 +3951,7 @@ static int
timepkreadindex(Par par)
{
const Tab& tab = par.tab();
- const ITab& itab = tab.m_itab[0]; // 1st index is on PK
+ const ITab& itab = *tab.m_itab[0]; // 1st index is on PK
BSet bset(tab, itab, par.m_rows);
unsigned count = par.m_samples;
if (count == 0)
@@ -2586,9 +3976,10 @@ scanupdatetable(Par par)
Set& set = par.set();
LL3("scan update " << tab.m_name);
Set set2(tab, set.m_rows);
+ par.m_lockmode = NdbOperation::LM_Exclusive;
CHK(con.startTransaction() == 0);
CHK(con.getNdbScanOperation(tab) == 0);
- CHK(con.openScanExclusive(par.m_scanbat, par.m_scanpar) == 0);
+ CHK(con.readTuples(par) == 0);
set2.getval(par);
CHK(con.executeScan() == 0);
unsigned count = 0;
@@ -2598,6 +3989,7 @@ scanupdatetable(Par par)
CHK(con2.startTransaction() == 0);
Lst lst;
bool deadlock = false;
+ bool nospace = false;
while (1) {
int ret;
deadlock = par.m_deadlock;
@@ -2605,7 +3997,7 @@ scanupdatetable(Par par)
if (ret == 1)
break;
if (deadlock) {
- LL1("scanupdatetable: stop on deadlock");
+ LL1("scanupdatetable: stop on deadlock [at 1]");
break;
}
if (par.m_scanstop != 0 && urandom(par.m_scanstop) == 0) {
@@ -2617,13 +4009,14 @@ scanupdatetable(Par par)
CHK(set2.getkey(par, &i) == 0);
const Row& row = *set.m_row[i];
set.lock();
- if (! set.exist(i) || set.pending(i)) {
+ if (! set.exist(i) || set.pending(i, Row::AnyOp)) {
LL4("scan update " << tab.m_name << ": skip: " << row);
} else {
CHKTRY(set2.putval(i, false) == 0, set.unlock());
CHKTRY(con.updateScanTuple(con2) == 0, set.unlock());
Par par2 = par;
par2.m_con = &con2;
+ set.dbsave(i);
set.calc(par, i);
CHKTRY(set.setrow(par2, i) == 0, set.unlock());
LL4("scan update " << tab.m_name << ": " << row);
@@ -2631,10 +4024,16 @@ scanupdatetable(Par par)
}
set.unlock();
if (lst.cnt() == par.m_batch) {
- CHK(con2.execute(Commit) == 0);
+ deadlock = par.m_deadlock;
+ CHK(con2.execute(Commit, deadlock, nospace) == 0);
+ if (deadlock) {
+ LL1("scanupdatetable: stop on deadlock [at 2]");
+ goto out;
+ }
con2.closeTransaction();
set.lock();
set.notpending(lst);
+ set.dbdiscard(lst);
set.unlock();
count += lst.cnt();
lst.reset();
@@ -2642,10 +4041,16 @@ scanupdatetable(Par par)
}
CHK((ret = con.nextScanResult(false)) == 0 || ret == 1 || ret == 2);
if (ret == 2 && lst.cnt() != 0) {
- CHK(con2.execute(Commit) == 0);
+ deadlock = par.m_deadlock;
+ CHK(con2.execute(Commit, deadlock, nospace) == 0);
+ if (deadlock) {
+ LL1("scanupdatetable: stop on deadlock [at 3]");
+ goto out;
+ }
con2.closeTransaction();
set.lock();
set.notpending(lst);
+ set.dbdiscard(lst);
set.unlock();
count += lst.cnt();
lst.reset();
@@ -2655,6 +4060,7 @@ scanupdatetable(Par par)
if (ret == 1)
break;
}
+out:
con2.closeTransaction();
LL3("scan update " << tab.m_name << " rows updated=" << count);
con.closeTransaction();
@@ -2669,9 +4075,10 @@ scanupdateindex(Par par, const ITab& itab, const BSet& bset)
Set& set = par.set();
LL3("scan update " << itab.m_name);
Set set2(tab, set.m_rows);
+ par.m_lockmode = NdbOperation::LM_Exclusive;
CHK(con.startTransaction() == 0);
- CHK(con.getNdbScanOperation(itab, tab) == 0);
- CHK(con.openScanExclusive(par.m_scanbat, par.m_scanpar) == 0);
+ CHK(con.getNdbIndexScanOperation(itab, tab) == 0);
+ CHK(con.readTuples(par) == 0);
CHK(bset.setbnd(par) == 0);
set2.getval(par);
CHK(con.executeScan() == 0);
@@ -2682,6 +4089,7 @@ scanupdateindex(Par par, const ITab& itab, const BSet& bset)
CHK(con2.startTransaction() == 0);
Lst lst;
bool deadlock = false;
+ bool nospace = false;
while (1) {
int ret;
deadlock = par.m_deadlock;
@@ -2689,7 +4097,7 @@ scanupdateindex(Par par, const ITab& itab, const BSet& bset)
if (ret == 1)
break;
if (deadlock) {
- LL1("scanupdateindex: stop on deadlock");
+ LL1("scanupdateindex: stop on deadlock [at 1]");
break;
}
if (par.m_scanstop != 0 && urandom(par.m_scanstop) == 0) {
@@ -2701,13 +4109,14 @@ scanupdateindex(Par par, const ITab& itab, const BSet& bset)
CHK(set2.getkey(par, &i) == 0);
const Row& row = *set.m_row[i];
set.lock();
- if (! set.exist(i) || set.pending(i)) {
+ if (! set.exist(i) || set.pending(i, Row::AnyOp)) {
LL4("scan update " << itab.m_name << ": skip: " << row);
} else {
CHKTRY(set2.putval(i, par.m_dups) == 0, set.unlock());
CHKTRY(con.updateScanTuple(con2) == 0, set.unlock());
Par par2 = par;
par2.m_con = &con2;
+ set.dbsave(i);
set.calc(par, i);
CHKTRY(set.setrow(par2, i) == 0, set.unlock());
LL4("scan update " << itab.m_name << ": " << row);
@@ -2715,10 +4124,16 @@ scanupdateindex(Par par, const ITab& itab, const BSet& bset)
}
set.unlock();
if (lst.cnt() == par.m_batch) {
- CHK(con2.execute(Commit) == 0);
+ deadlock = par.m_deadlock;
+ CHK(con2.execute(Commit, deadlock, nospace) == 0);
+ if (deadlock) {
+ LL1("scanupdateindex: stop on deadlock [at 2]");
+ goto out;
+ }
con2.closeTransaction();
set.lock();
set.notpending(lst);
+ set.dbdiscard(lst);
set.unlock();
count += lst.cnt();
lst.reset();
@@ -2726,10 +4141,16 @@ scanupdateindex(Par par, const ITab& itab, const BSet& bset)
}
CHK((ret = con.nextScanResult(false)) == 0 || ret == 1 || ret == 2);
if (ret == 2 && lst.cnt() != 0) {
- CHK(con2.execute(Commit) == 0);
+ deadlock = par.m_deadlock;
+ CHK(con2.execute(Commit, deadlock, nospace) == 0);
+ if (deadlock) {
+ LL1("scanupdateindex: stop on deadlock [at 3]");
+ goto out;
+ }
con2.closeTransaction();
set.lock();
set.notpending(lst);
+ set.dbdiscard(lst);
set.unlock();
count += lst.cnt();
lst.reset();
@@ -2737,6 +4158,7 @@ scanupdateindex(Par par, const ITab& itab, const BSet& bset)
}
} while (ret == 0);
}
+out:
con2.closeTransaction();
LL3("scan update " << itab.m_name << " rows updated=" << count);
con.closeTransaction();
@@ -2748,9 +4170,13 @@ scanupdateindex(Par par, const ITab& itab)
{
const Tab& tab = par.tab();
for (unsigned i = 0; i < par.m_subsubloop; i++) {
- BSet bset(tab, itab, par.m_rows);
- bset.calc(par);
- CHK(scanupdateindex(par, itab, bset) == 0);
+ if (itab.m_type == ITab::OrderedIndex) {
+ BSet bset(tab, itab, par.m_rows);
+ bset.calc(par);
+ CHK(scanupdateindex(par, itab, bset) == 0);
+ } else {
+ CHK(hashindexupdate(par, itab) == 0);
+ }
}
return 0;
}
@@ -2760,9 +4186,9 @@ scanupdateindex(Par par)
{
const Tab& tab = par.tab();
for (unsigned i = 0; i < tab.m_itabs; i++) {
- if (! useindex(i))
+ if (tab.m_itab[i] == 0)
continue;
- const ITab& itab = tab.m_itab[i];
+ const ITab& itab = *tab.m_itab[i];
CHK(scanupdateindex(par, itab) == 0);
}
return 0;
@@ -2788,6 +4214,7 @@ readverify(Par par)
LL2("skip verify in this version"); // implement in 5.0 version
par.m_verify = false;
}
+ par.m_lockmode = NdbOperation::LM_CommittedRead;
CHK(pkread(par) == 0);
CHK(scanreadall(par) == 0);
return 0;
@@ -2799,43 +4226,106 @@ readverifyfull(Par par)
if (par.m_noverify)
return 0;
par.m_verify = true;
- if (par.m_no == 0)
+ if (par.m_abortpct != 0) {
+ LL2("skip verify in this version"); // implement in 5.0 version
+ par.m_verify = false;
+ }
+ par.m_lockmode = NdbOperation::LM_CommittedRead;
+ const Tab& tab = par.tab();
+ if (par.m_no == 0) {
+ // thread 0 scans table
CHK(scanreadtable(par) == 0);
- else {
- const Tab& tab = par.tab();
- unsigned i = par.m_no;
- if (i <= tab.m_itabs && useindex(i)) {
- const ITab& itab = tab.m_itab[i - 1];
+ // once more via tup scan
+ par.m_tupscan = true;
+ if (NDB_VERSION < MAKE_VERSION(5, 1, 0)) //TODO
+ CHK(scanreadtable(par) == 0);
+ }
+ // each thread scans different indexes
+ for (unsigned i = 0; i < tab.m_itabs; i++) {
+ if (i % par.m_threads != par.m_no)
+ continue;
+ if (tab.m_itab[i] == 0)
+ continue;
+ const ITab& itab = *tab.m_itab[i];
+ if (itab.m_type == ITab::OrderedIndex) {
BSet bset(tab, itab, par.m_rows);
- CHK(scanreadindex(par, itab, bset) == 0);
+ CHK(scanreadindex(par, itab, bset, false) == 0);
+ } else {
+ CHK(hashindexread(par, itab) == 0);
}
}
return 0;
}
static int
+readverifyindex(Par par)
+{
+ if (par.m_noverify)
+ return 0;
+ par.m_verify = true;
+ par.m_lockmode = NdbOperation::LM_CommittedRead;
+ unsigned sel = urandom(10);
+ if (sel < 9) {
+ par.m_ordered = true;
+ par.m_descending = (sel < 5);
+ }
+ CHK(scanreadindex(par) == 0);
+ return 0;
+}
+
+static int
pkops(Par par)
{
+ const Tab& tab = par.tab();
par.m_randomkey = true;
for (unsigned i = 0; i < par.m_subsubloop; i++) {
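+    // sometimes pick a unique hash index; if none is picked (j == m_itabs) plain PK ops are used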
+ unsigned j = 0;
+ while (j < tab.m_itabs) {
+ if (tab.m_itab[j] != 0) {
+ const ITab& itab = *tab.m_itab[j];
+ if (itab.m_type == ITab::UniqueHashIndex && urandom(5) == 0)
+ break;
+ }
+ j++;
+ }
unsigned sel = urandom(10);
if (par.m_slno % 2 == 0) {
// favor insert
if (sel < 8) {
CHK(pkinsert(par) == 0);
} else if (sel < 9) {
- CHK(pkupdate(par) == 0);
+ if (j == tab.m_itabs)
+ CHK(pkupdate(par) == 0);
+ else {
+ const ITab& itab = *tab.m_itab[j];
+ CHK(hashindexupdate(par, itab) == 0);
+ }
} else {
- CHK(pkdelete(par) == 0);
+ if (j == tab.m_itabs)
+ CHK(pkdelete(par) == 0);
+ else {
+ const ITab& itab = *tab.m_itab[j];
+ CHK(hashindexdelete(par, itab) == 0);
+ }
}
} else {
// favor delete
if (sel < 1) {
CHK(pkinsert(par) == 0);
} else if (sel < 2) {
- CHK(pkupdate(par) == 0);
+ if (j == tab.m_itabs)
+ CHK(pkupdate(par) == 0);
+ else {
+ const ITab& itab = *tab.m_itab[j];
+ CHK(hashindexupdate(par, itab) == 0);
+ }
} else {
- CHK(pkdelete(par) == 0);
+ if (j == tab.m_itabs)
+ CHK(pkdelete(par) == 0);
+ else {
+ const ITab& itab = *tab.m_itab[j];
+ CHK(hashindexdelete(par, itab) == 0);
+ }
}
}
}
@@ -2846,6 +4336,7 @@ static int
pkupdatescanread(Par par)
{
par.m_dups = true;
+ par.m_deadlock = true;
unsigned sel = urandom(10);
if (sel < 5) {
CHK(pkupdate(par) == 0);
@@ -2854,6 +4345,10 @@ pkupdatescanread(Par par)
CHK(scanreadtable(par) == 0);
} else {
par.m_verify = false;
+ if (sel < 8) {
+ par.m_ordered = true;
+ par.m_descending = (sel < 7);
+ }
CHK(scanreadindex(par) == 0);
}
return 0;
@@ -2873,6 +4368,10 @@ mixedoperations(Par par)
} else if (sel < 6) {
CHK(scanupdatetable(par) == 0);
} else {
+ if (sel < 8) {
+ par.m_ordered = true;
+ par.m_descending = (sel < 7);
+ }
CHK(scanupdateindex(par) == 0);
}
return 0;
@@ -3134,6 +4633,24 @@ tbuild(Par par)
}
static int
+tindexscan(Par par)
+{
+ RUNSTEP(par, droptable, ST);
+ RUNSTEP(par, createtable, ST);
+ RUNSTEP(par, invalidatetable, MT);
+ RUNSTEP(par, createindex, ST);
+ RUNSTEP(par, invalidateindex, MT);
+ RUNSTEP(par, pkinsert, MT);
+ RUNSTEP(par, readverifyfull, MT);
+ for (par.m_slno = 0; par.m_slno < par.m_subloop; par.m_slno++) {
+ LL4("subloop " << par.m_slno);
+ RUNSTEP(par, readverifyindex, MT);
+ }
+ return 0;
+}
+
+static int
tpkops(Par par)
{
RUNSTEP(par, droptable, ST);
@@ -3266,6 +4783,10 @@ ttimemaint(Par par)
static int
ttimescan(Par par)
{
+ if (par.tab().m_itab[0] == 0) {
+ LL1("ttimescan - no index 0, skipped");
+ return 0;
+ }
Tmr t1, t2;
RUNSTEP(par, droptable, ST);
RUNSTEP(par, createtable, ST);
@@ -3288,6 +4809,10 @@ ttimescan(Par par)
static int
ttimepkread(Par par)
{
+ if (par.tab().m_itab[0] == 0) {
+    LL1("ttimepkread - no index 0, skipped");
+ return 0;
+ }
Tmr t1, t2;
RUNSTEP(par, droptable, ST);
RUNSTEP(par, createtable, ST);
@@ -3328,7 +4853,7 @@ struct TCase {
static const TCase
tcaselist[] = {
TCase("a", tbuild, "index build"),
- // "b" in 5.0
+ TCase("b", tindexscan, "index scans"),
TCase("c", tpkops, "pk operations"),
TCase("d", tpkopsread, "pk operations and scan reads"),
TCase("e", tmixedops, "pk operations and scan operations"),
@@ -3357,13 +4882,32 @@ printcases()
static void
printtables()
{
- ndbout << "tables and indexes (X1 is on table PK):" << endl;
+ Par par(g_opt);
+ makebuiltintables(par);
+ ndbout << "tables and indexes (x=ordered z=hash x0=on pk):" << endl;
for (unsigned j = 0; j < tabcount; j++) {
- const Tab& tab = tablist[j];
- ndbout << " " << tab.m_name;
+ if (tablist[j] == 0)
+ continue;
+ const Tab& tab = *tablist[j];
+ const char* tname = tab.m_name;
+ ndbout << " " << tname;
for (unsigned i = 0; i < tab.m_itabs; i++) {
- const ITab& itab = tab.m_itab[i];
- ndbout << " " << itab.m_name;
+ if (tab.m_itab[i] == 0)
+ continue;
+ const ITab& itab = *tab.m_itab[i];
+ const char* iname = itab.m_name;
+ if (strncmp(tname, iname, strlen(tname)) == 0)
+ iname += strlen(tname);
+ ndbout << " " << iname;
+ ndbout << "(";
+ for (unsigned k = 0; k < itab.m_icols; k++) {
+ if (k != 0)
+ ndbout << ",";
+ const ICol& icol = *itab.m_icol[k];
+ const Col& col = icol.m_col;
+ ndbout << col.m_name;
+ }
+ ndbout << ")";
}
ndbout << endl;
}
@@ -3373,15 +4917,29 @@ static int
runtest(Par par)
{
LL1("start");
- if (par.m_seed != 0)
+ if (par.m_seed == -1) {
+ // good enough for daily run
+ unsigned short seed = (getpid() ^ time(0));
+ LL1("random seed: " << seed);
+ srandom((unsigned)seed);
+ } else if (par.m_seed != 0) {
+ LL1("random seed: " << par.m_seed);
srandom(par.m_seed);
+ } else {
+ LL1("random seed: loop number");
+ }
+ // cs
assert(par.m_csname != 0);
- CHARSET_INFO* cs;
- CHK((cs = get_charset_by_name(par.m_csname, MYF(0))) != 0 || (cs = get_charset_by_csname(par.m_csname, MY_CS_PRIMARY, MYF(0))) != 0);
- par.m_cs = cs;
+ if (strcmp(par.m_csname, "random") != 0) {
+ CHARSET_INFO* cs;
+ CHK((cs = get_charset_by_name(par.m_csname, MYF(0))) != 0 || (cs = get_charset_by_csname(par.m_csname, MY_CS_PRIMARY, MYF(0))) != 0);
+ par.m_cs = cs;
+ }
+ // con
Con con;
CHK(con.connect() == 0);
par.m_con = &con;
+ // threads
g_thrlist = new Thr* [par.m_threads];
unsigned n;
for (n = 0; n < par.m_threads; n++) {
@@ -3400,16 +4958,18 @@ runtest(Par par)
const TCase& tcase = tcaselist[i];
if (par.m_case != 0 && strchr(par.m_case, tcase.m_name[0]) == 0)
continue;
+ makebuiltintables(par);
LL1("case " << tcase.m_name << " - " << tcase.m_desc);
for (unsigned j = 0; j < tabcount; j++) {
- if (! usetable(j))
+ if (tablist[j] == 0)
continue;
- const Tab& tab = tablist[j];
+ const Tab& tab = *tablist[j];
par.m_tab = &tab;
- delete par.m_set;
par.m_set = new Set(tab, par.m_totrows);
LL1("table " << tab.m_name);
CHK(tcase.m_func(par) == 0);
+ delete par.m_set;
+ par.m_set = 0;
}
}
}
@@ -3433,7 +4993,7 @@ NDB_COMMAND(testOIBasic, "testOIBasic", "testOIBasic", "testOIBasic", 65535)
{
ndb_init();
if (ndbout_mutex == NULL)
- ndbout_mutex= NdbMutex_Create();
+ ndbout_mutex = NdbMutex_Create();
while (++argv, --argc > 0) {
const char* arg = argv[0];
if (*arg != '-') {
@@ -3461,6 +5021,10 @@ NDB_COMMAND(testOIBasic, "testOIBasic", "testOIBasic", "testOIBasic", 65535)
continue;
}
}
+ if (strcmp(arg, "-collsp") == 0) {
+ g_opt.m_collsp = true;
+ continue;
+ }
if (strcmp(arg, "-core") == 0) {
g_opt.m_core = true;
continue;
@@ -3539,12 +5103,6 @@ NDB_COMMAND(testOIBasic, "testOIBasic", "testOIBasic", "testOIBasic", 65535)
continue;
}
}
- if (strcmp(arg, "-scanbat") == 0) {
- if (++argv, --argc > 0) {
- g_opt.m_scanbat = atoi(argv[0]);
- continue;
- }
- }
if (strcmp(arg, "-scanpar") == 0) {
if (++argv, --argc > 0) {
g_opt.m_scanpar = atoi(argv[0]);
@@ -3572,7 +5130,8 @@ NDB_COMMAND(testOIBasic, "testOIBasic", "testOIBasic", "testOIBasic", 65535)
if (strcmp(arg, "-threads") == 0) {
if (++argv, --argc > 0) {
g_opt.m_threads = atoi(argv[0]);
- continue;
+ if (1 <= g_opt.m_threads)
+ continue;
}
}
if (strcmp(arg, "-v") == 0) {
@@ -3589,7 +5148,7 @@ NDB_COMMAND(testOIBasic, "testOIBasic", "testOIBasic", "testOIBasic", 65535)
printhelp();
goto wrongargs;
}
- ndbout << "testOIBasic: unknown option " << arg;
+ ndbout << "testOIBasic: bad or unknown option " << arg;
goto usage;
}
{
@@ -3600,7 +5159,6 @@ NDB_COMMAND(testOIBasic, "testOIBasic", "testOIBasic", "testOIBasic", 65535)
delete g_ncc;
g_ncc = 0;
}
- // always exit with NDBT code
ok:
return NDBT_ProgramExit(NDBT_OK);
failed:
diff --git a/ndb/test/ndbapi/testPartitioning.cpp b/ndb/test/ndbapi/testPartitioning.cpp
new file mode 100644
index 00000000000..9d67c27354b
--- /dev/null
+++ b/ndb/test/ndbapi/testPartitioning.cpp
@@ -0,0 +1,430 @@
+/* Copyright (C) 2003 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#include <NDBT_Test.hpp>
+#include <NDBT_ReturnCodes.h>
+#include <HugoTransactions.hpp>
+#include <UtilTransactions.hpp>
+#include <NdbRestarter.hpp>
+
+#define GETNDB(ps) ((NDBT_NdbApiStep*)ps)->getNdb()
+
+static Uint32 max_dks = 0;
+
+static
+int
+run_drop_table(NDBT_Context* ctx, NDBT_Step* step)
+{
+ NdbDictionary::Dictionary* dict = GETNDB(step)->getDictionary();
+ dict->dropTable(ctx->getTab()->getName());
+ return 0;
+}
+
+static
+int
+add_distribution_key(Ndb*, NdbDictionary::Table& tab, int when)
+{
+ switch(when){
+ case 0: // Before
+ break;
+ case 1: // After
+ return 0;
+ default:
+ return 0;
+ }
+
+ int keys = tab.getNoOfPrimaryKeys();
+  int dks = (2 * keys + 2) / 3;
+  dks = (dks > max_dks ? max_dks : dks);
+ int cnt = 0;
+
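+  // PK columns with a character set are not counted as distribution key candidates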
+ for(unsigned i = 0; i<tab.getNoOfColumns(); i++)
+ if(tab.getColumn(i)->getPrimaryKey() &&
+ tab.getColumn(i)->getCharset() != 0)
+ keys--;
+
+ Uint32 max = NDB_MAX_NO_OF_ATTRIBUTES_IN_KEY - tab.getNoOfPrimaryKeys();
+
+ if(max_dks < max)
+ max = max_dks;
+
+ if(keys <= 1 && max > 0)
+ {
+ dks = 1 + (rand() % max);
+ ndbout_c("%s pks: %d dks: %d", tab.getName(), keys, dks);
+ while(dks--)
+ {
+ NdbDictionary::Column col;
+ BaseString name;
+ name.assfmt("PK_DK_%d", dks);
+ col.setName(name.c_str());
+ col.setType(NdbDictionary::Column::Unsigned);
+ col.setLength(1);
+ col.setNullable(false);
+ col.setPrimaryKey(true);
+ col.setDistributionKey(true);
+ tab.addColumn(col);
+ }
+ }
+ else
+ {
+ for(unsigned i = 0; i<tab.getNoOfColumns(); i++)
+ {
+ NdbDictionary::Column* col = tab.getColumn(i);
+ if(col->getPrimaryKey() && col->getCharset() == 0)
+ {
+ if(dks >= keys || (rand() % 100) > 50)
+ {
+ col->setDistributionKey(true);
+ dks--;
+ }
+ keys--;
+ }
+ }
+ }
+ ndbout << (NDBT_Table&)tab << endl;
+
+ return 0;
+}
+
+static int
+run_create_table(NDBT_Context* ctx, NDBT_Step* step)
+{
+ max_dks = ctx->getProperty("distributionkey", (unsigned)0);
+
+ if(NDBT_Tables::createTable(GETNDB(step),
+ ctx->getTab()->getName(),
+ false, false,
+ max_dks?add_distribution_key:0) == NDBT_OK)
+ {
+ return NDBT_OK;
+ }
+
+ if(GETNDB(step)->getDictionary()->getNdbError().code == 745)
+ return NDBT_OK;
+
+ return NDBT_FAILED;
+}
+
+static int
+run_create_pk_index(NDBT_Context* ctx, NDBT_Step* step){
+ bool orderedIndex = ctx->getProperty("OrderedIndex", (unsigned)0);
+
+ Ndb* pNdb = GETNDB(step);
+ const NdbDictionary::Table *pTab =
+ pNdb->getDictionary()->getTable(ctx->getTab()->getName());
+
+ if(!pTab)
+ return NDBT_OK;
+
+ bool logged = ctx->getProperty("LoggedIndexes", orderedIndex ? 0 : 1);
+
+ BaseString name;
+ name.assfmt("IND_%s_PK_%c", pTab->getName(), orderedIndex ? 'O' : 'U');
+
+ // Create index
+ if (orderedIndex)
+ ndbout << "Creating " << ((logged)?"logged ": "temporary ") << "ordered index "
+ << name.c_str() << " (";
+ else
+ ndbout << "Creating " << ((logged)?"logged ": "temporary ") << "unique index "
+ << name.c_str() << " (";
+
+ NdbDictionary::Index pIdx(name.c_str());
+ pIdx.setTable(pTab->getName());
+ if (orderedIndex)
+ pIdx.setType(NdbDictionary::Index::OrderedIndex);
+ else
+ pIdx.setType(NdbDictionary::Index::UniqueHashIndex);
+ for (int c = 0; c< pTab->getNoOfColumns(); c++){
+ const NdbDictionary::Column * col = pTab->getColumn(c);
+ if(col->getPrimaryKey()){
+ pIdx.addIndexColumn(col->getName());
+ ndbout << col->getName() <<" ";
+ }
+ }
+
+ pIdx.setStoredIndex(logged);
+ ndbout << ") ";
+ if (pNdb->getDictionary()->createIndex(pIdx) != 0){
+ ndbout << "FAILED!" << endl;
+ const NdbError err = pNdb->getDictionary()->getNdbError();
+ ERR(err);
+ return NDBT_FAILED;
+ }
+
+ ndbout << "OK!" << endl;
+ return NDBT_OK;
+}
+
+static int run_create_pk_index_drop(NDBT_Context* ctx, NDBT_Step* step){
+ bool orderedIndex = ctx->getProperty("OrderedIndex", (unsigned)0);
+
+ Ndb* pNdb = GETNDB(step);
+ const NdbDictionary::Table *pTab =
+ pNdb->getDictionary()->getTable(ctx->getTab()->getName());
+
+ if(!pTab)
+ return NDBT_OK;
+
+ BaseString name;
+ name.assfmt("IND_%s_PK_%c", pTab->getName(), orderedIndex ? 'O' : 'U');
+
+ ndbout << "Dropping index " << name.c_str() << " ";
+ if (pNdb->getDictionary()->dropIndex(name.c_str(), pTab->getName()) != 0){
+ ndbout << "FAILED!" << endl;
+ ERR(pNdb->getDictionary()->getNdbError());
+ return NDBT_FAILED;
+ } else {
+ ndbout << "OK!" << endl;
+ }
+
+ return NDBT_OK;
+}
+
+static int
+run_tests(Ndb* p_ndb, HugoTransactions& hugoTrans, int records)
+{
+ if (hugoTrans.loadTable(p_ndb, records) != 0)
+ {
+ return NDBT_FAILED;
+ }
+
+ if(hugoTrans.pkReadRecords(p_ndb, records) != 0)
+ {
+ return NDBT_FAILED;
+ }
+
+ if(hugoTrans.pkUpdateRecords(p_ndb, records) != 0)
+ {
+ return NDBT_FAILED;
+ }
+
+ if(hugoTrans.pkDelRecords(p_ndb, records) != 0)
+ {
+ return NDBT_FAILED;
+ }
+
+ if (hugoTrans.loadTable(p_ndb, records) != 0)
+ {
+ return NDBT_FAILED;
+ }
+
+ if(hugoTrans.scanUpdateRecords(p_ndb, records) != 0)
+ {
+ return NDBT_FAILED;
+ }
+
+ Uint32 abort = 23;
+ for(Uint32 j = 0; j<5; j++){
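+    // scan parallelism runs through 0, 1, 6, 9, 12 over the five rounds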
+ Uint32 parallelism = (j == 1 ? 1 : j * 3);
+ ndbout_c("parallelism: %d", parallelism);
+ if (hugoTrans.scanReadRecords(p_ndb, records, abort, parallelism,
+ NdbOperation::LM_Read) != 0)
+ {
+ return NDBT_FAILED;
+ }
+ if (hugoTrans.scanReadRecords(p_ndb, records, abort, parallelism,
+ NdbOperation::LM_Exclusive) != 0)
+ {
+ return NDBT_FAILED;
+ }
+ if (hugoTrans.scanReadRecords(p_ndb, records, abort, parallelism,
+ NdbOperation::LM_CommittedRead) != 0)
+ {
+ return NDBT_FAILED;
+ }
+ }
+
+ if(hugoTrans.clearTable(p_ndb, records) != 0)
+ {
+ return NDBT_FAILED;
+ }
+
+ return 0;
+}
+
+static int
+run_pk_dk(NDBT_Context* ctx, NDBT_Step* step)
+{
+ Ndb* p_ndb = GETNDB(step);
+ int records = ctx->getNumRecords();
+ const NdbDictionary::Table *tab =
+ p_ndb->getDictionary()->getTable(ctx->getTab()->getName());
+
+ if(!tab)
+ return NDBT_OK;
+
+ HugoTransactions hugoTrans(*tab);
+
+ return run_tests(p_ndb, hugoTrans, records);
+}
+
+int
+run_index_dk(NDBT_Context* ctx, NDBT_Step* step)
+{
+ Ndb* p_ndb = GETNDB(step);
+ int records = ctx->getNumRecords();
+ const NdbDictionary::Table *pTab =
+ p_ndb->getDictionary()->getTable(ctx->getTab()->getName());
+
+ if(!pTab)
+ return NDBT_OK;
+
+ bool orderedIndex = ctx->getProperty("OrderedIndex", (unsigned)0);
+
+ BaseString name;
+ name.assfmt("IND_%s_PK_%c", pTab->getName(), orderedIndex ? 'O' : 'U');
+
+ const NdbDictionary::Index * idx =
+ p_ndb->getDictionary()->getIndex(name.c_str(), pTab->getName());
+
+ if(!idx)
+ {
+    ndbout << "Failed to retrieve index: " << name.c_str() << endl;
+ return NDBT_FAILED;
+ }
+
+ HugoTransactions hugoTrans(*pTab, idx);
+
+ return run_tests(p_ndb, hugoTrans, records);
+}
+
+static int
+run_startHint(NDBT_Context* ctx, NDBT_Step* step)
+{
+ Ndb* p_ndb = GETNDB(step);
+ int records = ctx->getNumRecords();
+ const NdbDictionary::Table *tab =
+ p_ndb->getDictionary()->getTable(ctx->getTab()->getName());
+
+ if(!tab)
+ return NDBT_OK;
+
+ HugoTransactions hugoTrans(*tab);
+ if (hugoTrans.loadTable(p_ndb, records) != 0)
+ {
+ return NDBT_FAILED;
+ }
+
+ NdbRestarter restarter;
+ if(restarter.insertErrorInAllNodes(8050) != 0)
+ return NDBT_FAILED;
+
+ HugoCalculator dummy(*tab);
+ int result = NDBT_OK;
+ for(int i = 0; i<records && result == NDBT_OK; i++)
+ {
+ char buffer[8000];
+ char* start= buffer + (rand() & 7);
+ char* pos= start;
+
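+    // pack each partition key column value (4-byte aligned) into the hint buffer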
+ for(int j = 0; j<tab->getNoOfColumns(); j++)
+ {
+ if(tab->getColumn(j)->getPartitionKey())
+ {
+        ndbout_c("%s", tab->getColumn(j)->getName());
+ int sz = tab->getColumn(j)->getSizeInBytes();
+ int aligned_size = 4 * ((sz + 3) >> 2);
+ memset(pos, 0, aligned_size);
+ dummy.calcValue(i, j, 0, pos, sz);
+ pos += aligned_size;
+ }
+ }
+ // Now we have the pk
+ NdbTransaction* pTrans= p_ndb->startTransaction(tab, start,(pos - start));
+ HugoOperations ops(*tab);
+ ops.setTransaction(pTrans);
+ if(ops.pkReadRecord(p_ndb, i, 1) != NDBT_OK)
+ {
+ result = NDBT_FAILED;
+ break;
+ }
+
+ if(ops.execute_Commit(p_ndb) != 0)
+ {
+ result = NDBT_FAILED;
+ break;
+ }
+
+ ops.closeTransaction(p_ndb);
+ }
+ restarter.insertErrorInAllNodes(0);
+ return result;
+}
+
+
+NDBT_TESTSUITE(testPartitioning);
+TESTCASE("pk_dk",
+ "Primary key operations with distribution key")
+{
+ TC_PROPERTY("distributionkey", ~0);
+ INITIALIZER(run_drop_table);
+ INITIALIZER(run_create_table);
+ INITIALIZER(run_pk_dk);
+ INITIALIZER(run_drop_table);
+}
+TESTCASE("hash_index_dk",
+         "Unique index operations with distribution key")
+{
+ TC_PROPERTY("distributionkey", ~0);
+ TC_PROPERTY("OrderedIndex", (unsigned)0);
+ INITIALIZER(run_drop_table);
+ INITIALIZER(run_create_table);
+ INITIALIZER(run_create_pk_index);
+ INITIALIZER(run_index_dk);
+ INITIALIZER(run_create_pk_index_drop);
+ INITIALIZER(run_drop_table);
+}
+TESTCASE("ordered_index_dk",
+         "Ordered index operations with distribution key")
+{
+ TC_PROPERTY("distributionkey", (unsigned)1);
+ TC_PROPERTY("OrderedIndex", (unsigned)1);
+ INITIALIZER(run_drop_table);
+ INITIALIZER(run_create_table);
+ INITIALIZER(run_create_pk_index);
+ INITIALIZER(run_index_dk);
+ INITIALIZER(run_create_pk_index_drop);
+ INITIALIZER(run_drop_table);
+}
+TESTCASE("startTransactionHint",
+         "Test startTransactionHint without distribution key")
+{
+ TC_PROPERTY("distributionkey", (unsigned)0);
+ INITIALIZER(run_drop_table);
+ INITIALIZER(run_create_table);
+ INITIALIZER(run_startHint);
+ INITIALIZER(run_drop_table);
+}
+TESTCASE("startTransactionHint_dk",
+ "Test startTransactionHint with distribution key")
+{
+ TC_PROPERTY("distributionkey", (unsigned)~0);
+ INITIALIZER(run_drop_table);
+ INITIALIZER(run_create_table);
+ INITIALIZER(run_startHint);
+ INITIALIZER(run_drop_table);
+}
+NDBT_TESTSUITE_END(testPartitioning);
+
+int main(int argc, const char** argv){
+ ndb_init();
+ testPartitioning.setCreateTable(false);
+ return testPartitioning.execute(argc, argv);
+}
+
+
+
diff --git a/ndb/test/ndbapi/testReadPerf.cpp b/ndb/test/ndbapi/testReadPerf.cpp
index 3adcb5a2d9b..ba5f3c4232d 100644
--- a/ndb/test/ndbapi/testReadPerf.cpp
+++ b/ndb/test/ndbapi/testReadPerf.cpp
@@ -119,7 +119,13 @@ main(int argc, const char** argv){
myRandom48Init(NdbTick_CurrentMillisecond());
memset(g_times, 0, sizeof(g_times));
- g_ndb = new Ndb("TEST_DB");
+ Ndb_cluster_connection con;
+ if(con.connect(12, 5, 1))
+ {
+ return NDBT_ProgramExit(NDBT_FAILED);
+ }
+
+ g_ndb = new Ndb(&con, "TEST_DB");
if(g_ndb->init() != 0){
g_err << "init() failed" << endl;
goto error;
@@ -266,7 +272,6 @@ run_read(){
NdbScanOperation * pSp;
NdbIndexOperation * pUp;
NdbIndexScanOperation * pIp;
- NdbResultSet * rs = (NdbResultSet*)~0;
Uint32 start_row = rand() % (rows - range);
Uint32 stop_row = start_row + range;
@@ -319,27 +324,27 @@ run_read(){
}
break;
case 4:
- pOp = pIp = pTrans->getNdbIndexScanOperation(g_ordered,g_table);
- rs = pIp->readTuples(NdbScanOperation::LM_CommittedRead, 0, 0);
+ pOp = pSp = pIp = pTrans->getNdbIndexScanOperation(g_ordered,g_table);
+ pIp->readTuples(NdbScanOperation::LM_CommittedRead, 0, 0);
check = pIp->setBound(pk, NdbIndexScanOperation::BoundEQ, &start_row);
break;
case 5:
- pOp = pIp = pTrans->getNdbIndexScanOperation(g_ordered,g_table);
- rs = pIp->readTuples(NdbScanOperation::LM_CommittedRead, 0, 0);
+ pOp = pSp = pIp = pTrans->getNdbIndexScanOperation(g_ordered,g_table);
+ pIp->readTuples(NdbScanOperation::LM_CommittedRead, 0, 0);
check = pIp->setBound(pk, NdbIndexScanOperation::BoundLE, &start_row);
check = pIp->setBound(pk, NdbIndexScanOperation::BoundGT, &stop_row);
start_row = stop_row;
break;
case 6:
- pOp = pIp = pTrans->getNdbIndexScanOperation(g_ordered,g_table);
- rs = pIp->readTuples(NdbScanOperation::LM_CommittedRead, 0, 0, true);
+ pOp = pSp = pIp = pTrans->getNdbIndexScanOperation(g_ordered,g_table);
+ pIp->readTuples(NdbScanOperation::LM_CommittedRead, 0, 0, true);
check = pIp->setBound(pk, NdbIndexScanOperation::BoundLE, &start_row);
check = pIp->setBound(pk, NdbIndexScanOperation::BoundGT, &stop_row);
start_row = stop_row;
break;
case 7:
pOp = pSp = pTrans->getNdbScanOperation(g_table);
- rs = pSp->readTuples(NdbScanOperation::LM_CommittedRead, 0, 0);
+ pSp->readTuples(NdbScanOperation::LM_CommittedRead, 0, 0);
NdbScanFilter filter(pOp) ;
filter.begin(NdbScanFilter::AND);
filter.ge(pk, start_row);
@@ -355,7 +360,6 @@ run_read(){
ndbout << pTrans->getNdbError() << endl;
}
assert(check == 0);
- assert(rs);
for(int j = 0; j<g_tab->getNoOfColumns(); j++){
res = pOp->getValue(j);
@@ -368,7 +372,7 @@ run_read(){
}
assert(check == 0);
if(g_paramters[P_OPER].value >= 4){
- while((check = rs->nextResult(true)) == 0){
+ while((check = pSp->nextResult(true)) == 0){
cnt++;
}
@@ -377,13 +381,13 @@ run_read(){
return -1;
}
assert(check == 1);
- rs->close();
+ pSp->close();
}
}
assert(g_paramters[P_OPER].value < 4 || (cnt == range));
-
+
pTrans->close();
-
+
stop = NdbTick_CurrentMillisecond();
g_times[g_paramters[P_OPER].value] += (stop - start1);
return 0;
diff --git a/ndb/test/ndbapi/testSRBank.cpp b/ndb/test/ndbapi/testSRBank.cpp
index 5677f551da6..6d57724f4c6 100644
--- a/ndb/test/ndbapi/testSRBank.cpp
+++ b/ndb/test/ndbapi/testSRBank.cpp
@@ -23,7 +23,7 @@
#include "bank/Bank.hpp"
int runCreateBank(NDBT_Context* ctx, NDBT_Step* step){
- Bank bank;
+ Bank bank(ctx->m_cluster_connection);
int overWriteExisting = true;
if (bank.createAndLoadBank(overWriteExisting, 10) != NDBT_OK)
return NDBT_FAILED;
@@ -43,7 +43,7 @@ int runBankTimer(NDBT_Context* ctx, NDBT_Step* step){
ctx->incProperty("ThreadCount");
while (!ctx->isTestStopped())
{
- Bank bank;
+ Bank bank(ctx->m_cluster_connection);
while(!ctx->isTestStopped() && ctx->getProperty("SR") <= 1)
if(bank.performIncreaseTime(wait, yield) == NDBT_FAILED)
break;
@@ -63,7 +63,7 @@ int runBankTransactions(NDBT_Context* ctx, NDBT_Step* step){
ctx->incProperty("ThreadCount");
while (!ctx->isTestStopped())
{
- Bank bank;
+ Bank bank(ctx->m_cluster_connection);
while(!ctx->isTestStopped() && ctx->getProperty("SR") <= 1)
if(bank.performTransactions(0, 1) == NDBT_FAILED)
break;
@@ -83,7 +83,7 @@ int runBankGL(NDBT_Context* ctx, NDBT_Step* step){
ctx->incProperty("ThreadCount");
while (ctx->isTestStopped() == false)
{
- Bank bank;
+ Bank bank(ctx->m_cluster_connection);
while(!ctx->isTestStopped() && ctx->getProperty("SR") <= 1)
if (bank.performMakeGLs(yield) != NDBT_OK)
{
@@ -102,7 +102,7 @@ int runBankGL(NDBT_Context* ctx, NDBT_Step* step){
}
int runBankSum(NDBT_Context* ctx, NDBT_Step* step){
- Bank bank;
+ Bank bank(ctx->m_cluster_connection);
int wait = 2000; // Max ms between each sum of accounts
int yield = 1; // Loops before bank returns
int result = NDBT_OK;
@@ -160,7 +160,7 @@ int runSR(NDBT_Context* ctx, NDBT_Step* step)
{
int wait = 0;
int yield = 1;
- Bank bank;
+ Bank bank(ctx->m_cluster_connection);
if (bank.performSumAccounts(wait, yield) != 0)
{
ndbout << "bank.performSumAccounts FAILED" << endl;
@@ -183,7 +183,7 @@ int runSR(NDBT_Context* ctx, NDBT_Step* step)
}
int runDropBank(NDBT_Context* ctx, NDBT_Step* step){
- Bank bank;
+ Bank bank(ctx->m_cluster_connection);
if (bank.dropBank() != NDBT_OK)
return NDBT_FAILED;
return NDBT_OK;
diff --git a/ndb/test/ndbapi/testScan.cpp b/ndb/test/ndbapi/testScan.cpp
index f1018d29846..2802f1c950e 100644
--- a/ndb/test/ndbapi/testScan.cpp
+++ b/ndb/test/ndbapi/testScan.cpp
@@ -316,11 +316,16 @@ int runScanReadIndex(NDBT_Context* ctx, NDBT_Step* step){
while (pIdx && i<loops && !ctx->isTestStopped()) {
g_info << i << ": ";
bool sort = (rand() % 100) > 50 ? true : false;
+ bool desc = (rand() % 100) > 50 ? true : false;
+ desc = false; // random causes too many deadlocks
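+    // -(int)b is 0 or ~0, so each flag bit is selected without a branch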
+ int scan_flags =
+ (NdbScanOperation::SF_OrderBy & -(int)sort) |
+ (NdbScanOperation::SF_Descending & -(int)desc);
NdbOperation::LockMode lm = (NdbOperation::LockMode)(rand() % 3);
if (hugoTrans.scanReadRecords(GETNDB(step), pIdx,
records, abort, parallelism,
lm,
- sort) != 0){
+ scan_flags) != 0){
return NDBT_FAILED;
}
i++;
@@ -333,6 +338,8 @@ int runScanReadCommitted(NDBT_Context* ctx, NDBT_Step* step){
int records = ctx->getNumRecords();
int parallelism = ctx->getProperty("Parallelism", 240);
int abort = ctx->getProperty("AbortProb", 5);
+ bool tupScan = ctx->getProperty("TupScan");
+ int scan_flags = (NdbScanOperation::SF_TupScan & -(int)tupScan);
int i = 0;
HugoTransactions hugoTrans(*ctx->getTab());
@@ -340,7 +347,8 @@ int runScanReadCommitted(NDBT_Context* ctx, NDBT_Step* step){
g_info << i << ": ";
if (hugoTrans.scanReadRecords(GETNDB(step), records,
abort, parallelism,
- NdbOperation::LM_CommittedRead) != 0){
+ NdbOperation::LM_CommittedRead,
+ scan_flags) != 0){
return NDBT_FAILED;
}
i++;
@@ -1013,8 +1021,7 @@ int runScanRestart(NDBT_Context* ctx, NDBT_Step* step){
return NDBT_FAILED;
}
- NdbResultSet* rs = pOp->readTuples();
- if( rs == 0 ) {
+ if( pOp->readTuples() ) {
ERR(pCon->getNdbError());
return NDBT_FAILED;
}
@@ -1042,7 +1049,7 @@ int runScanRestart(NDBT_Context* ctx, NDBT_Step* step){
int res;
int row = 0;
- while(row < record && (res = rs->nextResult()) == 0) {
+ while(row < record && (res = pOp->nextResult()) == 0) {
if(calc.verifyRowValues(&tmpRow) != 0){
abort();
return NDBT_FAILED;
@@ -1055,14 +1062,14 @@ int runScanRestart(NDBT_Context* ctx, NDBT_Step* step){
return NDBT_FAILED;
}
g_info << " restarting" << endl;
- if((res = rs->restart()) != 0){
+ if((res = pOp->restart()) != 0){
ERR(pCon->getNdbError());
abort();
return NDBT_FAILED;
}
row = 0;
- while((res = rs->nextResult()) == 0) {
+ while((res = pOp->nextResult()) == 0) {
if(calc.verifyRowValues(&tmpRow) != 0){
abort();
return NDBT_FAILED;
@@ -1080,6 +1087,44 @@ int runScanRestart(NDBT_Context* ctx, NDBT_Step* step){
}
+int
+runScanParallelism(NDBT_Context* ctx, NDBT_Step* step){
+ int loops = ctx->getNumLoops() + 3;
+ int records = ctx->getNumRecords();
+ int abort = ctx->getProperty("AbortProb", 15);
+
+ Uint32 fib[] = { 1, 2 };
+ Uint32 parallelism = 0; // start with 0
+ int i = 0;
+ HugoTransactions hugoTrans(*ctx->getTab());
+ while (i<loops && !ctx->isTestStopped()) {
+ g_info << i << ": ";
+
+ if (hugoTrans.scanReadRecords(GETNDB(step), records, abort, parallelism,
+ NdbOperation::LM_Read) != 0){
+ return NDBT_FAILED;
+ }
+ if (hugoTrans.scanReadRecords(GETNDB(step), records, abort, parallelism,
+ NdbOperation::LM_Exclusive) != 0){
+ return NDBT_FAILED;
+ }
+ if (hugoTrans.scanReadRecords(GETNDB(step), records, abort, parallelism,
+ NdbOperation::LM_CommittedRead) != 0){
+ return NDBT_FAILED;
+ }
+ if (hugoTrans.scanUpdateRecords(GETNDB(step), records, abort, parallelism)
+ != 0){
+ return NDBT_FAILED;
+ }
+ i++;
+ parallelism = fib[0];
+ Uint32 next = fib[0] + fib[1];
+ fib[0] = fib[1];
+ fib[1] = next;
+ }
+ return NDBT_OK;
+}
+
NDBT_TESTSUITE(testScan);
TESTCASE("ScanRead",
"Verify scan requirement: It should be possible "\
@@ -1113,6 +1158,18 @@ TESTCASE("ScanReadCommitted240",
"downgraded to the maximum parallelism value for the current config)"){
INITIALIZER(runLoadTable);
TC_PROPERTY("Parallelism", 240);
+ TC_PROPERTY("TupScan", (Uint32)0);
+ STEP(runScanReadCommitted);
+ FINALIZER(runClearTable);
+}
+TESTCASE("ScanTupReadCommitted240",
+ "Verify scan requirement: It should be possible to scan read committed with "\
+ "parallelism, test with parallelism 240(240 would automatically be "\
+ "downgraded to the maximum parallelism value for the current config). "\
+ "Scans TUP pages directly without using ACC."){
+ INITIALIZER(runLoadTable);
+ TC_PROPERTY("Parallelism", 240);
+ TC_PROPERTY("TupScan", 1);
STEP(runScanReadCommitted);
FINALIZER(runClearTable);
}
@@ -1540,6 +1597,12 @@ TESTCASE("ScanRestart",
STEP(runScanRestart);
FINALIZER(runClearTable);
}
+TESTCASE("ScanParallelism",
+ "Test scan with different parallelism"){
+ INITIALIZER(runLoadTable);
+ STEP(runScanParallelism);
+ FINALIZER(runClearTable);
+}
NDBT_TESTSUITE_END(testScan);
int main(int argc, const char** argv){
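A note on the scan_flags construction added above: the expression (FLAG & -(int)cond) selects FLAG when cond is true and 0 when it is false, because -(int)true is -1 (all bits set) while -(int)false is 0. A minimal, self-contained sketch of the idiom, using placeholder flag values rather than the real NdbScanOperation constants:

#include <cassert>

// Placeholder flag values; the real ones are defined by NdbScanOperation.
enum ScanFlags {
  SF_TupScan    = 1 << 16,
  SF_OrderBy    = 1 << 24,
  SF_Descending = 1 << 25
};

// Branch-free flag selection: FLAG & -(int)cond == FLAG if cond, else 0.
static int make_scan_flags(bool sort, bool desc, bool tup)
{
  return (SF_OrderBy    & -(int)sort) |
         (SF_Descending & -(int)desc) |
         (SF_TupScan    & -(int)tup);
}

int main()
{
  assert(make_scan_flags(true,  false, false) == SF_OrderBy);
  assert(make_scan_flags(true,  true,  false) == (SF_OrderBy | SF_Descending));
  assert(make_scan_flags(false, false, false) == 0);
  return 0;
}

The same trick drives the SF_TupScan selection in runScanReadCommitted above.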
diff --git a/ndb/test/ndbapi/testScanPerf.cpp b/ndb/test/ndbapi/testScanPerf.cpp
index 45f0468bc70..a730136c3af 100644
--- a/ndb/test/ndbapi/testScanPerf.cpp
+++ b/ndb/test/ndbapi/testScanPerf.cpp
@@ -38,10 +38,10 @@ struct Parameter {
#define P_ROWS 7
#define P_LOOPS 8
#define P_CREATE 9
-#define P_LOAD 10
#define P_RESET 11
+#define P_MULTI 12
-#define P_MAX 12
+#define P_MAX 13
static
Parameter
@@ -57,7 +57,8 @@ g_paramters[] = {
{ "iterations", 3, 1, ~0 },
{ "create_drop", 1, 0, 1 },
{ "data", 1, 0, 1 },
- { "q-reset bounds", 0, 1, 0 }
+ { "q-reset bounds", 0, 1, 0 },
+ { "multi read range", 1000, 1, ~0 }
};
static Ndb* g_ndb = 0;
@@ -67,10 +68,7 @@ static char g_tablename[256];
static char g_indexname[256];
int create_table();
-int load_table();
int run_scan();
-int clear_table();
-int drop_table();
int
main(int argc, const char** argv){
@@ -101,7 +99,13 @@ main(int argc, const char** argv){
myRandom48Init(NdbTick_CurrentMillisecond());
- g_ndb = new Ndb("TEST_DB");
+ Ndb_cluster_connection con;
+ if(con.connect(12, 5, 1))
+ {
+ return NDBT_ProgramExit(NDBT_FAILED);
+ }
+
+ g_ndb = new Ndb(&con, "TEST_DB");
if(g_ndb->init() != 0){
g_err << "init() failed" << endl;
goto error;
@@ -117,14 +121,8 @@ main(int argc, const char** argv){
BaseString::snprintf(g_indexname, sizeof(g_indexname), "IDX_%s", T);
if(create_table())
goto error;
- if(load_table())
- goto error;
if(run_scan())
goto error;
- if(clear_table())
- goto error;
- if(drop_table())
- goto error;
}
if(g_ndb) delete g_ndb;
@@ -139,6 +137,7 @@ create_table(){
NdbDictionary::Dictionary* dict = g_ndb->getDictionary();
assert(dict);
if(g_paramters[P_CREATE].value){
+ g_ndb->getDictionary()->dropTable(g_tablename);
const NdbDictionary::Table * pTab = NDBT_Tables::getTable(g_tablename);
assert(pTab);
NdbDictionary::Table copy = * pTab;
@@ -167,46 +166,18 @@ create_table(){
g_index = dict->getIndex(g_indexname, g_tablename);
assert(g_table);
assert(g_index);
- return 0;
-}
-int
-drop_table(){
- if(!g_paramters[P_CREATE].value)
- return 0;
- if(g_ndb->getDictionary()->dropTable(g_table->getName()) != 0){
- g_err << "Failed to drop table: " << g_table->getName() << endl;
- return -1;
- }
- g_table = 0;
- return 0;
-}
-
-int
-load_table(){
- if(!g_paramters[P_LOAD].value)
- return 0;
-
- int rows = g_paramters[P_ROWS].value;
- HugoTransactions hugoTrans(* g_table);
- if (hugoTrans.loadTable(g_ndb, rows)){
- g_err.println("Failed to load %s with %d rows", g_table->getName(), rows);
- return -1;
+ if(g_paramters[P_CREATE].value)
+ {
+ int rows = g_paramters[P_ROWS].value;
+ HugoTransactions hugoTrans(* g_table);
+ if (hugoTrans.loadTable(g_ndb, rows)){
+ g_err.println("Failed to load %s with %d rows",
+ g_table->getName(), rows);
+ return -1;
+ }
}
- return 0;
-}
-
-int
-clear_table(){
- if(!g_paramters[P_LOAD].value)
- return 0;
- int rows = g_paramters[P_ROWS].value;
- UtilTransactions utilTrans(* g_table);
- if (utilTrans.clearTable(g_ndb, rows) != 0){
- g_err.println("Failed to clear table %s", g_table->getName());
- return -1;
- }
return 0;
}
@@ -227,13 +198,12 @@ run_scan(){
Uint32 tot = g_paramters[P_ROWS].value;
- if(g_paramters[P_BOUND].value == 2 || g_paramters[P_FILT].value == 2)
+ if(g_paramters[P_BOUND].value >= 2 || g_paramters[P_FILT].value == 2)
iter *= g_paramters[P_ROWS].value;
NdbScanOperation * pOp = 0;
NdbIndexScanOperation * pIOp = 0;
NdbConnection * pTrans = 0;
- NdbResultSet * rs = 0;
int check = 0;
for(int i = 0; i<iter; i++){
@@ -246,7 +216,7 @@ run_scan(){
}
int par = g_paramters[P_PARRA].value;
- int bat = g_paramters[P_BATCH].value;
+ int bat = 0; // g_paramters[P_BATCH].value;
NdbScanOperation::LockMode lm;
switch(g_paramters[P_LOCK].value){
case 0:
@@ -265,13 +235,13 @@ run_scan(){
if(g_paramters[P_ACCESS].value == 0){
pOp = pTrans->getNdbScanOperation(g_tablename);
assert(pOp);
- rs = pOp->readTuples(lm, bat, par);
+ pOp->readTuples(lm, bat, par);
} else {
if(g_paramters[P_RESET].value == 0 || pIOp == 0)
{
pOp= pIOp= pTrans->getNdbIndexScanOperation(g_indexname, g_tablename);
bool ord = g_paramters[P_ACCESS].value == 2;
- rs = pIOp->readTuples(lm, bat, par, ord);
+ pIOp->readTuples(lm, bat, par, ord);
}
else
{
@@ -294,14 +264,26 @@ run_scan(){
#else
pIOp->setBound((Uint32)0, NdbIndexScanOperation::BoundEQ, &row);
#endif
+ if(g_paramters[P_RESET].value == 2)
+ goto execute;
+ break;
+ }
+ case 3: { // read multi
+ int multi = g_paramters[P_MULTI].value;
+ int tot = g_paramters[P_ROWS].value;
+ for(; multi > 0 && i < iter; --multi, i++)
+ {
+ int row = rand() % tot;
+ pIOp->setBound((Uint32)0, NdbIndexScanOperation::BoundEQ, &row);
+ pIOp->end_of_bound(i);
+ }
+ if(g_paramters[P_RESET].value == 2)
+ goto execute;
break;
}
}
- if(g_paramters[P_RESET].value == 2)
- goto execute;
}
assert(pOp);
- assert(rs);
switch(g_paramters[P_FILT].value){
case 0: // All
@@ -337,15 +319,18 @@ run_scan(){
for(int i = 0; i<g_table->getNoOfColumns(); i++){
pOp->getValue(i);
}
+
+ if(g_paramters[P_RESET].value == 1)
+ g_paramters[P_RESET].value = 2;
execute:
int rows = 0;
check = pTrans->execute(NoCommit);
assert(check == 0);
int fetch = g_paramters[P_FETCH].value;
- while((check = rs->nextResult(true)) == 0){
+ while((check = pOp->nextResult(true)) == 0){
do {
rows++;
- } while(!fetch && ((check = rs->nextResult(false)) == 0));
+ } while(!fetch && ((check = pOp->nextResult(false)) == 0));
if(check == -1){
err(pTrans->getNdbError());
return -1;
diff --git a/ndb/test/ndbapi/testTimeout.cpp b/ndb/test/ndbapi/testTimeout.cpp
index 71c11b25859..b02751ec819 100644
--- a/ndb/test/ndbapi/testTimeout.cpp
+++ b/ndb/test/ndbapi/testTimeout.cpp
@@ -87,47 +87,6 @@ int runClearTable(NDBT_Context* ctx, NDBT_Step* step){
result = NDBT_FAILED; \
break; }
-int runTimeoutTrans(NDBT_Context* ctx, NDBT_Step* step){
- int result = NDBT_OK;
- int loops = ctx->getNumLoops();
- NdbConfig conf(GETNDB(step)->getNodeId()+1);
- unsigned int nodeId = conf.getMasterNodeId();
- int stepNo = step->getStepNo();
-
- int timeout = ctx->getProperty("TransactionInactiveTimeout",TIMEOUT);
-
- int minSleep = (int)(timeout * 1.5);
- int maxSleep = timeout * 2;
- ndbout << "TransactionInactiveTimeout="<< timeout
- << ", minSleep="<<minSleep
- << ", maxSleep="<<maxSleep<<endl;
-
- HugoOperations hugoOps(*ctx->getTab());
- Ndb* pNdb = GETNDB(step);
-
- for (int l = 0; l < loops && result == NDBT_OK; l++){
-
- do{
- // Commit transaction
- CHECK(hugoOps.startTransaction(pNdb) == 0);
- CHECK(hugoOps.pkReadRecord(pNdb, stepNo) == 0);
- CHECK(hugoOps.execute_NoCommit(pNdb) == 0);
-
- int sleep = minSleep + myRandom48(maxSleep-minSleep);
- ndbout << "Sleeping for " << sleep << " milliseconds" << endl;
- NdbSleep_MilliSleep(sleep);
-
- // Expect that transaction has timed-out
- CHECK(hugoOps.execute_Commit(pNdb) == 237);
-
- } while(false);
-
- hugoOps.closeTransaction(pNdb);
- }
-
- return result;
-}
-
int runTimeoutTrans2(NDBT_Context* ctx, NDBT_Step* step){
int result = NDBT_OK;
int loops = ctx->getNumLoops();
@@ -407,27 +366,6 @@ TESTCASE("DontTimeoutTransaction5",
FINALIZER(resetTransactionTimeout);
FINALIZER(runClearTable);
}
-TESTCASE("TimeoutTransaction",
- "Test that the transaction does timeout "\
- "if we sleep during the transaction. Use a sleep "\
- "value which is larger than TransactionInactiveTimeout"){
- INITIALIZER(runLoadTable);
- INITIALIZER(setTransactionTimeout);
- STEPS(runTimeoutTrans, 1);
- FINALIZER(resetTransactionTimeout);
- FINALIZER(runClearTable);
-}
-TESTCASE("TimeoutTransaction5",
- "Test that the transaction does timeout " \
- "if we sleep during the transaction. Use a sleep " \
- "value which is larger than TransactionInactiveTimeout" \
- "Five simultaneous threads"){
- INITIALIZER(runLoadTable);
- INITIALIZER(setTransactionTimeout);
- STEPS(runTimeoutTrans, 5);
- FINALIZER(resetTransactionTimeout);
- FINALIZER(runClearTable);
-}
TESTCASE("TimeoutRandTransaction",
"Test that the transaction does timeout "\
"if we sleep during the transaction. Use a sleep "\
diff --git a/ndb/test/ndbapi/test_event.cpp b/ndb/test/ndbapi/test_event.cpp
index cb2793e42b9..2df50f21e43 100644
--- a/ndb/test/ndbapi/test_event.cpp
+++ b/ndb/test/ndbapi/test_event.cpp
@@ -14,11 +14,11 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
-#include "NDBT_Test.hpp"
-#include "NDBT_ReturnCodes.h"
-#include "HugoTransactions.hpp"
-#include "UtilTransactions.hpp"
-#include "TestNdbEventOperation.hpp"
+#include <NDBT_Test.hpp>
+#include <NDBT_ReturnCodes.h>
+#include <HugoTransactions.hpp>
+#include <UtilTransactions.hpp>
+#include <TestNdbEventOperation.hpp>
#define GETNDB(ps) ((NDBT_NdbApiStep*)ps)->getNdb()
@@ -32,6 +32,69 @@ int runCreateEvent(NDBT_Context* ctx, NDBT_Step* step)
return NDBT_OK;
}
+int runCreateShadowTable(NDBT_Context* ctx, NDBT_Step* step)
+{
+ const NdbDictionary::Table *table= ctx->getTab();
+ char buf[1024];
+ sprintf(buf, "%s_SHADOW", table->getName());
+
+ GETNDB(step)->getDictionary()->dropTable(buf);
+ if (GETNDB(step)->getDictionary()->getTable(buf))
+ {
+    g_err << "unsuccessful drop of " << buf << endl;
+ return NDBT_FAILED;
+ }
+
+ NdbDictionary::Table table_shadow(*table);
+ table_shadow.setName(buf);
+ GETNDB(step)->getDictionary()->createTable(table_shadow);
+ if (GETNDB(step)->getDictionary()->getTable(buf))
+ return NDBT_OK;
+
+  g_err << "unsuccessful create of " << buf << endl;
+ return NDBT_FAILED;
+}
+
+int runCreateDropEventOperation(NDBT_Context* ctx, NDBT_Step* step)
+{
+ int loops = ctx->getNumLoops();
+ int records = ctx->getNumRecords();
+ HugoTransactions hugoTrans(*ctx->getTab());
+ EventOperationStats stats;
+
+ Ndb *pNdb=GETNDB(step);
+ const NdbDictionary::Table& tab= *ctx->getTab();
+ NdbEventOperation *pOp;
+ char eventName[1024];
+ sprintf(eventName,"%s_EVENT",tab.getName());
+ int noEventColumnName = tab.getNoOfColumns();
+
+ for (int i= 0; i < loops; i++)
+ {
+#if 1
+ if (hugoTrans.eventOperation(GETNDB(step), (void*)&stats, 0) != 0){
+ return NDBT_FAILED;
+ }
+#else
+ g_info << "create EventOperation\n";
+ pOp = pNdb->createEventOperation(eventName, 100);
+ if ( pOp == NULL ) {
+ g_err << "Event operation creation failed\n";
+ return NDBT_FAILED;
+ }
+
+ g_info << "dropping event operation" << endl;
+ int res = pNdb->dropEventOperation(pOp);
+ if (res != 0) {
+ g_err << "operation execution failed\n";
+ return NDBT_FAILED;
+ }
+#endif
+ }
+
+ return NDBT_OK;
+}
+
int theThreadIdCounter = 0;
int runEventOperation(NDBT_Context* ctx, NDBT_Step* step)
@@ -43,7 +106,7 @@ int runEventOperation(NDBT_Context* ctx, NDBT_Step* step)
EventOperationStats stats;
- g_info << "***** Id " << tId << endl;
+ g_info << "***** start Id " << tId << endl;
// sleep(tId);
@@ -62,12 +125,13 @@ int runEventOperation(NDBT_Context* ctx, NDBT_Step* step)
ret = NDBT_FAILED;
if (ret == NDBT_FAILED) {
- ndbout << "n_inserts = " << stats.n_inserts << endl;
- ndbout << "n_deletes = " << stats.n_deletes << endl;
- ndbout << "n_updates = " << stats.n_updates << endl;
- ndbout << "n_consecutive = " << stats.n_consecutive << endl;
- ndbout << "n_duplicates = " << stats.n_duplicates << endl;
- ndbout << "n_inconsistent_gcis = " << stats.n_inconsistent_gcis << endl;
+ g_info << "***** end Id " << tId << endl;
+ ndbout_c("n_inserts = %d (%d)", stats.n_inserts, records);
+ ndbout_c("n_deletes = %d (%d)", stats.n_deletes, records);
+ ndbout_c("n_updates = %d (%d)", stats.n_updates, records);
+ ndbout_c("n_consecutive = %d (%d)", stats.n_consecutive, 3);
+ ndbout_c("n_duplicates = %d (%d)", stats.n_duplicates, 0);
+ ndbout_c("n_inconsistent_gcis = %d (%d)", stats.n_inconsistent_gcis, 0);
}
return ret;
@@ -94,6 +158,36 @@ int runEventLoad(NDBT_Context* ctx, NDBT_Step* step)
return NDBT_OK;
}
+int runEventMixedLoad(NDBT_Context* ctx, NDBT_Step* step)
+{
+ int loops = ctx->getNumLoops();
+ int records = ctx->getNumRecords();
+ HugoTransactions hugoTrans(*ctx->getTab());
+
+ sleep(5);
+
+ if (hugoTrans.loadTable(GETNDB(step), 3*records, 1, true, 1) != 0){
+ return NDBT_FAILED;
+ }
+ if (hugoTrans.pkDelRecords(GETNDB(step), 3*records, 1, true, 1) != 0){
+ return NDBT_FAILED;
+ }
+ if (hugoTrans.loadTable(GETNDB(step), records, 1, true, 1) != 0){
+ return NDBT_FAILED;
+ }
+ if (hugoTrans.pkUpdateRecords(GETNDB(step), records, 1, 1) != 0){
+ return NDBT_FAILED;
+ }
+ if (hugoTrans.pkUpdateRecords(GETNDB(step), records, 1, 1) != 0){
+ return NDBT_FAILED;
+ }
+ if (hugoTrans.pkUpdateRecords(GETNDB(step), records, 1, 1) != 0){
+ return NDBT_FAILED;
+ }
+
+ return NDBT_OK;
+}
+
int runDropEvent(NDBT_Context* ctx, NDBT_Step* step)
{
HugoTransactions hugoTrans(*ctx->getTab());
@@ -105,6 +199,246 @@ int runDropEvent(NDBT_Context* ctx, NDBT_Step* step)
return NDBT_OK;
}
+int runVerify(NDBT_Context* ctx, NDBT_Step* step)
+{
+ int records = ctx->getNumRecords();
+ const NdbDictionary::Table * table= ctx->getTab();
+ char buf[1024];
+
+ sprintf(buf, "%s_SHADOW", table->getName());
+
+ HugoTransactions hugoTrans(*table);
+ if (hugoTrans.compare(GETNDB(step), buf, 0))
+ {
+ return NDBT_FAILED;
+ }
+
+ return NDBT_OK;
+}
+
+int runEventApplier(NDBT_Context* ctx, NDBT_Step* step)
+{
+ DBUG_ENTER("runEventApplier");
+
+ int records = ctx->getNumRecords();
+ int loops = ctx->getNumLoops();
+ const NdbDictionary::Table * table= ctx->getTab();
+ char buf[1024];
+
+ sprintf(buf, "%s_SHADOW", table->getName());
+ const NdbDictionary::Table * table_shadow;
+ if ((table_shadow = GETNDB(step)->getDictionary()->getTable(buf)) == 0)
+ {
+ g_err << "Unable to get table " << buf << endl;
+ DBUG_RETURN(NDBT_FAILED);
+ }
+
+ sprintf(buf, "%s_EVENT", table->getName());
+ NdbEventOperation *pOp;
+ pOp = GETNDB(step)->createEventOperation(buf, 10*records);
+ if ( pOp == NULL ) {
+    g_err << "Event operation creation failed on " << buf << endl;
+ DBUG_RETURN(NDBT_FAILED);
+ }
+
+ int i;
+ int n_columns= table->getNoOfColumns();
+ NdbRecAttr* recAttr[1024];
+ NdbRecAttr* recAttrPre[1024];
+ for (i = 0; i < n_columns; i++) {
+ recAttr[i] = pOp->getValue(table->getColumn(i)->getName());
+ recAttrPre[i] = pOp->getPreValue(table->getColumn(i)->getName());
+ }
+
+  if (pOp->execute()) { // This starts the flow of change events
+    g_err << "event operation execute failed: \n";
+ g_err << pOp->getNdbError().code << " "
+ << pOp->getNdbError().message << endl;
+ DBUG_RETURN(NDBT_FAILED);
+ }
+
+ int r= 0;
+ int res;
+ while (r < 10*records){
+ //printf("now waiting for event...\n");
+ res= GETNDB(step)->pollEvents(1000); // wait for event or 1000 ms
+ if (res <= 0)
+ {
+ ndbout_c("********************");
+ continue;
+ }
+
+ //printf("got data! %d\n", r);
+ int overrun= 0;
+ while (pOp->next(&overrun) > 0)
+ {
+ if (overrun)
+ {
+ g_err << "buffer overrun\n";
+ DBUG_RETURN(NDBT_FAILED);
+ }
+ r++;
+
+ Uint32 gci= pOp->getGCI();
+
+ if (!pOp->isConsistent()) {
+        g_err << "A node failure has occurred and events might be missing\n";
+ DBUG_RETURN(NDBT_FAILED);
+ }
+
+ int noRetries= 0;
+ do
+ {
+ NdbTransaction *trans= GETNDB(step)->startTransaction();
+ if (trans == 0)
+ {
+ g_err << "startTransaction failed "
+ << GETNDB(step)->getNdbError().code << " "
+ << GETNDB(step)->getNdbError().message << endl;
+ DBUG_RETURN(NDBT_FAILED);
+ }
+
+ NdbOperation *op= trans->getNdbOperation(table_shadow);
+ if (op == 0)
+ {
+ g_err << "getNdbOperation failed "
+ << trans->getNdbError().code << " "
+ << trans->getNdbError().message << endl;
+ DBUG_RETURN(NDBT_FAILED);
+ }
+
+ switch (pOp->getEventType()) {
+ case NdbDictionary::Event::TE_INSERT:
+ if (op->insertTuple())
+ {
+ g_err << "insertTuple "
+ << op->getNdbError().code << " "
+ << op->getNdbError().message << endl;
+ DBUG_RETURN(NDBT_FAILED);
+ }
+ break;
+ case NdbDictionary::Event::TE_DELETE:
+ if (op->deleteTuple())
+ {
+ g_err << "deleteTuple "
+ << op->getNdbError().code << " "
+ << op->getNdbError().message << endl;
+ DBUG_RETURN(NDBT_FAILED);
+ }
+ break;
+ case NdbDictionary::Event::TE_UPDATE:
+ if (op->updateTuple())
+ {
+ g_err << "updateTuple "
+ << op->getNdbError().code << " "
+ << op->getNdbError().message << endl;
+ DBUG_RETURN(NDBT_FAILED);
+ }
+ break;
+ default:
+ abort();
+ }
+
+ for (i= 0; i < n_columns; i++)
+ {
+ if (recAttr[i]->isNULL())
+ {
+ if (table->getColumn(i)->getPrimaryKey())
+ {
+ g_err << "internal error: primary key isNull()="
+ << recAttr[i]->isNULL() << endl;
+ DBUG_RETURN(NDBT_FAILED);
+ }
+ switch (pOp->getEventType()) {
+ case NdbDictionary::Event::TE_INSERT:
+ if (recAttr[i]->isNULL() < 0)
+ {
+ g_err << "internal error: missing value for insert\n";
+ DBUG_RETURN(NDBT_FAILED);
+ }
+ break;
+ case NdbDictionary::Event::TE_DELETE:
+ break;
+ case NdbDictionary::Event::TE_UPDATE:
+ break;
+ default:
+ abort();
+ }
+ }
+ if (table->getColumn(i)->getPrimaryKey() &&
+ op->equal(i,recAttr[i]->aRef()))
+ {
+ g_err << "equal " << i << " "
+ << op->getNdbError().code << " "
+ << op->getNdbError().message << endl;
+ DBUG_RETURN(NDBT_FAILED);
+ }
+ }
+
+ switch (pOp->getEventType()) {
+ case NdbDictionary::Event::TE_INSERT:
+ for (i= 0; i < n_columns; i++)
+ {
+ if (!table->getColumn(i)->getPrimaryKey() &&
+ op->setValue(i,recAttr[i]->isNULL() ? 0:recAttr[i]->aRef()))
+ {
+ g_err << "setValue(insert) " << i << " "
+ << op->getNdbError().code << " "
+ << op->getNdbError().message << endl;
+ DBUG_RETURN(NDBT_FAILED);
+ }
+ }
+ break;
+ case NdbDictionary::Event::TE_DELETE:
+ break;
+ case NdbDictionary::Event::TE_UPDATE:
+ for (i= 0; i < n_columns; i++)
+ {
+ if (!table->getColumn(i)->getPrimaryKey() &&
+ recAttr[i]->isNULL() >= 0 &&
+ op->setValue(i,recAttr[i]->isNULL() ? 0:recAttr[i]->aRef()))
+ {
+ g_err << "setValue(update) " << i << " "
+ << op->getNdbError().code << " "
+ << op->getNdbError().message << endl;
+ DBUG_RETURN(NDBT_FAILED);
+ }
+ }
+ break;
+ case NdbDictionary::Event::TE_ALL:
+ abort();
+ }
+ if (trans->execute(Commit) == 0)
+ {
+ trans->close();
+ // everything ok
+ break;
+ }
+ if (noRetries++ == 10 ||
+ trans->getNdbError().status != NdbError::TemporaryError)
+ {
+ g_err << "execute " << r << " failed "
+ << trans->getNdbError().code << " "
+ << trans->getNdbError().message << endl;
+ trans->close();
+ DBUG_RETURN(NDBT_FAILED);
+ }
+ trans->close();
+      NdbSleep_MilliSleep(100); // sleep before retrying
+ } while(1);
+ }
+ }
+
+ if (GETNDB(step)->dropEventOperation(pOp)) {
+ g_err << "dropEventOperation execution failed "
+ << GETNDB(step)->getNdbError().code << " "
+ << GETNDB(step)->getNdbError().message << endl;
+ DBUG_RETURN(NDBT_FAILED);
+ }
+
+ DBUG_RETURN(NDBT_OK);
+}
+
// INITIALIZER(runInsert);
// STEP(runPkRead);
// VERIFIER(runVerifyInsert);
@@ -116,25 +450,37 @@ TESTCASE("BasicEventOperation",
"NOTE! No errors are allowed!" ){
INITIALIZER(runCreateEvent);
STEP(runEventOperation);
- STEP(runEventOperation);
- STEP(runEventOperation);
- STEP(runEventOperation);
STEP(runEventLoad);
FINALIZER(runDropEvent);
}
-NDBT_TESTSUITE_END(test_event);
-
-#if 0
-NDBT_TESTSUITE(test_event);
+TESTCASE("CreateDropEventOperation",
+	 "Verify that we can create and drop event operations many times. "
+ "NOTE! No errors are allowed!" ){
+ INITIALIZER(runCreateEvent);
+ STEP(runCreateDropEventOperation);
+ FINALIZER(runDropEvent);
+}
TESTCASE("ParallellEventOperation",
- "Verify that we can listen to Events in Parallell"
+	 "Verify that we can listen to events in parallel. "
"NOTE! No errors are allowed!" ){
- INITIALIZER(runCreateAllEvent);
+ INITIALIZER(runCreateEvent);
+ STEP(runEventOperation);
STEP(runEventOperation);
+ STEP(runEventLoad);
+ FINALIZER(runDropEvent);
+}
+TESTCASE("EventOperationApplier",
+	 "Verify that the data we get from the event operation, "
+	 "when applied, matches the original table. "
+ "NOTE! No errors are allowed!" ){
+ INITIALIZER(runCreateEvent);
+ INITIALIZER(runCreateShadowTable);
+ STEP(runEventApplier);
+ STEP(runEventMixedLoad);
FINALIZER(runDropEvent);
+ FINALIZER(runVerify);
}
NDBT_TESTSUITE_END(test_event);
-#endif
int main(int argc, const char** argv){
ndb_init();
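runEventApplier() above, and the copy_events() helper in the new file that follows, share the same control flow: block in pollEvents() until something arrives, drain every buffered change with next(), turn the event type into an insert/update/delete on the shadow table, and retry the commit a bounded number of times when the error is temporary. A condensed sketch of that loop, with hypothetical stand-ins in place of the NDB API calls:

#include <cstdio>

enum EventType { TE_INSERT, TE_UPDATE, TE_DELETE };
struct Event { EventType type; int key; };

// Hypothetical stand-ins for the real NDB API calls (pollEvents,
// NdbEventOperation::next, and the shadow-table transaction); they just
// simulate one buffered event and one temporary commit failure.
static bool poll_events(int /*timeout_ms*/) { static int n = 0; return n++ < 1; }
static bool next_event(Event* ev)
{
  static int n = 0;
  if (n++) return false;                  // nothing more buffered
  ev->type = TE_INSERT; ev->key = 0;
  return true;
}
static bool commit_to_shadow(const Event&, int attempt) { return attempt > 0; }
static bool error_is_temporary() { return true; }

static bool run_applier()
{
  while (poll_events(1000)) {             // wait up to 1 s for new changes
    Event ev;
    while (next_event(&ev)) {             // drain everything that is buffered
      for (int attempt = 0; ; attempt++) {
        if (commit_to_shadow(ev, attempt))
          break;                          // applied, move on to the next event
        if (attempt == 10 || !error_is_temporary())
          return false;                   // permanent error or retries exhausted
        // the test sleeps ~100 ms here before retrying
      }
    }
  }
  return true;
}

int main()
{
  std::printf("applier ok: %s\n", run_applier() ? "yes" : "no");
  return 0;
}

Bounding the retries keeps a permanently failing commit from stalling the whole applier step.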
diff --git a/ndb/test/ndbapi/test_event_multi_table.cpp b/ndb/test/ndbapi/test_event_multi_table.cpp
new file mode 100644
index 00000000000..f16504029fa
--- /dev/null
+++ b/ndb/test/ndbapi/test_event_multi_table.cpp
@@ -0,0 +1,487 @@
+/* Copyright (C) 2005 MySQL AB
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+
+#include <ndb_global.h>
+#include <ndb_opts.h>
+#include <NDBT_Test.hpp>
+#include <NDBT_ReturnCodes.h>
+#include <HugoTransactions.hpp>
+#include <UtilTransactions.hpp>
+#include <TestNdbEventOperation.hpp>
+
+static void usage()
+{
+ ndb_std_print_version();
+}
+
+static int start_transaction(Ndb *ndb, Vector<HugoOperations*> &ops)
+{
+ if (ops[0]->startTransaction(ndb) != NDBT_OK)
+ return -1;
+ NdbTransaction * t= ops[0]->getTransaction();
+ for (int i= ops.size()-1; i > 0; i--)
+ {
+ ops[i]->setTransaction(t);
+ }
+ return 0;
+}
+
+static int close_transaction(Ndb *ndb, Vector<HugoOperations*> &ops)
+{
+ if (ops[0]->closeTransaction(ndb) != NDBT_OK)
+ return -1;
+ for (int i= ops.size()-1; i > 0; i--)
+ {
+ ops[i]->setTransaction(NULL);
+ }
+ return 0;
+}
+
+static int execute_commit(Ndb *ndb, Vector<HugoOperations*> &ops)
+{
+ if (ops[0]->execute_Commit(ndb) != NDBT_OK)
+ return -1;
+ return 0;
+}
+
+static int copy_events(Ndb *ndb,
+ Vector<NdbEventOperation *> &ops,
+ Vector<const NdbDictionary::Table *> &tabs,
+ Vector<Vector<NdbRecAttr *> > &values)
+{
+ DBUG_ENTER("copy_events");
+ int r= 0;
+ while (1)
+ {
+ int res= ndb->pollEvents(1000); // wait for event or 1000 ms
+    DBUG_PRINT("info", ("pollEvents res=%d", res));
+ if (res <= 0)
+ {
+ break;
+ }
+ for (unsigned i_ops= 0; i_ops < ops.size(); i_ops++)
+ {
+ NdbEventOperation *pOp= ops[i_ops];
+ const NdbDictionary::Table *table= tabs[i_ops];
+ Vector<NdbRecAttr *> &recAttr= values[i_ops];
+
+ int overrun= 0;
+ unsigned i;
+ unsigned n_columns= table->getNoOfColumns();
+ while (pOp->next(&overrun) > 0)
+ {
+ if (overrun)
+ {
+ g_err << "buffer overrun\n";
+ DBUG_RETURN(-1);
+ }
+ r++;
+
+ Uint32 gci= pOp->getGCI();
+
+ if (!pOp->isConsistent()) {
+	  g_err << "A node failure has occurred and events might be missing\n";
+ DBUG_RETURN(-1);
+ }
+
+ int noRetries= 0;
+ do
+ {
+ NdbTransaction *trans= ndb->startTransaction();
+ if (trans == 0)
+ {
+ g_err << "startTransaction failed "
+ << ndb->getNdbError().code << " "
+ << ndb->getNdbError().message << endl;
+ DBUG_RETURN(-1);
+ }
+
+ NdbOperation *op= trans->getNdbOperation(table);
+ if (op == 0)
+ {
+ g_err << "getNdbOperation failed "
+ << trans->getNdbError().code << " "
+ << trans->getNdbError().message << endl;
+ DBUG_RETURN(-1);
+ }
+
+ switch (pOp->getEventType()) {
+ case NdbDictionary::Event::TE_INSERT:
+ if (op->insertTuple())
+ {
+ g_err << "insertTuple "
+ << op->getNdbError().code << " "
+ << op->getNdbError().message << endl;
+ DBUG_RETURN(-1);
+ }
+ break;
+ case NdbDictionary::Event::TE_DELETE:
+ if (op->deleteTuple())
+ {
+ g_err << "deleteTuple "
+ << op->getNdbError().code << " "
+ << op->getNdbError().message << endl;
+ DBUG_RETURN(-1);
+ }
+ break;
+ case NdbDictionary::Event::TE_UPDATE:
+ if (op->updateTuple())
+ {
+ g_err << "updateTuple "
+ << op->getNdbError().code << " "
+ << op->getNdbError().message << endl;
+ DBUG_RETURN(-1);
+ }
+ break;
+ default:
+ abort();
+ }
+
+ for (i= 0; i < n_columns; i++)
+ {
+ if (recAttr[i]->isNULL())
+ {
+ if (table->getColumn(i)->getPrimaryKey())
+ {
+ g_err << "internal error: primary key isNull()="
+ << recAttr[i]->isNULL() << endl;
+	      DBUG_RETURN(-1);
+ }
+ switch (pOp->getEventType()) {
+ case NdbDictionary::Event::TE_INSERT:
+ if (recAttr[i]->isNULL() < 0)
+ {
+ g_err << "internal error: missing value for insert\n";
+		  DBUG_RETURN(-1);
+ }
+ break;
+ case NdbDictionary::Event::TE_DELETE:
+ break;
+ case NdbDictionary::Event::TE_UPDATE:
+ break;
+ default:
+ abort();
+ }
+ }
+ if (table->getColumn(i)->getPrimaryKey() &&
+ op->equal(i,recAttr[i]->aRef()))
+ {
+ g_err << "equal " << i << " "
+ << op->getNdbError().code << " "
+ << op->getNdbError().message << endl;
+	      DBUG_RETURN(-1);
+ }
+ }
+
+ switch (pOp->getEventType()) {
+ case NdbDictionary::Event::TE_INSERT:
+ for (i= 0; i < n_columns; i++)
+ {
+ if (!table->getColumn(i)->getPrimaryKey() &&
+ op->setValue(i,recAttr[i]->isNULL() ? 0:recAttr[i]->aRef()))
+ {
+ g_err << "setValue(insert) " << i << " "
+ << op->getNdbError().code << " "
+ << op->getNdbError().message << endl;
+ DBUG_RETURN(-1);
+ }
+ }
+ break;
+ case NdbDictionary::Event::TE_DELETE:
+ break;
+ case NdbDictionary::Event::TE_UPDATE:
+ for (i= 0; i < n_columns; i++)
+ {
+ if (!table->getColumn(i)->getPrimaryKey() &&
+ recAttr[i]->isNULL() >= 0 &&
+ op->setValue(i,recAttr[i]->isNULL() ? 0:recAttr[i]->aRef()))
+ {
+ g_err << "setValue(update) " << i << " "
+ << op->getNdbError().code << " "
+ << op->getNdbError().message << endl;
+		DBUG_RETURN(-1);
+ }
+ }
+ break;
+ case NdbDictionary::Event::TE_ALL:
+ abort();
+ }
+ if (trans->execute(Commit) == 0)
+ {
+ trans->close();
+ // everything ok
+ break;
+ }
+ if (noRetries++ == 10 ||
+ trans->getNdbError().status != NdbError::TemporaryError)
+ {
+ g_err << "execute " << r << " failed "
+ << trans->getNdbError().code << " "
+ << trans->getNdbError().message << endl;
+ trans->close();
+ DBUG_RETURN(-1);
+ }
+ trans->close();
+      NdbSleep_MilliSleep(100); // sleep before retrying
+ } while(1);
+ }
+ }
+ }
+ DBUG_RETURN(r);
+}
+
+static int verify_copy(Ndb *ndb,
+ Vector<const NdbDictionary::Table *> &tabs1,
+ Vector<const NdbDictionary::Table *> &tabs2)
+{
+ for (unsigned i= 0; i < tabs1.size(); i++)
+ if (tabs1[i])
+ {
+ HugoTransactions hugoTrans(*tabs1[i]);
+ if (hugoTrans.compare(ndb, tabs2[i]->getName(), 0))
+ return -1;
+ }
+ return 0;
+}
+
+NDB_STD_OPTS_VARS;
+
+static const char* _dbname = "TEST_DB";
+static struct my_option my_long_options[] =
+{
+ NDB_STD_OPTS(""),
+ { "database", 'd', "Name of database table is in",
+ (gptr*) &_dbname, (gptr*) &_dbname, 0,
+ GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },
+ { 0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}
+};
+
+int
+main(int argc, char** argv)
+{
+ NDB_INIT(argv[0]);
+ const char *load_default_groups[]= { "mysql_cluster",0 };
+ load_defaults("my",load_default_groups,&argc,&argv);
+
+ int ho_error;
+#ifndef DBUG_OFF
+ opt_debug= "d:t:F:L";
+#endif
+ if ((ho_error=handle_options(&argc, &argv, my_long_options,
+ ndb_std_get_one_option)))
+ return NDBT_ProgramExit(NDBT_WRONGARGS);
+
+ DBUG_ENTER("main");
+ Ndb_cluster_connection con(opt_connect_str);
+ if(con.connect(12, 5, 1))
+ {
+ DBUG_RETURN(NDBT_ProgramExit(NDBT_FAILED));
+ }
+
+
+ Ndb ndb(&con,_dbname);
+ ndb.init();
+ while (ndb.waitUntilReady() != 0);
+
+ NdbDictionary::Dictionary * dict = ndb.getDictionary();
+ int no_error= 1;
+ int i;
+
+ // create all tables
+ Vector<const NdbDictionary::Table*> pTabs;
+ for (i= 0; no_error && argc; argc--, i++)
+ {
+ dict->dropTable(argv[i]);
+ NDBT_Tables::createTable(&ndb, argv[i]);
+ const NdbDictionary::Table *pTab= dict->getTable(argv[i]);
+ if (pTab == 0)
+ {
+ ndbout << "Failed to create table" << endl;
+ ndbout << dict->getNdbError() << endl;
+ no_error= 0;
+ break;
+ }
+ pTabs.push_back(pTab);
+ }
+ pTabs.push_back(NULL);
+
+ // create an event for each table
+ for (i= 0; no_error && pTabs[i]; i++)
+ {
+ HugoTransactions ht(*pTabs[i]);
+ if (ht.createEvent(&ndb)){
+ no_error= 0;
+ break;
+ }
+ }
+
+ // create an event operation for each event
+ Vector<NdbEventOperation *> pOps;
+ for (i= 0; no_error && pTabs[i]; i++)
+ {
+ char buf[1024];
+ sprintf(buf, "%s_EVENT", pTabs[i]->getName());
+ NdbEventOperation *pOp= ndb.createEventOperation(buf, 1000);
+ if ( pOp == NULL )
+ {
+ no_error= 0;
+ break;
+ }
+ pOps.push_back(pOp);
+ }
+
+ // get storage for each event operation
+ Vector<Vector<NdbRecAttr*> > values;
+ Vector<Vector<NdbRecAttr*> > pre_values;
+ for (i= 0; no_error && pTabs[i]; i++)
+ {
+ int n_columns= pTabs[i]->getNoOfColumns();
+ Vector<NdbRecAttr*> tmp_a;
+ Vector<NdbRecAttr*> tmp_b;
+ for (int j = 0; j < n_columns; j++) {
+ tmp_a.push_back(pOps[i]->getValue(pTabs[i]->getColumn(j)->getName()));
+ tmp_b.push_back(pOps[i]->getPreValue(pTabs[i]->getColumn(j)->getName()));
+ }
+ values.push_back(tmp_a);
+ pre_values.push_back(tmp_b);
+ }
+
+ // start receiving events
+ for (i= 0; no_error && pTabs[i]; i++)
+ {
+ if ( pOps[i]->execute() )
+ {
+ no_error= 0;
+ break;
+ }
+ }
+
+ // create a "shadow" table for each table
+ Vector<const NdbDictionary::Table*> pShadowTabs;
+ for (i= 0; no_error && pTabs[i]; i++)
+ {
+ char buf[1024];
+ sprintf(buf, "%s_SHADOW", pTabs[i]->getName());
+
+ dict->dropTable(buf);
+ if (dict->getTable(buf))
+ {
+ no_error= 0;
+ break;
+ }
+
+ NdbDictionary::Table table_shadow(*pTabs[i]);
+ table_shadow.setName(buf);
+ dict->createTable(table_shadow);
+ pShadowTabs.push_back(dict->getTable(buf));
+ if (!pShadowTabs[i])
+ {
+ no_error= 0;
+ break;
+ }
+ }
+
+ // create a hugo operation per table
+ Vector<HugoOperations *> hugo_ops;
+ for (i= 0; no_error && pTabs[i]; i++)
+ {
+ hugo_ops.push_back(new HugoOperations(*pTabs[i]));
+ }
+
+ sleep(5);
+
+ // insert 3 records per table
+ do {
+ if (start_transaction(&ndb, hugo_ops))
+ {
+ no_error= 0;
+ break;
+ }
+ for (i= 0; no_error && pTabs[i]; i++)
+ {
+ hugo_ops[i]->pkInsertRecord(&ndb, 0, 3);
+ }
+ if (execute_commit(&ndb, hugo_ops))
+ {
+ no_error= 0;
+ break;
+ }
+ if(close_transaction(&ndb, hugo_ops))
+ {
+ no_error= 0;
+ break;
+ }
+ } while(0);
+
+ // copy events and verify
+ do {
+ if (copy_events(&ndb, pOps, pShadowTabs, values) < 0)
+ {
+ no_error= 0;
+ break;
+ }
+ if (verify_copy(&ndb, pTabs, pShadowTabs))
+ {
+ no_error= 0;
+ break;
+ }
+ } while (0);
+
+ // update 2 records in first table
+ do {
+ if (start_transaction(&ndb, hugo_ops))
+ {
+ no_error= 0;
+ break;
+ }
+
+ hugo_ops[0]->pkUpdateRecord(&ndb, 2);
+
+ if (execute_commit(&ndb, hugo_ops))
+ {
+ no_error= 0;
+ break;
+ }
+ if(close_transaction(&ndb, hugo_ops))
+ {
+ no_error= 0;
+ break;
+ }
+ } while(0);
+
+ // copy events and verify
+ do {
+ if (copy_events(&ndb, pOps, pShadowTabs, values) < 0)
+ {
+ no_error= 0;
+ break;
+ }
+ if (verify_copy(&ndb, pTabs, pShadowTabs))
+ {
+ no_error= 0;
+ break;
+ }
+ } while (0);
+
+ if (no_error)
+ DBUG_RETURN(NDBT_ProgramExit(NDBT_OK));
+ DBUG_RETURN(NDBT_ProgramExit(NDBT_FAILED));
+}
+
+template class Vector<HugoOperations *>;
+template class Vector<NdbEventOperation *>;
+template class Vector<NdbRecAttr*>;
+template class Vector<Vector<NdbRecAttr*> >;
diff --git a/ndb/test/run-test/16node-tests.txt b/ndb/test/run-test/16node-tests.txt
new file mode 100644
index 00000000000..11ade56c28c
--- /dev/null
+++ b/ndb/test/run-test/16node-tests.txt
@@ -0,0 +1,733 @@
+# BASIC FUNCTIONALITY
+max-time: 500
+cmd: testBasic
+args: -n PkRead
+
+max-time: 500
+cmd: testBasic
+args: -n PkUpdate
+
+max-time: 500
+cmd: testBasic
+args: -n PkDelete
+
+max-time: 500
+cmd: testBasic
+args: -n PkInsert
+
+max-time: 600
+cmd: testBasic
+args: -n UpdateAndRead
+
+max-time: 500
+cmd: testBasic
+args: -n PkReadAndLocker T6
+
+max-time: 500
+cmd: testBasic
+args: -n PkReadAndLocker2 T6
+
+max-time: 500
+cmd: testBasic
+args: -n PkReadUpdateAndLocker T6
+
+max-time: 500
+cmd: testBasic
+args: -n ReadWithLocksAndInserts T6
+
+max-time: 500
+cmd: testBasic
+args: -n PkInsertTwice T1 T6 T10
+
+max-time: 1500
+cmd: testBasic
+args: -n Fill T13
+
+max-time: 1500
+cmd: testBasic
+args: -n Fill T6
+
+max-time: 500
+cmd: testBasic
+args: -n NoCommitSleep T6
+
+max-time: 500
+cmd: testBasic
+args: -n NoCommit626 T6
+
+max-time: 500
+cmd: testBasic
+args: -n NoCommitAndClose T6
+
+max-time: 500
+cmd: testBasic
+args: -n Commit626 T6
+
+max-time: 500
+cmd: testBasic
+args: -n CommitTry626 T6
+
+max-time: 500
+cmd: testBasic
+args: -n CommitAsMuch626 T6
+
+max-time: 500
+cmd: testBasic
+args: -n NoCommit626 T6
+
+max-time: 500
+cmd: testBasic
+args: -n NoCommitRollback626 T1 T6
+
+max-time: 500
+cmd: testBasic
+args: -n Commit630 T1 T6
+
+max-time: 500
+cmd: testBasic
+args: -n CommitTry630 T1 T6
+
+max-time: 500
+cmd: testBasic
+args: -n CommitAsMuch630 T1 T6
+
+max-time: 500
+cmd: testBasic
+args: -n NoCommit630 T1 T6
+
+max-time: 500
+cmd: testBasic
+args: -n NoCommitRollback630 T1 T6
+
+max-time: 500
+cmd: testBasic
+args: -n NoCommitAndClose T1 T6
+
+max-time: 500
+cmd: testBasic
+args: -n RollbackUpdate T1 T6
+
+max-time: 500
+cmd: testBasic
+args: -n RollbackDeleteMultiple T1 T6
+
+max-time: 500
+cmd: testBasic
+args: -n ImplicitRollbackDelete T1 T6
+
+max-time: 500
+cmd: testBasic
+args: -n CommitDelete T1 T6
+
+max-time: 500
+cmd: testBasic
+args: -n RollbackNothing T1 T6
+
+max-time: 500
+cmd: testBasicAsynch
+args: -n PkInsertAsynch
+
+max-time: 500
+cmd: testBasicAsynch
+args: -n PkReadAsynch
+
+max-time: 500
+cmd: testBasicAsynch
+args: -n PkUpdateAsynch
+
+max-time: 500
+cmd: testBasicAsynch
+args: -n PkDeleteAsynch
+
+max-time: 500
+cmd: testBasic
+args: -n MassiveRollback T1 T6 T13
+
+max-time: 500
+cmd: testBasic
+args: -n MassiveRollback2 T1 T6 T13
+
+max-time: 500
+cmd: testTimeout
+args: T1
+
+# SCAN TESTS
+#
+max-time: 500
+cmd: testScan
+args: -n ScanRead16
+
+max-time: 500
+cmd: testScan
+args: -n ScanRead240
+
+max-time: 500
+cmd: testScan
+args: -n ScanReadCommitted240
+
+max-time: 500
+cmd: testScan
+args: -n ScanUpdate
+
+max-time: 500
+cmd: testScan
+args: -n ScanUpdate2 T6
+
+max-time: 500
+cmd: testScan
+args: -n ScanDelete
+
+max-time: 500
+cmd: testScan
+args: -n ScanDelete2 T10
+
+max-time: 500
+cmd: testScan
+args: -n ScanUpdateAndScanRead T6
+
+max-time: 500
+cmd: testScan
+args: -n ScanReadAndLocker T6
+
+max-time: 500
+cmd: testScan
+args: -n ScanReadAndPkRead T6
+
+max-time: 500
+cmd: testScan
+args: -n ScanRead488 -l 10 T6
+
+max-time: 500
+cmd: testScan
+args: -n ScanRead488O -l 10 T6
+
+max-time: 1000
+cmd: testScan
+args: -n ScanRead488_Mixed -l 10 T6
+
+max-time: 500
+cmd: testScan
+args: -n ScanRead488Timeout -l 10 T6
+
+max-time: 600
+cmd: testScan
+args: -n ScanRead40 -l 100 T2
+
+max-time: 1800
+cmd: testScan
+args: -n ScanRead100 -l 100 T1
+
+max-time: 600
+cmd: testScan
+args: -n ScanRead40 -l 100 T1
+
+max-time: 1800
+cmd: testScan
+args: -n ScanRead40RandomTable -l 100 T1
+
+max-time: 500
+cmd: testScan
+args: -n ScanWithLocksAndInserts T6
+
+max-time: 500
+cmd: testScan
+args: -n ScanReadAbort T6
+
+max-time: 500
+cmd: testScan
+args: -n ScanReadAbort15 T6
+
+max-time: 500
+cmd: testScan
+args: -n ScanReadAbort240 T6
+
+max-time: 500
+cmd: testScan
+args: -n ScanUpdateAbort16 T6
+
+max-time: 3600
+cmd: testScan
+args: -n ScanReadRestart T1 T6 T13
+
+max-time: 500
+cmd: testScan
+args: -n ScanUpdateRestart T6
+
+max-time: 500
+cmd: testScan
+args: -n CheckGetValue T6
+
+max-time: 500
+cmd: testScan
+args: -n CloseWithoutStop T6
+
+max-time: 500
+cmd: testScan
+args: -n NextScanWhenNoMore T6
+
+max-time: 500
+cmd: testScan
+args: -n ExecuteScanWithoutOpenScan T6
+
+max-time: 500
+cmd: testScan
+args: -n OnlyOpenScanOnce T6
+
+max-time: 500
+cmd: testScan
+args: -n OnlyOneOpInScanTrans T6
+
+max-time: 500
+cmd: testScan
+args: -n OnlyOneOpBeforeOpenScan T6
+
+max-time: 500
+cmd: testScan
+args: -n OnlyOneScanPerTrans T6
+
+max-time: 500
+cmd: testScan
+args: -n NoCloseTransaction T6
+
+max-time: 500
+cmd: testScan
+args: -n CheckInactivityTimeOut T6
+
+max-time: 500
+cmd: testScan
+args: -n CheckInactivityBeforeClose T6
+
+max-time: 500
+cmd: testScan
+args: -n CheckAfterTerror T6
+
+max-time: 500
+cmd: testScan
+args: -n ScanReadError5021 T1
+
+max-time: 500
+cmd: testScan
+args: -n ScanReaderror5022 T1
+
+max-time: 500
+cmd: testScan
+args: -n ScanReadError5023 T1
+
+max-time: 500
+cmd: testScan
+args: -n ScanReadError5024 T1
+
+max-time: 500
+cmd: testScan
+args: -n ScanReadError5025 T1
+
+max-time: 500
+cmd: testScan
+args: -n ScanReadError5030 T1
+
+max-time: 500
+cmd: testScan
+args: -n InsertDelete T1 T6
+
+max-time: 500
+cmd: testScan
+args: -n CheckAfterTerror T1
+
+max-time: 500
+cmd: testScan
+args: -n ScanReadWhileNodeIsDown T1
+
+max-time: 500
+cmd: testScan
+args: -n ScanRestart T1
+
+max-time: 500
+cmd: testScan
+args: -n ScanParallelism
+
+#
+# DICT TESTS
+max-time: 1500
+cmd: testDict
+args: -n CreateAndDrop
+
+max-time: 1500
+cmd: testDict
+args: -n CreateAndDropWithData
+
+max-time: 1500
+cmd: testDict
+args: -n CreateAndDropDuring T6 T10
+
+max-time: 1500
+cmd: testDict
+args: -n CreateInvalidTables
+
+max-time: 1500
+cmd: testDict
+args: -n CreateTableWhenDbIsFull T6
+
+max-time: 1500
+cmd: testDict
+args: -n CreateMaxTables T6
+
+max-time: 500
+cmd: testDict
+args: -n FragmentTypeSingle T1
+
+max-time: 1500
+cmd: testDict
+args: -n FragmentTypeAllSmall T1 T6 T7 T8
+
+max-time: 1500
+cmd: testDict
+args: -n FragmentTypeAllLarge T1 T6 T7 T8
+
+max-time: 1500
+cmd: testDict
+args: -n TemporaryTables T1 T6 T7 T8
+
+#
+# TEST NDBAPI
+#
+max-time: 500
+cmd: testDataBuffers
+args:
+
+# Testsuite: testNdbApi
+# Number of tests: 5
+max-time: 500
+cmd: testNdbApi
+args: -n MaxNdb T6
+
+max-time: 500
+cmd: testNdbApi
+args: -n MaxTransactions T1 T6 T7 T8 T13
+
+max-time: 500
+cmd: testNdbApi
+args: -n MaxOperations T1 T6 T7 T8 T13
+
+max-time: 500
+cmd: testNdbApi
+args: -n MaxGetValue T1 T6 T7 T8 T13
+
+max-time: 500
+cmd: testNdbApi
+args: -n MaxEqual
+
+max-time: 500
+cmd: testNdbApi
+args: -n DeleteNdb T1 T6
+
+max-time: 500
+cmd: testNdbApi
+args: -n WaitUntilReady T1 T6 T7 T8 T13
+
+max-time: 500
+cmd: testNdbApi
+args: -n GetOperationNoTab T6
+
+max-time: 500
+cmd: testNdbApi
+args: -n NdbErrorOperation T6
+
+max-time: 500
+cmd: testNdbApi
+args: -n MissingOperation T6
+
+max-time: 500
+cmd: testNdbApi
+args: -n GetValueInUpdate T6
+
+max-time: 500
+cmd: testNdbApi
+args: -n UpdateWithoutKeys T6
+
+max-time: 500
+cmd: testNdbApi
+args: -n UpdateWithoutValues T6
+
+#max-time: 500
+#cmd: testInterpreter
+#args: T1
+#
+max-time: 150000
+cmd: testOperations
+args:
+
+max-time: 15000
+cmd: testTransactions
+args:
+
+max-time: 1500
+cmd: testRestartGci
+args: T6
+
+max-time: 600
+cmd: testBlobs
+args:
+
+max-time: 5000
+cmd: testOIBasic
+args:
+
+max-time: 2500
+cmd: testBitfield
+args:
+
+max-time: 2500
+cmd: testPartitioning
+args:
+
+max-time: 25000
+cmd: atrt-mysql-test-run
+args: --force
+
+#
+# INDEX
+#
+max-time: 1500
+cmd: testIndex
+args: -n CreateAll T1 T6 T13
+
+#-m 7200 1: testIndex -n InsertDeleteGentle T7
+max-time: 3600
+cmd: testIndex
+args: -n InsertDelete T1 T10
+
+#-m 3600 1: testIndex -n CreateLoadDropGentle T7
+max-time: 3600
+cmd: testIndex
+args: -n CreateLoadDrop T1 T10
+
+#
+# BACKUP
+#
+max-time: 600
+cmd: atrt-testBackup
+args: -n BackupOne T1 T6 T3 I3
+
+#
+#
+# SYSTEM RESTARTS
+#
+max-time: 1500
+cmd: testSystemRestart
+args: -n SR3 T6
+
+max-time: 1500
+cmd: testSystemRestart
+args: -n SR4 T6
+
+#
+# NODE RESTARTS
+#
+max-time: 2500
+cmd: testNodeRestart
+args: -n NoLoad T6
+
+max-time: 2500
+cmd: testNodeRestart
+args: -n MixedPkRead T6 T8 T13
+
+max-time: 2500
+cmd: testNodeRestart
+args: -l 1 -n MixedPkReadPkUpdate
+
+max-time: 2500
+cmd: testNodeRestart
+args: -l 1 -n MixedReadUpdateScan
+
+max-time: 2500
+cmd: testNodeRestart
+args: -n CommittedRead T1
+
+max-time: 2500
+cmd: testNodeRestart
+args: -n FullDb T6 T13
+
+max-time: 2500
+cmd: testNodeRestart
+args: -n RestartRandomNode T6 T13
+
+max-time: 2500
+cmd: testNodeRestart
+args: -n RestartRandomNodeError T6 T13
+
+max-time: 2500
+cmd: testNodeRestart
+args: -n RestartRandomNodeInitial T6 T13
+
+max-time: 3600
+cmd: testNodeRestart
+args: -l 1 -n RestartNFDuringNR T6 T13
+
+max-time: 2500
+cmd: testNodeRestart
+args: -n RestartMasterNodeError T6 T8 T13
+
+max-time: 3600
+cmd: testNodeRestart
+args: -n RestartNodeDuringLCP T6
+
+max-time: 2500
+cmd: testNodeRestart
+args: -n TwoNodeFailure T6 T8 T13
+
+max-time: 2500
+cmd: testNodeRestart
+args: -n TwoMasterNodeFailure T6 T8 T13
+
+max-time: 2500
+cmd: testNodeRestart
+args: -n FiftyPercentFail T6 T8 T13
+
+max-time: 2500
+cmd: testNodeRestart
+args: -n RestartAllNodes T6 T8 T13
+
+max-time: 2500
+cmd: testNodeRestart
+args: -n RestartAllNodesAbort T6 T8 T13
+
+max-time: 2500
+cmd: testNodeRestart
+args: -n RestartAllNodesError9999 T6 T8 T13
+
+max-time: 2500
+cmd: testNodeRestart
+args: -n FiftyPercentStopAndWait T6 T8 T13
+
+#max-time: 500
+#cmd: testNodeRestart
+#args: -n StopOnError T1
+#
+#
+max-time: 2500
+cmd: testIndex
+args: -n NFNR1 T6 T13
+
+max-time: 2500
+cmd: testIndex
+args: -n NFNR2 T6 T13
+
+max-time: 2500
+cmd: testIndex
+args: -n NFNR3 T6 T13
+
+max-time: 2500
+cmd: testIndex
+args: -n BuildDuring T6
+
+max-time: 2500
+cmd: testIndex
+args: -l 2 -n SR1 T6 T13
+
+max-time: 2500
+cmd: testIndex
+args: -n NFNR1_O T6 T13
+
+max-time: 2500
+cmd: testIndex
+args: -n NFNR2_O T6 T13
+
+max-time: 2500
+cmd: testIndex
+args: -n NFNR3_O T6 T13
+
+max-time: 2500
+cmd: testIndex
+args: -n BuildDuring_O T6
+
+max-time: 2500
+cmd: testIndex
+args: -l 2 -n SR1_O T6 T13
+
+max-time: 500
+cmd: testIndex
+args: -n MixedTransaction T1
+
+max-time: 2500
+cmd: testDict
+args: -n NF1 T1 T6 T13
+
+#
+max-time: 1500
+cmd: testSystemRestart
+args: -l 1 -n SR6 T1
+
+max-time: 1500
+cmd: testSystemRestart
+args: -l 1 -n SR7 T1
+
+max-time: 1500
+cmd: testSystemRestart
+args: -l 1 -n SR8 T1
+
+max-time: 1500
+cmd: testSystemRestart
+args: -l 1 -n SR9 T1
+
+#
+max-time: 2500
+cmd: test_event
+args: -n BasicEventOperation T1 T6
+
+#
+#
+# SYSTEM RESTARTS
+#
+max-time: 1500
+cmd: testSystemRestart
+args: -n SR1 T1
+
+max-time: 1500
+cmd: testSystemRestart
+args: -n SR1 T6
+
+max-time: 1500
+cmd: testSystemRestart
+args: -n SR1 T7
+
+max-time: 1500
+cmd: testSystemRestart
+args: -n SR1 T8
+
+max-time: 1500
+cmd: testSystemRestart
+args: -n SR2 T1
+
+max-time: 1500
+cmd: testSystemRestart
+args: -n SR2 T6
+
+max-time: 1500
+cmd: testSystemRestart
+args: -n SR2 T7
+
+max-time: 1500
+cmd: testSystemRestart
+args: -n SR_UNDO T1
+
+max-time: 1500
+cmd: testSystemRestart
+args: -n SR_UNDO T6
+
+max-time: 1500
+cmd: testSystemRestart
+args: -n SR_UNDO T7
+
+max-time: 1500
+cmd: testSystemRestart
+args: -n SR_UNDO T8
+
+# OLD FLEX
+max-time: 500
+cmd: flexBench
+args: -c 25 -t 10
+
+max-time: 500
+cmd: flexHammer
+args: -r 5 -t 32
+
diff --git a/ndb/test/run-test/Makefile.am b/ndb/test/run-test/Makefile.am
index cf08542ae97..60d64a7697f 100644
--- a/ndb/test/run-test/Makefile.am
+++ b/ndb/test/run-test/Makefile.am
@@ -6,7 +6,7 @@ include $(top_srcdir)/ndb/config/type_util.mk.am
include $(top_srcdir)/ndb/config/type_mgmapiclient.mk.am
test_PROGRAMS = atrt
-test_DATA=daily-basic-tests.txt daily-devel-tests.txt \
+test_DATA=daily-basic-tests.txt daily-devel-tests.txt 16node-tests.txt \
conf-daily-basic-ndbmaster.txt \
conf-daily-basic-shark.txt \
conf-daily-devel-ndbmaster.txt \
diff --git a/ndb/test/run-test/basic.txt b/ndb/test/run-test/basic.txt
index a952320db08..ec9e21359e5 100644
--- a/ndb/test/run-test/basic.txt
+++ b/ndb/test/run-test/basic.txt
@@ -374,7 +374,7 @@ args: -n FragmentTypeSingle T1
max-time: 1500
cmd: testDict
-args: -n FragmentTypeAll T1 T6 T7 T8
+args: -n FragmentTypeAllSmall T1 T6 T7 T8
max-time: 1500
cmd: testDict
diff --git a/ndb/test/run-test/daily-basic-tests.txt b/ndb/test/run-test/daily-basic-tests.txt
index fc04664564f..8b44594a9b5 100644
--- a/ndb/test/run-test/daily-basic-tests.txt
+++ b/ndb/test/run-test/daily-basic-tests.txt
@@ -75,7 +75,7 @@ max-time: 500
cmd: testBasic
args: -n PkInsert
-max-time: 600
+max-time: 660
cmd: testBasic
args: -n UpdateAndRead
@@ -207,34 +207,9 @@ max-time: 500
cmd: testBasic
args: -n MassiveRollback2 T1 T6 T13
-#-m 500 1: testBasic -n ReadConsistency T6
-max-time: 500
-cmd: testTimeout
-args: -n DontTimeoutTransaction T1
-
-max-time: 500
-cmd: testTimeout
-args: -n DontTimeoutTransaction5 T1
-
-max-time: 500
-cmd: testTimeout
-args: -n TimeoutTransaction T1
-
-max-time: 500
-cmd: testTimeout
-args: -n TimeoutTransaction5 T1
-
-max-time: 500
-cmd: testTimeout
-args: -n BuddyTransNoTimeout T1
-
max-time: 500
cmd: testTimeout
-args: -n BuddyTransNoTimeout5 T1
-
-max-time: 500
-cmd: testTimeout
-args: -n TimeoutRandTransaction T1
+args: T1
# SCAN TESTS
#
@@ -434,14 +409,9 @@ max-time: 500
cmd: testScan
args: -l 100 -n Scan-bug8262 T7
-# OLD FLEX
max-time: 500
-cmd: flexBench
-args: -c 25 -t 10
-
-max-time: 500
-cmd: flexHammer
-args: -r 5 -t 32
+cmd: testScan
+args: -n ScanParallelism
#
# DICT TESTS
@@ -475,7 +445,7 @@ args: -n FragmentTypeSingle T1
max-time: 1500
cmd: testDict
-args: -n FragmentTypeAll T1 T6 T7 T8
+args: -n FragmentTypeAllSmall T1 T6 T7 T8
max-time: 1500
cmd: testDict
@@ -554,6 +524,10 @@ max-time: 500
cmd: testNdbApi
args: -n Bug_11133 T1
+max-time: 500
+cmd: testNdbApi
+args: -n Scan_4006 T1
+
#max-time: 500
#cmd: testInterpreter
#args: T1
@@ -562,7 +536,7 @@ max-time: 150000
cmd: testOperations
args:
-max-time: 150000
+max-time: 15000
cmd: testTransactions
args:
@@ -574,10 +548,18 @@ max-time: 600
cmd: testBlobs
args:
-max-time: 2500
+max-time: 5000
cmd: testOIBasic
args:
+max-time: 2500
+cmd: testBitfield
+args:
+
+max-time: 2500
+cmd: testPartitioning
+args:
+
#
#
# SYSTEM RESTARTS
@@ -625,3 +607,42 @@ args: -n SR_UNDO T7
max-time: 1500
cmd: testSystemRestart
args: -n SR_UNDO T8
+
+# OLD FLEX
+max-time: 500
+cmd: flexBench
+args: -c 25 -t 10
+
+max-time: 500
+cmd: flexHammer
+args: -r 5 -t 32
+
+max-time: 300
+cmd: DbCreate
+args:
+
+max-time: 180
+cmd: DbAsyncGenerator
+args: -time 60 -p 1
+type: bench
+
+max-time: 180
+cmd: DbAsyncGenerator
+args: -time 60 -p 25
+type: bench
+
+max-time: 180
+cmd: DbAsyncGenerator
+args: -time 60 -p 100
+type: bench
+
+max-time: 180
+cmd: DbAsyncGenerator
+args: -time 60 -p 200
+type: bench
+
+max-time: 180
+cmd: DbAsyncGenerator
+args: -time 60 -p 1 -proc 25
+type: bench
+
diff --git a/ndb/test/run-test/daily-devel-tests.txt b/ndb/test/run-test/daily-devel-tests.txt
index 9812ec2ceaa..20f54e031e5 100644
--- a/ndb/test/run-test/daily-devel-tests.txt
+++ b/ndb/test/run-test/daily-devel-tests.txt
@@ -206,34 +206,5 @@ args: -l 1 -n SR9 T1
#
max-time: 2500
cmd: test_event
-args: -n BasicEventOperation T1 T6
-
-max-time: 300
-cmd: DbCreate
-args:
-
-max-time: 180
-cmd: DbAsyncGenerator
-args: -time 60 -p 1
-type: bench
-
-max-time: 180
-cmd: DbAsyncGenerator
-args: -time 60 -p 25
-type: bench
-
-max-time: 180
-cmd: DbAsyncGenerator
-args: -time 60 -p 100
-type: bench
-
-max-time: 180
-cmd: DbAsyncGenerator
-args: -time 60 -p 200
-type: bench
-
-max-time: 180
-cmd: DbAsyncGenerator
-args: -time 60 -p 1 -proc 25
-type: bench
+args: -n EventOperationApplier
diff --git a/ndb/test/src/HugoAsynchTransactions.cpp b/ndb/test/src/HugoAsynchTransactions.cpp
index f75293f5a14..5d2eb451c0b 100644
--- a/ndb/test/src/HugoAsynchTransactions.cpp
+++ b/ndb/test/src/HugoAsynchTransactions.cpp
@@ -17,11 +17,12 @@
#include <NdbSleep.h>
#include <HugoAsynchTransactions.hpp>
-HugoAsynchTransactions::HugoAsynchTransactions(const NdbDictionary::Table& _tab):
- HugoTransactions(_tab),
- transactionsCompleted(0),
- numTransactions(0),
- transactions(NULL){
+HugoAsynchTransactions::HugoAsynchTransactions(const NdbDictionary::Table& _t)
+ : HugoTransactions(_t),
+ transactionsCompleted(0),
+ numTransactions(0),
+ transactions(NULL)
+{
}
HugoAsynchTransactions::~HugoAsynchTransactions(){
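The HugoCalculator.cpp rewrite below replaces the old fixed character pattern with values derived from a small linear congruential generator seeded by (record, attrib, updates), so column data is pseudo-random yet exactly reproducible when a row is later verified. A minimal sketch of that idea; the multiplier and addend match myRand() in the patch, everything else is illustrative:

#include <cstdint>
#include <cstdio>
#include <cstring>

// Same multiplier/addend as myRand() in the patch below.
static uint32_t my_rand(uint64_t* seed)
{
  const uint64_t mul = 0x5deece66dULL;
  const uint64_t add = 0xb;
  *seed = *seed * mul + add;
  return (uint32_t)(*seed >> 1);
}

// Fill len bytes deterministically from (record, attrib, updates);
// the same triple always yields the same bytes.
static void calc_value(int record, int attrib, int updates, char* buf, int len)
{
  uint64_t seed = (uint64_t)(record + attrib + updates);
  uint32_t val = my_rand(&seed);
  int pos = 0;
  while (len > 4) {                    // 4 bytes per LCG step
    std::memcpy(buf + pos, &val, 4);
    pos += 4;
    len -= 4;
    val = my_rand(&seed);
  }
  std::memcpy(buf + pos, &val, len);   // trailing 1..4 bytes
}

int main()
{
  char a[12], b[12];
  calc_value(7, 2, 3, a, sizeof(a));
  calc_value(7, 2, 3, b, sizeof(b));
  std::printf("reproducible: %s\n",
              std::memcmp(a, b, sizeof(a)) == 0 ? "yes" : "no");
  return 0;
}

Because verifyRowValues() can recompute the expected bytes from the id and update columns alone, no reference copy of the generated data has to be kept.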
diff --git a/ndb/test/src/HugoCalculator.cpp b/ndb/test/src/HugoCalculator.cpp
index 86ff76831d7..8e01f6442bb 100644
--- a/ndb/test/src/HugoCalculator.cpp
+++ b/ndb/test/src/HugoCalculator.cpp
@@ -14,8 +14,26 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
+#include <ndb_global.h>
#include "HugoCalculator.hpp"
#include <NDBT.hpp>
+#include <Base64.hpp>
+
+static
+Uint32
+myRand(Uint64 * seed)
+{
+ const Uint64 mul= 0x5deece66dull;
+ const Uint64 add= 0xb;
+ Uint64 loc_result = *seed * mul + add;
+
+ * seed= loc_result;
+ return loc_result >> 1;
+}
+
+static char base64_table[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+ "abcdefghijklmnopqrstuvwxyz"
+ "0123456789+/";
/* *************************************************************
* HugoCalculator
@@ -40,7 +58,8 @@ HugoCalculator::HugoCalculator(const NdbDictionary::Table& tab) : m_tab(tab) {
// The "number of updates" column for this table is found in the last column
for (i=m_tab.getNoOfColumns()-1; i>=0; i--){
const NdbDictionary::Column* attr = m_tab.getColumn(i);
- if (attr->getType() == NdbDictionary::Column::Unsigned){
+ if (attr->getType() == NdbDictionary::Column::Unsigned &&
+ !attr->getPrimaryKey()){
m_updatesCol = i;
break;
}
@@ -57,22 +76,11 @@ Int32
HugoCalculator::calcValue(int record,
int attrib,
int updates) const {
- const NdbDictionary::Column* attr = m_tab.getColumn(attrib);
- // If this is the "id" column
- if (attrib == m_idCol)
- return record;
-
- // If this is the update column
- if (attrib == m_updatesCol)
- return updates;
-
-
- Int32 val;
- if (attr->getPrimaryKey())
- val = record + attrib;
- else
- val = record + attrib + updates;
- return val;
+
+ Int32 i;
+ calcValue(record, attrib, updates, (char*)&i, sizeof(i));
+
+ return i;
}
#if 0
HugoCalculator::U_Int32 calcValue(int record, int attrib, int updates) const;
@@ -81,49 +89,121 @@ HugoCalculator::Int64 calcValue(int record, int attrib, int updates) const;
HugoCalculator::float calcValue(int record, int attrib, int updates) const;
HugoCalculator::double calcValue(int record, int attrib, int updates) const;
#endif
+
const char*
HugoCalculator::calcValue(int record,
int attrib,
int updates,
- char* buf) const {
- const char a[26] = {"UAWBORCTDPEFQGNYHISJMKXLZ"};
+ char* buf,
+ int len) const {
+ Uint64 seed;
const NdbDictionary::Column* attr = m_tab.getColumn(attrib);
- int val = calcValue(record, attrib, updates);
-
- int len;
- if (attr->getPrimaryKey()){
- // Create a string where val is printed as chars in the beginning
- // of the string, then fill with other chars
- // The string length is set to the same size as the attribute
- len = attr->getLength();
- BaseString::snprintf(buf, len, "%d", val);
- for(int i=strlen(buf); i < len; i++)
- buf[i] = a[((val^i)%25)];
- } else{
+ Uint32 val;
+ do
+ {
+ if (attrib == m_idCol)
+ {
+ val= record;
+ memcpy(buf, &val, 4);
+ return buf;
+ }
- // Fill buf with some pattern so that we can detect
- // anomalies in the area that we don't fill with chars
- int i;
- for (i = 0; i<attr->getLength(); i++)
- buf[i] = ((i+2) % 255);
+ // If this is the update column
+ if (attrib == m_updatesCol)
+ {
+ val= updates;
+ memcpy(buf, &val, 4);
+ return buf;
+ }
- // Calculate length of the string to create. We want the string
- // length to be varied between max and min of this attribute.
-
- len = val % (attr->getLength() + 1);
- // If len == 0 return NULL if this is a nullable attribute
- if (len == 0){
- if(attr->getNullable() == true)
- return NULL;
- else
- len++;
+ if (attr->getPrimaryKey())
+ {
+ seed = record + attrib;
+ }
+ else
+ {
+ seed = record + attrib + updates;
+ }
+ } while (0);
+
+ val = myRand(&seed);
+
+ if(attr->getNullable() && (((val >> 16) & 255) > 220))
+ return NULL;
+
+ int pos= 0;
+ switch(attr->getType()){
+ case NdbDictionary::Column::Tinyint:
+ case NdbDictionary::Column::Tinyunsigned:
+ case NdbDictionary::Column::Smallint:
+ case NdbDictionary::Column::Smallunsigned:
+ case NdbDictionary::Column::Mediumint:
+ case NdbDictionary::Column::Mediumunsigned:
+ case NdbDictionary::Column::Int:
+ case NdbDictionary::Column::Unsigned:
+ case NdbDictionary::Column::Bigint:
+ case NdbDictionary::Column::Bigunsigned:
+ case NdbDictionary::Column::Float:
+ case NdbDictionary::Column::Double:
+ case NdbDictionary::Column::Olddecimal:
+ case NdbDictionary::Column::Olddecimalunsigned:
+ case NdbDictionary::Column::Decimal:
+ case NdbDictionary::Column::Decimalunsigned:
+ case NdbDictionary::Column::Binary:
+ case NdbDictionary::Column::Datetime:
+ case NdbDictionary::Column::Time:
+ case NdbDictionary::Column::Date:
+ case NdbDictionary::Column::Bit:
+ while (len > 4)
+ {
+ memcpy(buf+pos, &val, 4);
+ pos += 4;
+ len -= 4;
+ val= myRand(&seed);
+ }
+
+ memcpy(buf+pos, &val, len);
+ if(attr->getType() == NdbDictionary::Column::Bit)
+ {
+ Uint32 bits= attr->getLength();
+ Uint32 tmp = bits >> 5;
+ Uint32 size = bits & 31;
+ ((Uint32*)buf)[tmp] &= ((1 << size) - 1);
+ }
+ break;
+ case NdbDictionary::Column::Varbinary:
+ case NdbDictionary::Column::Varchar:
+ case NdbDictionary::Column::Text:
+ case NdbDictionary::Column::Char:
+ case NdbDictionary::Column::Longvarchar:
+ case NdbDictionary::Column::Longvarbinary:
+ {
+ char* ptr= (char*)&val;
+ while(len >= 4)
+ {
+ len -= 4;
+ buf[pos++] = base64_table[ptr[0] & 0x3f];
+ buf[pos++] = base64_table[ptr[1] & 0x3f];
+ buf[pos++] = base64_table[ptr[2] & 0x3f];
+ buf[pos++] = base64_table[ptr[3] & 0x3f];
+ val= myRand(&seed);
}
- for(i=0; i < len; i++)
- buf[i] = a[((val^i)%25)];
- buf[len] = 0;
+
+ for(; len; len--, pos++)
+ buf[pos] = base64_table[ptr[len] & 0x3f];
+
+ pos--;
+ break;
+ }
+ case NdbDictionary::Column::Blob:
+ case NdbDictionary::Column::Undefined:
+ abort();
+ break;
}
+
+
return buf;
-}
+}
int
HugoCalculator::verifyRowValues(NDBT_ResultRow* const pRow) const{
@@ -131,104 +211,48 @@ HugoCalculator::verifyRowValues(NDBT_ResultRow* const pRow) const{
id = pRow->attributeStore(m_idCol)->u_32_value();
updates = pRow->attributeStore(m_updatesCol)->u_32_value();
-
+ int result = 0;
+
// Check the values of each column
for (int i = 0; i<m_tab.getNoOfColumns(); i++){
if (i != m_updatesCol && id != m_idCol) {
-
const NdbDictionary::Column* attr = m_tab.getColumn(i);
- switch (attr->getType()){
- case NdbDictionary::Column::Char:
- case NdbDictionary::Column::Varchar:
- case NdbDictionary::Column::Binary:
- case NdbDictionary::Column::Varbinary:{
- int result = 0;
- char* buf = new char[attr->getLength()+1];
- const char* res = calcValue(id, i, updates, buf);
- if (res == NULL){
- if (!pRow->attributeStore(i)->isNULL()){
- g_err << "|- NULL ERROR: expected a NULL but the column was not null" << endl;
- g_err << "|- The row: \"" << (*pRow) << "\"" << endl;
- result = -1;
- }
- } else{
- if (memcmp(res, pRow->attributeStore(i)->aRef(), pRow->attributeStore(i)->arraySize()) != 0){
- // if (memcmp(res, pRow->attributeStore(i)->aRef(), pRow->attributeStore(i)->getLength()) != 0){
- g_err << "arraySize(): "
- << pRow->attributeStore(i)->arraySize()
- << ", NdbDict::Column::getLength(): " << attr->getLength()
- << endl;
- const char* buf2 = pRow->attributeStore(i)->aRef();
- for (Uint32 j = 0; j < pRow->attributeStore(i)->arraySize(); j++)
+ Uint32 len = attr->getSizeInBytes();
+ char buf[8000];
+ const char* res = calcValue(id, i, updates, buf, len);
+ if (res == NULL){
+ if (!pRow->attributeStore(i)->isNULL()){
+ g_err << "|- NULL ERROR: expected a NULL but the column was not null" << endl;
+ g_err << "|- The row: \"" << (*pRow) << "\"" << endl;
+ result = -1;
+ }
+ } else{
+ if (memcmp(res, pRow->attributeStore(i)->aRef(), len) != 0){
+ g_err << "Column: " << attr->getName() << endl;
+ const char* buf2 = pRow->attributeStore(i)->aRef();
+ for (Uint32 j = 0; j < len; j++)
+ {
+ g_err << j << ":" << hex << (Uint32)(Uint8)buf[j] << "[" << hex << (Uint32)(Uint8)buf2[j] << "]";
+ if (buf[j] != buf2[j])
{
- g_err << j << ":" << buf[j] << "[" << buf2[j] << "]";
- if (buf[j] != buf2[j])
- {
- g_err << "==>Match failed!";
- }
- g_err << endl;
+ g_err << "==>Match failed!";
}
g_err << endl;
- g_err << "|- Invalid data found in attribute " << i << ": \""
- << pRow->attributeStore(i)->aRef()
- << "\" != \"" << res << "\"" << endl
- << "Length of expected=" << (unsigned)strlen(res) << endl
- << "Lenght of read="
- << (unsigned)strlen(pRow->attributeStore(i)->aRef()) << endl;
- g_err << "|- The row: \"" << (* pRow) << "\"" << endl;
- result = -1;
}
- }
- delete []buf;
- if (result != 0)
- return result;
- }
- break;
- case NdbDictionary::Column::Int:
- case NdbDictionary::Column::Unsigned:{
- Int32 cval = calcValue(id, i, updates);
- Int32 val = pRow->attributeStore(i)->int32_value();
- if (val != cval){
- g_err << "|- Invalid data found: \"" << val << "\" != \""
- << cval << "\"" << endl;
- g_err << "|- The row: \"" << (* pRow) << "\"" << endl;
- return -1;
- }
- break;
- }
- case NdbDictionary::Column::Bigint:
- case NdbDictionary::Column::Bigunsigned:{
- Uint64 cval = calcValue(id, i, updates);
- Uint64 val = pRow->attributeStore(i)->u_64_value();
- if (val != cval){
- g_err << "|- Invalid data found: \"" << val << "\" != \""
- << cval << "\""
- << endl;
+ g_err << endl;
+ g_err << "|- Invalid data found in attribute " << i << ": \""
+ << pRow->attributeStore(i)->aRef()
+ << "\" != \"" << res << "\"" << endl
+ << "Length of expected=" << (unsigned)strlen(res) << endl
+ << "Lenght of read="
+ << (unsigned)strlen(pRow->attributeStore(i)->aRef()) << endl;
g_err << "|- The row: \"" << (* pRow) << "\"" << endl;
- return -1;
+ result = -1;
}
}
- break;
- case NdbDictionary::Column::Float:{
- float cval = calcValue(id, i, updates);
- float val = pRow->attributeStore(i)->float_value();
- if (val != cval){
- g_err << "|- Invalid data found: \"" << val << "\" != \""
- << cval << "\"" << endl;
- g_err << "|- The row: \"" << (* pRow) << "\"" << endl;
- return -1;
- }
- }
- break;
- case NdbDictionary::Column::Undefined:
- default:
- assert(false);
- break;
- }
-
}
}
- return 0;
+ return result;
}
int
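The rewritten HugoCalculator::calcValue() above derives every byte deterministically from (record, attrib, updates): the id and updates columns carry those values directly, nullable columns come back NULL for roughly 35 of every 256 generated values, fixed-size columns are filled with successive 32-bit words from myRand() (with the trailing partial word of a Bit column masked down to the declared width), and character columns map each random byte through base64_table so the contents stay printable. A minimal sketch of the pattern, assuming myRand() is a simple 64-bit mixing generator and base64_table is the usual 64-character base64 alphabet (both live elsewhere in the test library; the definitions below are stand-ins):

  // Sketch only, not the library code.
  #include <cstdint>
  #include <cstring>

  static const char base64_table[] =
      "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";

  static uint32_t myRand(uint64_t* seed)      // assumed: any deterministic mixer
  {
    *seed = *seed * 6364136223846793005ULL + 1442695040888963407ULL;
    return (uint32_t)(*seed >> 32);
  }

  // Fixed-size (numeric/binary/bit) column: fill with successive 32-bit words.
  static void fillFixed(char* buf, int len,
                        int record, int attrib, int updates, bool isPk)
  {
    uint64_t seed = isPk ? (uint64_t)record + attrib            // PK bytes must
                         : (uint64_t)record + attrib + updates; // survive updates
    uint32_t val = myRand(&seed);
    int pos = 0;
    while (len > 4) {
      memcpy(buf + pos, &val, 4);
      pos += 4;  len -= 4;
      val = myRand(&seed);
    }
    memcpy(buf + pos, &val, len);
    // A Bit column additionally masks the last partial word, e.g. for a
    // 23-bit column: ((uint32_t*)buf)[0] &= (1u << 23) - 1;
  }

  // Char/varchar column: same seeding, every byte mapped into base64_table.
  static void fillChar(char* buf, int len, int record, int attrib, int updates)
  {
    uint64_t seed = (uint64_t)record + attrib + updates;
    uint32_t val = myRand(&seed);
    for (int pos = 0; pos < len; pos++) {
      if (pos && (pos & 3) == 0)              // fresh random word every 4 bytes
        val = myRand(&seed);
      buf[pos] = base64_table[((const char*)&val)[pos & 3] & 0x3f];
    }
  }

Because the same seed always reproduces the same bytes, the slimmed-down verifyRowValues() above can simply recompute the expected buffer and memcmp it against the value that was read back.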
diff --git a/ndb/test/src/HugoOperations.cpp b/ndb/test/src/HugoOperations.cpp
index 6b1a1ca395b..f2e54971766 100644
--- a/ndb/test/src/HugoOperations.cpp
+++ b/ndb/test/src/HugoOperations.cpp
@@ -31,6 +31,19 @@ int HugoOperations::startTransaction(Ndb* pNdb){
return NDBT_OK;
}
+int HugoOperations::setTransaction(NdbTransaction* new_trans){
+
+ if (pTrans != NULL){
+ ndbout << "HugoOperations::setTransaction, pTrans != NULL" << endl;
+ return NDBT_FAILED;
+ }
+ pTrans = new_trans;
+ if (pTrans == NULL) {
+ return NDBT_FAILED;
+ }
+ return NDBT_OK;
+}
+
void
HugoOperations::setTransactionId(Uint64 id){
if (pTrans != NULL){
@@ -40,11 +53,7 @@ HugoOperations::setTransactionId(Uint64 id){
int HugoOperations::closeTransaction(Ndb* pNdb){
- if (pTrans != NULL){
- pNdb->closeTransaction(pTrans);
- pTrans = NULL;
- }
- pTrans = NULL;
+ UtilTransactions::closeTransaction(pNdb);
m_result_sets.clear();
m_executed_result_sets.clear();
@@ -63,8 +72,16 @@ int HugoOperations::pkReadRecord(Ndb* pNdb,
int a;
allocRows(numRecords);
int check;
+
+ NdbOperation* pOp = 0;
+ pIndexScanOp = 0;
+
for(int r=0; r < numRecords; r++){
- NdbOperation* pOp = pTrans->getNdbOperation(tab.getName());
+
+ if(pOp == 0)
+ {
+ pOp = getOperation(pTrans, NdbOperation::ReadRequest);
+ }
if (pOp == NULL) {
ERR(pTrans->getNdbError());
return NDBT_FAILED;
@@ -73,13 +90,16 @@ int HugoOperations::pkReadRecord(Ndb* pNdb,
rand_lock_mode:
switch(lm){
case NdbOperation::LM_Read:
- check = pOp->readTuple();
- break;
case NdbOperation::LM_Exclusive:
- check = pOp->readTupleExclusive();
- break;
case NdbOperation::LM_CommittedRead:
- check = pOp->dirtyRead();
+ if(idx && idx->getType() == NdbDictionary::Index::OrderedIndex &&
+ pIndexScanOp == 0)
+ {
+ pIndexScanOp = ((NdbIndexScanOperation*)pOp);
+ check = pIndexScanOp->readTuples(lm);
+ }
+ else
+ check = pOp->readTuple(lm);
break;
default:
lm = (NdbOperation::LockMode)((rand() >> 16) & 3);
@@ -100,15 +120,22 @@ rand_lock_mode:
}
}
}
+
+ if(pIndexScanOp)
+ pIndexScanOp->end_of_bound(r);
- // Define attributes to read
- for(a = 0; a<tab.getNoOfColumns(); a++){
- if((rows[r]->attributeStore(a) =
- pOp->getValue(tab.getColumn(a)->getName())) == 0) {
- ERR(pTrans->getNdbError());
- return NDBT_FAILED;
- }
- }
+ if(r == 0 || pIndexScanOp == 0)
+ {
+ // Define attributes to read
+ for(a = 0; a<tab.getNoOfColumns(); a++){
+ if((rows[r]->attributeStore(a) =
+ pOp->getValue(tab.getColumn(a)->getName())) == 0) {
+ ERR(pTrans->getNdbError());
+ return NDBT_FAILED;
+ }
+ }
+ }
+ pOp = pIndexScanOp;
}
return NDBT_OK;
}
@@ -121,7 +148,7 @@ int HugoOperations::pkUpdateRecord(Ndb* pNdb,
allocRows(numRecords);
int check;
for(int r=0; r < numRecords; r++){
- NdbOperation* pOp = pTrans->getNdbOperation(tab.getName());
+ NdbOperation* pOp = getOperation(pTrans, NdbOperation::UpdateRequest);
if (pOp == NULL) {
ERR(pTrans->getNdbError());
return NDBT_FAILED;
@@ -133,26 +160,37 @@ int HugoOperations::pkUpdateRecord(Ndb* pNdb,
return NDBT_FAILED;
}
- // Define primary keys
- for(a = 0; a<tab.getNoOfColumns(); a++){
- if (tab.getColumn(a)->getPrimaryKey() == true){
- if(equalForAttr(pOp, a, r+recordNo) != 0){
- ERR(pTrans->getNdbError());
- return NDBT_FAILED;
- }
+ if(setValues(pOp, r+recordNo, updatesValue) != NDBT_OK)
+ {
+ return NDBT_FAILED;
+ }
+ }
+ return NDBT_OK;
+}
+
+int
+HugoOperations::setValues(NdbOperation* pOp, int rowId, int updateId)
+{
+ // Define primary keys
+ int a;
+ for(a = 0; a<tab.getNoOfColumns(); a++){
+ if (tab.getColumn(a)->getPrimaryKey() == true){
+ if(equalForAttr(pOp, a, rowId) != 0){
+ ERR(pTrans->getNdbError());
+ return NDBT_FAILED;
}
}
-
- // Define attributes to update
- for(a = 0; a<tab.getNoOfColumns(); a++){
- if (tab.getColumn(a)->getPrimaryKey() == false){
- if(setValueForAttr(pOp, a, recordNo+r, updatesValue ) != 0){
- ERR(pTrans->getNdbError());
- return NDBT_FAILED;
- }
+ }
+
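+  // Set values for the non-primary-key attributes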
+ for(a = 0; a<tab.getNoOfColumns(); a++){
+ if (tab.getColumn(a)->getPrimaryKey() == false){
+ if(setValueForAttr(pOp, a, rowId, updateId ) != 0){
+ ERR(pTrans->getNdbError());
+ return NDBT_FAILED;
}
- }
+ }
}
+
return NDBT_OK;
}
@@ -163,7 +201,7 @@ int HugoOperations::pkInsertRecord(Ndb* pNdb,
int a, check;
for(int r=0; r < numRecords; r++){
- NdbOperation* pOp = pTrans->getNdbOperation(tab.getName());
+ NdbOperation* pOp = getOperation(pTrans, NdbOperation::InsertRequest);
if (pOp == NULL) {
ERR(pTrans->getNdbError());
return NDBT_FAILED;
@@ -175,25 +213,10 @@ int HugoOperations::pkInsertRecord(Ndb* pNdb,
return NDBT_FAILED;
}
- // Define primary keys
- for(a = 0; a<tab.getNoOfColumns(); a++){
- if (tab.getColumn(a)->getPrimaryKey() == true){
- if(equalForAttr(pOp, a, r+recordNo) != 0){
- ERR(pTrans->getNdbError());
- return NDBT_FAILED;
- }
- }
+ if(setValues(pOp, r+recordNo, updatesValue) != NDBT_OK)
+ {
+ return NDBT_FAILED;
}
-
- // Define attributes to update
- for(a = 0; a<tab.getNoOfColumns(); a++){
- if (tab.getColumn(a)->getPrimaryKey() == false){
- if(setValueForAttr(pOp, a, recordNo+r, updatesValue ) != 0){
- ERR(pTrans->getNdbError());
- return NDBT_FAILED;
- }
- }
- }
}
return NDBT_OK;
}
@@ -240,9 +263,9 @@ int HugoOperations::pkWriteRecord(Ndb* pNdb,
return NDBT_OK;
}
-int HugoOperations::pkDeleteRecord(Ndb* pNdb,
- int recordNo,
- int numRecords){
+int HugoOperations::pkWritePartialRecord(Ndb* pNdb,
+ int recordNo,
+ int numRecords){
int a, check;
for(int r=0; r < numRecords; r++){
@@ -252,7 +275,7 @@ int HugoOperations::pkDeleteRecord(Ndb* pNdb,
return NDBT_FAILED;
}
- check = pOp->deleteTuple();
+ check = pOp->writeTuple();
if( check == -1 ) {
ERR(pTrans->getNdbError());
return NDBT_FAILED;
@@ -270,65 +293,37 @@ int HugoOperations::pkDeleteRecord(Ndb* pNdb,
}
return NDBT_OK;
}
-#if 0
-NdbResultSet*
-HugoOperations::scanReadRecords(Ndb* pNdb, ScanLock lock){
-
- NDBT_ResultRow * m_tmpRow = new NDBT_ResultRow(tab);
-
- NdbScanOperation* pOp = pTrans->getNdbScanOperation(tab.getName());
- if (pOp == NULL) {
- ERR(pTrans->getNdbError());
- return 0;
- }
-
- int check = 0;
- NdbResultSet * rs = 0;
- switch(lock){
- case SL_ReadHold:
- rs = pOp->readTuples(NdbScanOperation::LM_Read, 1, 1);
- break;
- case SL_Exclusive:
- rs = pOp->readTuples(NdbScanOperation::LM_Exclusive, 1, 1);
- break;
- case SL_Read:
- default:
- rs = pOp->readTuples(NdbScanOperation::LM_Dirty, 1, 1);
- }
-
- if( rs == 0) {
- ERR(pTrans->getNdbError());
- return 0;
- }
-
- check = pOp->interpret_exit_ok();
- if( check == -1 ) {
- ERR(pTrans->getNdbError());
- return 0;
- }
+int HugoOperations::pkDeleteRecord(Ndb* pNdb,
+ int recordNo,
+ int numRecords){
- // Define attributes to read
- for(int a = 0; a<tab.getNoOfColumns(); a++){
- if((m_tmpRow->attributeStore(a) =
- pOp->getValue(tab.getColumn(a)->getName())) == 0) {
+ int a, check;
+ for(int r=0; r < numRecords; r++){
+ NdbOperation* pOp = getOperation(pTrans, NdbOperation::DeleteRequest);
+ if (pOp == NULL) {
ERR(pTrans->getNdbError());
- return 0;
+ return NDBT_FAILED;
+ }
+
+ check = pOp->deleteTuple();
+ if( check == -1 ) {
+ ERR(pTrans->getNdbError());
+ return NDBT_FAILED;
+ }
+
+ // Define primary keys
+ for(a = 0; a<tab.getNoOfColumns(); a++){
+ if (tab.getColumn(a)->getPrimaryKey() == true){
+ if(equalForAttr(pOp, a, r+recordNo) != 0){
+ ERR(pTrans->getNdbError());
+ return NDBT_FAILED;
+ }
+ }
}
- }
- return rs;
-}
-
-int
-HugoOperations::readTuples(NdbResultSet* rs){
- int res = 0;
- while((res = rs->nextResult()) == 0){
}
- if(res != 1)
- return NDBT_FAILED;
return NDBT_OK;
}
-#endif
int HugoOperations::execute_Commit(Ndb* pNdb,
AbortOption eao){
@@ -353,7 +348,7 @@ int HugoOperations::execute_Commit(Ndb* pNdb,
m_executed_result_sets.push_back(m_result_sets[i]);
int rows = m_result_sets[i].records;
- NdbResultSet* rs = m_result_sets[i].m_result_set;
+ NdbScanOperation* rs = m_result_sets[i].m_result_set;
int res = rs->nextResult();
switch(res){
case 1:
@@ -402,7 +397,7 @@ int HugoOperations::execute_NoCommit(Ndb* pNdb, AbortOption eao){
m_executed_result_sets.push_back(m_result_sets[i]);
int rows = m_result_sets[i].records;
- NdbResultSet* rs = m_result_sets[i].m_result_set;
+ NdbScanOperation* rs = m_result_sets[i].m_result_set;
int res = rs->nextResult();
switch(res){
case 1:
@@ -441,21 +436,29 @@ int HugoOperations::execute_Rollback(Ndb* pNdb){
}
void
-HugoOperations_async_callback(int res, NdbConnection* pCon, void* ho)
+HugoOperations_async_callback(int res, NdbTransaction* pCon, void* ho)
{
((HugoOperations*)ho)->callback(res, pCon);
}
void
-HugoOperations::callback(int res, NdbConnection* pCon)
+HugoOperations::callback(int res, NdbTransaction* pCon)
{
assert(pCon == pTrans);
m_async_reply= 1;
- m_async_return= res;
+ if(res)
+ {
+ m_async_return = pCon->getNdbError().code;
+ }
+ else
+ {
+ m_async_return = 0;
+ }
}
int
-HugoOperations::execute_async(Ndb* pNdb, ExecType et, AbortOption eao){
+HugoOperations::execute_async(Ndb* pNdb, NdbTransaction::ExecType et,
+ NdbTransaction::AbortOption eao){
m_async_reply= 0;
pTrans->executeAsynchPrepare(et,
@@ -475,16 +478,18 @@ HugoOperations::wait_async(Ndb* pNdb, int timeout)
if(m_async_reply)
{
+ if(m_async_return)
+ ndbout << "ERROR: " << pNdb->getNdbError(m_async_return) << endl;
return m_async_return;
}
ndbout_c("wait returned nothing...");
return -1;
}
-HugoOperations::HugoOperations(const NdbDictionary::Table& _tab):
- UtilTransactions(_tab),
- calc(_tab),
- pTrans(NULL)
+HugoOperations::HugoOperations(const NdbDictionary::Table& _tab,
+ const NdbDictionary::Index* idx):
+ UtilTransactions(_tab, idx),
+ calc(_tab)
{
}
@@ -501,101 +506,32 @@ HugoOperations::~HugoOperations(){
int HugoOperations::equalForAttr(NdbOperation* pOp,
int attrId,
int rowId){
- int check = 0;
+ int check = -1;
const NdbDictionary::Column* attr = tab.getColumn(attrId);
if (attr->getPrimaryKey() == false){
g_info << "Can't call equalForAttr on non PK attribute" << endl;
return NDBT_FAILED;
}
- switch (attr->getType()){
- case NdbDictionary::Column::Char:
- case NdbDictionary::Column::Varchar:
- case NdbDictionary::Column::Binary:
- case NdbDictionary::Column::Varbinary:{
- char buf[8000];
- memset(buf, 0, sizeof(buf));
- check = pOp->equal( attr->getName(), calc.calcValue(rowId, attrId, 0, buf));
- break;
- }
- case NdbDictionary::Column::Int:
- check = pOp->equal( attr->getName(), (Int32)calc.calcValue(rowId, attrId, 0));
- break;
- case NdbDictionary::Column::Unsigned:
- check = pOp->equal( attr->getName(), (Uint32)calc.calcValue(rowId, attrId, 0));
- break;
- case NdbDictionary::Column::Bigint:
- check = pOp->equal( attr->getName(), (Int64)calc.calcValue(rowId, attrId, 0));
- break;
- case NdbDictionary::Column::Bigunsigned:
- check = pOp->equal( attr->getName(), (Uint64)calc.calcValue(rowId, attrId, 0));
- break;
- case NdbDictionary::Column::Float:
- g_info << "Float not allowed as PK value" << endl;
- check = -1;
- break;
-
- default:
- g_info << "default" << endl;
- check = -1;
- break;
- }
- return check;
+ int len = attr->getSizeInBytes();
+ char buf[8000];
+ memset(buf, 0, sizeof(buf));
+ return pOp->equal( attr->getName(),
+ calc.calcValue(rowId, attrId, 0, buf, len));
}
int HugoOperations::setValueForAttr(NdbOperation* pOp,
int attrId,
int rowId,
int updateId){
- int check = 0;
+ int check = -1;
const NdbDictionary::Column* attr = tab.getColumn(attrId);
-
- if (attr->getTupleKey()){
- // Don't set values for TupleId PKs
- return check;
- }
- switch (attr->getType()){
- case NdbDictionary::Column::Char:
- case NdbDictionary::Column::Varchar:
- case NdbDictionary::Column::Binary:
- case NdbDictionary::Column::Varbinary:{
- char buf[8000];
- check = pOp->setValue( attr->getName(),
- calc.calcValue(rowId, attrId, updateId, buf));
- break;
- }
- case NdbDictionary::Column::Int:{
- Int32 val = calc.calcValue(rowId, attrId, updateId);
- check = pOp->setValue( attr->getName(), val);
- }
- break;
- case NdbDictionary::Column::Bigint:{
- Int64 val = calc.calcValue(rowId, attrId, updateId);
- check = pOp->setValue( attr->getName(),
- val);
- }
- break;
- case NdbDictionary::Column::Unsigned:{
- Uint32 val = calc.calcValue(rowId, attrId, updateId);
- check = pOp->setValue( attr->getName(), val);
- }
- break;
- case NdbDictionary::Column::Bigunsigned:{
- Uint64 val = calc.calcValue(rowId, attrId, updateId);
- check = pOp->setValue( attr->getName(),
- val);
- }
- break;
- case NdbDictionary::Column::Float:
- check = pOp->setValue( attr->getName(),
- (float)calc.calcValue(rowId, attrId, updateId));
- break;
- default:
- check = -1;
- break;
- }
- return check;
+ int len = attr->getSizeInBytes();
+ char buf[8000];
+ memset(buf, 0, sizeof(buf));
+ return pOp->setValue( attr->getName(),
+ calc.calcValue(rowId, attrId, updateId, buf, len));
}
int
@@ -611,7 +547,7 @@ HugoOperations::verifyUpdatesValue(int updatesValue, int _numRows){
result = NDBT_FAILED;
continue;
}
-
+
if(calc.getUpdatesValue(rows[i]) != updatesValue){
result = NDBT_FAILED;
g_err << "Invalid updates value for row " << i << endl
@@ -621,7 +557,7 @@ HugoOperations::verifyUpdatesValue(int updatesValue, int _numRows){
continue;
}
}
-
+
if(_numRows == 0){
g_err << "No rows -> Invalid updates value" << endl;
return NDBT_FAILED;
@@ -631,14 +567,12 @@ HugoOperations::verifyUpdatesValue(int updatesValue, int _numRows){
}
void HugoOperations::allocRows(int _numRows){
- deallocRows();
-
if(_numRows <= 0){
g_info << "Illegal value for num rows : " << _numRows << endl;
abort();
}
- for(int b=0; b<_numRows; b++){
+ for(int b=rows.size(); b<_numRows; b++){
rows.push_back(new NDBT_ResultRow(tab));
}
}
@@ -694,7 +628,7 @@ int HugoOperations::compareRecordToCopy(int numRecords ){
void
HugoOperations::refresh() {
- NdbConnection* t = getTransaction();
+ NdbTransaction * t = getTransaction();
if(t)
t->refresh();
}
@@ -799,12 +733,10 @@ HugoOperations::scanReadRecords(Ndb* pNdb, NdbScanOperation::LockMode lm,
if(!pOp)
return -1;
- NdbResultSet * rs = pOp->readTuples(lm, 1, 1);
-
- if(!rs){
+ if(pOp->readTuples(lm, 0, 1)){
return -1;
}
-
+
for(int a = 0; a<tab.getNoOfColumns(); a++){
if((rows[0]->attributeStore(a) =
pOp->getValue(tab.getColumn(a)->getName())) == 0) {
@@ -812,8 +744,8 @@ HugoOperations::scanReadRecords(Ndb* pNdb, NdbScanOperation::LockMode lm,
return NDBT_FAILED;
}
}
-
- RsPair p = {rs, records};
+
+ RsPair p = {pOp, records};
m_result_sets.push_back(p);
return 0;
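The pkReadRecord()/getOperation() changes above let a HugoOperations instance built with an ordered index satisfy a batch of primary-key reads with a single NdbIndexScanOperation: one bound per record, separated by end_of_bound(), with the receivers defined only for the first row. A minimal caller sketch, assuming a table "T1" with an ordered index "T1X1" (hypothetical names), the headers of the ndb/test tree, and the default AbortOption of execute_Commit():

  #include <NDBT.hpp>
  #include <HugoOperations.hpp>

  int readBatchViaOrderedIndex(Ndb* pNdb, int rows)
  {
    NdbDictionary::Dictionary* dict = pNdb->getDictionary();
    const NdbDictionary::Table* tab = dict->getTable("T1");
    const NdbDictionary::Index* idx = dict->getIndex("T1X1", "T1");
    if (tab == 0 || idx == 0)
      return NDBT_FAILED;

    HugoOperations hugoOps(*tab, idx);        // index-aware constructor
    if (hugoOps.startTransaction(pNdb) != NDBT_OK)
      return NDBT_FAILED;

    // With an ordered index this turns into one index scan with one bound
    // per record instead of 'rows' separate primary-key reads.
    if (hugoOps.pkReadRecord(pNdb, 0, rows, NdbOperation::LM_Read) != NDBT_OK)
      return NDBT_FAILED;

    int res = hugoOps.execute_Commit(pNdb);
    hugoOps.closeTransaction(pNdb);
    return res;
  }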
diff --git a/ndb/test/src/HugoTransactions.cpp b/ndb/test/src/HugoTransactions.cpp
index 5d5b2fa99df..3260b921985 100644
--- a/ndb/test/src/HugoTransactions.cpp
+++ b/ndb/test/src/HugoTransactions.cpp
@@ -18,8 +18,9 @@
#include <NdbSleep.h>
-HugoTransactions::HugoTransactions(const NdbDictionary::Table& _tab):
- HugoOperations(_tab),
+HugoTransactions::HugoTransactions(const NdbDictionary::Table& _tab,
+ const NdbDictionary::Index* idx):
+ HugoOperations(_tab, idx),
row(_tab){
m_defaultScanUpdateMethod = 3;
@@ -34,7 +35,8 @@ HugoTransactions::scanReadRecords(Ndb* pNdb,
int records,
int abortPercent,
int parallelism,
- NdbOperation::LockMode lm)
+ NdbOperation::LockMode lm,
+ int scan_flags)
{
int retryAttempt = 0;
@@ -64,17 +66,14 @@ HugoTransactions::scanReadRecords(Ndb* pNdb,
return NDBT_FAILED;
}
- pOp = pTrans->getNdbScanOperation(tab.getName());
+ pOp = getScanOperation(pTrans);
if (pOp == NULL) {
ERR(pTrans->getNdbError());
closeTransaction(pNdb);
return NDBT_FAILED;
}
- NdbResultSet * rs;
- rs = pOp ->readTuples(lm);
-
- if( rs == 0 ) {
+ if( pOp ->readTuples(lm, scan_flags, parallelism) ) {
ERR(pTrans->getNdbError());
closeTransaction(pNdb);
return NDBT_FAILED;
@@ -123,7 +122,7 @@ HugoTransactions::scanReadRecords(Ndb* pNdb,
int eof;
int rows = 0;
- while((eof = rs->nextResult(true)) == 0){
+ while((eof = pOp->nextResult(true)) == 0){
rows++;
if (calc.verifyRowValues(&row) != 0){
closeTransaction(pNdb);
@@ -133,7 +132,7 @@ HugoTransactions::scanReadRecords(Ndb* pNdb,
if (abortCount == rows && abortTrans == true){
ndbout << "Scan is aborted" << endl;
g_info << "Scan is aborted" << endl;
- rs->close();
+ pOp->close();
if( check == -1 ) {
ERR(pTrans->getNdbError());
closeTransaction(pNdb);
@@ -189,7 +188,7 @@ HugoTransactions::scanReadRecords(Ndb* pNdb,
int abortPercent,
int parallelism,
NdbOperation::LockMode lm,
- bool sorted)
+ int scan_flags)
{
int retryAttempt = 0;
@@ -226,10 +225,7 @@ HugoTransactions::scanReadRecords(Ndb* pNdb,
return NDBT_FAILED;
}
- NdbResultSet * rs;
- rs = pOp ->readTuples(lm, 0, parallelism, sorted);
-
- if( rs == 0 ) {
+ if( pOp ->readTuples(lm, scan_flags, parallelism) ) {
ERR(pTrans->getNdbError());
closeTransaction(pNdb);
return NDBT_FAILED;
@@ -278,7 +274,7 @@ HugoTransactions::scanReadRecords(Ndb* pNdb,
int eof;
int rows = 0;
- while((eof = rs->nextResult(true)) == 0){
+ while((eof = pOp->nextResult(true)) == 0){
rows++;
if (calc.verifyRowValues(&row) != 0){
closeTransaction(pNdb);
@@ -288,7 +284,7 @@ HugoTransactions::scanReadRecords(Ndb* pNdb,
if (abortCount == rows && abortTrans == true){
ndbout << "Scan is aborted" << endl;
g_info << "Scan is aborted" << endl;
- rs->close();
+ pOp->close();
if( check == -1 ) {
ERR(pTrans->getNdbError());
closeTransaction(pNdb);
@@ -361,161 +357,7 @@ HugoTransactions::scanUpdateRecords1(Ndb* pNdb,
int records,
int abortPercent,
int parallelism){
-#if 1
return scanUpdateRecords3(pNdb, records, abortPercent, 1);
-#else
- int retryAttempt = 0;
- const int retryMax = 100;
- int check, a;
- NdbOperation *pOp;
-
-
- while (true){
-
- if (retryAttempt >= retryMax){
- g_info << "ERROR: has retried this operation " << retryAttempt
- << " times, failing!" << endl;
- return NDBT_FAILED;
- }
-
- pTrans = pNdb->startTransaction();
- if (pTrans == NULL) {
- const NdbError err = pNdb->getNdbError();
-
- if (err.status == NdbError::TemporaryError){
- ERR(err);
- NdbSleep_MilliSleep(50);
- retryAttempt++;
- continue;
- }
- ERR(err);
- return NDBT_FAILED;
- }
-
- pOp = pTrans->getNdbOperation(tab.getName());
- if (pOp == NULL) {
- ERR(pTrans->getNdbError());
- closeTransaction(pNdb);
- return NDBT_FAILED;
- }
-
- check = pOp->openScanExclusive(parallelism);
- if( check == -1 ) {
- ERR(pTrans->getNdbError());
- closeTransaction(pNdb);
- return NDBT_FAILED;
- }
-
- check = pOp->interpret_exit_ok();
- if( check == -1 ) {
- ERR(pTrans->getNdbError());
- closeTransaction(pNdb);
- return NDBT_FAILED;
- }
-
- // Read all attributes from this table
- for(a=0; a<tab.getNoOfColumns(); a++){
- if((row.attributeStore(a) = pOp->getValue(tab.getColumn(a)->getName())) == NULL){
- ERR(pTrans->getNdbError());
- closeTransaction(pNdb);
- return NDBT_FAILED;
- }
- }
-
- check = pTrans->executeScan();
- if( check == -1 ) {
- const NdbError err = pTrans->getNdbError();
- if (err.status == NdbError::TemporaryError){
- ERR(err);
- closeTransaction(pNdb);
- NdbSleep_MilliSleep(50);
- retryAttempt++;
- continue;
- }
- ERR(err);
- closeTransaction(pNdb);
- return NDBT_FAILED;
- }
-
- // Abort after 1-100 or 1-records rows
- int ranVal = rand();
- int abortCount = ranVal % (records == 0 ? 100 : records);
- bool abortTrans = false;
- if (abort > 0){
- // Abort if abortCount is less then abortPercent
- if (abortCount < abortPercent)
- abortTrans = true;
- }
-
-
- int eof;
- int rows = 0;
-
- eof = pTrans->nextScanResult();
- while(eof == 0){
- rows++;
-
- if (abortCount == rows && abortTrans == true){
- g_info << "Scan is aborted" << endl;
- // This scan should be aborted
- check = pTrans->stopScan();
- if( check == -1 ) {
- ERR(pTrans->getNdbError());
- closeTransaction(pNdb);
- return NDBT_FAILED;
- }
-
- closeTransaction(pNdb);
- return NDBT_OK;
- }
- int res = takeOverAndUpdateRecord(pNdb, pOp);
- if(res == RESTART_SCAN){
- eof = -2;
- continue;
- }
- if (res != 0){
- closeTransaction(pNdb);
- return res;
- }
-
- eof = pTrans->nextScanResult();
- }
- if (eof == -1) {
- const NdbError err = pTrans->getNdbError();
-
- if (err.status == NdbError::TemporaryError){
- ERR(err);
- NdbSleep_MilliSleep(50);
- switch (err.code){
- case 488:
- case 245:
- case 490:
- // Too many active scans, no limit on number of retry attempts
- break;
- default:
- retryAttempt++;
- }
- continue;
- }
- ERR(err);
- closeTransaction(pNdb);
- return NDBT_FAILED;
- }
-
- if(eof == -2){
- closeTransaction(pNdb);
- NdbSleep_MilliSleep(50);
- retryAttempt++;
- continue;
- }
-
- closeTransaction(pNdb);
-
- g_info << rows << " rows have been updated" << endl;
- return NDBT_OK;
- }
- return NDBT_FAILED;
-#endif
}
// Scan all records exclusive and update
@@ -527,166 +369,7 @@ HugoTransactions::scanUpdateRecords2(Ndb* pNdb,
int records,
int abortPercent,
int parallelism){
-#if 1
return scanUpdateRecords3(pNdb, records, abortPercent, parallelism);
-#else
- int retryAttempt = 0;
- const int retryMax = 100;
- int check, a;
- NdbOperation *pOp;
-
-
- while (true){
-
- if (retryAttempt >= retryMax){
- g_info << "ERROR: has retried this operation " << retryAttempt
- << " times, failing!" << endl;
- return NDBT_FAILED;
- }
-
- pTrans = pNdb->startTransaction();
- if (pTrans == NULL) {
- const NdbError err = pNdb->getNdbError();
-
- if (err.status == NdbError::TemporaryError){
- ERR(err);
- NdbSleep_MilliSleep(50);
- retryAttempt++;
- continue;
- }
- ERR(err);
- return NDBT_FAILED;
- }
-
- pOp = pTrans->getNdbOperation(tab.getName());
- if (pOp == NULL) {
- ERR(pTrans->getNdbError());
- closeTransaction(pNdb);
- return NDBT_FAILED;
- }
-
- check = pOp->openScanExclusive(parallelism);
- if( check == -1 ) {
- ERR(pTrans->getNdbError());
- closeTransaction(pNdb);
- return NDBT_FAILED;
- }
-
- check = pOp->interpret_exit_ok();
- if( check == -1 ) {
- ERR(pTrans->getNdbError());
- closeTransaction(pNdb);
- return NDBT_FAILED;
- }
-
- // Read all attributes from this table
- for(a=0; a<tab.getNoOfColumns(); a++){
- if((row.attributeStore(a) = pOp->getValue(tab.getColumn(a)->getName())) == NULL){
- ERR(pTrans->getNdbError());
- closeTransaction(pNdb);
- return NDBT_FAILED;
- }
- }
-
- check = pTrans->executeScan();
- if( check == -1 ) {
- const NdbError err = pTrans->getNdbError();
- if (err.status == NdbError::TemporaryError){
- ERR(err);
- closeTransaction(pNdb);
- NdbSleep_MilliSleep(50);
- retryAttempt++;
- continue;
- }
- ERR(err);
- closeTransaction(pNdb);
- return NDBT_FAILED;
- }
-
- // Abort after 1-100 or 1-records rows
- int ranVal = rand();
- int abortCount = ranVal % (records == 0 ? 100 : records);
- bool abortTrans = false;
- if (abort > 0){
- // Abort if abortCount is less then abortPercent
- if (abortCount < abortPercent)
- abortTrans = true;
- }
-
- int eof;
- int rows = 0;
-
- while((eof = pTrans->nextScanResult(true)) == 0){
- pUpTrans = pNdb->startTransaction();
- if (pUpTrans == NULL) {
- const NdbError err = pNdb->getNdbError();
-
- if (err.status == NdbError::TemporaryError){
- ERR(err);
- NdbSleep_MilliSleep(50);
- retryAttempt++;
- continue;
- }
- ERR(err);
- return NDBT_FAILED;
- }
- do {
- rows++;
- if (addRowToUpdate(pNdb, pUpTrans, pOp) != 0){
- pNdb->closeTransaction(pUpTrans);
- closeTransaction(pNdb);
- return NDBT_FAILED;
- }
- } while((eof = pTrans->nextScanResult(false)) == 0);
-
- if (abortCount == rows && abortTrans == true){
- g_info << "Scan is aborted" << endl;
- // This scan should be aborted
- check = pTrans->stopScan();
- if( check == -1 ) {
- ERR(pTrans->getNdbError());
- closeTransaction(pNdb);
- pNdb->closeTransaction(pUpTrans);
- return NDBT_FAILED;
- }
-
- closeTransaction(pNdb);
- pNdb->closeTransaction(pUpTrans);
- return NDBT_OK;
- }
-
- check = pUpTrans->execute(Commit);
- if( check == -1 ) {
- const NdbError err = pUpTrans->getNdbError();
- ERR(err);
- pNdb->closeTransaction(pUpTrans);
- closeTransaction(pNdb);
- return NDBT_FAILED;
- }
- pNdb->closeTransaction(pUpTrans);
- }
- if (eof == -1) {
- const NdbError err = pTrans->getNdbError();
-
- if (err.status == NdbError::TemporaryError){
- ERR(err);
- closeTransaction(pNdb);
- NdbSleep_MilliSleep(50);
- retryAttempt++;
- continue;
- }
- ERR(err);
- closeTransaction(pNdb);
- return NDBT_FAILED;
- }
-
- closeTransaction(pNdb);
-
- g_info << rows << " rows have been updated" << endl;
- return NDBT_OK;
- }
- return NDBT_FAILED;
-#endif
}
int
@@ -719,15 +402,14 @@ restart:
return NDBT_FAILED;
}
- pOp = pTrans->getNdbScanOperation(tab.getName());
+ pOp = getScanOperation(pTrans);
if (pOp == NULL) {
ERR(pTrans->getNdbError());
closeTransaction(pNdb);
return NDBT_FAILED;
}
- NdbResultSet *rs = pOp->readTuplesExclusive(parallelism);
- if( rs == 0 ) {
+ if( pOp->readTuplesExclusive(parallelism) ) {
ERR(pTrans->getNdbError());
closeTransaction(pNdb);
return NDBT_FAILED;
@@ -765,10 +447,10 @@ restart:
}
int rows = 0;
- while((check = rs->nextResult(true)) == 0){
+ while((check = pOp->nextResult(true)) == 0){
do {
rows++;
- NdbOperation* pUp = rs->updateTuple();
+ NdbOperation* pUp = pOp->updateCurrentTuple();
if(pUp == 0){
ERR(pTrans->getNdbError());
closeTransaction(pNdb);
@@ -792,7 +474,7 @@ restart:
closeTransaction(pNdb);
return NDBT_OK;
}
- } while((check = rs->nextResult(false)) == 0);
+ } while((check = pOp->nextResult(false)) == 0);
if(check != -1){
check = pTrans->execute(Commit);
@@ -858,7 +540,11 @@ HugoTransactions::loadTable(Ndb* pNdb,
g_info << "|- Inserting records..." << endl;
for (int c=0 ; c<records ; ){
- bool closeTrans;
+ bool closeTrans = true;
+
+ if(c + batch > records)
+ batch = records - c;
+
if (retryAttempt >= retryMax){
g_info << "Record " << c << " could not be inserted, has retried "
<< retryAttempt << " times " << endl;
@@ -887,30 +573,11 @@ HugoTransactions::loadTable(Ndb* pNdb,
}
}
- for(int b = 0; b < batch && c+b<records; b++){
-
- pOp = pTrans->getNdbOperation(tab.getName());
- if (pOp == NULL) {
- ERR(pTrans->getNdbError());
- closeTransaction(pNdb);
- return NDBT_FAILED;
- }
-
- check = pOp->insertTuple();
- if( check == -1 ) {
- ERR(pTrans->getNdbError());
- closeTransaction(pNdb);
- return NDBT_FAILED;
- }
-
- // Set a calculated value for each attribute in this table
- for (a = 0; a<tab.getNoOfColumns(); a++){
- if(setValueForAttr(pOp, a, c+b, 0 ) != 0){
- ERR(pTrans->getNdbError());
- closeTransaction(pNdb);
- return NDBT_FAILED;
- }
- }
+ if(pkInsertRecord(pNdb, c, batch) != NDBT_OK)
+ {
+ ERR(pTrans->getNdbError());
+ closeTransaction(pNdb);
+ return NDBT_FAILED;
}
// Execute the transaction and insert the record
@@ -976,6 +643,9 @@ HugoTransactions::loadTable(Ndb* pNdb,
c = c+batch;
retryAttempt = 0;
}
+
+ if(pTrans)
+ closeTransaction(pNdb);
return NDBT_OK;
}
@@ -986,8 +656,20 @@ HugoTransactions::fillTable(Ndb* pNdb,
int retryAttempt = 0;
int retryMax = 5;
NdbOperation *pOp;
+
+ const int org = batch;
+ const int cols = tab.getNoOfColumns();
+ const int brow = tab.getRowSizeInBytes();
+ const int bytes = 12 + brow + 4 * cols;
+ batch = (batch * 256); // -> 512 -> 65536k per commit
+ batch = batch/bytes; //
+ batch = batch == 0 ? 1 : batch;
+
+ if(batch != org){
+ g_info << "batch = " << org << " rowsize = " << bytes
+ << " -> rows/commit = " << batch << endl;
+ }
- g_info << "|- Inserting records..." << endl;
for (int c=0 ; ; ){
if (retryAttempt >= retryMax){
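The new batch sizing in fillTable() above treats the requested batch as a byte budget of batch * 256: bytes approximates the cost of one row (12 bytes of fixed overhead, the row size, and 4 bytes per column, presumably for the attribute headers), and rows-per-commit is the budget divided by that cost, with a floor of one row. As a worked example (figures assumed for illustration only), a table with 5 columns and a 32-byte row gives bytes = 12 + 32 + 4*5 = 64, so a requested batch of 512 becomes (512 * 256) / 64 = 2048 rows per commit.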
@@ -1012,30 +694,11 @@ HugoTransactions::fillTable(Ndb* pNdb,
return NDBT_FAILED;
}
- for(b = 0; b < batch; b++){
-
- pOp = pTrans->getNdbOperation(tab.getName());
- if (pOp == NULL) {
- ERR(pTrans->getNdbError());
- closeTransaction(pNdb);
- return NDBT_FAILED;
- }
-
- check = pOp->insertTuple();
- if( check == -1 ) {
- ERR(pTrans->getNdbError());
- closeTransaction(pNdb);
- return NDBT_FAILED;
- }
-
- // Set a calculated value for each attribute in this table
- for (a = 0; a<tab.getNoOfColumns(); a++){
- if(setValueForAttr(pOp, a, c+b, 0 ) != 0){
- ERR(pTrans->getNdbError());
- closeTransaction(pNdb);
- return NDBT_FAILED;
- }
- }
+ if(pkInsertRecord(pNdb, c, batch) != NDBT_OK)
+ {
+ ERR(pTrans->getNdbError());
+ closeTransaction(pNdb);
+ return NDBT_FAILED;
}
// Execute the transaction and insert the record
@@ -1113,7 +776,9 @@ HugoTransactions::createEvent(Ndb* pNdb){
NdbDictionary::Dictionary *myDict = pNdb->getDictionary();
if (!myDict) {
- printf("Event Creation failedDictionary not found");
+ g_err << "Dictionary not found "
+ << pNdb->getNdbError().code << " "
+ << pNdb->getNdbError().message << endl;
return NDBT_FAILED;
}
@@ -1134,21 +799,33 @@ HugoTransactions::createEvent(Ndb* pNdb){
if (res == 0)
myEvent.print();
- else {
- g_info << "Event creation failed\n";
- g_info << "trying drop Event, maybe event exists\n";
+ else if (myDict->getNdbError().classification ==
+ NdbError::SchemaObjectExists)
+ {
+ g_info << "Event creation failed, event exists\n";
res = myDict->dropEvent(eventName);
if (res) {
- g_err << "failed to drop event\n";
+ g_err << "Failed to drop event: "
+ << myDict->getNdbError().code << " : "
+ << myDict->getNdbError().message << endl;
return NDBT_FAILED;
}
// try again
res = myDict->createEvent(myEvent); // Add event to database
if (res) {
- g_err << "failed to create event\n";
+ g_err << "Failed to create event (1): "
+ << myDict->getNdbError().code << " : "
+ << myDict->getNdbError().message << endl;
return NDBT_FAILED;
}
}
+ else
+ {
+ g_err << "Failed to create event (2): "
+ << myDict->getNdbError().code << " : "
+ << myDict->getNdbError().message << endl;
+ return NDBT_FAILED;
+ }
return NDBT_OK;
}
@@ -1164,6 +841,7 @@ struct receivedEvent {
};
int XXXXX = 0;
+
int
HugoTransactions::eventOperation(Ndb* pNdb, void* pstats,
int records) {
@@ -1234,7 +912,9 @@ HugoTransactions::eventOperation(Ndb* pNdb, void* pstats,
// set up the callbacks
g_info << function << "execute\n";
if (pOp->execute()) { // This starts changes to "start flowing"
- g_err << function << "operation execution failed\n";
+ g_err << function << "operation execution failed: \n";
+ g_err << pOp->getNdbError().code << " "
+ << pOp->getNdbError().message << endl;
return NDBT_FAILED;
}
@@ -1362,36 +1042,36 @@ HugoTransactions::eventOperation(Ndb* pNdb, void* pstats,
stats.n_duplicates += recDeleteEvent[i].count-1;
}
}
-
+
return NDBT_OK;
}
int
HugoTransactions::pkReadRecords(Ndb* pNdb,
int records,
- int batchsize,
+ int batch,
NdbOperation::LockMode lm){
int reads = 0;
int r = 0;
int retryAttempt = 0;
const int retryMax = 100;
int check, a;
- NdbOperation *pOp;
- if (batchsize == 0) {
- g_info << "ERROR: Argument batchsize == 0 in pkReadRecords(). Not allowed." << endl;
+ if (batch == 0) {
+ g_info << "ERROR: Argument batch == 0 in pkReadRecords(). Not allowed." << endl;
return NDBT_FAILED;
}
- allocRows(batchsize);
-
while (r < records){
+ if(r + batch > records)
+ batch = records - r;
+
if (retryAttempt >= retryMax){
g_info << "ERROR: has retried this operation " << retryAttempt
<< " times, failing!" << endl;
return NDBT_FAILED;
}
-
+
pTrans = pNdb->startTransaction();
if (pTrans == NULL) {
const NdbError err = pNdb->getNdbError();
@@ -1405,64 +1085,18 @@ HugoTransactions::pkReadRecords(Ndb* pNdb,
ERR(err);
return NDBT_FAILED;
}
-
- for(int b=0; (b<batchsize) && (r+b < records); b++){
- pOp = pTrans->getNdbOperation(tab.getName());
- if (pOp == NULL) {
- ERR(pTrans->getNdbError());
- closeTransaction(pNdb);
- return NDBT_FAILED;
- }
-
- rand_lock_mode:
- switch(lm){
- case NdbOperation::LM_Read:
- check = pOp->readTuple();
- break;
- case NdbOperation::LM_Exclusive:
- check = pOp->readTupleExclusive();
- break;
- case NdbOperation::LM_CommittedRead:
- check = pOp->dirtyRead();
- break;
- default:
- lm = (NdbOperation::LockMode)((rand() >> 16) & 3);
- goto rand_lock_mode;
- }
-
- if( check == -1 ) {
- ERR(pTrans->getNdbError());
- closeTransaction(pNdb);
- return NDBT_FAILED;
- }
- // Define primary keys
- for(a = 0; a<tab.getNoOfColumns(); a++){
- if (tab.getColumn(a)->getPrimaryKey() == true){
- if(equalForAttr(pOp, a, r+b) != 0){
- ERR(pTrans->getNdbError());
- closeTransaction(pNdb);
- return NDBT_FAILED;
- }
- }
- }
-
- // Define attributes to read
- for(a = 0; a<tab.getNoOfColumns(); a++){
- if((rows[b]->attributeStore(a) =
- pOp->getValue(tab.getColumn(a)->getName())) == 0) {
- ERR(pTrans->getNdbError());
- closeTransaction(pNdb);
- return NDBT_FAILED;
- }
- }
-
+ if(pkReadRecord(pNdb, r, batch, lm) != NDBT_OK)
+ {
+ ERR(pTrans->getNdbError());
+ closeTransaction(pNdb);
+ return NDBT_FAILED;
}
-
+
check = pTrans->execute(Commit);
if( check == -1 ) {
const NdbError err = pTrans->getNdbError();
-
+
if (err.status == NdbError::TemporaryError){
ERR(err);
closeTransaction(pNdb);
@@ -1481,19 +1115,48 @@ HugoTransactions::pkReadRecords(Ndb* pNdb,
closeTransaction(pNdb);
return NDBT_FAILED;
}
- } else{
- for (int b=0; (b<batchsize) && (r+b<records); b++){
- if (calc.verifyRowValues(rows[b]) != 0){
+ } else {
+ if(pIndexScanOp)
+ {
+ int rows_found = 0;
+ while((check = pIndexScanOp->nextResult()) == 0)
+ {
+ rows_found++;
+ if (calc.verifyRowValues(rows[0]) != 0){
+ closeTransaction(pNdb);
+ return NDBT_FAILED;
+ }
+ }
+ if(check != 1 || rows_found > batch)
+ {
closeTransaction(pNdb);
return NDBT_FAILED;
}
- reads++;
- r++;
+ else if(rows_found < batch)
+ {
+ if(batch == 1){
+ g_info << r << ": not found" << endl; abort(); }
+ else
+ g_info << "Found " << rows_found << " of "
+ << batch << " rows" << endl;
+ }
+ r += batch;
+ reads += rows_found;
+ }
+ else
+ {
+ for (int b=0; (b<batch) && (r+b<records); b++){
+ if (calc.verifyRowValues(rows[b]) != 0){
+ closeTransaction(pNdb);
+ return NDBT_FAILED;
+ }
+ reads++;
+ r++;
+ }
}
}
-
+
closeTransaction(pNdb);
-
}
deallocRows();
g_info << reads << " records read" << endl;
@@ -1518,6 +1181,8 @@ HugoTransactions::pkUpdateRecords(Ndb* pNdb,
g_info << "|- Updating records (batch=" << batch << ")..." << endl;
while (r < records){
+ if(r + batch > records)
+ batch = records - r;
if (retryAttempt >= retryMax){
g_info << "ERROR: has retried this operation " << retryAttempt
@@ -1542,43 +1207,13 @@ HugoTransactions::pkUpdateRecords(Ndb* pNdb,
return NDBT_FAILED;
}
- for(b = 0; b<batch && (r+b) < records; b++){
- pOp = pTrans->getNdbOperation(tab.getName());
- if (pOp == NULL) {
- ERR(pTrans->getNdbError());
- closeTransaction(pNdb);
- return NDBT_FAILED;
- }
-
- check = pOp->readTupleExclusive();
- if( check == -1 ) {
- ERR(pTrans->getNdbError());
- closeTransaction(pNdb);
- return NDBT_FAILED;
- }
-
- // Define primary keys
- for(a = 0; a<tab.getNoOfColumns(); a++){
- if (tab.getColumn(a)->getPrimaryKey() == true){
- if(equalForAttr(pOp, a, r+b) != 0){
- ERR(pTrans->getNdbError());
- closeTransaction(pNdb);
- return NDBT_FAILED;
- }
- }
- }
-
- // Define attributes to read
- for(a = 0; a<tab.getNoOfColumns(); a++){
- if((rows[b]->attributeStore(a) =
- pOp->getValue(tab.getColumn(a)->getName())) == 0) {
- ERR(pTrans->getNdbError());
- closeTransaction(pNdb);
- return NDBT_FAILED;
- }
- }
+ if(pkReadRecord(pNdb, r, batch, NdbOperation::LM_Exclusive) != NDBT_OK)
+ {
+ ERR(pTrans->getNdbError());
+ closeTransaction(pNdb);
+ return NDBT_FAILED;
}
-
+
check = pTrans->execute(NoCommit);
if( check == -1 ) {
const NdbError err = pTrans->getNdbError();
@@ -1594,52 +1229,62 @@ HugoTransactions::pkUpdateRecords(Ndb* pNdb,
closeTransaction(pNdb);
return NDBT_FAILED;
}
-
- for(b = 0; b<batch && (b+r)<records; b++){
- if (calc.verifyRowValues(rows[b]) != 0){
- closeTransaction(pNdb);
- return NDBT_FAILED;
- }
-
- int updates = calc.getUpdatesValue(rows[b]) + 1;
-
- NdbOperation* pUpdOp;
- pUpdOp = pTrans->getNdbOperation(tab.getName());
- if (pUpdOp == NULL) {
- ERR(pTrans->getNdbError());
- closeTransaction(pNdb);
- return NDBT_FAILED;
- }
-
- check = pUpdOp->updateTuple();
- if( check == -1 ) {
- ERR(pTrans->getNdbError());
- closeTransaction(pNdb);
- return NDBT_FAILED;
- }
-
- for(a = 0; a<tab.getNoOfColumns(); a++){
- if (tab.getColumn(a)->getPrimaryKey() == true){
- if(equalForAttr(pUpdOp, a, r+b) != 0){
- ERR(pTrans->getNdbError());
+
+ if(pIndexScanOp)
+ {
+ int rows_found = 0;
+ while((check = pIndexScanOp->nextResult(true)) == 0)
+ {
+ do {
+
+ if (calc.verifyRowValues(rows[0]) != 0){
closeTransaction(pNdb);
return NDBT_FAILED;
}
- }
- }
-
- for(a = 0; a<tab.getNoOfColumns(); a++){
- if (tab.getColumn(a)->getPrimaryKey() == false){
- if(setValueForAttr(pUpdOp, a, r+b, updates ) != 0){
+
+ int updates = calc.getUpdatesValue(rows[0]) + 1;
+
+ if(pkUpdateRecord(pNdb, r+rows_found, 1, updates) != NDBT_OK)
+ {
ERR(pTrans->getNdbError());
closeTransaction(pNdb);
return NDBT_FAILED;
}
+ rows_found++;
+ } while((check = pIndexScanOp->nextResult(false)) == 0);
+
+ if(check != 2)
+ break;
+ if((check = pTrans->execute(NoCommit)) != 0)
+ break;
+ }
+ if(check != 1 || rows_found != batch)
+ {
+ closeTransaction(pNdb);
+ return NDBT_FAILED;
+ }
+ }
+ else
+ {
+ for(b = 0; b<batch && (b+r)<records; b++)
+ {
+ if (calc.verifyRowValues(rows[b]) != 0)
+ {
+ closeTransaction(pNdb);
+ return NDBT_FAILED;
+ }
+
+ int updates = calc.getUpdatesValue(rows[b]) + 1;
+
+ if(pkUpdateRecord(pNdb, r+b, 1, updates) != NDBT_OK)
+ {
+ ERR(pTrans->getNdbError());
+ closeTransaction(pNdb);
+ return NDBT_FAILED;
}
}
+ check = pTrans->execute(Commit);
}
-
- check = pTrans->execute(Commit);
if( check == -1 ) {
const NdbError err = pTrans->getNdbError();
@@ -1658,13 +1303,12 @@ HugoTransactions::pkUpdateRecords(Ndb* pNdb,
else{
updated += batch;
}
-
-
+
closeTransaction(pNdb);
-
+
r += batch; // Read next record
}
-
+
deallocRows();
g_info << "|- " << updated << " records updated" << endl;
return NDBT_OK;
@@ -1713,7 +1357,7 @@ HugoTransactions::pkInterpretedUpdateRecords(Ndb* pNdb,
if( check == -1 ) {
ERR(pTrans->getNdbError());
closeTransaction(pNdb);
- return NDBT_FAILED;
+ return NDBT_FAILED;
}
// Define primary keys
@@ -1861,6 +1505,8 @@ HugoTransactions::pkDelRecords(Ndb* pNdb,
g_info << "|- Deleting records..." << endl;
while (r < records){
+ if(r + batch > records)
+ batch = records - r;
if (retryAttempt >= retryMax){
g_info << "ERROR: has retried this operation " << retryAttempt
@@ -1885,30 +1531,13 @@ HugoTransactions::pkDelRecords(Ndb* pNdb,
return NDBT_FAILED;
}
- pOp = pTrans->getNdbOperation(tab.getName());
- if (pOp == NULL) {
- ERR(pTrans->getNdbError());
- closeTransaction(pNdb);
- return NDBT_FAILED;
- }
-
- check = pOp->deleteTuple();
- if( check == -1 ) {
+ if(pkDeleteRecord(pNdb, r, batch) != NDBT_OK)
+ {
ERR(pTrans->getNdbError());
closeTransaction(pNdb);
return NDBT_FAILED;
}
- // Define primary keys
- for(a = 0; a<tab.getNoOfColumns(); a++){
- if (tab.getColumn(a)->getPrimaryKey() == true){
- if(equalForAttr(pOp, a, r) != 0){
- ERR(pTrans->getNdbError());
- closeTransaction(pNdb);
- return NDBT_FAILED;
- }
- }
- }
check = pTrans->execute(Commit);
if( check == -1) {
const NdbError err = pTrans->getNdbError();
@@ -1946,11 +1575,11 @@ HugoTransactions::pkDelRecords(Ndb* pNdb,
}
}
else {
- deleted++;
+ deleted += batch;
}
closeTransaction(pNdb);
-
- r++; // Read next record
+
+ r += batch; // Read next record
}
@@ -1972,6 +1601,7 @@ HugoTransactions::lockRecords(Ndb* pNdb,
const int retryMax = 100;
int check, a, b;
NdbOperation *pOp;
+ NdbOperation::LockMode lm = NdbOperation::LM_Exclusive;
// Calculate how many records to lock in each batch
if (percentToLock <= 0)
@@ -1984,6 +1614,9 @@ HugoTransactions::lockRecords(Ndb* pNdb,
allocRows(lockBatch);
while (r < records){
+ if(r + lockBatch > records)
+ lockBatch = records - r;
+
g_info << "|- Locking " << lockBatch << " records..." << endl;
if (retryAttempt >= retryMax){
@@ -2006,42 +1639,13 @@ HugoTransactions::lockRecords(Ndb* pNdb,
return NDBT_FAILED;
}
- for(b = 0; (b<lockBatch) && (r+b < records); b++){
- pOp = pTrans->getNdbOperation(tab.getName());
- if (pOp == NULL) {
- ERR(pTrans->getNdbError());
- closeTransaction(pNdb);
- return NDBT_FAILED;
- }
-
- check = pOp->readTupleExclusive();
- if( check == -1 ) {
- ERR(pTrans->getNdbError());
- closeTransaction(pNdb);
- return NDBT_FAILED;
- }
-
- // Define primary keys
- for(a = 0; a<tab.getNoOfColumns(); a++){
- if (tab.getColumn(a)->getPrimaryKey() == true){
- if(equalForAttr(pOp, a, r+b) != 0){
- ERR(pTrans->getNdbError());
- closeTransaction(pNdb);
- return NDBT_FAILED;
- }
- }
- }
-
- // Define attributes to read
- for(a = 0; a<tab.getNoOfColumns(); a++){
- if((rows[b]->attributeStore(a) =
- pOp->getValue(tab.getColumn(a)->getName())) == 0) {
- ERR(pTrans->getNdbError());
- closeTransaction(pNdb);
- return NDBT_FAILED;
- }
- }
+ if(pkReadRecord(pNdb, r, lockBatch, lm) != NDBT_OK)
+ {
+ ERR(pTrans->getNdbError());
+ closeTransaction(pNdb);
+ return NDBT_FAILED;
}
+
// NoCommit lockTime times with 100 millis interval
int sleepInterval = 50;
int lockCount = lockTime / sleepInterval;
@@ -2099,8 +1703,7 @@ HugoTransactions::lockRecords(Ndb* pNdb,
}
closeTransaction(pNdb);
-
-
+
}
deallocRows();
g_info << "|- Record locking completed" << endl;
@@ -2111,7 +1714,7 @@ int
HugoTransactions::indexReadRecords(Ndb* pNdb,
const char * idxName,
int records,
- int batchsize){
+ int batch){
int reads = 0;
int r = 0;
int retryAttempt = 0;
@@ -2119,24 +1722,23 @@ HugoTransactions::indexReadRecords(Ndb* pNdb,
int check, a;
NdbOperation *pOp;
NdbIndexScanOperation *sOp;
- NdbResultSet * rs;
const NdbDictionary::Index* pIndex
= pNdb->getDictionary()->getIndex(idxName, tab.getName());
const bool ordered = (pIndex->getType()==NdbDictionary::Index::OrderedIndex);
- if (batchsize == 0) {
- g_info << "ERROR: Argument batchsize == 0 in indexReadRecords(). "
+ if (batch == 0) {
+ g_info << "ERROR: Argument batch == 0 in indexReadRecords(). "
<< "Not allowed." << endl;
return NDBT_FAILED;
}
if (ordered) {
- batchsize = 1;
+ batch = 1;
}
- allocRows(batchsize);
+ allocRows(batch);
while (r < records){
if (retryAttempt >= retryMax){
@@ -2159,7 +1761,7 @@ HugoTransactions::indexReadRecords(Ndb* pNdb,
return NDBT_FAILED;
}
- for(int b=0; (b<batchsize) && (r+b < records); b++){
+ for(int b=0; (b<batch) && (r+b < records); b++){
if(!ordered){
pOp = pTrans->getNdbIndexOperation(idxName, tab.getName());
if (pOp == NULL) {
@@ -2175,9 +1777,7 @@ HugoTransactions::indexReadRecords(Ndb* pNdb,
closeTransaction(pNdb);
return NDBT_FAILED;
}
-
- check = 0;
- rs = sOp->readTuples();
+ check = sOp->readTuples();
}
if( check == -1 ) {
@@ -2209,7 +1809,7 @@ HugoTransactions::indexReadRecords(Ndb* pNdb,
}
check = pTrans->execute(Commit);
- check = (check == -1 ? -1 : !ordered ? check : rs->nextResult(true));
+ check = (check == -1 ? -1 : !ordered ? check : sOp->nextResult(true));
if( check == -1 ) {
const NdbError err = pTrans->getNdbError();
@@ -2232,7 +1832,7 @@ HugoTransactions::indexReadRecords(Ndb* pNdb,
return NDBT_FAILED;
}
} else{
- for (int b=0; (b<batchsize) && (r+b<records); b++){
+ for (int b=0; (b<batch) && (r+b<records); b++){
if (calc.verifyRowValues(rows[b]) != 0){
closeTransaction(pNdb);
return NDBT_FAILED;
@@ -2240,7 +1840,7 @@ HugoTransactions::indexReadRecords(Ndb* pNdb,
reads++;
r++;
}
- if(ordered && rs->nextResult(true) == 0){
+ if(ordered && sOp->nextResult(true) == 0){
ndbout << "Error when comparing records "
<< " - index op next_result to many" << endl;
closeTransaction(pNdb);
@@ -2260,7 +1860,7 @@ int
HugoTransactions::indexUpdateRecords(Ndb* pNdb,
const char * idxName,
int records,
- int batchsize){
+ int batch){
int updated = 0;
int r = 0;
@@ -2269,17 +1869,16 @@ HugoTransactions::indexUpdateRecords(Ndb* pNdb,
int check, a, b;
NdbOperation *pOp;
NdbScanOperation * sOp;
- NdbResultSet * rs;
const NdbDictionary::Index* pIndex
= pNdb->getDictionary()->getIndex(idxName, tab.getName());
const bool ordered = (pIndex->getType()==NdbDictionary::Index::OrderedIndex);
if (ordered){
- batchsize = 1;
+ batch = 1;
}
- allocRows(batchsize);
+ allocRows(batch);
while (r < records){
if (retryAttempt >= retryMax){
@@ -2302,7 +1901,7 @@ HugoTransactions::indexUpdateRecords(Ndb* pNdb,
return NDBT_FAILED;
}
- for(b = 0; b<batchsize && (b+r)<records; b++){
+ for(b = 0; b<batch && (b+r)<records; b++){
if(!ordered){
pOp = pTrans->getNdbIndexOperation(idxName, tab.getName());
if (pOp == NULL) {
@@ -2326,7 +1925,7 @@ HugoTransactions::indexUpdateRecords(Ndb* pNdb,
}
check = 0;
- rs = sOp->readTuplesExclusive();
+ sOp->readTuplesExclusive();
}
// Define primary keys
@@ -2352,7 +1951,7 @@ HugoTransactions::indexUpdateRecords(Ndb* pNdb,
}
check = pTrans->execute(NoCommit);
- check = (check == -1 ? -1 : !ordered ? check : rs->nextResult(true));
+ check = (check == -1 ? -1 : !ordered ? check : sOp->nextResult(true));
if( check == -1 ) {
const NdbError err = pTrans->getNdbError();
ERR(err);
@@ -2367,12 +1966,12 @@ HugoTransactions::indexUpdateRecords(Ndb* pNdb,
}
if(ordered && check != 0){
- g_err << "Row: " << r << " not found!!" << endl;
+ g_err << check << " - Row: " << r << " not found!!" << endl;
closeTransaction(pNdb);
return NDBT_FAILED;
}
- for(b = 0; b<batchsize && (b+r)<records; b++){
+ for(b = 0; b<batch && (b+r)<records; b++){
if (calc.verifyRowValues(rows[b]) != 0){
closeTransaction(pNdb);
return NDBT_FAILED;
@@ -2385,7 +1984,7 @@ HugoTransactions::indexUpdateRecords(Ndb* pNdb,
pUpdOp = pTrans->getNdbIndexOperation(idxName, tab.getName());
check = (pUpdOp == 0 ? -1 : pUpdOp->updateTuple());
} else {
- pUpdOp = rs->updateTuple();
+ pUpdOp = sOp->updateCurrentTuple();
}
if (pUpdOp == NULL) {
@@ -2437,12 +2036,12 @@ HugoTransactions::indexUpdateRecords(Ndb* pNdb,
ndbout << "r = " << r << endl;
return NDBT_FAILED;
} else {
- updated += batchsize;
+ updated += batch;
}
closeTransaction(pNdb);
- r+= batchsize; // Read next record
+ r+= batch; // Read next record
}
g_info << "|- " << updated << " records updated" << endl;
diff --git a/ndb/test/src/NDBT_ResultRow.cpp b/ndb/test/src/NDBT_ResultRow.cpp
index 11554b4a9b3..ab8d7b07ea1 100644
--- a/ndb/test/src/NDBT_ResultRow.cpp
+++ b/ndb/test/src/NDBT_ResultRow.cpp
@@ -84,7 +84,7 @@ NDBT_ResultRow::header (NdbOut & out) const {
return out;
}
-BaseString NDBT_ResultRow::c_str() {
+BaseString NDBT_ResultRow::c_str() const {
BaseString str;
@@ -137,3 +137,10 @@ NDBT_ResultRow::clone () const {
return row;
}
+
+bool
+NDBT_ResultRow::operator==(const NDBT_ResultRow& other) const
+{
+ // quick and dirty
+ return c_str() == other.c_str();
+}
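With c_str() now const and the new operator==, two captured rows can be compared directly, for example: if (!(*rowBefore == *rowAfter)) result = NDBT_FAILED;. As the comment says, the comparison is textual ("quick and dirty"), so it is only meaningful for rows read with the same column list.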
diff --git a/ndb/test/src/NDBT_Tables.cpp b/ndb/test/src/NDBT_Tables.cpp
index ff6db3e892c..5a5fecd85c1 100644
--- a/ndb/test/src/NDBT_Tables.cpp
+++ b/ndb/test/src/NDBT_Tables.cpp
@@ -48,7 +48,7 @@ const
NDBT_Attribute T2Attribs[] = {
NDBT_Attribute("KOL1", NdbDictionary::Column::Bigunsigned, 1, true),
NDBT_Attribute("KOL2", NdbDictionary::Column::Unsigned),
- NDBT_Attribute("KOL3", NdbDictionary::Column::Unsigned),
+ NDBT_Attribute("KOL3", NdbDictionary::Column::Bit, 23),
NDBT_Attribute("KOL4", NdbDictionary::Column::Unsigned,
1, false, true), // Nullable
NDBT_Attribute("KOL5", NdbDictionary::Column::Unsigned)
@@ -820,21 +820,25 @@ NDBT_Tables::createAllTables(Ndb* pNdb){
int
NDBT_Tables::createTable(Ndb* pNdb, const char* _name, bool _temp,
- bool existsOk){
+ bool existsOk, NDBT_CreateTableHook f){
const NdbDictionary::Table* tab = NDBT_Tables::getTable(_name);
if (tab == NULL){
ndbout << "Could not create table " << _name
<< ", it doesn't exist in list of tables "\
- "that NDBT_Tables can create!" << endl;
+ "that NDBT_Tables can create!" << endl;
return NDBT_WRONGARGS;
}
-
+
int r = 0;
do {
NdbDictionary::Table tmpTab(* tab);
tmpTab.setStoredTable(_temp ? 0 : 1);
-
+ if(f != 0 && f(pNdb, tmpTab, 0))
+ {
+ ndbout << "Failed to create table" << endl;
+ return NDBT_FAILED;
+ }
r = pNdb->getDictionary()->createTable(tmpTab);
if(r == -1){
if(!existsOk){
@@ -883,6 +887,11 @@ NDBT_Tables::createTable(Ndb* pNdb, const char* _name, bool _temp,
}
}
}
+ if(f != 0 && f(pNdb, tmpTab, 1))
+ {
+ ndbout << "Failed to create table" << endl;
+ return NDBT_FAILED;
+ }
} while(false);
return r;
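createTable() above gains an optional NDBT_CreateTableHook that is called on the copied table definition both before the dictionary call (third argument 0) and after it (third argument 1); a nonzero return aborts the create. The exact typedef lives in the header and is only assumed here to be int (*)(Ndb*, NdbDictionary::Table&, int). A sketch of a hook that tweaks the definition before creation:

  // Sketch only; the hook signature is assumed, column name "EXTRA" is made up.
  static int addExtraColumn(Ndb* /* pNdb */, NdbDictionary::Table& tab, int when)
  {
    if (when == 0) {                         // before createTable() is issued
      NdbDictionary::Column col("EXTRA");
      col.setType(NdbDictionary::Column::Unsigned);
      tab.addColumn(col);
    }
    return 0;                                // nonzero makes createTable() fail
  }

  // Usage: NDBT_Tables::createTable(pNdb, "T1", false, true, addExtraColumn);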
diff --git a/ndb/test/src/NDBT_Test.cpp b/ndb/test/src/NDBT_Test.cpp
index 42bae193b35..8fecf56531f 100644
--- a/ndb/test/src/NDBT_Test.cpp
+++ b/ndb/test/src/NDBT_Test.cpp
@@ -15,6 +15,7 @@
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
#include <ndb_global.h>
+#include <my_pthread.h>
#include "NDBT.hpp"
#include "NDBT_Test.hpp"
@@ -26,7 +27,9 @@
// No verbose output
-NDBT_Context::NDBT_Context(){
+NDBT_Context::NDBT_Context(Ndb_cluster_connection& con)
+ : m_cluster_connection(con)
+{
tab = NULL;
suite = NULL;
testcase = NULL;
@@ -248,7 +251,7 @@ int NDBT_Step::execute(NDBT_Context* ctx) {
g_info << " |- " << name << " started [" << ctx->suite->getDate() << "]"
<< endl;
- result = setUp();
+ result = setUp(ctx->m_cluster_connection);
if (result != NDBT_OK){
return result;
}
@@ -288,10 +291,10 @@ NDBT_NdbApiStep::NDBT_NdbApiStep(NDBT_TestCase* ptest,
int
-NDBT_NdbApiStep::setUp(){
- ndb = new Ndb( "TEST_DB" );
+NDBT_NdbApiStep::setUp(Ndb_cluster_connection& con){
+ ndb = new Ndb(&con, "TEST_DB" );
ndb->init(1024);
-
+
int result = ndb->waitUntilReady(300); // 5 minutes
if (result != 0){
g_err << name << ": Ndb was not ready" << endl;
@@ -626,7 +629,6 @@ int NDBT_TestCase::execute(NDBT_Context* ctx){
return res;
}
-
void NDBT_TestCase::startTimer(NDBT_Context* ctx){
timer.doStart();
}
@@ -757,14 +759,15 @@ int NDBT_TestSuite::addTest(NDBT_TestCase* pTest){
return 0;
}
-int NDBT_TestSuite::executeAll(const char* _testname){
+int NDBT_TestSuite::executeAll(Ndb_cluster_connection& con,
+ const char* _testname){
if(tests.size() == 0)
return NDBT_FAILED;
- Ndb ndb("TEST_DB");
+ Ndb ndb(&con, "TEST_DB");
ndb.init(1024);
- int result = ndb.waitUntilReady(300); // 5 minutes
+ int result = ndb.waitUntilReady(500); // 5 minutes
if (result != 0){
g_err << name <<": Ndb was not ready" << endl;
return NDBT_FAILED;
@@ -777,18 +780,19 @@ int NDBT_TestSuite::executeAll(const char* _testname){
for (int t=0; t < NDBT_Tables::getNumTables(); t++){
const NdbDictionary::Table* ptab = NDBT_Tables::getTable(t);
ndbout << "|- " << ptab->getName() << endl;
- execute(&ndb, ptab, _testname);
+ execute(con, &ndb, ptab, _testname);
}
testSuiteTimer.doStop();
return reportAllTables(_testname);
}
int
-NDBT_TestSuite::executeOne(const char* _tabname, const char* _testname){
+NDBT_TestSuite::executeOne(Ndb_cluster_connection& con,
+ const char* _tabname, const char* _testname){
if(tests.size() == 0)
return NDBT_FAILED;
- Ndb ndb("TEST_DB");
+ Ndb ndb(&con, "TEST_DB");
ndb.init(1024);
int result = ndb.waitUntilReady(300); // 5 minutes
@@ -805,7 +809,7 @@ NDBT_TestSuite::executeOne(const char* _tabname, const char* _testname){
ndbout << "|- " << ptab->getName() << endl;
- execute(&ndb, ptab, _testname);
+ execute(con, &ndb, ptab, _testname);
if (numTestsFail > 0){
return NDBT_FAILED;
@@ -814,7 +818,8 @@ NDBT_TestSuite::executeOne(const char* _tabname, const char* _testname){
}
}
-void NDBT_TestSuite::execute(Ndb* ndb, const NdbDictionary::Table* pTab,
+void NDBT_TestSuite::execute(Ndb_cluster_connection& con,
+ Ndb* ndb, const NdbDictionary::Table* pTab,
const char* _testname){
int result;
@@ -856,14 +861,14 @@ void NDBT_TestSuite::execute(Ndb* ndb, const NdbDictionary::Table* pTab,
pTab2 = pTab;
}
- ctx = new NDBT_Context();
+ ctx = new NDBT_Context(con);
ctx->setTab(pTab2);
ctx->setNumRecords(records);
ctx->setNumLoops(loops);
if(remote_mgm != NULL)
ctx->setRemoteMgm(remote_mgm);
ctx->setSuite(this);
-
+
result = tests[t]->execute(ctx);
tests[t]->saveTestResult(pTab, result);
if (result != NDBT_OK)
@@ -1035,14 +1040,19 @@ int NDBT_TestSuite::execute(int argc, const char** argv){
loops = _loops;
timer = _timer;
+ Ndb_cluster_connection con;
+ if(con.connect(12, 5, 1))
+ {
+ return NDBT_ProgramExit(NDBT_FAILED);
+ }
+
if(optind == argc){
// No table specified
- res = executeAll(_testname);
+ res = executeAll(con, _testname);
} else {
testSuiteTimer.doStart();
- Ndb ndb("TEST_DB"); ndb.init();
for(int i = optind; i<argc; i++){
- executeOne(argv[i], _testname);
+ executeOne(con, argv[i], _testname);
}
testSuiteTimer.doStop();
res = report(_testname);
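The test driver changes above switch NDBT to a connection-first bootstrap: one explicit Ndb_cluster_connection is created up front and every Ndb object is constructed against it. A minimal sketch of that sequence (timeouts chosen arbitrarily; the connect string is taken from the environment or defaults, and the header names are those of the ndb/include/ndbapi tree):

  #include <NdbApi.hpp>
  #include <ndb_cluster_connection.hpp>

  int connectToCluster()
  {
    ndb_init();                               // once per process

    Ndb_cluster_connection con;
    if (con.connect(12, 5, 1) != 0)           // 12 retries, 5 s apart, verbose
      return -1;

    Ndb ndb(&con, "TEST_DB");
    ndb.init(1024);                           // up to 1024 parallel transactions
    if (ndb.waitUntilReady(30) != 0)          // wait for the data nodes
      return -1;

    return 0;
  }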
diff --git a/ndb/test/src/NdbSchemaOp.cpp b/ndb/test/src/NdbSchemaOp.cpp
index a296094ea9d..4281ceb02c8 100644
--- a/ndb/test/src/NdbSchemaOp.cpp
+++ b/ndb/test/src/NdbSchemaOp.cpp
@@ -113,7 +113,7 @@ NdbSchemaOp::createAttribute( const char* anAttrName,
AttrType anAttrType,
StorageMode aStorageMode,
bool nullable,
- StorageAttributeType aStorageAttr,
+ int aStorageAttr,
int aDistributionKeyFlag,
int aDistributionGroupFlag,
int aDistributionGroupNoOfBits,
@@ -158,7 +158,6 @@ NdbSchemaOp::createAttribute( const char* anAttrName,
col.setPrimaryKey(false);
col.setDistributionKey(aDistributionKeyFlag);
- col.setDistributionGroup(aDistributionGroupFlag,aDistributionGroupNoOfBits);
col.setAutoIncrement(aAutoIncrement);
col.setDefaultValue(aDefaultValue != 0 ? aDefaultValue : "");
diff --git a/ndb/test/src/UtilTransactions.cpp b/ndb/test/src/UtilTransactions.cpp
index 92073143d34..31c323045ed 100644
--- a/ndb/test/src/UtilTransactions.cpp
+++ b/ndb/test/src/UtilTransactions.cpp
@@ -20,13 +20,20 @@
#define VERBOSE 0
-UtilTransactions::UtilTransactions(const NdbDictionary::Table& _tab):
- tab(_tab){
+UtilTransactions::UtilTransactions(const NdbDictionary::Table& _tab,
+ const NdbDictionary::Index* _idx):
+ tab(_tab), idx(_idx), pTrans(0)
+{
m_defaultClearMethod = 3;
}
-UtilTransactions::UtilTransactions(Ndb* ndb, const char * name) :
- tab(* ndb->getDictionary()->getTable(name)){
+UtilTransactions::UtilTransactions(Ndb* ndb,
+ const char * name,
+ const char * index) :
+ tab(* ndb->getDictionary()->getTable(name)),
+ idx(index ? ndb->getDictionary()->getIndex(index, name) : 0),
+ pTrans(0)
+{
m_defaultClearMethod = 3;
}
@@ -51,313 +58,29 @@ UtilTransactions::clearTable(Ndb* pNdb,
int
UtilTransactions::clearTable1(Ndb* pNdb,
int records,
- int parallelism){
-#if 1
+ int parallelism)
+{
return clearTable3(pNdb, records, 1);
-#else
- // Scan all records exclusive and delete
- // them one by one
- int retryAttempt = 0;
- const int retryMax = 100;
- int check;
- NdbConnection *pTrans;
- NdbOperation *pOp;
-
- while (true){
-
- if (retryAttempt >= retryMax){
- g_info << "ERROR: Has retried this operation " << retryAttempt
- << " times, failing!" << endl;
- return NDBT_FAILED;
- }
-
-
- pTrans = pNdb->startTransaction();
- if (pTrans == NULL) {
- NdbError err = pNdb->getNdbError();
-
- if (err.status == NdbError::TemporaryError){
- ERR(err);
- NdbSleep_MilliSleep(50);
- retryAttempt++;
- continue;
- }
- ERR(err);
- RETURN_FAIL(err);
- }
-
- pOp = pTrans->getNdbOperation(tab.getName());
- if (pOp == NULL) {
- NdbError err = pNdb->getNdbError();
- ERR(err);
- pNdb->closeTransaction(pTrans);
- RETURN_FAIL(err);
- }
-
- check = pOp->openScanExclusive(parallelism);
- if( check == -1 ) {
- NdbError err = pNdb->getNdbError();
- ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
- RETURN_FAIL(err);
- }
-
- check = pOp->interpret_exit_ok();
- if( check == -1 ) {
- NdbError err = pNdb->getNdbError();
- ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
- RETURN_FAIL(err);
- }
-#if 0
- // It's not necessary to read and PK's
- // Information about the PK's are sent in
- // KEYINFO20 signals anyway and used by takeOverScan
-
- // Read the primary keys from this table
- for(int a=0; a<tab.getNoOfColumns(); a++){
- if (tab.getColumn(a)->getPrimaryKey()){
- if(pOp->getValue(tab.getColumn(a)->getName()) == NULL){
- ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
- RETURN_FAIL(err);
- }
- }
- }
-#endif
-
- check = pTrans->executeScan();
- if( check == -1 ) {
- NdbError err = pTrans->getNdbError();
-
- if (err.status == NdbError::TemporaryError){
- ERR(err);
- pNdb->closeTransaction(pTrans);
- NdbSleep_MilliSleep(50);
- retryAttempt++;
- continue;
- }
- ERR(err);
- pNdb->closeTransaction(pTrans);
- RETURN_FAIL(err);
- }
-
- int eof;
- int rows = 0;
-
- eof = pTrans->nextScanResult();
- while(eof == 0){
- rows++;
-
- int res = takeOverAndDeleteRecord(pNdb, pOp);
- if(res == RESTART_SCAN){
- eof = -2;
- continue;
- }
-
- if (res != 0){
- NdbError err = pNdb->getNdbError(res);
- pNdb->closeTransaction(pTrans);
- RETURN_FAIL(err);
- }
-
- eof = pTrans->nextScanResult();
- }
-
- if (eof == -1) {
- const NdbError err = pTrans->getNdbError();
-
- if (err.status == NdbError::TemporaryError){
- ERR(err);
- pNdb->closeTransaction(pTrans);
- NdbSleep_MilliSleep(50);
- // If error = 488 there should be no limit on number of retry attempts
- if (err.code != 488)
- retryAttempt++;
- continue;
- }
- ERR(err);
- pNdb->closeTransaction(pTrans);
- RETURN_FAIL(err);
- }
-
- if(eof == -2){
- pNdb->closeTransaction(pTrans);
- NdbSleep_MilliSleep(50);
- retryAttempt++;
- continue;
- }
-
- pNdb->closeTransaction(pTrans);
-
- g_info << rows << " deleted" << endl;
-
- return NDBT_OK;
- }
- return NDBT_FAILED;
-#endif
}
int
UtilTransactions::clearTable2(Ndb* pNdb,
- int records,
- int parallelism){
-#if 1
+ int records,
+ int parallelism)
+{
return clearTable3(pNdb, records, parallelism);
-#else
- // Scan all records exclusive and delete
- // them one by one
- int retryAttempt = 0;
- const int retryMax = 10;
- int deletedRows = 0;
- int check;
- NdbConnection *pTrans;
- NdbOperation *pOp;
-
- while (true){
-
- if (retryAttempt >= retryMax){
- g_info << "ERROR: has retried this operation " << retryAttempt
- << " times, failing!" << endl;
- return NDBT_FAILED;
- }
-
-
- pTrans = pNdb->startTransaction();
- if (pTrans == NULL) {
- const NdbError err = pNdb->getNdbError();
-
- if (err.status == NdbError::TemporaryError){
- ERR(err);
- NdbSleep_MilliSleep(50);
- retryAttempt++;
- continue;
- }
- ERR(err);
- return NDBT_FAILED;
- }
-
- pOp = pTrans->getNdbOperation(tab.getName());
- if (pOp == NULL) {
- ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
- return NDBT_FAILED;
- }
-
- check = pOp->openScanExclusive(parallelism);
- if( check == -1 ) {
- ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
- return NDBT_FAILED;
- }
-
- check = pOp->interpret_exit_ok();
- if( check == -1 ) {
- ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
- return NDBT_FAILED;
- }
-#if 0
- // It's not necessary to read any PK's
- // Information about the PK's are sent in
- // KEYINFO20 signals anyway and used by takeOverScan
-
- // Read the primary keys from this table
- for(int a=0; a<tab.getNoOfColumns(); a++){
- if (tab.getColumn(a)->getPrimaryKey()){
- if(pOp->getValue(tab.getColumn(a)->getName()) == NULL){
- ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
- return -1;
- }
- }
- }
-#endif
-
- check = pTrans->executeScan();
- if( check == -1 ) {
- ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
- return NDBT_FAILED;
- }
-
- int eof;
- NdbConnection* pDelTrans;
-
- while((eof = pTrans->nextScanResult(true)) == 0){
- pDelTrans = pNdb->startTransaction();
- if (pDelTrans == NULL) {
- const NdbError err = pNdb->getNdbError();
-#if 0
- if (err.status == NdbError::TemporaryError){
- ERR(err);
- NdbSleep_MilliSleep(50);
- retryAttempt++;
- continue;
- }
-#endif
- ERR(err);
- pNdb->closeTransaction(pDelTrans);
- return NDBT_FAILED;
- }
- do {
- deletedRows++;
- if (addRowToDelete(pNdb, pDelTrans, pOp) != 0){
- pNdb->closeTransaction(pDelTrans);
- pNdb->closeTransaction(pTrans);
- return NDBT_FAILED;
- }
- } while((eof = pTrans->nextScanResult(false)) == 0);
-
- check = pDelTrans->execute(Commit);
- if( check == -1 ) {
- const NdbError err = pDelTrans->getNdbError();
- ERR(err);
- pNdb->closeTransaction(pDelTrans);
- pNdb->closeTransaction(pTrans);
- return NDBT_FAILED;
- }
- pNdb->closeTransaction(pDelTrans);
-
- }
- if (eof == -1) {
- const NdbError err = pTrans->getNdbError();
-
- if (err.status == NdbError::TemporaryError){
- ERR(err);
- pNdb->closeTransaction(pTrans);
- NdbSleep_MilliSleep(50);
- // If error = 488 there should be no limit on number of retry attempts
- if (err.code != 488)
- retryAttempt++;
- continue;
- }
- ERR(err);
- pNdb->closeTransaction(pTrans);
- return NDBT_FAILED;
- }
-
- pNdb->closeTransaction(pTrans);
-
- g_info << deletedRows << " rows deleted" << endl;
-
- return NDBT_OK;
- }
- return NDBT_FAILED;
-#endif
}
int
UtilTransactions::clearTable3(Ndb* pNdb,
- int records,
- int parallelism){
+ int records,
+ int parallelism){
// Scan all records exclusive and delete
// them one by one
int retryAttempt = 0;
const int retryMax = 10;
int deletedRows = 0;
int check;
- NdbConnection *pTrans;
NdbScanOperation *pOp;
NdbError err;
@@ -380,13 +103,13 @@ UtilTransactions::clearTable3(Ndb* pNdb,
}
goto failed;
}
-
- pOp = pTrans->getNdbScanOperation(tab.getName());
+
+ pOp = getScanOperation(pTrans);
if (pOp == NULL) {
err = pTrans->getNdbError();
if(err.status == NdbError::TemporaryError){
ERR(err);
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
NdbSleep_MilliSleep(50);
par = 1;
goto restart;
@@ -394,8 +117,7 @@ UtilTransactions::clearTable3(Ndb* pNdb,
goto failed;
}
- NdbResultSet * rs = pOp->readTuplesExclusive(par);
- if( rs == 0 ) {
+ if( pOp->readTuplesExclusive(par) ) {
err = pTrans->getNdbError();
goto failed;
}
@@ -404,20 +126,20 @@ UtilTransactions::clearTable3(Ndb* pNdb,
err = pTrans->getNdbError();
if(err.status == NdbError::TemporaryError){
ERR(err);
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
NdbSleep_MilliSleep(50);
continue;
}
goto failed;
}
- while((check = rs->nextResult(true)) == 0){
+ while((check = pOp->nextResult(true)) == 0){
do {
- if (rs->deleteTuple() != 0){
+ if (pOp->deleteCurrentTuple() != 0){
goto failed;
}
deletedRows++;
- } while((check = rs->nextResult(false)) == 0);
+ } while((check = pOp->nextResult(false)) == 0);
if(check != -1){
check = pTrans->execute(Commit);
@@ -428,7 +150,7 @@ UtilTransactions::clearTable3(Ndb* pNdb,
if(check == -1){
if(err.status == NdbError::TemporaryError){
ERR(err);
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
NdbSleep_MilliSleep(50);
par = 1;
goto restart;
@@ -440,20 +162,20 @@ UtilTransactions::clearTable3(Ndb* pNdb,
err = pTrans->getNdbError();
if(err.status == NdbError::TemporaryError){
ERR(err);
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
NdbSleep_MilliSleep(50);
par = 1;
goto restart;
}
goto failed;
}
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_OK;
}
return NDBT_FAILED;
failed:
- if(pTrans != 0) pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
ERR(err);
return (err.code != 0 ? err.code : NDBT_FAILED);
}
@@ -468,7 +190,6 @@ UtilTransactions::copyTableData(Ndb* pNdb,
int insertedRows = 0;
int parallelism = 240;
int check;
- NdbConnection *pTrans;
NdbScanOperation *pOp;
NDBT_ResultRow row(tab);
@@ -498,22 +219,20 @@ UtilTransactions::copyTableData(Ndb* pNdb,
pOp = pTrans->getNdbScanOperation(tab.getName());
if (pOp == NULL) {
ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
- NdbResultSet* rs = pOp->readTuples(NdbScanOperation::LM_Read,
- parallelism);
- if( check == -1 ) {
+ if( pOp->readTuples(NdbScanOperation::LM_Read, parallelism) ) {
ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
check = pOp->interpret_exit_ok();
if( check == -1 ) {
ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
@@ -522,7 +241,7 @@ UtilTransactions::copyTableData(Ndb* pNdb,
if ((row.attributeStore(a) =
pOp->getValue(tab.getColumn(a)->getName())) == 0) {
ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
}
@@ -530,26 +249,26 @@ UtilTransactions::copyTableData(Ndb* pNdb,
check = pTrans->execute(NoCommit);
if( check == -1 ) {
ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
int eof;
- while((eof = rs->nextResult(true)) == 0){
+ while((eof = pOp->nextResult(true)) == 0){
do {
insertedRows++;
if (addRowToInsert(pNdb, pTrans, row, destName) != 0){
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
- } while((eof = rs->nextResult(false)) == 0);
+ } while((eof = pOp->nextResult(false)) == 0);
check = pTrans->execute(Commit);
pTrans->restart();
if( check == -1 ) {
const NdbError err = pTrans->getNdbError();
ERR(err);
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
}
@@ -558,7 +277,7 @@ UtilTransactions::copyTableData(Ndb* pNdb,
if (err.status == NdbError::TemporaryError){
ERR(err);
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
NdbSleep_MilliSleep(50);
// If error = 488 there should be no limit on number of retry attempts
if (err.code != 488)
@@ -566,11 +285,11 @@ UtilTransactions::copyTableData(Ndb* pNdb,
continue;
}
ERR(err);
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
g_info << insertedRows << " rows copied" << endl;
@@ -628,7 +347,6 @@ UtilTransactions::scanReadRecords(Ndb* pNdb,
int retryAttempt = 0;
const int retryMax = 100;
int check;
- NdbConnection *pTrans;
NdbScanOperation *pOp;
NDBT_ResultRow row(tab);
@@ -654,10 +372,10 @@ UtilTransactions::scanReadRecords(Ndb* pNdb,
return NDBT_FAILED;
}
- pOp = pTrans->getNdbScanOperation(tab.getName());
+ pOp = getScanOperation(pTrans);
if (pOp == NULL) {
const NdbError err = pNdb->getNdbError();
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
if (err.status == NdbError::TemporaryError){
ERR(err);
@@ -669,17 +387,16 @@ UtilTransactions::scanReadRecords(Ndb* pNdb,
return NDBT_FAILED;
}
- NdbResultSet * rs = pOp->readTuples(lm, 0, parallelism);
- if( rs == 0 ) {
+ if( pOp->readTuples(lm, 0, parallelism) ) {
ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
check = pOp->interpret_exit_ok();
if( check == -1 ) {
ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
@@ -691,7 +408,7 @@ UtilTransactions::scanReadRecords(Ndb* pNdb,
if ((row.attributeStore(attrib_list[a]) =
pOp->getValue(tab.getColumn(attrib_list[a])->getName())) == 0) {
ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
}
@@ -704,13 +421,13 @@ UtilTransactions::scanReadRecords(Ndb* pNdb,
if (err.status == NdbError::TemporaryError){
ERR(err);
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
NdbSleep_MilliSleep(50);
retryAttempt++;
continue;
}
ERR(err);
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
@@ -718,7 +435,7 @@ UtilTransactions::scanReadRecords(Ndb* pNdb,
int rows = 0;
- while((eof = rs->nextResult()) == 0){
+ while((eof = pOp->nextResult()) == 0){
rows++;
// Call callback for each record returned
@@ -730,17 +447,17 @@ UtilTransactions::scanReadRecords(Ndb* pNdb,
if (err.status == NdbError::TemporaryError){
ERR(err);
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
NdbSleep_MilliSleep(50);
retryAttempt++;
continue;
}
ERR(err);
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
g_info << rows << " rows have been read" << endl;
if (records != 0 && rows != records){
g_info << "Check expected number of records failed" << endl
@@ -766,37 +483,26 @@ UtilTransactions::selectCount(Ndb* pNdb,
int check;
NdbScanOperation *pOp;
+ if(!pTrans)
+ pTrans = pNdb->startTransaction();
+
while (true){
+
if (retryAttempt >= retryMax){
g_info << "ERROR: has retried this operation " << retryAttempt
<< " times, failing!" << endl;
return NDBT_FAILED;
}
- if(!pTrans)
- pTrans = pNdb->startTransaction();
-
- if(!pTrans)
- {
- const NdbError err = pNdb->getNdbError();
-
- if (err.status == NdbError::TemporaryError)
- continue;
- return NDBT_FAILED;
- }
-
- pOp = pTrans->getNdbScanOperation(tab.getName());
+ pOp = getScanOperation(pTrans);
if (pOp == NULL) {
ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
- pTrans = 0;
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
- NdbResultSet * rs = pOp->readTuples(lm);
- if( rs == 0) {
+ if( pOp->readTuples(lm) ) {
ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
- pTrans = 0;
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
@@ -809,8 +515,7 @@ UtilTransactions::selectCount(Ndb* pNdb,
check = pOp->interpret_exit_ok();
if( check == -1 ) {
ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
- pTrans = 0;
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
}
@@ -819,8 +524,7 @@ UtilTransactions::selectCount(Ndb* pNdb,
check = pTrans->execute(NoCommit);
if( check == -1 ) {
ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
- pTrans = 0;
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
@@ -828,27 +532,24 @@ UtilTransactions::selectCount(Ndb* pNdb,
int rows = 0;
- while((eof = rs->nextResult()) == 0){
+ while((eof = pOp->nextResult()) == 0){
rows++;
}
if (eof == -1) {
const NdbError err = pTrans->getNdbError();
if (err.status == NdbError::TemporaryError){
- pNdb->closeTransaction(pTrans);
- pTrans = 0;
+ closeTransaction(pNdb);
NdbSleep_MilliSleep(50);
retryAttempt++;
continue;
}
ERR(err);
- pNdb->closeTransaction(pTrans);
- pTrans = 0;
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
- pNdb->closeTransaction(pTrans);
- pTrans = 0;
+ closeTransaction(pNdb);
if (count_rows != NULL){
*count_rows = rows;
@@ -922,14 +623,13 @@ UtilTransactions::scanAndCompareUniqueIndex(Ndb* pNdb,
int retryAttempt = 0;
const int retryMax = 100;
int check;
- NdbConnection *pTrans;
NdbScanOperation *pOp;
NDBT_ResultRow row(tab);
parallelism = 1;
while (true){
-
+restart:
if (retryAttempt >= retryMax){
g_info << "ERROR: has retried this operation " << retryAttempt
<< " times, failing!" << endl;
@@ -953,7 +653,7 @@ UtilTransactions::scanAndCompareUniqueIndex(Ndb* pNdb,
pOp = pTrans->getNdbScanOperation(tab.getName());
if (pOp == NULL) {
const NdbError err = pNdb->getNdbError();
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
ERR(err);
if (err.status == NdbError::TemporaryError){
@@ -964,23 +664,23 @@ UtilTransactions::scanAndCompareUniqueIndex(Ndb* pNdb,
return NDBT_FAILED;
}
- NdbResultSet* rs;
+ int rs;
if(transactional){
rs = pOp->readTuples(NdbScanOperation::LM_Read, 0, parallelism);
} else {
rs = pOp->readTuples(NdbScanOperation::LM_CommittedRead, 0, parallelism);
}
- if( rs == 0 ) {
+ if( rs != 0 ) {
ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
check = pOp->interpret_exit_ok();
if( check == -1 ) {
ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
@@ -989,7 +689,7 @@ UtilTransactions::scanAndCompareUniqueIndex(Ndb* pNdb,
if ((row.attributeStore(a) =
pOp->getValue(tab.getColumn(a)->getName())) == 0) {
ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
}
@@ -1000,13 +700,13 @@ UtilTransactions::scanAndCompareUniqueIndex(Ndb* pNdb,
if (err.status == NdbError::TemporaryError){
ERR(err);
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
NdbSleep_MilliSleep(50);
retryAttempt++;
continue;
}
ERR(err);
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
@@ -1014,17 +714,32 @@ UtilTransactions::scanAndCompareUniqueIndex(Ndb* pNdb,
int rows = 0;
- while((eof = rs->nextResult()) == 0){
+ while((eof = pOp->nextResult()) == 0){
rows++;
// ndbout << row.c_str().c_str() << endl;
-
if (readRowFromTableAndIndex(pNdb,
pTrans,
pIndex,
row) != NDBT_OK){
- pNdb->closeTransaction(pTrans);
+
+ while((eof= pOp->nextResult(false)) == 0);
+ if(eof == 2)
+ eof = pOp->nextResult(true); // this should give -1
+ if(eof == -1)
+ {
+ const NdbError err = pTrans->getNdbError();
+
+ if (err.status == NdbError::TemporaryError){
+ ERR(err);
+ closeTransaction(pNdb);
+ NdbSleep_MilliSleep(50);
+ retryAttempt++;
+ goto restart;
+ }
+ }
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
}
@@ -1033,18 +748,17 @@ UtilTransactions::scanAndCompareUniqueIndex(Ndb* pNdb,
if (err.status == NdbError::TemporaryError){
ERR(err);
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
NdbSleep_MilliSleep(50);
retryAttempt++;
- rows--;
continue;
}
ERR(err);
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_OK;
}
@@ -1062,7 +776,6 @@ UtilTransactions::readRowFromTableAndIndex(Ndb* pNdb,
const int retryMax = 100;
int check, a;
NdbConnection *pTrans1=NULL;
- NdbResultSet *cursor= NULL;
NdbOperation *pOp;
int return_code= NDBT_FAILED;
@@ -1112,7 +825,6 @@ UtilTransactions::readRowFromTableAndIndex(Ndb* pNdb,
check = pOp->readTuple();
if( check == -1 ) {
ERR(pTrans1->getNdbError());
- pNdb->closeTransaction(pTrans1);
goto close_all;
}
@@ -1190,7 +902,7 @@ UtilTransactions::readRowFromTableAndIndex(Ndb* pNdb,
if (pIndexOp) {
not_ok = pIndexOp->readTuple() == -1;
} else {
- not_ok = (cursor= pScanOp->readTuples()) == 0;
+ not_ok = pScanOp->readTuples();
}
if( not_ok ) {
@@ -1244,7 +956,7 @@ UtilTransactions::readRowFromTableAndIndex(Ndb* pNdb,
#if VERBOSE
printf("\n");
#endif
-
+ scanTrans->refresh();
check = pTrans1->execute(Commit);
if( check == -1 ) {
const NdbError err = pTrans1->getNdbError();
@@ -1267,7 +979,7 @@ UtilTransactions::readRowFromTableAndIndex(Ndb* pNdb,
*/
if(!null_found){
if (pScanOp) {
- if (cursor->nextResult() != 0){
+ if (pScanOp->nextResult() != 0){
const NdbError err = pTrans1->getNdbError();
ERR(err);
ndbout << "Error when comparing records - index op next_result missing" << endl;
@@ -1282,7 +994,7 @@ UtilTransactions::readRowFromTableAndIndex(Ndb* pNdb,
goto close_all;
}
if (pScanOp) {
- if (cursor->nextResult() == 0){
+ if (pScanOp->nextResult() == 0){
ndbout << "Error when comparing records - index op next_result to many" << endl;
ndbout << "row: " << row.c_str().c_str() << endl;
goto close_all;
@@ -1294,8 +1006,6 @@ UtilTransactions::readRowFromTableAndIndex(Ndb* pNdb,
}
close_all:
- if (cursor)
- cursor->close();
if (pTrans1)
pNdb->closeTransaction(pTrans1);
@@ -1311,10 +1021,8 @@ UtilTransactions::verifyOrderedIndex(Ndb* pNdb,
int retryAttempt = 0;
const int retryMax = 100;
int check;
- NdbConnection *pTrans;
NdbScanOperation *pOp;
NdbIndexScanOperation * iop = 0;
- NdbResultSet* cursor= 0;
NDBT_ResultRow scanRow(tab);
NDBT_ResultRow pkRow(tab);
@@ -1349,23 +1057,20 @@ UtilTransactions::verifyOrderedIndex(Ndb* pNdb,
pOp = pTrans->getNdbScanOperation(tab.getName());
if (pOp == NULL) {
ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
- NdbResultSet*
- rs = pOp->readTuples(NdbScanOperation::LM_Read, 0, parallelism);
-
- if( rs == 0 ) {
+ if( pOp->readTuples(NdbScanOperation::LM_Read, 0, parallelism) ) {
ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
check = pOp->interpret_exit_ok();
if( check == -1 ) {
ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
@@ -1380,19 +1085,19 @@ UtilTransactions::verifyOrderedIndex(Ndb* pNdb,
if (err.status == NdbError::TemporaryError){
ERR(err);
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
NdbSleep_MilliSleep(50);
retryAttempt++;
continue;
}
ERR(err);
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
int eof;
int rows = 0;
- while(check == 0 && (eof = rs->nextResult()) == 0){
+ while(check == 0 && (eof = pOp->nextResult()) == 0){
rows++;
bool null_found= false;
@@ -1417,10 +1122,11 @@ UtilTransactions::verifyOrderedIndex(Ndb* pNdb,
if(!iop && (iop= pTrans->getNdbIndexScanOperation(indexName,
tab.getName())))
{
- cursor= iop->readTuples(NdbScanOperation::LM_CommittedRead,
- parallelism);
+ if(iop->readTuples(NdbScanOperation::LM_CommittedRead,
+ parallelism))
+ goto error;
iop->interpret_exit_ok();
- if(!cursor || get_values(iop, indexRow))
+ if(get_values(iop, indexRow))
goto error;
}
else if(!iop || iop->reset_bounds())
@@ -1440,17 +1146,17 @@ UtilTransactions::verifyOrderedIndex(Ndb* pNdb,
g_err << "Error when comapring records" << endl;
g_err << " scanRow: \n" << scanRow.c_str().c_str() << endl;
g_err << " pkRow: \n" << pkRow.c_str().c_str() << endl;
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
if(!null_found)
{
- if((res= cursor->nextResult()) != 0){
+ if((res= iop->nextResult()) != 0){
g_err << "Failed to find row using index: " << res << endl;
ERR(pTrans->getNdbError());
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
@@ -1458,14 +1164,14 @@ UtilTransactions::verifyOrderedIndex(Ndb* pNdb,
g_err << "Error when comapring records" << endl;
g_err << " scanRow: \n" << scanRow.c_str().c_str() << endl;
g_err << " indexRow: \n" << indexRow.c_str().c_str() << endl;
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
- if(cursor->nextResult() == 0){
+ if(iop->nextResult() == 0){
g_err << "Found extra row!!" << endl;
g_err << " indexRow: \n" << indexRow.c_str().c_str() << endl;
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
}
@@ -1478,18 +1184,18 @@ UtilTransactions::verifyOrderedIndex(Ndb* pNdb,
if (err.status == NdbError::TemporaryError){
ERR(err);
iop = 0;
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
NdbSleep_MilliSleep(50);
retryAttempt++;
rows--;
continue;
}
ERR(err);
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_FAILED;
}
- pNdb->closeTransaction(pTrans);
+ closeTransaction(pNdb);
return NDBT_OK;
}
@@ -1537,3 +1243,218 @@ UtilTransactions::equal(const NdbDictionary::Table* pTable,
}
return 0;
}
+
+NdbScanOperation*
+UtilTransactions::getScanOperation(NdbConnection* pTrans)
+{
+ return (NdbScanOperation*)
+ getOperation(pTrans, NdbOperation::OpenScanRequest);
+}
+
+NdbOperation*
+UtilTransactions::getOperation(NdbConnection* pTrans,
+ NdbOperation::OperationType type)
+{
+ switch(type){
+ case NdbOperation::ReadRequest:
+ case NdbOperation::ReadExclusive:
+ if(idx)
+ {
+ switch(idx->getType()){
+ case NdbDictionary::Index::UniqueHashIndex:
+ return pTrans->getNdbIndexOperation(idx->getName(), tab.getName());
+ case NdbDictionary::Index::OrderedIndex:
+ return pTrans->getNdbIndexScanOperation(idx->getName(), tab.getName());
+ }
+ }
+ case NdbOperation::InsertRequest:
+ case NdbOperation::WriteRequest:
+ return pTrans->getNdbOperation(tab.getName());
+ case NdbOperation::UpdateRequest:
+ case NdbOperation::DeleteRequest:
+ if(idx)
+ {
+ switch(idx->getType()){
+ case NdbDictionary::Index::UniqueHashIndex:
+ return pTrans->getNdbIndexOperation(idx->getName(), tab.getName());
+ }
+ }
+ return pTrans->getNdbOperation(tab.getName());
+ case NdbOperation::OpenScanRequest:
+ if(idx)
+ {
+ switch(idx->getType()){
+ case NdbDictionary::Index::OrderedIndex:
+ return pTrans->getNdbIndexScanOperation(idx->getName(), tab.getName());
+ }
+ }
+ return pTrans->getNdbScanOperation(tab.getName());
+ case NdbOperation::OpenRangeScanRequest:
+ if(idx)
+ {
+ switch(idx->getType()){
+ case NdbDictionary::Index::OrderedIndex:
+ return pTrans->getNdbIndexScanOperation(idx->getName(), tab.getName());
+ }
+ }
+ return 0;
+ }
+}
+
+#include <HugoOperations.hpp>
+
+int
+UtilTransactions::closeTransaction(Ndb* pNdb)
+{
+ if (pTrans != NULL){
+ pNdb->closeTransaction(pTrans);
+ pTrans = NULL;
+ }
+ return 0;
+}
+
+int
+UtilTransactions::compare(Ndb* pNdb, const char* tab_name2, int flags){
+
+
+ NdbError err;
+ int return_code= -1, row_count= 0;
+ int retryAttempt = 0, retryMax = 10;
+
+ HugoCalculator calc(tab);
+ NDBT_ResultRow row(tab);
+ const NdbDictionary::Table* tmp= pNdb->getDictionary()->getTable(tab_name2);
+ if(tmp == 0)
+ {
+ g_err << "Unable to lookup table: " << tab_name2
+ << endl << pNdb->getDictionary()->getNdbError() << endl;
+ return -1;
+ }
+ const NdbDictionary::Table& tab2= *tmp;
+
+ HugoOperations cmp(tab2);
+ UtilTransactions count(tab2);
+
+ while (true){
+
+ if (retryAttempt++ >= retryMax){
+ g_info << "ERROR: has retried this operation " << retryAttempt
+ << " times, failing!" << endl;
+ return -1;
+ }
+
+ NdbScanOperation *pOp= 0;
+ pTrans = pNdb->startTransaction();
+ if (pTrans == NULL) {
+ err = pNdb->getNdbError();
+ goto error;
+ }
+
+ pOp= pTrans->getNdbScanOperation(tab.getName());
+ if (pOp == NULL) {
+ ERR(err= pTrans->getNdbError());
+ goto error;
+ }
+
+ if( pOp->readTuples(NdbScanOperation::LM_Read) ) {
+ ERR(err= pTrans->getNdbError());
+ goto error;
+ }
+
+ if( pOp->interpret_exit_ok() == -1 ) {
+ ERR(err= pTrans->getNdbError());
+ goto error;
+ }
+
+ // Read all attributes
+ {
+ for (int a = 0; a < tab.getNoOfColumns(); a++){
+ if ((row.attributeStore(a) =
+ pOp->getValue(tab.getColumn(a)->getName())) == 0) {
+ ERR(err= pTrans->getNdbError());
+ goto error;
+ }
+ }
+ }
+
+ if( pTrans->execute(NoCommit) == -1 ) {
+ ERR(err= pTrans->getNdbError());
+ goto error;
+ }
+
+ {
+ int eof;
+ while((eof = pOp->nextResult(true)) == 0)
+ {
+ do {
+ row_count++;
+ if(cmp.startTransaction(pNdb) != NDBT_OK)
+ {
+ ERR(err= pNdb->getNdbError());
+ goto error;
+ }
+ int rowNo= calc.getIdValue(&row);
+ if(cmp.pkReadRecord(pNdb, rowNo, 1) != NDBT_OK)
+ {
+ ERR(err= cmp.getTransaction()->getNdbError());
+ goto error;
+ }
+ if(cmp.execute_Commit(pNdb) != NDBT_OK)
+ {
+ ERR(err= cmp.getTransaction()->getNdbError());
+ goto error;
+ }
+ if(row != cmp.get_row(0))
+ {
+ g_err << "COMPARE FAILED" << endl;
+ g_err << row << endl;
+ g_err << cmp.get_row(0) << endl;
+ return_code= 1;
+ goto close;
+ }
+ retryAttempt= 0;
+ cmp.closeTransaction(pNdb);
+ } while((eof = pOp->nextResult(false)) == 0);
+ }
+ if (eof == -1)
+ {
+ err = pTrans->getNdbError();
+ goto error;
+ }
+ }
+
+ pTrans->close(); pTrans= 0;
+
+ g_info << row_count << " rows compared" << endl;
+ {
+ int row_count2;
+ if(count.selectCount(pNdb, 0, &row_count2) != NDBT_OK)
+ {
+ g_err << "Failed to count rows in tab_name2" << endl;
+ return -1;
+ }
+
+ g_info << row_count2 << " rows in tab_name2" << endl;
+ return (row_count == row_count2 ? 0 : 1);
+ }
+error:
+ if(err.status == NdbError::TemporaryError)
+ {
+ NdbSleep_MilliSleep(50);
+ if(pTrans != 0)
+ {
+ pTrans->close();
+ pTrans= 0;
+ }
+ if(cmp.getTransaction())
+ cmp.closeTransaction(pNdb);
+ continue;
+ }
+ break;
+ }
+
+close:
+ if(pTrans != 0) pTrans->close();
+
+ return return_code;
+}
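
For reference, a minimal sketch of the cursor-less scan-and-delete loop that clearTable3 above (and clear_table in ndb/tools/delete_all.cpp below) is built around, with the retry and temporary-error handling stripped out; the function name, table name and parallelism value are placeholders:

    #include <NdbApi.hpp>

    // Sketch only: rows are fetched straight from the NdbScanOperation;
    // there is no separate NdbResultSet cursor any more.
    int delete_all_rows(Ndb* pNdb, const char* tabName)
    {
      NdbTransaction* pTrans = pNdb->startTransaction();
      NdbScanOperation* pOp = pTrans->getNdbScanOperation(tabName);
      pOp->readTuplesExclusive(240);                 // parallelism is a placeholder
      pTrans->execute(NdbTransaction::NoCommit);     // open the scan

      int check;
      while ((check = pOp->nextResult(true)) == 0) { // fetch the next batch
        do {
          pOp->deleteCurrentTuple();                 // mark the current row
        } while ((check = pOp->nextResult(false)) == 0);
        pTrans->execute(NdbTransaction::Commit);     // delete the batch
        pTrans->restart();
      }
      pNdb->closeTransaction(pTrans);
      return (check == 1) ? 0 : -1;                  // 1 == end of scan
    }
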
diff --git a/ndb/test/tools/Makefile.am b/ndb/test/tools/Makefile.am
index a6a013bb263..873136e254d 100644
--- a/ndb/test/tools/Makefile.am
+++ b/ndb/test/tools/Makefile.am
@@ -1,9 +1,8 @@
-ndbtest_PROGRAMS = hugoCalculator hugoLoad hugoFill hugoLockRecords hugoPkDelete hugoPkRead hugoPkReadRecord hugoPkUpdate hugoScanRead hugoScanUpdate restart verify_index copy_tab create_index ndb_cpcc
+ndbtest_PROGRAMS = hugoLoad hugoFill hugoLockRecords hugoPkDelete hugoPkRead hugoPkReadRecord hugoPkUpdate hugoScanRead hugoScanUpdate restart verify_index copy_tab create_index ndb_cpcc
# transproxy
-hugoCalculator_SOURCES = hugoCalculator.cpp
hugoFill_SOURCES = hugoFill.cpp
hugoLoad_SOURCES = hugoLoad.cpp
hugoLockRecords_SOURCES = hugoLockRecords.cpp
diff --git a/ndb/test/tools/copy_tab.cpp b/ndb/test/tools/copy_tab.cpp
index 30141acaa78..97370b170ef 100644
--- a/ndb/test/tools/copy_tab.cpp
+++ b/ndb/test/tools/copy_tab.cpp
@@ -56,9 +56,12 @@ int main(int argc, const char** argv){
_tabname = argv[optind];
_to_tabname = argv[optind+1];
- if (_connectstr)
- Ndb::setConnectString(_connectstr);
- Ndb MyNdb(_dbname);
+ Ndb_cluster_connection con(_connectstr);
+ if(con.connect(12, 5, 1) != 0)
+ {
+ return NDBT_ProgramExit(NDBT_FAILED);
+ }
+ Ndb MyNdb(&con,_dbname);
if(MyNdb.init() != 0){
ERR(MyNdb.getNdbError());
return NDBT_ProgramExit(NDBT_FAILED);
diff --git a/ndb/test/tools/create_index.cpp b/ndb/test/tools/create_index.cpp
index 6e4c5377f4a..9f9c26aa0da 100644
--- a/ndb/test/tools/create_index.cpp
+++ b/ndb/test/tools/create_index.cpp
@@ -53,8 +53,13 @@ main(int argc, const char** argv){
return NDBT_ProgramExit(NDBT_WRONGARGS);
}
+ Ndb_cluster_connection con;
+ if(con.connect(12, 5, 1) != 0)
+ {
+ return NDBT_ProgramExit(NDBT_FAILED);
+ }
- Ndb MyNdb(_dbname);
+ Ndb MyNdb(&con, _dbname);
if(MyNdb.init() != 0){
ERR(MyNdb.getNdbError());
return NDBT_ProgramExit(NDBT_FAILED);
diff --git a/ndb/test/tools/hugoFill.cpp b/ndb/test/tools/hugoFill.cpp
index 6253bd1bb12..6408b2987f9 100644
--- a/ndb/test/tools/hugoFill.cpp
+++ b/ndb/test/tools/hugoFill.cpp
@@ -51,7 +51,12 @@ int main(int argc, const char** argv){
_tabname = argv[optind];
// Connect to Ndb
- Ndb MyNdb( "TEST_DB" );
+ Ndb_cluster_connection con;
+ if(con.connect(12, 5, 1) != 0)
+ {
+ return NDBT_ProgramExit(NDBT_FAILED);
+ }
+ Ndb MyNdb(&con, "TEST_DB" );
if(MyNdb.init() != 0){
ERR(MyNdb.getNdbError());
diff --git a/ndb/test/tools/hugoLoad.cpp b/ndb/test/tools/hugoLoad.cpp
index 3a0bba07df3..1a229169650 100644
--- a/ndb/test/tools/hugoLoad.cpp
+++ b/ndb/test/tools/hugoLoad.cpp
@@ -56,7 +56,12 @@ int main(int argc, const char** argv){
_tabname = argv[optind];
// Connect to Ndb
- Ndb MyNdb( db ? db : "TEST_DB" );
+ Ndb_cluster_connection con;
+ if(con.connect(12, 5, 1) != 0)
+ {
+ return NDBT_ProgramExit(NDBT_FAILED);
+ }
+ Ndb MyNdb( &con, db ? db : "TEST_DB" );
if(MyNdb.init() != 0){
ERR(MyNdb.getNdbError());
diff --git a/ndb/test/tools/hugoLockRecords.cpp b/ndb/test/tools/hugoLockRecords.cpp
index 629408d401d..c0d0b9f9c5a 100644
--- a/ndb/test/tools/hugoLockRecords.cpp
+++ b/ndb/test/tools/hugoLockRecords.cpp
@@ -59,7 +59,12 @@ int main(int argc, const char** argv){
_tabname = argv[optind];
// Connect to Ndb
- Ndb MyNdb( "TEST_DB" );
+ Ndb_cluster_connection con;
+ if(con.connect(12, 5, 1) != 0)
+ {
+ return NDBT_ProgramExit(NDBT_FAILED);
+ }
+ Ndb MyNdb(&con, "TEST_DB" );
if(MyNdb.init() != 0){
ERR(MyNdb.getNdbError());
diff --git a/ndb/test/tools/hugoPkDelete.cpp b/ndb/test/tools/hugoPkDelete.cpp
index 78a90ebcb46..84e7ded0add 100644
--- a/ndb/test/tools/hugoPkDelete.cpp
+++ b/ndb/test/tools/hugoPkDelete.cpp
@@ -55,7 +55,12 @@ int main(int argc, const char** argv){
_tabname = argv[optind];
// Connect to Ndb
- Ndb MyNdb( "TEST_DB" );
+ Ndb_cluster_connection con;
+ if(con.connect(12, 5, 1) != 0)
+ {
+ return NDBT_ProgramExit(NDBT_FAILED);
+ }
+ Ndb MyNdb(&con, "TEST_DB" );
if(MyNdb.init() != 0){
ERR(MyNdb.getNdbError());
diff --git a/ndb/test/tools/hugoPkRead.cpp b/ndb/test/tools/hugoPkRead.cpp
index cf08b137e8e..e3702dc5ca1 100644
--- a/ndb/test/tools/hugoPkRead.cpp
+++ b/ndb/test/tools/hugoPkRead.cpp
@@ -60,7 +60,12 @@ int main(int argc, const char** argv){
// Connect to Ndb
- Ndb MyNdb( "TEST_DB" );
+ Ndb_cluster_connection con;
+ if(con.connect(12, 5, 1) != 0)
+ {
+ return NDBT_ProgramExit(NDBT_FAILED);
+ }
+ Ndb MyNdb(&con, "TEST_DB" );
if(MyNdb.init() != 0){
ERR(MyNdb.getNdbError());
diff --git a/ndb/test/tools/hugoPkReadRecord.cpp b/ndb/test/tools/hugoPkReadRecord.cpp
index 38b7cae2bf4..c60a994c7d4 100644
--- a/ndb/test/tools/hugoPkReadRecord.cpp
+++ b/ndb/test/tools/hugoPkReadRecord.cpp
@@ -62,7 +62,12 @@ int main(int argc, const char** argv)
<< "Row: " << _row << ", PrimaryKey: " << _primaryKey
<< endl;
- Ndb* ndb = new Ndb("TEST_DB");
+ Ndb_cluster_connection con;
+ if(con.connect(12, 5, 1) != 0)
+ {
+ return NDBT_ProgramExit(NDBT_FAILED);
+ }
+ Ndb* ndb = new Ndb(&con, "TEST_DB");
if (ndb->init() == 0 && ndb->waitUntilReady(30) == 0)
{
NdbConnection* conn = ndb->startTransaction();
diff --git a/ndb/test/tools/hugoPkUpdate.cpp b/ndb/test/tools/hugoPkUpdate.cpp
index ccbbccfc523..7d46ae95c29 100644
--- a/ndb/test/tools/hugoPkUpdate.cpp
+++ b/ndb/test/tools/hugoPkUpdate.cpp
@@ -58,7 +58,12 @@ int main(int argc, const char** argv){
_tabname = argv[optind];
// Connect to Ndb
- Ndb MyNdb( db ? db : "TEST_DB" );
+ Ndb_cluster_connection con;
+ if(con.connect(12, 5, 1) != 0)
+ {
+ return NDBT_ProgramExit(NDBT_FAILED);
+ }
+ Ndb MyNdb( &con, db ? db : "TEST_DB" );
if(MyNdb.init() != 0){
ERR(MyNdb.getNdbError());
diff --git a/ndb/test/tools/hugoScanRead.cpp b/ndb/test/tools/hugoScanRead.cpp
index 4cd428f44c6..a345bb88d0e 100644
--- a/ndb/test/tools/hugoScanRead.cpp
+++ b/ndb/test/tools/hugoScanRead.cpp
@@ -62,7 +62,12 @@ int main(int argc, const char** argv){
_tabname = argv[optind];
// Connect to Ndb
- Ndb MyNdb( db ? db : "TEST_DB" );
+ Ndb_cluster_connection con;
+ if(con.connect(12, 5, 1) != 0)
+ {
+ return NDBT_ProgramExit(NDBT_FAILED);
+ }
+ Ndb MyNdb( &con, db ? db : "TEST_DB" );
if(MyNdb.init() != 0){
ERR(MyNdb.getNdbError());
@@ -86,8 +91,7 @@ int main(int argc, const char** argv){
if(!pIdx)
ndbout << " Index " << argv[optind+1] << " not found" << endl;
else
- if(pIdx->getType() != NdbDictionary::Index::UniqueOrderedIndex &&
- pIdx->getType() != NdbDictionary::Index::OrderedIndex)
+ if(pIdx->getType() != NdbDictionary::Index::OrderedIndex)
{
ndbout << " Index " << argv[optind+1] << " is not scannable" << endl;
pIdx = 0;
diff --git a/ndb/test/tools/hugoScanUpdate.cpp b/ndb/test/tools/hugoScanUpdate.cpp
index 8fe84779c6b..6960fa44b96 100644
--- a/ndb/test/tools/hugoScanUpdate.cpp
+++ b/ndb/test/tools/hugoScanUpdate.cpp
@@ -59,7 +59,12 @@ int main(int argc, const char** argv){
_tabname = argv[optind];
// Connect to Ndb
- Ndb MyNdb( db ? db : "TEST_DB" );
+ Ndb_cluster_connection con;
+ if(con.connect(12, 5, 1) != 0)
+ {
+ return NDBT_ProgramExit(NDBT_FAILED);
+ }
+ Ndb MyNdb( &con, db ? db : "TEST_DB" );
if(MyNdb.init() != 0){
ERR(MyNdb.getNdbError());
diff --git a/ndb/test/tools/verify_index.cpp b/ndb/test/tools/verify_index.cpp
index 6c8e304e1a1..acc97af883b 100644
--- a/ndb/test/tools/verify_index.cpp
+++ b/ndb/test/tools/verify_index.cpp
@@ -53,7 +53,12 @@ int main(int argc, const char** argv){
_indexname = argv[optind+1];
// Connect to Ndb
- Ndb MyNdb( "TEST_DB" );
+ Ndb_cluster_connection con;
+ if(con.connect(12, 5, 1) != 0)
+ {
+ return NDBT_ProgramExit(NDBT_FAILED);
+ }
+ Ndb MyNdb(&con, "TEST_DB" );
if(MyNdb.init() != 0){
ERR(MyNdb.getNdbError());
diff --git a/ndb/tools/Makefile.am b/ndb/tools/Makefile.am
index 795441380a8..17b71f7e33c 100644
--- a/ndb/tools/Makefile.am
+++ b/ndb/tools/Makefile.am
@@ -30,12 +30,14 @@ ndb_restore_SOURCES = restore/restore_main.cpp \
restore/consumer.cpp \
restore/consumer_restore.cpp \
restore/consumer_printer.cpp \
- restore/Restore.cpp $(tools_common_sources)
+ restore/Restore.cpp \
+ ../test/src/NDBT_ResultRow.cpp $(tools_common_sources)
ndb_config_SOURCES = ndb_config.cpp \
../src/mgmsrv/Config.cpp \
../src/mgmsrv/ConfigInfo.cpp \
../src/mgmsrv/InitConfigFileParser.cpp
+
ndb_config_CXXFLAGS = -I$(top_srcdir)/ndb/src/mgmapi \
-I$(top_srcdir)/ndb/src/mgmsrv \
-I$(top_srcdir)/ndb/include/mgmcommon \
diff --git a/ndb/tools/delete_all.cpp b/ndb/tools/delete_all.cpp
index 21e0c2ac089..2c395a67900 100644
--- a/ndb/tools/delete_all.cpp
+++ b/ndb/tools/delete_all.cpp
@@ -44,35 +44,37 @@ static void usage()
my_print_help(my_long_options);
my_print_variables(my_long_options);
}
-static my_bool
-get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
- char *argument)
-{
- return ndb_std_get_one_option(optid, opt, argument ? argument :
- "d:t:O,/tmp/ndb_delete_all.trace");
-}
int main(int argc, char** argv){
NDB_INIT(argv[0]);
const char *load_default_groups[]= { "mysql_cluster",0 };
load_defaults("my",load_default_groups,&argc,&argv);
int ho_error;
- if ((ho_error=handle_options(&argc, &argv, my_long_options, get_one_option)))
+#ifndef DBUG_OFF
+ opt_debug= "d:t:O,/tmp/ndb_delete_all.trace";
+#endif
+ if ((ho_error=handle_options(&argc, &argv, my_long_options,
+ ndb_std_get_one_option)))
return NDBT_ProgramExit(NDBT_WRONGARGS);
- Ndb::setConnectString(opt_connect_str);
- // Connect to Ndb
- Ndb MyNdb(_dbname);
+ Ndb_cluster_connection con(opt_connect_str);
+ if(con.connect(12, 5, 1) != 0)
+ {
+ ndbout << "Unable to connect to management server." << endl;
+ return NDBT_ProgramExit(NDBT_FAILED);
+ }
+ if (con.wait_until_ready(30,0) < 0)
+ {
+ ndbout << "Cluster nodes not ready in 30 seconds." << endl;
+ return NDBT_ProgramExit(NDBT_FAILED);
+ }
+ Ndb MyNdb(&con, _dbname );
if(MyNdb.init() != 0){
ERR(MyNdb.getNdbError());
return NDBT_ProgramExit(NDBT_FAILED);
}
- // Connect to Ndb and wait for it to become ready
- while(MyNdb.waitUntilReady() != 0)
- ndbout << "Waiting for ndb to become ready..." << endl;
-
// Check if table exists in db
int res = NDBT_OK;
for(int i = 0; i<argc; i++){
@@ -99,7 +101,7 @@ int clear_table(Ndb* pNdb, const NdbDictionary::Table* pTab, int parallelism)
const int retryMax = 10;
int deletedRows = 0;
int check;
- NdbConnection *pTrans;
+ NdbTransaction *pTrans;
NdbScanOperation *pOp;
NdbError err;
@@ -128,12 +130,11 @@ int clear_table(Ndb* pNdb, const NdbDictionary::Table* pTab, int parallelism)
goto failed;
}
- NdbResultSet * rs = pOp->readTuplesExclusive(par);
- if( rs == 0 ) {
+ if( pOp->readTuples(NdbOperation::LM_Exclusive,par) ) {
goto failed;
}
- if(pTrans->execute(NoCommit) != 0){
+ if(pTrans->execute(NdbTransaction::NoCommit) != 0){
err = pTrans->getNdbError();
if(err.status == NdbError::TemporaryError){
ERR(err);
@@ -144,16 +145,16 @@ int clear_table(Ndb* pNdb, const NdbDictionary::Table* pTab, int parallelism)
goto failed;
}
- while((check = rs->nextResult(true)) == 0){
+ while((check = pOp->nextResult(true)) == 0){
do {
- if (rs->deleteTuple() != 0){
+ if (pOp->deleteCurrentTuple() != 0){
goto failed;
}
deletedRows++;
- } while((check = rs->nextResult(false)) == 0);
+ } while((check = pOp->nextResult(false)) == 0);
if(check != -1){
- check = pTrans->execute(Commit);
+ check = pTrans->execute(NdbTransaction::Commit);
pTrans->restart();
}
diff --git a/ndb/tools/desc.cpp b/ndb/tools/desc.cpp
index aac47c9042c..be0f6942db5 100644
--- a/ndb/tools/desc.cpp
+++ b/ndb/tools/desc.cpp
@@ -44,35 +44,38 @@ static void usage()
my_print_help(my_long_options);
my_print_variables(my_long_options);
}
-static my_bool
-get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
- char *argument)
-{
- return ndb_std_get_one_option(optid, opt, argument ? argument :
- "d:t:O,/tmp/ndb_desc.trace");
-}
int main(int argc, char** argv){
NDB_INIT(argv[0]);
const char *load_default_groups[]= { "mysql_cluster",0 };
load_defaults("my",load_default_groups,&argc,&argv);
int ho_error;
- if ((ho_error=handle_options(&argc, &argv, my_long_options, get_one_option)))
+#ifndef DBUG_OFF
+ opt_debug= "d:t:O,/tmp/ndb_desc.trace";
+#endif
+ if ((ho_error=handle_options(&argc, &argv, my_long_options,
+ ndb_std_get_one_option)))
return NDBT_ProgramExit(NDBT_WRONGARGS);
- Ndb::setConnectString(opt_connect_str);
-
- Ndb* pMyNdb;
- pMyNdb = new Ndb(_dbname);
- pMyNdb->init();
-
- ndbout << "Waiting...";
- while (pMyNdb->waitUntilReady() != 0) {
- ndbout << "...";
+ Ndb_cluster_connection con(opt_connect_str);
+ if(con.connect(12, 5, 1) != 0)
+ {
+ ndbout << "Unable to connect to management server." << endl;
+ return NDBT_ProgramExit(NDBT_FAILED);
+ }
+ if (con.wait_until_ready(30,0) < 0)
+ {
+ ndbout << "Cluster nodes not ready in 30 seconds." << endl;
+ return NDBT_ProgramExit(NDBT_FAILED);
}
- ndbout << endl;
- NdbDictionary::Dictionary * dict = pMyNdb->getDictionary();
+ Ndb MyNdb(&con, _dbname);
+ if(MyNdb.init() != 0){
+ ERR(MyNdb.getNdbError());
+ return NDBT_ProgramExit(NDBT_FAILED);
+ }
+
+ const NdbDictionary::Dictionary * dict= MyNdb.getDictionary();
for (int i = 0; i < argc; i++) {
NDBT_Table* pTab = (NDBT_Table*)dict->getTable(argv[i]);
if (pTab != 0){
@@ -112,6 +115,5 @@ int main(int argc, char** argv){
ndbout << argv[i] << ": " << dict->getNdbError() << endl;
}
- delete pMyNdb;
return NDBT_ProgramExit(NDBT_OK);
}
diff --git a/ndb/tools/drop_index.cpp b/ndb/tools/drop_index.cpp
index 69c8345fdb6..24116f22784 100644
--- a/ndb/tools/drop_index.cpp
+++ b/ndb/tools/drop_index.cpp
@@ -41,37 +41,40 @@ static void usage()
my_print_help(my_long_options);
my_print_variables(my_long_options);
}
-static my_bool
-get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
- char *argument)
-{
- return ndb_std_get_one_option(optid, opt, argument ? argument :
- "d:t:O,/tmp/ndb_drop_index.trace");
-}
int main(int argc, char** argv){
NDB_INIT(argv[0]);
const char *load_default_groups[]= { "mysql_cluster",0 };
load_defaults("my",load_default_groups,&argc,&argv);
int ho_error;
- if ((ho_error=handle_options(&argc, &argv, my_long_options, get_one_option)))
+#ifndef DBUG_OFF
+ opt_debug= "d:t:O,/tmp/ndb_drop_index.trace";
+#endif
+ if ((ho_error=handle_options(&argc, &argv, my_long_options,
+ ndb_std_get_one_option)))
return NDBT_ProgramExit(NDBT_WRONGARGS);
if (argc < 1) {
usage();
return NDBT_ProgramExit(NDBT_WRONGARGS);
}
- Ndb::setConnectString(opt_connect_str);
- // Connect to Ndb
- Ndb MyNdb(_dbname);
+ Ndb_cluster_connection con(opt_connect_str);
+ if(con.connect(12, 5, 1) != 0)
+ {
+ return NDBT_ProgramExit(NDBT_FAILED);
+ }
+ if (con.wait_until_ready(30,3) < 0)
+ {
+ ndbout << "Cluster nodes not ready in 30 seconds." << endl;
+ return NDBT_ProgramExit(NDBT_FAILED);
+ }
+
+ Ndb MyNdb(&con, _dbname );
if(MyNdb.init() != 0){
ERR(MyNdb.getNdbError());
return NDBT_ProgramExit(NDBT_FAILED);
}
- while(MyNdb.waitUntilReady() != 0)
- ndbout << "Waiting for ndb to become ready..." << endl;
-
int res = 0;
for(int i = 0; i+1<argc; i += 2){
ndbout << "Dropping index " << argv[i] << "/" << argv[i+1] << "...";
diff --git a/ndb/tools/drop_tab.cpp b/ndb/tools/drop_tab.cpp
index 091db5cc4b7..991e1505486 100644
--- a/ndb/tools/drop_tab.cpp
+++ b/ndb/tools/drop_tab.cpp
@@ -41,36 +41,41 @@ static void usage()
my_print_help(my_long_options);
my_print_variables(my_long_options);
}
-static my_bool
-get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
- char *argument)
-{
- return ndb_std_get_one_option(optid, opt, argument ? argument :
- "d:t:O,/tmp/ndb_drop_table.trace");
-}
int main(int argc, char** argv){
NDB_INIT(argv[0]);
const char *load_default_groups[]= { "mysql_cluster",0 };
load_defaults("my",load_default_groups,&argc,&argv);
int ho_error;
- if ((ho_error=handle_options(&argc, &argv, my_long_options, get_one_option)))
+#ifndef DBUG_OFF
+ opt_debug= "d:t:O,/tmp/ndb_drop_table.trace";
+#endif
+ if ((ho_error=handle_options(&argc, &argv, my_long_options,
+ ndb_std_get_one_option)))
return NDBT_ProgramExit(NDBT_WRONGARGS);
if (argc < 1) {
usage();
return NDBT_ProgramExit(NDBT_WRONGARGS);
}
- Ndb::setConnectString(opt_connect_str);
- Ndb MyNdb(_dbname);
+ Ndb_cluster_connection con(opt_connect_str);
+ if(con.connect(12, 5, 1) != 0)
+ {
+ ndbout << "Unable to connect to management server." << endl;
+ return NDBT_ProgramExit(NDBT_FAILED);
+ }
+ if (con.wait_until_ready(30,3) < 0)
+ {
+ ndbout << "Cluster nodes not ready in 30 seconds." << endl;
+ return NDBT_ProgramExit(NDBT_FAILED);
+ }
+
+ Ndb MyNdb(&con, _dbname );
if(MyNdb.init() != 0){
ERR(MyNdb.getNdbError());
return NDBT_ProgramExit(NDBT_FAILED);
}
- while(MyNdb.waitUntilReady() != 0)
- ndbout << "Waiting for ndb to become ready..." << endl;
-
int res = 0;
for(int i = 0; i<argc; i++){
ndbout << "Dropping table " << argv[i] << "...";
diff --git a/ndb/tools/listTables.cpp b/ndb/tools/listTables.cpp
index eb0c1c53c2d..fa078f7d351 100644
--- a/ndb/tools/listTables.cpp
+++ b/ndb/tools/listTables.cpp
@@ -29,7 +29,7 @@
static Ndb_cluster_connection *ndb_cluster_connection= 0;
static Ndb* ndb = 0;
-static NdbDictionary::Dictionary* dic = 0;
+static const NdbDictionary::Dictionary * dic = 0;
static int _unqualified = 0;
static void
@@ -199,13 +199,6 @@ static void usage()
my_print_help(my_long_options);
my_print_variables(my_long_options);
}
-static my_bool
-get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
- char *argument)
-{
- return ndb_std_get_one_option(optid, opt, argument ? argument :
- "d:t:O,/tmp/ndb_show_tables.trace");
-}
int main(int argc, char** argv){
NDB_INIT(argv[0]);
@@ -213,22 +206,29 @@ int main(int argc, char** argv){
const char *load_default_groups[]= { "mysql_cluster",0 };
load_defaults("my",load_default_groups,&argc,&argv);
int ho_error;
- if ((ho_error=handle_options(&argc, &argv, my_long_options, get_one_option)))
+#ifndef DBUG_OFF
+ opt_debug= "d:t:O,/tmp/ndb_show_tables.trace";
+#endif
+ if ((ho_error=handle_options(&argc, &argv, my_long_options,
+ ndb_std_get_one_option)))
return NDBT_ProgramExit(NDBT_WRONGARGS);
_tabname = argv[0];
ndb_cluster_connection = new Ndb_cluster_connection(opt_connect_str);
if (ndb_cluster_connection->connect(12,5,1))
- fatal("unable to connect");
+ fatal("Unable to connect to management server.");
+ if (ndb_cluster_connection->wait_until_ready(30,0) < 0)
+ fatal("Cluster nodes not ready in 30 seconds.");
+
ndb = new Ndb(ndb_cluster_connection, _dbname);
if (ndb->init() != 0)
fatal("init");
- if (ndb->waitUntilReady(30) < 0)
- fatal("waitUntilReady");
dic = ndb->getDictionary();
for (int i = 0; _loops == 0 || i < _loops; i++) {
list(_tabname, (NdbDictionary::Object::Type)_type);
}
+ delete ndb;
+ delete ndb_cluster_connection;
return NDBT_ProgramExit(NDBT_OK);
}
diff --git a/ndb/tools/ndb_test_platform.cpp b/ndb/tools/ndb_test_platform.cpp
index 72dd146dacd..88f21b31d58 100644
--- a/ndb/tools/ndb_test_platform.cpp
+++ b/ndb/tools/ndb_test_platform.cpp
@@ -33,14 +33,14 @@ int test_snprintf(const char * fmt, int buf_sz, int result)
if(ret < 0)
{
printf("BaseString::snprint returns %d with size=%d and strlen(fmt)=%d\n",
- ret, buf_sz, strlen(fmt));
+ ret, buf_sz, (int) strlen(fmt));
return -1;
}
if(ret+1 == buf_sz)
{
printf("BaseString::snprint truncates returns %d with size=%d and strlen(fmt)=%d\n",
- ret, buf_sz, strlen(fmt));
+ ret, buf_sz, (int) strlen(fmt));
return -1;
}
@@ -87,7 +87,7 @@ main(void)
if (sizeof(UintPtr) != sizeof(Uint32*))
{
printf("sizeof(UintPtr)=%d != sizeof(Uint32*)=%d\n",
- sizeof(UintPtr), sizeof(Uint32*));
+ (int) sizeof(UintPtr), (int) sizeof(Uint32*));
return -1;
}
diff --git a/ndb/tools/restore/Restore.cpp b/ndb/tools/restore/Restore.cpp
index 79df49c6f26..6ac06f8a6f8 100644
--- a/ndb/tools/restore/Restore.cpp
+++ b/ndb/tools/restore/Restore.cpp
@@ -646,7 +646,7 @@ bool RestoreDataIterator::readFragmentHeader(int & ret)
}
info << "_____________________________________________________" << endl
- << "Restoring data in table: " << m_currentTable->getTableName()
+ << "Processing data in table: " << m_currentTable->getTableName()
<< "(" << Header.TableId << ") fragment "
<< Header.FragmentNo << endl;
@@ -925,19 +925,12 @@ operator<<(NdbOut& ndbout, const LogEntry& logE)
return ndbout;
}
+#include <NDBT.hpp>
NdbOut &
operator<<(NdbOut& ndbout, const TableS & table){
- ndbout << endl << "Table: " << table.getTableName() << endl;
- for (int j = 0; j < table.getNoOfAttributes(); j++)
- {
- const AttributeDesc * desc = table[j];
- ndbout << desc->m_column->getName() << ": "
- << (Uint32) desc->m_column->getType();
- ndbout << " key: " << (Uint32) desc->m_column->getPrimaryKey();
- ndbout << " array: " << desc->arraySize;
- ndbout << " size: " << desc->size << endl;
- } // for
+
+ ndbout << (* (NDBT_Table*)table.m_dictTable) << endl;
return ndbout;
}
diff --git a/ndb/tools/restore/consumer.cpp b/ndb/tools/restore/consumer.cpp
index ecbdbbf8f4e..b130c4998d5 100644
--- a/ndb/tools/restore/consumer.cpp
+++ b/ndb/tools/restore/consumer.cpp
@@ -45,9 +45,11 @@ BackupConsumer::create_table_string(const TableS & table,
pos += sprintf(buf+pos, "%s", "float");
break;
case NdbDictionary::Column::Olddecimal:
+ case NdbDictionary::Column::Decimal:
pos += sprintf(buf+pos, "%s", "decimal");
break;
case NdbDictionary::Column::Olddecimalunsigned:
+ case NdbDictionary::Column::Decimalunsigned:
pos += sprintf(buf+pos, "%s", "decimal unsigned");
break;
case NdbDictionary::Column::Char:
diff --git a/ndb/tools/restore/consumer_printer.hpp b/ndb/tools/restore/consumer_printer.hpp
index 7cbc924e364..e47bc56f874 100644
--- a/ndb/tools/restore/consumer_printer.hpp
+++ b/ndb/tools/restore/consumer_printer.hpp
@@ -29,6 +29,8 @@ public:
m_print_log = false;
m_print_data = false;
m_print_meta = false;
+ m_logCount = 0;
+ m_dataCount = 0;
}
virtual bool table(const TableS &);
diff --git a/ndb/tools/restore/consumer_restore.cpp b/ndb/tools/restore/consumer_restore.cpp
index 70ea7460d78..552246a4f9e 100644
--- a/ndb/tools/restore/consumer_restore.cpp
+++ b/ndb/tools/restore/consumer_restore.cpp
@@ -24,8 +24,9 @@ extern FilteredNdbOut err;
extern FilteredNdbOut info;
extern FilteredNdbOut debug;
-static void callback(int, NdbConnection*, void*);
+static void callback(int, NdbTransaction*, void*);
+extern const char * g_connect_string;
bool
BackupRestore::init()
{
@@ -34,7 +35,13 @@ BackupRestore::init()
if (!m_restore && !m_restore_meta)
return true;
- m_ndb = new Ndb();
+ m_cluster_connection = new Ndb_cluster_connection(g_connect_string);
+ if(m_cluster_connection->connect(12, 5, 1) != 0)
+ {
+ return false;
+ }
+
+ m_ndb = new Ndb(m_cluster_connection);
if (m_ndb == NULL)
return false;
@@ -80,6 +87,12 @@ void BackupRestore::release()
delete [] m_callback;
m_callback= 0;
}
+
+ if (m_cluster_connection)
+ {
+ delete m_cluster_connection;
+ m_cluster_connection= 0;
+ }
}
BackupRestore::~BackupRestore()
@@ -204,7 +217,7 @@ BackupRestore::endOfTables(){
NdbDictionary::Dictionary* dict = m_ndb->getDictionary();
for(size_t i = 0; i<m_indexes.size(); i++){
- const NdbTableImpl & indtab = NdbTableImpl::getImpl(* m_indexes[i]);
+ NdbTableImpl & indtab = NdbTableImpl::getImpl(* m_indexes[i]);
BaseString tmp(indtab.m_primaryTable.c_str());
Vector<BaseString> split;
@@ -364,7 +377,8 @@ void BackupRestore::tuple_a(restore_callback_t *cb)
}
// Prepare transaction (the transaction is NOT yet sent to NDB)
- cb->connection->executeAsynchPrepare(Commit, &callback, cb);
+ cb->connection->executeAsynchPrepare(NdbTransaction::Commit,
+ &callback, cb);
m_transactions++;
return;
}
@@ -494,7 +508,7 @@ BackupRestore::logEntry(const LogEntry & tup)
if (!m_restore)
return;
- NdbConnection * trans = m_ndb->startTransaction();
+ NdbTransaction * trans = m_ndb->startTransaction();
if (trans == NULL)
{
// Deep shit, TODO: handle the error
@@ -564,7 +578,7 @@ BackupRestore::logEntry(const LogEntry & tup)
} // if
}
- const int ret = trans->execute(Commit);
+ const int ret = trans->execute(NdbTransaction::Commit);
if (ret != 0)
{
// Both insert update and delete can fail during log running
@@ -612,12 +626,12 @@ BackupRestore::endOfLogEntrys()
*
* (This function must have three arguments:
* - The result of the transaction,
- * - The NdbConnection object, and
+ * - The NdbTransaction object, and
* - A pointer to an arbitrary object.)
*/
static void
-callback(int result, NdbConnection* trans, void* aObject)
+callback(int result, NdbTransaction* trans, void* aObject)
{
restore_callback_t *cb = (restore_callback_t *)aObject;
(cb->restore)->cback(result, cb);
@@ -631,7 +645,7 @@ BackupRestore::tuple(const TupleS & tup)
return;
while (1)
{
- NdbConnection * trans = m_ndb->startTransaction();
+ NdbTransaction * trans = m_ndb->startTransaction();
if (trans == NULL)
{
// Deep shit, TODO: handle the error
@@ -682,7 +696,7 @@ BackupRestore::tuple(const TupleS & tup)
else
op->setValue(i, dataPtr, length);
}
- int ret = trans->execute(Commit);
+ int ret = trans->execute(NdbTransaction::Commit);
if (ret != 0)
{
ndbout << "execute failed: ";
diff --git a/ndb/tools/restore/consumer_restore.hpp b/ndb/tools/restore/consumer_restore.hpp
index df219cd4412..1bf6d89a912 100644
--- a/ndb/tools/restore/consumer_restore.hpp
+++ b/ndb/tools/restore/consumer_restore.hpp
@@ -22,7 +22,7 @@
struct restore_callback_t {
class BackupRestore *restore;
class TupleS tup;
- class NdbConnection *connection;
+ class NdbTransaction *connection;
int retries;
int error_code;
restore_callback_t *next;
@@ -35,6 +35,7 @@ public:
BackupRestore(Uint32 parallelism=1)
{
m_ndb = 0;
+ m_cluster_connection = 0;
m_logCount = m_dataCount = 0;
m_restore = false;
m_restore_meta = false;
@@ -62,6 +63,7 @@ public:
virtual bool finalize_table(const TableS &);
void connectToMysql();
Ndb * m_ndb;
+ Ndb_cluster_connection * m_cluster_connection;
bool m_restore;
bool m_restore_meta;
Uint32 m_logCount;
diff --git a/ndb/tools/restore/consumer_restorem.cpp b/ndb/tools/restore/consumer_restorem.cpp
index 6a9ec07148a..56179a60ab0 100644
--- a/ndb/tools/restore/consumer_restorem.cpp
+++ b/ndb/tools/restore/consumer_restorem.cpp
@@ -21,8 +21,8 @@ extern FilteredNdbOut err;
extern FilteredNdbOut info;
extern FilteredNdbOut debug;
-static bool asynchErrorHandler(NdbConnection * trans, Ndb * ndb);
-static void callback(int result, NdbConnection* trans, void* aObject);
+static bool asynchErrorHandler(NdbTransaction * trans, Ndb * ndb);
+static void callback(int result, NdbTransaction* trans, void* aObject);
bool
BackupRestore::init()
@@ -80,6 +80,7 @@ BackupRestore::init()
ndbout_c("Connect failed: %s", mysql_error(&mysql));
returnValue = false;
}
+ mysql.reconnect= 1;
ndbout << "Connected to MySQL!!!" <<endl;
}
@@ -370,7 +371,7 @@ BackupRestore::tuple(const TupleS & tup)
return;
while (1)
{
- NdbConnection * trans = m_ndb->startTransaction();
+ NdbTransaction * trans = m_ndb->startTransaction();
if (trans == NULL)
{
// Deep shit, TODO: handle the error
@@ -459,7 +460,7 @@ BackupRestore::logEntry(const LogEntry & tup)
if (!m_restore)
return;
- NdbConnection * trans = m_ndb->startTransaction();
+ NdbTransaction * trans = m_ndb->startTransaction();
if (trans == NULL)
{
// Deep shit, TODO: handle the error
@@ -550,7 +551,7 @@ BackupRestore::endOfLogEntrys()
*
******************************************/
static void restoreCallback(int result, // Result for transaction
- NdbConnection *object, // Transaction object
+ NdbTransaction *object, // Transaction object
void *anything) // Not used
{
static Uint32 counter = 0;
@@ -592,12 +593,12 @@ static void restoreCallback(int result, // Result for transaction
*
* (This function must have three arguments:
* - The result of the transaction,
- * - The NdbConnection object, and
+ * - The NdbTransaction object, and
* - A pointer to an arbitrary object.)
*/
static void
-callback(int result, NdbConnection* trans, void* aObject)
+callback(int result, NdbTransaction* trans, void* aObject)
{
restore_callback_t *cb = (restore_callback_t *)aObject;
(cb->restore)->cback(result, cb);
@@ -609,7 +610,7 @@ callback(int result, NdbConnection* trans, void* aObject)
* false if it is an error that generates an abort.
*/
static
-bool asynchErrorHandler(NdbConnection * trans, Ndb* ndb)
+bool asynchErrorHandler(NdbTransaction * trans, Ndb* ndb)
{
NdbError error = trans->getNdbError();
ndb->closeTransaction(trans);
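
The comment block above already spells out the callback contract; the only change is the transaction type of its second argument. A sketch of registering that callback for an asynchronous execute is shown below. The executeAsynchPrepare()/sendPollNdb() names and argument order are recalled from the NDB API headers rather than shown in this patch, so treat them as assumptions.

/* Sketch: hand a prepared transaction to the callback defined above. */
static void
send_async(Ndb *ndb, NdbTransaction *trans, restore_callback_t *cb)
{
  cb->connection = trans;                       /* member renamed above */
  trans->executeAsynchPrepare(NdbTransaction::Commit,
                              callback,         /* void (*)(int, NdbTransaction*, void*) */
                              (void*)cb);
  ndb->sendPollNdb(3000);                       /* send and poll, 3 s timeout */
}
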
diff --git a/ndb/tools/restore/restore_main.cpp b/ndb/tools/restore/restore_main.cpp
index d786dffe89e..af7c751fb67 100644
--- a/ndb/tools/restore/restore_main.cpp
+++ b/ndb/tools/restore/restore_main.cpp
@@ -110,8 +110,10 @@ static my_bool
get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
char *argument)
{
- ndb_std_get_one_option(optid, opt, argument ? argument :
- "d:t:O,/tmp/ndb_restore.trace");
+#ifndef DBUG_OFF
+ opt_debug= "d:t:O,/tmp/ndb_restore.trace";
+#endif
+ ndb_std_get_one_option(optid, opt, argument);
switch (optid) {
case 'n':
if (ga_nodeId == 0)
@@ -227,6 +229,7 @@ free_data_callback()
g_consumers[i]->tuple_free();
}
+const char * g_connect_string = 0;
static void exitHandler(int code)
{
NDBT_ProgramExit(code);
@@ -246,7 +249,7 @@ main(int argc, char** argv)
exitHandler(NDBT_FAILED);
}
- Ndb::setConnectString(opt_connect_str);
+ g_connect_string = opt_connect_str;
/**
* we must always load meta data, even if we will only print it to stdout
@@ -324,7 +327,7 @@ main(int argc, char** argv)
if (ga_restore || ga_print)
{
- if (ga_restore)
+ if(_restore_data || _print_data)
{
RestoreDataIterator dataIter(metaData, &free_data_callback);
@@ -371,7 +374,10 @@ main(int argc, char** argv)
for (i= 0; i < g_consumers.size(); i++)
g_consumers[i]->endOfTuples();
+ }
+ if(_restore_data || _print_log)
+ {
RestoreLogIterator logIter(metaData);
if (!logIter.readHeader())
{
@@ -395,6 +401,10 @@ main(int argc, char** argv)
logIter.validateFooter(); //not implemented
for (i= 0; i < g_consumers.size(); i++)
g_consumers[i]->endOfLogEntrys();
+ }
+
+ if(_restore_data)
+ {
for(i = 0; i<metaData.getNoOfTables(); i++)
{
if (checkSysTable(metaData[i]->getTableName()))
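
The driver changes are twofold: the connect string now lands in a file-scope global (g_connect_string) instead of being pushed through Ndb::setConnectString(), and the restore body is split into three separately gated phases so that data, log entries, and table finalization run independently. Reduced to its control flow, the new structure looks roughly like this compilable sketch, with the iterator and consumer loops elided into comments.

/* Shape of the gating introduced above; the flags mirror the tool's options. */
static void
run_restore_phases(bool restore_data, bool print_data, bool print_log)
{
  if (restore_data || print_data)
  {
    /* RestoreDataIterator: pass every tuple to the consumers, then endOfTuples() */
  }
  if (restore_data || print_log)
  {
    /* RestoreLogIterator: pass every log entry to the consumers, then endOfLogEntrys() */
  }
  if (restore_data)
  {
    /* finalize_table() for every table that passes checkSysTable() */
  }
}
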
diff --git a/ndb/tools/select_all.cpp b/ndb/tools/select_all.cpp
index 23fd2290349..baa18db1ebd 100644
--- a/ndb/tools/select_all.cpp
+++ b/ndb/tools/select_all.cpp
@@ -24,7 +24,6 @@
#include <NdbMain.h>
#include <NDBT.hpp>
#include <NdbSleep.h>
-#include <NdbScanFilter.hpp>
int scanReadRecords(Ndb*,
const NdbDictionary::Table*,
@@ -34,14 +33,15 @@ int scanReadRecords(Ndb*,
bool headers,
bool useHexFormat,
char delim,
- bool orderby);
+ bool orderby,
+ bool descending);
NDB_STD_OPTS_VARS;
static const char* _dbname = "TEST_DB";
static const char* _delimiter = "\t";
static int _unqualified, _header, _parallelism, _useHexFormat, _lock,
- _order;
+ _order, _descending;
static struct my_option my_long_options[] =
{
@@ -58,6 +58,9 @@ static struct my_option my_long_options[] =
{ "order", 'o', "Sort resultset according to index",
(gptr*) &_order, (gptr*) &_order, 0,
GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
+ { "descending", 'z', "Sort descending (requires order flag)",
+ (gptr*) &_descending, (gptr*) &_descending, 0,
+ GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 },
{ "header", 'h', "Print header",
(gptr*) &_header, (gptr*) &_header, 0,
GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0 },
@@ -82,13 +85,6 @@ static void usage()
my_print_help(my_long_options);
my_print_variables(my_long_options);
}
-static my_bool
-get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
- char *argument)
-{
- return ndb_std_get_one_option(optid, opt, argument ? argument :
- "d:t:O,/tmp/ndb_select_all.trace");
-}
int main(int argc, char** argv){
NDB_INIT(argv[0]);
@@ -96,26 +92,35 @@ int main(int argc, char** argv){
load_defaults("my",load_default_groups,&argc,&argv);
const char* _tabname;
int ho_error;
- if ((ho_error=handle_options(&argc, &argv, my_long_options, get_one_option)))
+#ifndef DBUG_OFF
+ opt_debug= "d:t:O,/tmp/ndb_select_all.trace";
+#endif
+ if ((ho_error=handle_options(&argc, &argv, my_long_options,
+ ndb_std_get_one_option)))
return NDBT_ProgramExit(NDBT_WRONGARGS);
if ((_tabname = argv[0]) == 0) {
usage();
return NDBT_ProgramExit(NDBT_WRONGARGS);
}
- Ndb::setConnectString(opt_connect_str);
- // Connect to Ndb
- Ndb MyNdb(_dbname);
+ Ndb_cluster_connection con(opt_connect_str);
+ if(con.connect(12, 5, 1) != 0)
+ {
+ ndbout << "Unable to connect to management server." << endl;
+ return NDBT_ProgramExit(NDBT_FAILED);
+ }
+ if (con.wait_until_ready(30,0) < 0)
+ {
+ ndbout << "Cluster nodes not ready in 30 seconds." << endl;
+ return NDBT_ProgramExit(NDBT_FAILED);
+ }
+ Ndb MyNdb(&con, _dbname );
if(MyNdb.init() != 0){
ERR(MyNdb.getNdbError());
return NDBT_ProgramExit(NDBT_FAILED);
}
- // Connect to Ndb and wait for it to become ready
- while(MyNdb.waitUntilReady() != 0)
- ndbout << "Waiting for ndb to become ready..." << endl;
-
// Check if table exists in db
const NdbDictionary::Table* pTab = NDBT_Table::discoverTableFromDb(&MyNdb, _tabname);
const NdbDictionary::Index * pIdx = 0;
@@ -138,6 +143,11 @@ int main(int argc, char** argv){
return NDBT_ProgramExit(NDBT_WRONGARGS);
}
+ if (_descending && ! _order) {
+ ndbout << " Descending flag given without order flag" << endl;
+ return NDBT_ProgramExit(NDBT_WRONGARGS);
+ }
+
if (scanReadRecords(&MyNdb,
pTab,
pIdx,
@@ -145,7 +155,7 @@ int main(int argc, char** argv){
_lock,
_header,
_useHexFormat,
- (char)*_delimiter, _order) != 0){
+ (char)*_delimiter, _order, _descending) != 0){
return NDBT_ProgramExit(NDBT_FAILED);
}
@@ -160,12 +170,12 @@ int scanReadRecords(Ndb* pNdb,
int _lock,
bool headers,
bool useHexFormat,
- char delimiter, bool order){
+ char delimiter, bool order, bool descending){
int retryAttempt = 0;
const int retryMax = 100;
int check;
- NdbConnection *pTrans;
+ NdbTransaction *pTrans;
NdbScanOperation *pOp;
NdbIndexScanOperation * pIOp= 0;
@@ -202,7 +212,7 @@ int scanReadRecords(Ndb* pNdb,
return -1;
}
- NdbResultSet * rs;
+ int rs;
switch(_lock + (3 * order)){
case 1:
rs = pOp->readTuples(NdbScanOperation::LM_Read, 0, parallel);
@@ -212,20 +222,20 @@ int scanReadRecords(Ndb* pNdb,
break;
case 3:
rs = pIOp->readTuples(NdbScanOperation::LM_CommittedRead, 0, parallel,
- true);
+ true, descending);
break;
case 4:
- rs = pIOp->readTuples(NdbScanOperation::LM_Read, 0, parallel, true);
+ rs = pIOp->readTuples(NdbScanOperation::LM_Read, 0, parallel, true, descending);
break;
case 5:
- rs = pIOp->readTuples(NdbScanOperation::LM_Exclusive, 0, parallel, true);
+ rs = pIOp->readTuples(NdbScanOperation::LM_Exclusive, 0, parallel, true, descending);
break;
case 0:
default:
rs = pOp->readTuples(NdbScanOperation::LM_CommittedRead, 0, parallel);
break;
}
- if( rs == 0 ){
+ if( rs != 0 ){
ERR(pTrans->getNdbError());
pNdb->closeTransaction(pTrans);
return -1;
@@ -291,7 +301,7 @@ int scanReadRecords(Ndb* pNdb,
}
}
- check = pTrans->execute(NoCommit);
+ check = pTrans->execute(NdbTransaction::NoCommit);
if( check == -1 ) {
const NdbError err = pTrans->getNdbError();
@@ -311,7 +321,7 @@ int scanReadRecords(Ndb* pNdb,
int eof;
int rows = 0;
- eof = rs->nextResult();
+ eof = pOp->nextResult();
while(eof == 0){
rows++;
@@ -322,7 +332,7 @@ int scanReadRecords(Ndb* pNdb,
ndbout << (*row) << endl;
}
- eof = rs->nextResult();
+ eof = pOp->nextResult();
}
if (eof == -1) {
const NdbError err = pTrans->getNdbError();
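
select_all shows the second API shift in this patch: readTuples() now returns an int status rather than an NdbResultSet, and rows are fetched with nextResult() on the scan operation itself. Condensed from the hunks above into the committed-read, unordered case; getNdbScanOperation() and getName() are ordinary NDB API calls, not part of the patch, and value registration and printing are elided.

/* Sketch of the post-patch scan loop. */
static int
scan_print(Ndb *ndb, const NdbDictionary::Table *tab, int parallel)
{
  NdbTransaction *trans = ndb->startTransaction();
  if (trans == NULL)
    return -1;

  NdbScanOperation *op = trans->getNdbScanOperation(tab->getName());
  if (op == NULL ||
      op->readTuples(NdbScanOperation::LM_CommittedRead, 0, parallel) != 0 ||
      trans->execute(NdbTransaction::NoCommit) == -1)
  {
    ndb->closeTransaction(trans);
    return -1;
  }

  int eof;
  while ((eof = op->nextResult()) == 0)   /* rows come from the operation, not a result set */
  {
    /* print the NdbRecAttr values registered with getValue() before execute() */
  }

  ndb->closeTransaction(trans);
  return eof == 1 ? 0 : -1;               /* 1 = end of scan, negative = error */
}
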
diff --git a/ndb/tools/select_count.cpp b/ndb/tools/select_count.cpp
index a9a3e71da67..6fa3c77f15a 100644
--- a/ndb/tools/select_count.cpp
+++ b/ndb/tools/select_count.cpp
@@ -30,7 +30,7 @@ static int
select_count(Ndb* pNdb, const NdbDictionary::Table* pTab,
int parallelism,
int* count_rows,
- UtilTransactions::ScanLock lock);
+ NdbOperation::LockMode lock);
NDB_STD_OPTS_VARS;
@@ -60,39 +60,41 @@ static void usage()
my_print_help(my_long_options);
my_print_variables(my_long_options);
}
-static my_bool
-get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
- char *argument)
-{
- return ndb_std_get_one_option(optid, opt, argument ? argument :
- "d:t:O,/tmp/ndb_select_count.trace");
-}
int main(int argc, char** argv){
NDB_INIT(argv[0]);
const char *load_default_groups[]= { "mysql_cluster",0 };
load_defaults("my",load_default_groups,&argc,&argv);
int ho_error;
- if ((ho_error=handle_options(&argc, &argv, my_long_options, get_one_option)))
+#ifndef DBUG_OFF
+ opt_debug= "d:t:O,/tmp/ndb_select_count.trace";
+#endif
+ if ((ho_error=handle_options(&argc, &argv, my_long_options,
+ ndb_std_get_one_option)))
return NDBT_ProgramExit(NDBT_WRONGARGS);
if (argc < 1) {
usage();
return NDBT_ProgramExit(NDBT_WRONGARGS);
}
- Ndb::setConnectString(opt_connect_str);
- // Connect to Ndb
- Ndb MyNdb(_dbname);
+ Ndb_cluster_connection con(opt_connect_str);
+ if(con.connect(12, 5, 1) != 0)
+ {
+ ndbout << "Unable to connect to management server." << endl;
+ return NDBT_ProgramExit(NDBT_FAILED);
+ }
+ if (con.wait_until_ready(30,0) < 0)
+ {
+ ndbout << "Cluster nodes not ready in 30 seconds." << endl;
+ return NDBT_ProgramExit(NDBT_FAILED);
+ }
+ Ndb MyNdb(&con, _dbname );
if(MyNdb.init() != 0){
ERR(MyNdb.getNdbError());
return NDBT_ProgramExit(NDBT_FAILED);
}
- // Connect to Ndb and wait for it to become ready
- while(MyNdb.waitUntilReady() != 0)
- ndbout << "Waiting for ndb to become ready..." << endl;
-
for(int i = 0; i<argc; i++){
// Check if table exists in db
const NdbDictionary::Table * pTab = NDBT_Table::discoverTableFromDb(&MyNdb, argv[i]);
@@ -103,7 +105,7 @@ int main(int argc, char** argv){
int rows = 0;
if (select_count(&MyNdb, pTab, _parallelism, &rows,
- (UtilTransactions::ScanLock)_lock) != 0){
+ (NdbOperation::LockMode)_lock) != 0){
return NDBT_ProgramExit(NDBT_FAILED);
}
@@ -116,12 +118,12 @@ int
select_count(Ndb* pNdb, const NdbDictionary::Table* pTab,
int parallelism,
int* count_rows,
- UtilTransactions::ScanLock lock){
+ NdbOperation::LockMode lock){
int retryAttempt = 0;
const int retryMax = 100;
int check;
- NdbConnection *pTrans;
+ NdbTransaction *pTrans;
NdbScanOperation *pOp;
while (true){
@@ -151,8 +153,7 @@ select_count(Ndb* pNdb, const NdbDictionary::Table* pTab,
return NDBT_FAILED;
}
- NdbResultSet * rs = pOp->readTuples(NdbScanOperation::LM_Dirty);
- if( rs == 0 ) {
+ if( pOp->readTuples(NdbScanOperation::LM_Dirty) ) {
ERR(pTrans->getNdbError());
pNdb->closeTransaction(pTrans);
return NDBT_FAILED;
@@ -167,9 +168,10 @@ select_count(Ndb* pNdb, const NdbDictionary::Table* pTab,
}
Uint64 tmp;
+ Uint32 row_size;
pOp->getValue(NdbDictionary::Column::ROW_COUNT, (char*)&tmp);
-
- check = pTrans->execute(NoCommit);
+ pOp->getValue(NdbDictionary::Column::ROW_SIZE, (char*)&row_size);
+ check = pTrans->execute(NdbTransaction::NoCommit);
if( check == -1 ) {
ERR(pTrans->getNdbError());
pNdb->closeTransaction(pTrans);
@@ -178,7 +180,7 @@ select_count(Ndb* pNdb, const NdbDictionary::Table* pTab,
Uint64 row_count = 0;
int eof;
- while((eof = rs->nextResult(true)) == 0){
+ while((eof = pOp->nextResult(true)) == 0){
row_count += tmp;
}
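
select_count undergoes the same conversions and, in passing, shows how a row count is obtained without shipping any user data: the scan registers the ROW_COUNT pseudo column (now joined by ROW_SIZE), and each result delivered by nextResult() carries a per-fragment count that the loop sums up. A compressed sketch with retries and error reporting trimmed; interpret_exit_last_row() is how the tool limits the scan to one row per fragment and is assumed here, since that part of the file is not in the hunks.

/* Sketch: total row count via the ROW_COUNT pseudo column, as above. */
static int
count_rows(Ndb *ndb, const NdbDictionary::Table *tab, Uint64 *out)
{
  NdbTransaction *trans = ndb->startTransaction();
  NdbScanOperation *op = trans ? trans->getNdbScanOperation(tab->getName()) : 0;
  Uint64 fragment_rows = 0;

  if (op == 0 ||
      op->readTuples(NdbScanOperation::LM_Dirty) != 0 ||     /* dirty read suffices */
      op->interpret_exit_last_row() != 0 ||                  /* assumed, see lead-in */
      op->getValue(NdbDictionary::Column::ROW_COUNT,
                   (char*)&fragment_rows) == 0 ||
      trans->execute(NdbTransaction::NoCommit) == -1)
  {
    if (trans) ndb->closeTransaction(trans);
    return -1;
  }

  Uint64 total = 0;
  int eof;
  while ((eof = op->nextResult(true)) == 0)
    total += fragment_rows;      /* each returned row carries its fragment's count */

  ndb->closeTransaction(trans);
  *out = total;
  return eof == 1 ? 0 : -1;
}
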
diff --git a/ndb/tools/waiter.cpp b/ndb/tools/waiter.cpp
index cc6a21428c8..db90bd8bd90 100644
--- a/ndb/tools/waiter.cpp
+++ b/ndb/tools/waiter.cpp
@@ -60,24 +60,19 @@ static void usage()
my_print_variables(my_long_options);
}
-static my_bool
-get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
- char *argument)
-{
- return ndb_std_get_one_option(optid, opt, argument ? argument :
- "d:t:O,/tmp/ndb_drop_table.trace");
-}
-
int main(int argc, char** argv){
NDB_INIT(argv[0]);
const char *load_default_groups[]= { "mysql_cluster",0 };
load_defaults("my",load_default_groups,&argc,&argv);
const char* _hostName = NULL;
int ho_error;
- if ((ho_error=handle_options(&argc, &argv, my_long_options, get_one_option)))
+#ifndef DBUG_OFF
+ opt_debug= "d:t:O,/tmp/ndb_waiter.trace";
+#endif
+ if ((ho_error=handle_options(&argc, &argv, my_long_options,
+ ndb_std_get_one_option)))
return NDBT_ProgramExit(NDBT_WRONGARGS);
- char buf[255];
_hostName = argv[0];
if (_hostName == 0)