author     brian@zim.(none) <>  2005-04-26 18:19:54 -0700
committer  brian@zim.(none) <>  2005-04-26 18:19:54 -0700
commit     2a7c71e309337588b3815ea7ff1fe20d734d50d9 (patch)
tree       df9016f3d70b4657f89dcddca2ec4e188fc7fbdf /storage/bdb/test
parent     8e0eb65f9aaa3dc5a4f713988e58190b82466cde (diff)
download   mariadb-git-2a7c71e309337588b3815ea7ff1fe20d734d50d9.tar.gz
Changes to create storage directory for storage engines.
Diffstat (limited to 'storage/bdb/test')
-rw-r--r--  storage/bdb/test/archive.tcl  230
-rw-r--r--  storage/bdb/test/bigfile001.tcl  85
-rw-r--r--  storage/bdb/test/bigfile002.tcl  45
-rw-r--r--  storage/bdb/test/byteorder.tcl  34
-rw-r--r--  storage/bdb/test/conscript.tcl  123
-rw-r--r--  storage/bdb/test/dbm.tcl  128
-rw-r--r--  storage/bdb/test/dbscript.tcl  357
-rw-r--r--  storage/bdb/test/ddoyscript.tcl  172
-rw-r--r--  storage/bdb/test/ddscript.tcl  44
-rw-r--r--  storage/bdb/test/dead001.tcl  88
-rw-r--r--  storage/bdb/test/dead002.tcl  75
-rw-r--r--  storage/bdb/test/dead003.tcl  98
-rw-r--r--  storage/bdb/test/dead004.tcl  108
-rw-r--r--  storage/bdb/test/dead005.tcl  87
-rw-r--r--  storage/bdb/test/dead006.tcl  16
-rw-r--r--  storage/bdb/test/dead007.tcl  34
-rw-r--r--  storage/bdb/test/env001.tcl  154
-rw-r--r--  storage/bdb/test/env002.tcl  156
-rw-r--r--  storage/bdb/test/env003.tcl  149
-rw-r--r--  storage/bdb/test/env004.tcl  103
-rw-r--r--  storage/bdb/test/env005.tcl  53
-rw-r--r--  storage/bdb/test/env006.tcl  42
-rw-r--r--  storage/bdb/test/env007.tcl  223
-rw-r--r--  storage/bdb/test/env008.tcl  73
-rw-r--r--  storage/bdb/test/env009.tcl  57
-rw-r--r--  storage/bdb/test/env010.tcl  49
-rw-r--r--  storage/bdb/test/env011.tcl  39
-rw-r--r--  storage/bdb/test/hsearch.tcl  51
-rw-r--r--  storage/bdb/test/join.tcl  455
-rw-r--r--  storage/bdb/test/lock001.tcl  122
-rw-r--r--  storage/bdb/test/lock002.tcl  157
-rw-r--r--  storage/bdb/test/lock003.tcl  99
-rw-r--r--  storage/bdb/test/lock004.tcl  29
-rw-r--r--  storage/bdb/test/lock005.tcl  177
-rw-r--r--  storage/bdb/test/lockscript.tcl  117
-rw-r--r--  storage/bdb/test/log001.tcl  120
-rw-r--r--  storage/bdb/test/log002.tcl  85
-rw-r--r--  storage/bdb/test/log003.tcl  118
-rw-r--r--  storage/bdb/test/log004.tcl  46
-rw-r--r--  storage/bdb/test/log005.tcl  89
-rw-r--r--  storage/bdb/test/logtrack.tcl  137
-rw-r--r--  storage/bdb/test/mdbscript.tcl  384
-rw-r--r--  storage/bdb/test/memp001.tcl  199
-rw-r--r--  storage/bdb/test/memp002.tcl  62
-rw-r--r--  storage/bdb/test/memp003.tcl  153
-rw-r--r--  storage/bdb/test/mpoolscript.tcl  171
-rw-r--r--  storage/bdb/test/mutex001.tcl  51
-rw-r--r--  storage/bdb/test/mutex002.tcl  94
-rw-r--r--  storage/bdb/test/mutex003.tcl  52
-rw-r--r--  storage/bdb/test/mutexscript.tcl  91
-rw-r--r--  storage/bdb/test/ndbm.tcl  144
-rw-r--r--  storage/bdb/test/parallel.tcl  295
-rw-r--r--  storage/bdb/test/recd001.tcl  242
-rw-r--r--  storage/bdb/test/recd002.tcl  103
-rw-r--r--  storage/bdb/test/recd003.tcl  119
-rw-r--r--  storage/bdb/test/recd004.tcl  95
-rw-r--r--  storage/bdb/test/recd005.tcl  230
-rw-r--r--  storage/bdb/test/recd006.tcl  262
-rw-r--r--  storage/bdb/test/recd007.tcl  886
-rw-r--r--  storage/bdb/test/recd008.tcl  227
-rw-r--r--  storage/bdb/test/recd009.tcl  180
-rw-r--r--  storage/bdb/test/recd010.tcl  257
-rw-r--r--  storage/bdb/test/recd011.tcl  116
-rw-r--r--  storage/bdb/test/recd012.tcl  432
-rw-r--r--  storage/bdb/test/recd013.tcl  287
-rw-r--r--  storage/bdb/test/recd014.tcl  445
-rw-r--r--  storage/bdb/test/recd015.tcl  160
-rw-r--r--  storage/bdb/test/recd016.tcl  183
-rw-r--r--  storage/bdb/test/recd017.tcl  151
-rw-r--r--  storage/bdb/test/recd018.tcl  110
-rw-r--r--  storage/bdb/test/recd019.tcl  121
-rw-r--r--  storage/bdb/test/recd020.tcl  180
-rw-r--r--  storage/bdb/test/recd15scr.tcl  74
-rw-r--r--  storage/bdb/test/recdscript.tcl  37
-rw-r--r--  storage/bdb/test/rep001.tcl  249
-rw-r--r--  storage/bdb/test/rep002.tcl  278
-rw-r--r--  storage/bdb/test/rep003.tcl  221
-rw-r--r--  storage/bdb/test/rep004.tcl  198
-rw-r--r--  storage/bdb/test/rep005.tcl  225
-rw-r--r--  storage/bdb/test/reputils.tcl  659
-rw-r--r--  storage/bdb/test/rpc001.tcl  449
-rw-r--r--  storage/bdb/test/rpc002.tcl  143
-rw-r--r--  storage/bdb/test/rpc003.tcl  166
-rw-r--r--  storage/bdb/test/rpc004.tcl  76
-rw-r--r--  storage/bdb/test/rpc005.tcl  137
-rw-r--r--  storage/bdb/test/rsrc001.tcl  221
-rw-r--r--  storage/bdb/test/rsrc002.tcl  66
-rw-r--r--  storage/bdb/test/rsrc003.tcl  173
-rw-r--r--  storage/bdb/test/rsrc004.tcl  52
-rw-r--r--  storage/bdb/test/scr001/chk.code  37
-rw-r--r--  storage/bdb/test/scr002/chk.def  64
-rw-r--r--  storage/bdb/test/scr003/chk.define  77
-rw-r--r--  storage/bdb/test/scr004/chk.javafiles  31
-rw-r--r--  storage/bdb/test/scr005/chk.nl  112
-rw-r--r--  storage/bdb/test/scr006/chk.offt  36
-rw-r--r--  storage/bdb/test/scr007/chk.proto  45
-rw-r--r--  storage/bdb/test/scr008/chk.pubdef  179
-rw-r--r--  storage/bdb/test/scr009/chk.srcfiles  39
-rw-r--r--  storage/bdb/test/scr010/chk.str  31
-rw-r--r--  storage/bdb/test/scr010/spell.ok  825
-rw-r--r--  storage/bdb/test/scr011/chk.tags  41
-rw-r--r--  storage/bdb/test/scr012/chk.vx_code  68
-rw-r--r--  storage/bdb/test/scr013/chk.stats  114
-rw-r--r--  storage/bdb/test/scr014/chk.err  34
-rw-r--r--  storage/bdb/test/scr015/README  36
-rw-r--r--  storage/bdb/test/scr015/TestConstruct01.cpp  330
-rw-r--r--  storage/bdb/test/scr015/TestConstruct01.testerr  4
-rw-r--r--  storage/bdb/test/scr015/TestConstruct01.testout  27
-rw-r--r--  storage/bdb/test/scr015/TestExceptInclude.cpp  27
-rw-r--r--  storage/bdb/test/scr015/TestGetSetMethods.cpp  91
-rw-r--r--  storage/bdb/test/scr015/TestKeyRange.cpp  171
-rw-r--r--  storage/bdb/test/scr015/TestKeyRange.testin  8
-rw-r--r--  storage/bdb/test/scr015/TestKeyRange.testout  19
-rw-r--r--  storage/bdb/test/scr015/TestLogc.cpp  101
-rw-r--r--  storage/bdb/test/scr015/TestLogc.testout  1
-rw-r--r--  storage/bdb/test/scr015/TestSimpleAccess.cpp  67
-rw-r--r--  storage/bdb/test/scr015/TestSimpleAccess.testout  3
-rw-r--r--  storage/bdb/test/scr015/TestTruncate.cpp  84
-rw-r--r--  storage/bdb/test/scr015/TestTruncate.testout  6
-rw-r--r--  storage/bdb/test/scr015/chk.cxxtests  71
-rw-r--r--  storage/bdb/test/scr015/ignore  4
-rw-r--r--  storage/bdb/test/scr015/testall  32
-rw-r--r--  storage/bdb/test/scr015/testone  122
-rw-r--r--  storage/bdb/test/scr016/CallbackTest.java  83
-rw-r--r--  storage/bdb/test/scr016/CallbackTest.testout  60
-rw-r--r--  storage/bdb/test/scr016/README  37
-rw-r--r--  storage/bdb/test/scr016/TestAppendRecno.java  258
-rw-r--r--  storage/bdb/test/scr016/TestAppendRecno.testout  82
-rw-r--r--  storage/bdb/test/scr016/TestAssociate.java  333
-rw-r--r--  storage/bdb/test/scr016/TestAssociate.testout  30
-rw-r--r--  storage/bdb/test/scr016/TestClosedDb.java  62
-rw-r--r--  storage/bdb/test/scr016/TestClosedDb.testout  2
-rw-r--r--  storage/bdb/test/scr016/TestConstruct01.java  474
-rw-r--r--  storage/bdb/test/scr016/TestConstruct01.testerr  0
-rw-r--r--  storage/bdb/test/scr016/TestConstruct01.testout  3
-rw-r--r--  storage/bdb/test/scr016/TestConstruct02.java  326
-rw-r--r--  storage/bdb/test/scr016/TestConstruct02.testout  3
-rw-r--r--  storage/bdb/test/scr016/TestDbtFlags.java  241
-rw-r--r--  storage/bdb/test/scr016/TestDbtFlags.testerr  54
-rw-r--r--  storage/bdb/test/scr016/TestDbtFlags.testout  78
-rw-r--r--  storage/bdb/test/scr016/TestGetSetMethods.java  99
-rw-r--r--  storage/bdb/test/scr016/TestKeyRange.java  203
-rw-r--r--  storage/bdb/test/scr016/TestKeyRange.testout  27
-rw-r--r--  storage/bdb/test/scr016/TestLockVec.java  249
-rw-r--r--  storage/bdb/test/scr016/TestLockVec.testout  8
-rw-r--r--  storage/bdb/test/scr016/TestLogc.java  100
-rw-r--r--  storage/bdb/test/scr016/TestLogc.testout  1
-rw-r--r--  storage/bdb/test/scr016/TestOpenEmpty.java  189
-rw-r--r--  storage/bdb/test/scr016/TestOpenEmpty.testerr  2
-rw-r--r--  storage/bdb/test/scr016/TestReplication.java  289
-rw-r--r--  storage/bdb/test/scr016/TestRpcServer.java  193
-rw-r--r--  storage/bdb/test/scr016/TestSameDbt.java  56
-rw-r--r--  storage/bdb/test/scr016/TestSameDbt.testout  2
-rw-r--r--  storage/bdb/test/scr016/TestSimpleAccess.java  37
-rw-r--r--  storage/bdb/test/scr016/TestSimpleAccess.testout  3
-rw-r--r--  storage/bdb/test/scr016/TestStat.java  57
-rw-r--r--  storage/bdb/test/scr016/TestStat.testout  11
-rw-r--r--  storage/bdb/test/scr016/TestTruncate.java  87
-rw-r--r--  storage/bdb/test/scr016/TestTruncate.testout  6
-rw-r--r--  storage/bdb/test/scr016/TestUtil.java  57
-rw-r--r--  storage/bdb/test/scr016/TestXAServlet.java  313
-rw-r--r--  storage/bdb/test/scr016/chk.javatests  79
-rw-r--r--  storage/bdb/test/scr016/ignore  22
-rw-r--r--  storage/bdb/test/scr016/testall  32
-rw-r--r--  storage/bdb/test/scr016/testone  122
-rw-r--r--  storage/bdb/test/scr017/O.BH  196
-rw-r--r--  storage/bdb/test/scr017/O.R  196
-rw-r--r--  storage/bdb/test/scr017/chk.db185  26
-rw-r--r--  storage/bdb/test/scr017/t.c  188
-rw-r--r--  storage/bdb/test/scr018/chk.comma  30
-rw-r--r--  storage/bdb/test/scr018/t.c  46
-rw-r--r--  storage/bdb/test/scr019/chk.include  40
-rw-r--r--  storage/bdb/test/scr020/chk.inc  43
-rw-r--r--  storage/bdb/test/scr021/chk.flags  97
-rw-r--r--  storage/bdb/test/scr022/chk.rr  22
-rw-r--r--  storage/bdb/test/sdb001.tcl  156
-rw-r--r--  storage/bdb/test/sdb002.tcl  221
-rw-r--r--  storage/bdb/test/sdb003.tcl  179
-rw-r--r--  storage/bdb/test/sdb004.tcl  241
-rw-r--r--  storage/bdb/test/sdb005.tcl  146
-rw-r--r--  storage/bdb/test/sdb006.tcl  169
-rw-r--r--  storage/bdb/test/sdb007.tcl  132
-rw-r--r--  storage/bdb/test/sdb008.tcl  121
-rw-r--r--  storage/bdb/test/sdb009.tcl  108
-rw-r--r--  storage/bdb/test/sdb010.tcl  166
-rw-r--r--  storage/bdb/test/sdb011.tcl  143
-rw-r--r--  storage/bdb/test/sdb012.tcl  428
-rw-r--r--  storage/bdb/test/sdbscript.tcl  47
-rw-r--r--  storage/bdb/test/sdbtest001.tcl  150
-rw-r--r--  storage/bdb/test/sdbtest002.tcl  174
-rw-r--r--  storage/bdb/test/sdbutils.tcl  197
-rw-r--r--  storage/bdb/test/sec001.tcl  205
-rw-r--r--  storage/bdb/test/sec002.tcl  143
-rw-r--r--  storage/bdb/test/shelltest.tcl  88
-rw-r--r--  storage/bdb/test/si001.tcl  116
-rw-r--r--  storage/bdb/test/si002.tcl  167
-rw-r--r--  storage/bdb/test/si003.tcl  142
-rw-r--r--  storage/bdb/test/si004.tcl  194
-rw-r--r--  storage/bdb/test/si005.tcl  179
-rw-r--r--  storage/bdb/test/si006.tcl  129
-rw-r--r--  storage/bdb/test/sindex.tcl  259
-rw-r--r--  storage/bdb/test/sysscript.tcl  282
-rw-r--r--  storage/bdb/test/test.tcl  1863
-rw-r--r--  storage/bdb/test/test001.tcl  247
-rw-r--r--  storage/bdb/test/test002.tcl  161
-rw-r--r--  storage/bdb/test/test003.tcl  210
-rw-r--r--  storage/bdb/test/test004.tcl  169
-rw-r--r--  storage/bdb/test/test005.tcl  19
-rw-r--r--  storage/bdb/test/test006.tcl  150
-rw-r--r--  storage/bdb/test/test007.tcl  19
-rw-r--r--  storage/bdb/test/test008.tcl  200
-rw-r--r--  storage/bdb/test/test009.tcl  18
-rw-r--r--  storage/bdb/test/test010.tcl  176
-rw-r--r--  storage/bdb/test/test011.tcl  470
-rw-r--r--  storage/bdb/test/test012.tcl  139
-rw-r--r--  storage/bdb/test/test013.tcl  241
-rw-r--r--  storage/bdb/test/test014.tcl  253
-rw-r--r--  storage/bdb/test/test015.tcl  276
-rw-r--r--  storage/bdb/test/test016.tcl  207
-rw-r--r--  storage/bdb/test/test017.tcl  306
-rw-r--r--  storage/bdb/test/test018.tcl  16
-rw-r--r--  storage/bdb/test/test019.tcl  131
-rw-r--r--  storage/bdb/test/test020.tcl  137
-rw-r--r--  storage/bdb/test/test021.tcl  162
-rw-r--r--  storage/bdb/test/test022.tcl  62
-rw-r--r--  storage/bdb/test/test023.tcl  221
-rw-r--r--  storage/bdb/test/test024.tcl  268
-rw-r--r--  storage/bdb/test/test025.tcl  146
-rw-r--r--  storage/bdb/test/test026.tcl  155
-rw-r--r--  storage/bdb/test/test027.tcl  17
-rw-r--r--  storage/bdb/test/test028.tcl  222
-rw-r--r--  storage/bdb/test/test029.tcl  245
-rw-r--r--  storage/bdb/test/test030.tcl  231
-rw-r--r--  storage/bdb/test/test031.tcl  230
-rw-r--r--  storage/bdb/test/test032.tcl  231
-rw-r--r--  storage/bdb/test/test033.tcl  176
-rw-r--r--  storage/bdb/test/test034.tcl  17
-rw-r--r--  storage/bdb/test/test035.tcl  16
-rw-r--r--  storage/bdb/test/test036.tcl  173
-rw-r--r--  storage/bdb/test/test037.tcl  196
-rw-r--r--  storage/bdb/test/test038.tcl  227
-rw-r--r--  storage/bdb/test/test039.tcl  211
-rw-r--r--  storage/bdb/test/test040.tcl  17
-rw-r--r--  storage/bdb/test/test041.tcl  17
-rw-r--r--  storage/bdb/test/test042.tcl  181
-rw-r--r--  storage/bdb/test/test043.tcl  192
-rw-r--r--  storage/bdb/test/test044.tcl  250
-rw-r--r--  storage/bdb/test/test045.tcl  123
-rw-r--r--  storage/bdb/test/test046.tcl  813
-rw-r--r--  storage/bdb/test/test047.tcl  258
-rw-r--r--  storage/bdb/test/test048.tcl  170
-rw-r--r--  storage/bdb/test/test049.tcl  184
-rw-r--r--  storage/bdb/test/test050.tcl  221
-rw-r--r--  storage/bdb/test/test051.tcl  219
-rw-r--r--  storage/bdb/test/test052.tcl  276
-rw-r--r--  storage/bdb/test/test053.tcl  225
-rw-r--r--  storage/bdb/test/test054.tcl  461
-rw-r--r--  storage/bdb/test/test055.tcl  141
-rw-r--r--  storage/bdb/test/test056.tcl  169
-rw-r--r--  storage/bdb/test/test057.tcl  248
-rw-r--r--  storage/bdb/test/test058.tcl  103
-rw-r--r--  storage/bdb/test/test059.tcl  150
-rw-r--r--  storage/bdb/test/test060.tcl  60
-rw-r--r--  storage/bdb/test/test061.tcl  226
-rw-r--r--  storage/bdb/test/test062.tcl  153
-rw-r--r--  storage/bdb/test/test063.tcl  174
-rw-r--r--  storage/bdb/test/test064.tcl  69
-rw-r--r--  storage/bdb/test/test065.tcl  199
-rw-r--r--  storage/bdb/test/test066.tcl  99
-rw-r--r--  storage/bdb/test/test067.tcl  155
-rw-r--r--  storage/bdb/test/test068.tcl  226
-rw-r--r--  storage/bdb/test/test069.tcl  14
-rw-r--r--  storage/bdb/test/test070.tcl  142
-rw-r--r--  storage/bdb/test/test071.tcl  16
-rw-r--r--  storage/bdb/test/test072.tcl  252
-rw-r--r--  storage/bdb/test/test073.tcl  290
-rw-r--r--  storage/bdb/test/test074.tcl  271
-rw-r--r--  storage/bdb/test/test075.tcl  205
-rw-r--r--  storage/bdb/test/test076.tcl  80
-rw-r--r--  storage/bdb/test/test077.tcl  93
-rw-r--r--  storage/bdb/test/test078.tcl  130
-rw-r--r--  storage/bdb/test/test079.tcl  20
-rw-r--r--  storage/bdb/test/test080.tcl  126
-rw-r--r--  storage/bdb/test/test081.tcl  15
-rw-r--r--  storage/bdb/test/test082.tcl  14
-rw-r--r--  storage/bdb/test/test083.tcl  162
-rw-r--r--  storage/bdb/test/test084.tcl  53
-rw-r--r--  storage/bdb/test/test085.tcl  332
-rw-r--r--  storage/bdb/test/test086.tcl  166
-rw-r--r--  storage/bdb/test/test087.tcl  290
-rw-r--r--  storage/bdb/test/test088.tcl  172
-rw-r--r--  storage/bdb/test/test089.tcl  180
-rw-r--r--  storage/bdb/test/test090.tcl  16
-rw-r--r--  storage/bdb/test/test091.tcl  20
-rw-r--r--  storage/bdb/test/test092.tcl  241
-rw-r--r--  storage/bdb/test/test093.tcl  393
-rw-r--r--  storage/bdb/test/test094.tcl  251
-rw-r--r--  storage/bdb/test/test095.tcl  296
-rw-r--r--  storage/bdb/test/test096.tcl  202
-rw-r--r--  storage/bdb/test/test097.tcl  188
-rw-r--r--  storage/bdb/test/test098.tcl  91
-rw-r--r--  storage/bdb/test/test099.tcl  177
-rw-r--r--  storage/bdb/test/test100.tcl  17
-rw-r--r--  storage/bdb/test/test101.tcl  17
-rw-r--r--  storage/bdb/test/testparams.tcl  194
-rw-r--r--  storage/bdb/test/testutils.tcl  3209
-rw-r--r--  storage/bdb/test/txn001.tcl  116
-rw-r--r--  storage/bdb/test/txn002.tcl  91
-rw-r--r--  storage/bdb/test/txn003.tcl  238
-rw-r--r--  storage/bdb/test/txn004.tcl  62
-rw-r--r--  storage/bdb/test/txn005.tcl  75
-rw-r--r--  storage/bdb/test/txn006.tcl  47
-rw-r--r--  storage/bdb/test/txn007.tcl  57
-rw-r--r--  storage/bdb/test/txn008.tcl  32
-rw-r--r--  storage/bdb/test/txn009.tcl  32
-rw-r--r--  storage/bdb/test/txnscript.tcl  67
-rw-r--r--  storage/bdb/test/update.tcl  93
-rw-r--r--  storage/bdb/test/upgrade.tcl  294
-rw-r--r--  storage/bdb/test/wordlist  10001
-rw-r--r--  storage/bdb/test/wrap.tcl  71
320 files changed, 61407 insertions, 0 deletions
diff --git a/storage/bdb/test/archive.tcl b/storage/bdb/test/archive.tcl
new file mode 100644
index 00000000000..9b5e764b2b4
--- /dev/null
+++ b/storage/bdb/test/archive.tcl
@@ -0,0 +1,230 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: archive.tcl,v 11.20 2002/04/30 19:21:21 sue Exp $
+#
+# Options are:
+# -checkrec <checkpoint frequency>
+# -dir <dbhome directory>
+# -maxfilesize <max size of log file>
+proc archive { args } {
+ global alphabet
+ source ./include.tcl
+
+ # Set defaults
+ set maxbsize [expr 8 * 1024]
+ set maxfile [expr 32 * 1024]
+ set checkrec 500
+ for { set i 0 } { $i < [llength $args] } {incr i} {
+ switch -regexp -- [lindex $args $i] {
+ -c.* { incr i; set checkrec [lindex $args $i] }
+ -d.* { incr i; set testdir [lindex $args $i] }
+ -m.* { incr i; set maxfile [lindex $args $i] }
+ default {
+ puts "FAIL:[timestamp] archive usage"
+ puts "usage: archive -checkrec <checkpt freq> \
+ -dir <directory> -maxfilesize <max size of log files>"
+ return
+ }
+
+ }
+ }
+
+ # Clean out old log if it existed
+ puts "Archive: Log archive test"
+ puts "Unlinking log: error message OK"
+ env_cleanup $testdir
+
+ # Now run the various functionality tests
+ set eflags "-create -txn -home $testdir \
+ -log_buffer $maxbsize -log_max $maxfile"
+ set dbenv [eval {berkdb_env} $eflags]
+ error_check_bad dbenv $dbenv NULL
+ error_check_good dbenv [is_substr $dbenv env] 1
+
+ set logc [$dbenv log_cursor]
+ error_check_good log_cursor [is_valid_logc $logc $dbenv] TRUE
+
+ # The basic test structure here is that we write a lot of log
+ # records (enough to fill up 100 log files; each log file is
+ # small). We take periodic checkpoints. Between each pair
+ # of checkpoints, we refer to 2 files, overlapping them each
+ # checkpoint. We also start transactions and let them overlap
+ # checkpoints as well. The pattern that we try to create is:
+ # ---- write log records----|||||--- write log records ---
+ # -T1 T2 T3 --- D1 D2 ------CHECK--- CT1 --- D2 D3 CD1 ----CHECK
+ # where Tx is begin transaction, CTx is commit transaction, Dx is
+ # open data file and CDx is close data file.
+
+ set baserec "1:$alphabet:2:$alphabet:3:$alphabet:4:$alphabet"
+ puts "\tArchive.a: Writing log records; checkpoint every $checkrec records"
+ set nrecs $maxfile
+ set rec 0:$baserec
+
+ # Begin transaction and write a log record
+ set t1 [$dbenv txn]
+ error_check_good t1:txn_begin [is_substr $t1 "txn"] 1
+
+ set l1 [$dbenv log_put $rec]
+ error_check_bad l1:log_put [llength $l1] 0
+
+ set lsnlist [list [lindex $l1 0]]
+
+ set t2 [$dbenv txn]
+ error_check_good t2:txn_begin [is_substr $t2 "txn"] 1
+
+ set l1 [$dbenv log_put $rec]
+ lappend lsnlist [lindex $l1 0]
+
+ set t3 [$dbenv txn]
+ set l1 [$dbenv log_put $rec]
+ lappend lsnlist [lindex $l1 0]
+
+ set txnlist [list $t1 $t2 $t3]
+ set db1 [eval {berkdb_open} "-create -mode 0644 -hash -env $dbenv ar1"]
+ set db2 [eval {berkdb_open} "-create -mode 0644 -btree -env $dbenv ar2"]
+ set dbcount 3
+ set dblist [list $db1 $db2]
+
+ for { set i 1 } { $i <= $nrecs } { incr i } {
+ set rec $i:$baserec
+ set lsn [$dbenv log_put $rec]
+ error_check_bad log_put [llength $lsn] 0
+ if { [expr $i % $checkrec] == 0 } {
+ # Take a checkpoint
+ $dbenv txn_checkpoint
+ set ckp_file [lindex [lindex [$logc get -last] 0] 0]
+ catch { archive_command -h $testdir -a } res_log_full
+ if { [string first db_archive $res_log_full] == 0 } {
+ set res_log_full ""
+ }
+ catch { archive_command -h $testdir } res_log
+ if { [string first db_archive $res_log] == 0 } {
+ set res_log ""
+ }
+ catch { archive_command -h $testdir -l } res_alllog
+ catch { archive_command -h $testdir -a -s } \
+ res_data_full
+ catch { archive_command -h $testdir -s } res_data
+ error_check_good nlogfiles [llength $res_alllog] \
+ [lindex [lindex [$logc get -last] 0] 0]
+ error_check_good logs_match [llength $res_log_full] \
+ [llength $res_log]
+ error_check_good data_match [llength $res_data_full] \
+ [llength $res_data]
+
+ # Check right number of log files
+ error_check_good nlogs [llength $res_log] \
+ [expr [lindex $lsnlist 0] - 1]
+
+ # Check that the relative names are a subset of the
+ # full names
+ set n 0
+ foreach x $res_log {
+ error_check_bad log_name_match:$res_log \
+ [string first $x \
+ [lindex $res_log_full $n]] -1
+ incr n
+ }
+
+ set n 0
+ foreach x $res_data {
+ error_check_bad log_name_match:$res_data \
+ [string first $x \
+ [lindex $res_data_full $n]] -1
+ incr n
+ }
+
+ # Begin/commit any transactions
+ set t [lindex $txnlist 0]
+ if { [string length $t] != 0 } {
+ error_check_good txn_commit:$t [$t commit] 0
+ set txnlist [lrange $txnlist 1 end]
+ }
+ set lsnlist [lrange $lsnlist 1 end]
+
+ if { [llength $txnlist] == 0 } {
+ set t1 [$dbenv txn]
+ error_check_bad tx_begin $t1 NULL
+ error_check_good \
+ tx_begin [is_substr $t1 $dbenv] 1
+ set l1 [lindex [$dbenv log_put $rec] 0]
+ lappend lsnlist [min $l1 $ckp_file]
+
+ set t2 [$dbenv txn]
+ error_check_bad tx_begin $t2 NULL
+ error_check_good \
+ tx_begin [is_substr $t2 $dbenv] 1
+ set l1 [lindex [$dbenv log_put $rec] 0]
+ lappend lsnlist [min $l1 $ckp_file]
+
+ set t3 [$dbenv txn]
+ error_check_bad tx_begin $t3 NULL
+ error_check_good \
+ tx_begin [is_substr $t3 $dbenv] 1
+ set l1 [lindex [$dbenv log_put $rec] 0]
+ lappend lsnlist [min $l1 $ckp_file]
+
+ set txnlist [list $t1 $t2 $t3]
+ }
+
+ # Open/close some DB files
+ if { [expr $dbcount % 2] == 0 } {
+ set type "-hash"
+ } else {
+ set type "-btree"
+ }
+ set db [eval {berkdb_open} \
+ "-create -mode 0644 $type -env $dbenv ar$dbcount"]
+ error_check_bad db_open:$dbcount $db NULL
+ error_check_good db_open:$dbcount [is_substr $db db] 1
+ incr dbcount
+
+ lappend dblist $db
+ set db [lindex $dblist 0]
+ error_check_good db_close:$db [$db close] 0
+ set dblist [lrange $dblist 1 end]
+
+ }
+ }
+ # Commit any transactions still running.
+ puts "\tArchive.b: Commit any transactions still running."
+ foreach t $txnlist {
+ error_check_good txn_commit:$t [$t commit] 0
+ }
+
+ # Close any files that are still open.
+ puts "\tArchive.c: Close open files."
+ foreach d $dblist {
+ error_check_good db_close:$d [$d close] 0
+ }
+
+ # Close and unlink the file
+ error_check_good log_cursor_close [$logc close] 0
+ reset_env $dbenv
+}
+
+proc archive_command { args } {
+ source ./include.tcl
+
+ # Catch a list of files output by db_archive.
+ catch { eval exec $util_path/db_archive $args } output
+
+ if { $is_windows_test == 1 } {
+ # On Windows, convert all filenames to use forward slashes.
+ regsub -all {[\\]} $output / output
+ }
+
+ # Output the [possibly-transformed] list.
+ return $output
+}
+
+proc min { a b } {
+ if {$a < $b} {
+ return $a
+ } else {
+ return $b
+ }
+}
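
Note: archive_command above just shells out to the db_archive utility and
normalizes path separators. A minimal sketch of how the test consumes it,
assuming include.tcl has been sourced (it defines util_path and testdir)
and this file has been loaded; the flags are the ones exercised in the
checkpoint loop:

    source ./include.tcl
    # Log files no longer needed (db_archive's default output).
    set removable [archive_command -h $testdir]
    # All log files (-l) and database files that need backing up (-s).
    set all_logs  [archive_command -h $testdir -l]
    set datafiles [archive_command -h $testdir -s]
    puts "removable logs: $removable"
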
diff --git a/storage/bdb/test/bigfile001.tcl b/storage/bdb/test/bigfile001.tcl
new file mode 100644
index 00000000000..78dcd940f5e
--- /dev/null
+++ b/storage/bdb/test/bigfile001.tcl
@@ -0,0 +1,85 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: bigfile001.tcl,v 11.7 2002/08/10 13:39:26 bostic Exp $
+#
+# TEST bigfile001
+# TEST Create a database greater than 4 GB in size. Close, verify.
+# TEST Grow the database somewhat. Close, reverify. Lather, rinse,
+# TEST repeat. Since it will not work on all systems, this test is
+# TEST not run by default.
+proc bigfile001 { method \
+ { itemsize 4096 } { nitems 1048576 } { growby 5000 } { growtms 2 } args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Bigfile: $method ($args) $nitems * $itemsize bytes of data"
+
+ env_cleanup $testdir
+
+ # Create the database. Use 64K pages; we want a good fill
+ # factor, and page size doesn't matter much. Use a 50MB
+ # cache; that should be manageable, and will help
+ # performance.
+ set dbname $testdir/big.db
+
+ set db [eval {berkdb_open -create} {-pagesize 65536 \
+ -cachesize {0 50000000 0}} $omethod $args $dbname]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ puts -nonewline "\tBigfile.a: Creating database...0%..."
+ flush stdout
+
+ set data [string repeat z $itemsize]
+
+ set more_than_ten_already 0
+ for { set i 0 } { $i < $nitems } { incr i } {
+ set key key[format %08u $i]
+
+ error_check_good db_put($i) [$db put $key $data] 0
+
+ if { $i % 5000 == 0 } {
+ set pct [expr 100 * $i / $nitems]
+ puts -nonewline "\b\b\b\b\b"
+ if { $pct >= 10 } {
+ if { $more_than_ten_already } {
+ puts -nonewline "\b"
+ } else {
+ set more_than_ten_already 1
+ }
+ }
+
+ puts -nonewline "$pct%..."
+ flush stdout
+ }
+ }
+ puts "\b\b\b\b\b\b100%..."
+ error_check_good db_close [$db close] 0
+
+ puts "\tBigfile.b: Verifying database..."
+ error_check_good verify \
+ [verify_dir $testdir "\t\t" 0 0 1 50000000] 0
+
+ puts "\tBigfile.c: Grow database $growtms times by $growby items"
+
+ for { set j 0 } { $j < $growtms } { incr j } {
+ set db [eval {berkdb_open} {-cachesize {0 50000000 0}} $dbname]
+ error_check_good db_open [is_valid_db $db] TRUE
+ puts -nonewline "\t\tBigfile.c.1: Adding $growby items..."
+ flush stdout
+ for { set i 0 } { $i < $growby } { incr i } {
+ set key key[format %08u $i].$j
+ error_check_good db_put($j.$i) [$db put $key $data] 0
+ }
+ error_check_good db_close [$db close] 0
+ puts "done."
+
+ puts "\t\tBigfile.c.2: Verifying database..."
+ error_check_good verify($j) \
+ [verify_dir $testdir "\t\t\t" 0 0 1 50000000] 0
+ }
+}
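
Note: with the default parameters the initial load alone is past the 4 GB
mark before any page or key overhead, since 2^20 items at 4 KiB each is
exactly 4 GiB. A quick check of that arithmetic:

    set nitems 1048576
    set itemsize 4096
    # 1048576 * 4096 = 4294967296 bytes, i.e. 4 GiB of raw data.
    puts [expr {wide($nitems) * $itemsize}]
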
diff --git a/storage/bdb/test/bigfile002.tcl b/storage/bdb/test/bigfile002.tcl
new file mode 100644
index 00000000000..f3e6defeaba
--- /dev/null
+++ b/storage/bdb/test/bigfile002.tcl
@@ -0,0 +1,45 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: bigfile002.tcl,v 11.7 2002/08/10 13:39:26 bostic Exp $
+#
+# TEST bigfile002
+# TEST This one should be faster and not require so much disk space,
+# TEST although it doesn't test as extensively. Create an mpool file
+# TEST with 1K pages. Dirty page 6000000. Sync.
+proc bigfile002 { args } {
+ source ./include.tcl
+
+ puts -nonewline \
+ "Bigfile002: Creating large, sparse file through mpool..."
+ flush stdout
+
+ env_cleanup $testdir
+
+ # Create env.
+ set env [berkdb_env -create -home $testdir]
+ error_check_good valid_env [is_valid_env $env] TRUE
+
+ # Create the file.
+ set name big002.file
+ set file [$env mpool -create -pagesize 1024 $name]
+
+ # Dirty page 6000000
+ set pg [$file get -create 6000000]
+ error_check_good pg_init [$pg init A] 0
+ error_check_good pg_set [$pg is_setto A] 1
+
+ # Put page back.
+ error_check_good pg_put [$pg put -dirty] 0
+
+ # Fsync.
+ error_check_good fsync [$file fsync] 0
+
+ puts "succeeded."
+
+ # Close.
+ error_check_good fclose [$file close] 0
+ error_check_good env_close [$env close] 0
+}
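
Note: this test is fast because only page 6000000 is ever touched, so the
file stays sparse. With the 1 KiB page size used above, that single dirty
page sits at roughly a 5.7 GiB offset:

    set pagesize 1024
    set pageno 6000000
    # 6000000 * 1024 = 6144000000 bytes (~5.7 GiB) of file offset,
    # almost none of it backed by real disk blocks.
    puts [expr {wide($pageno) * $pagesize}]
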
diff --git a/storage/bdb/test/byteorder.tcl b/storage/bdb/test/byteorder.tcl
new file mode 100644
index 00000000000..823ca46270d
--- /dev/null
+++ b/storage/bdb/test/byteorder.tcl
@@ -0,0 +1,34 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: byteorder.tcl,v 11.12 2002/07/29 18:09:25 sue Exp $
+#
+# Byte Order Test
+# Use existing tests and run with both byte orders.
+proc byteorder { method {nentries 1000} } {
+ source ./include.tcl
+ puts "Byteorder: $method $nentries"
+
+ eval {test001 $method $nentries 0 "01" 0 -lorder 1234}
+ eval {verify_dir $testdir}
+ eval {test001 $method $nentries 0 "01" 0 -lorder 4321}
+ eval {verify_dir $testdir}
+ eval {test003 $method -lorder 1234}
+ eval {verify_dir $testdir}
+ eval {test003 $method -lorder 4321}
+ eval {verify_dir $testdir}
+ eval {test010 $method $nentries 5 10 -lorder 1234}
+ eval {verify_dir $testdir}
+ eval {test010 $method $nentries 5 10 -lorder 4321}
+ eval {verify_dir $testdir}
+ eval {test011 $method $nentries 5 11 -lorder 1234}
+ eval {verify_dir $testdir}
+ eval {test011 $method $nentries 5 11 -lorder 4321}
+ eval {verify_dir $testdir}
+ eval {test018 $method $nentries -lorder 1234}
+ eval {verify_dir $testdir}
+ eval {test018 $method $nentries -lorder 4321}
+ eval {verify_dir $testdir}
+}
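
Note: the -lorder values are Berkeley DB's byte-order codes: 1234 selects
little-endian databases and 4321 big-endian, regardless of the host. A
sketch of creating one of each (the file names here are made up; the open
flags mirror those used elsewhere in this patch):

    source ./include.tcl
    set dble [berkdb_open -create -mode 0644 -btree -lorder 1234 $testdir/le.db]
    set dbbe [berkdb_open -create -mode 0644 -btree -lorder 4321 $testdir/be.db]
    error_check_good le_close [$dble close] 0
    error_check_good be_close [$dbbe close] 0
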
diff --git a/storage/bdb/test/conscript.tcl b/storage/bdb/test/conscript.tcl
new file mode 100644
index 00000000000..fd12c6e51a0
--- /dev/null
+++ b/storage/bdb/test/conscript.tcl
@@ -0,0 +1,123 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: conscript.tcl,v 11.17 2002/03/22 21:43:06 krinsky Exp $
+#
+# Script for DB_CONSUME test (test070.tcl).
+# Usage: conscript dir file runtype nitems outputfile tnum args
+# dir: DBHOME directory
+# file: db file on which to operate
+# runtype: PRODUCE or CONSUME--which am I?
+# nitems: number of items to put or get
+# outputfile: where to log consumer results
+# tnum: test number
+
+proc consumescript_produce { db_cmd nitems tnum args } {
+ source ./include.tcl
+ global mydata
+
+ set pid [pid]
+ puts "\tTest0$tnum: Producer $pid starting, producing $nitems items."
+
+ set db [eval $db_cmd]
+ error_check_good db_open:$pid [is_valid_db $db] TRUE
+
+ set oret -1
+ set ret 0
+ for { set ndx 0 } { $ndx < $nitems } { incr ndx } {
+ set oret $ret
+ if { 0xffffffff > 0 && $oret > 0x7fffffff } {
+ incr oret [expr 0 - 0x100000000]
+ }
+ set ret [$db put -append [chop_data q $mydata]]
+ error_check_good db_put \
+ [expr $ret > 0 ? $oret < $ret : \
+ $oret < 0 ? $oret < $ret : $oret > $ret] 1
+
+ }
+
+ set ret [catch {$db close} res]
+ error_check_good db_close:$pid $ret 0
+ puts "\t\tTest0$tnum: Producer $pid finished."
+}
+
+proc consumescript_consume { db_cmd nitems tnum outputfile mode args } {
+ source ./include.tcl
+ global mydata
+ set pid [pid]
+ puts "\tTest0$tnum: Consumer $pid starting, seeking $nitems items."
+
+ set db [eval $db_cmd]
+ error_check_good db_open:$pid [is_valid_db $db] TRUE
+
+ set oid [open $outputfile w]
+
+ for { set ndx 0 } { $ndx < $nitems } { } {
+ set ret [$db get $mode]
+ if { [llength $ret] > 0 } {
+ error_check_good correct_data:$pid \
+ [lindex [lindex $ret 0] 1] [pad_data q $mydata]
+ set rno [lindex [lindex $ret 0] 0]
+ puts $oid $rno
+ incr ndx
+ } else {
+ # No data to consume; wait.
+ }
+ }
+
+ error_check_good output_close:$pid [close $oid] ""
+
+ set ret [catch {$db close} res]
+ error_check_good db_close:$pid $ret 0
+ puts "\t\tTest0$tnum: Consumer $pid finished."
+}
+
+source ./include.tcl
+source $test_path/test.tcl
+
+set usage "conscript.tcl dir file runtype nitems outputfile tnum"
+
+# Verify usage
+if { $argc < 6 } {
+ puts stderr "FAIL:[timestamp] Usage: $usage"
+ exit
+}
+
+# Initialize arguments
+set dir [lindex $argv 0]
+set file [lindex $argv 1]
+set runtype [lindex $argv 2]
+set nitems [lindex $argv 3]
+set outputfile [lindex $argv 4]
+set tnum [lindex $argv 5]
+# args is the string "{ -len 20 -pad 0}", so we need to extract the
+# " -len 20 -pad 0" part.
+set args [lindex [lrange $argv 6 end] 0]
+
+set mydata "consumer data"
+
+# Open env
+set dbenv [berkdb_env -home $dir ]
+error_check_good db_env_create [is_valid_env $dbenv] TRUE
+
+# Figure out db opening command.
+set db_cmd [concat {berkdb_open -create -mode 0644 -queue -env}\
+ $dbenv $args $file]
+
+# Invoke consumescript_produce or consumescript_consume based on $runtype
+if { $runtype == "PRODUCE" } {
+ # Producers have nothing to log; make sure outputfile is null.
+ error_check_good no_producer_outputfile $outputfile ""
+ consumescript_produce $db_cmd $nitems $tnum $args
+} elseif { $runtype == "CONSUME" } {
+ consumescript_consume $db_cmd $nitems $tnum $outputfile -consume $args
+} elseif { $runtype == "WAIT" } {
+ consumescript_consume $db_cmd $nitems $tnum $outputfile -consume_wait \
+ $args
+} else {
+ error_check_good bad_args $runtype "either PRODUCE, CONSUME or WAIT"
+}
+error_check_good env_close [$dbenv close] 0
+exit
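
Note: a hypothetical pair of launches the way test070 would drive this
script through wrap.tcl (the queue file name and counts are made up; the
producer's outputfile must be empty, which the script asserts):

    # Producer: append 1000 items to the queue; it logs nothing itself.
    exec $tclsh_path $test_path/wrap.tcl conscript.tcl \
        $testdir/con070.log.prod $testdir q070.db PRODUCE 1000 "" 70 \
        { -len 20 -pad 0} &
    # Consumer: drain 1000 items, recording record numbers in cons.out.
    exec $tclsh_path $test_path/wrap.tcl conscript.tcl \
        $testdir/con070.log.cons $testdir q070.db CONSUME 1000 \
        $testdir/cons.out 70 { -len 20 -pad 0} &
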
diff --git a/storage/bdb/test/dbm.tcl b/storage/bdb/test/dbm.tcl
new file mode 100644
index 00000000000..a392c7a9f3a
--- /dev/null
+++ b/storage/bdb/test/dbm.tcl
@@ -0,0 +1,128 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: dbm.tcl,v 11.15 2002/01/11 15:53:19 bostic Exp $
+#
+# TEST dbm
+# TEST Historic DBM interface test. Use the first 1000 entries from the
+# TEST dictionary. Insert each with self as key and data; retrieve each.
+# TEST After all are entered, retrieve all; compare output to original.
+# TEST Then reopen the file, re-retrieve everything. Finally, delete
+# TEST everything.
+proc dbm { { nentries 1000 } } {
+ source ./include.tcl
+
+ puts "DBM interfaces test: $nentries"
+
+ # Create the database and open the dictionary
+ set testfile $testdir/dbmtest
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ cleanup $testdir NULL
+
+ error_check_good dbminit [berkdb dbminit $testfile] 0
+ set did [open $dict]
+
+ set flags ""
+ set txn ""
+ set count 0
+ set skippednullkey 0
+
+ puts "\tDBM.a: put/get loop"
+ # Here is the loop where we put and get each key/data pair
+ while { [gets $did str] != -1 && $count < $nentries } {
+ # DBM can't handle zero-length keys
+ if { [string length $str] == 0 } {
+ set skippednullkey 1
+ continue
+ }
+
+ set ret [berkdb store $str $str]
+ error_check_good dbm_store $ret 0
+
+ set d [berkdb fetch $str]
+ error_check_good dbm_fetch $d $str
+ incr count
+ }
+ close $did
+
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tDBM.b: dump file"
+ set oid [open $t1 w]
+ for { set key [berkdb firstkey] } { $key != -1 } {\
+ set key [berkdb nextkey $key] } {
+ puts $oid $key
+ set d [berkdb fetch $key]
+ error_check_good dbm_refetch $d $key
+ }
+
+ # If we had to skip a zero-length key, juggle things to cover up
+ # this fact in the dump.
+ if { $skippednullkey == 1 } {
+ puts $oid ""
+ incr nentries 1
+ }
+
+ close $oid
+
+ # Now compare the keys to see if they match the dictionary (or ints)
+ set q q
+ filehead $nentries $dict $t3
+ filesort $t3 $t2
+ filesort $t1 $t3
+
+ error_check_good DBM:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ puts "\tDBM.c: close, open, and dump file"
+
+ # Now, reopen the file and run the last test again.
+ error_check_good dbminit2 [berkdb dbminit $testfile] 0
+ set oid [open $t1 w]
+
+ for { set key [berkdb firstkey] } { $key != -1 } {\
+ set key [berkdb nextkey $key] } {
+ puts $oid $key
+ set d [berkdb fetch $key]
+ error_check_good dbm_refetch $d $key
+ }
+ if { $skippednullkey == 1 } {
+ puts $oid ""
+ }
+ close $oid
+
+ # Now compare the keys to see if they match the dictionary (or ints)
+ filesort $t1 $t3
+
+ error_check_good DBM:diff($t2,$t3) \
+ [filecmp $t2 $t3] 0
+
+ # Now, reopen the file and delete each entry
+ puts "\tDBM.d: sequential scan and delete"
+
+ error_check_good dbminit3 [berkdb dbminit $testfile] 0
+ set oid [open $t1 w]
+
+ for { set key [berkdb firstkey] } { $key != -1 } {\
+ set key [berkdb nextkey $key] } {
+ puts $oid $key
+ set ret [berkdb delete $key]
+ error_check_good dbm_delete $ret 0
+ }
+ if { $skippednullkey == 1 } {
+ puts $oid ""
+ }
+ close $oid
+
+ # Now compare the keys to see if they match the dictionary (or ints)
+ filesort $t1 $t3
+
+ error_check_good DBM:diff($t2,$t3) \
+ [filecmp $t2 $t3] 0
+
+ error_check_good "dbm_close" [berkdb dbmclose] 0
+}
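
Note: a condensed sketch of the historic DBM-style command set the test
exercises (the database path is hypothetical; every command appears above):

    source ./include.tcl
    berkdb dbminit $testdir/dbmdemo
    berkdb store apple apple           ;# key and data are the same word
    puts [berkdb fetch apple]          ;# => apple
    # Iterate every key; firstkey/nextkey return -1 when exhausted.
    for {set k [berkdb firstkey]} {$k != -1} {set k [berkdb nextkey $k]} {
        puts $k
    }
    berkdb delete apple
    berkdb dbmclose
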
diff --git a/storage/bdb/test/dbscript.tcl b/storage/bdb/test/dbscript.tcl
new file mode 100644
index 00000000000..5decc493e9e
--- /dev/null
+++ b/storage/bdb/test/dbscript.tcl
@@ -0,0 +1,357 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: dbscript.tcl,v 11.14 2002/04/01 16:28:16 bostic Exp $
+#
+# Random db tester.
+# Usage: dbscript method file numops ncurs min_del max_add \
+#	key_avg data_avg dups errpct
+# method: access method (we pass this in so that fixed-length records work)
+# file: db file on which to operate
+# numops: number of operations to do
+# ncurs: number of cursors
+# min_del: minimum number of keys that must exist before deletes are allowed
+# max_add: maximum number of keys; once reached, adds are disabled
+# key_avg: average key size
+# data_avg: average data size
+# dups: 1 indicates dups allowed, 0 indicates no dups
+# errpct: what percent of operations should generate errors
+
+source ./include.tcl
+source $test_path/test.tcl
+source $test_path/testutils.tcl
+
+set usage "dbscript file numops ncurs min_del max_add key_avg data_avg dups errpcnt"
+
+# Verify usage
+if { $argc != 10 } {
+ puts stderr "FAIL:[timestamp] Usage: $usage"
+ exit
+}
+
+# Initialize arguments
+set method [lindex $argv 0]
+set file [lindex $argv 1]
+set numops [ lindex $argv 2 ]
+set ncurs [ lindex $argv 3 ]
+set min_del [ lindex $argv 4 ]
+set max_add [ lindex $argv 5 ]
+set key_avg [ lindex $argv 6 ]
+set data_avg [ lindex $argv 7 ]
+set dups [ lindex $argv 8 ]
+set errpct [ lindex $argv 9 ]
+
+berkdb srand $rand_init
+
+puts "Beginning execution for [pid]"
+puts "$file database"
+puts "$numops Operations"
+puts "$ncurs cursors"
+puts "$min_del keys before deletes allowed"
+puts "$max_add or fewer keys to add"
+puts "$key_avg average key length"
+puts "$data_avg average data length"
+if { $dups != 1 } {
+ puts "No dups"
+} else {
+ puts "Dups allowed"
+}
+puts "$errpct % Errors"
+
+flush stdout
+
+set db [berkdb_open $file]
+set cerr [catch {error_check_good dbopen [is_substr $db db] 1} cret]
+if {$cerr != 0} {
+ puts $cret
+ return
+}
+# set method [$db get_type]
+set record_based [is_record_based $method]
+
+# Initialize globals including data
+global nkeys
+global l_keys
+global a_keys
+
+set nkeys [db_init $db 1]
+puts "Initial number of keys: $nkeys"
+
+set pflags ""
+set gflags ""
+set txn ""
+
+# Open the cursors
+set curslist {}
+for { set i 0 } { $i < $ncurs } { incr i } {
+ set dbc [$db cursor]
+ set cerr [catch {error_check_good dbopen [is_substr $dbc $db.c] 1} cret]
+ if {$cerr != 0} {
+ puts $cret
+ return
+ }
+ set cerr [catch {error_check_bad cursor_create $dbc NULL} cret]
+ if {$cerr != 0} {
+ puts $cret
+ return
+ }
+ lappend curslist $dbc
+
+}
+
+# On each iteration we generate random keys and data and select a
+# get, put, add, or delete operation. Delete is not an option when
+# we have fewer than min_del keys, and add is not an option when
+# we have more than max_add keys. The Tcl global array a_keys
+# keeps track of key-data pairs indexed by key, and the global
+# list l_keys holds the list of keys themselves, accessed by
+# integer index.
+set adds 0
+set puts 0
+set gets 0
+set dels 0
+set bad_adds 0
+set bad_puts 0
+set bad_gets 0
+set bad_dels 0
+
+for { set iter 0 } { $iter < $numops } { incr iter } {
+ set op [pick_op $min_del $max_add $nkeys]
+ set err [is_err $errpct]
+
+ # The op0's indicate that there aren't any duplicates, so we
+ # exercise regular operations. If dups is 1, then we'll use
+ # cursor ops.
+ switch $op$dups$err {
+ add00 {
+ incr adds
+
+ set k [random_data $key_avg 1 a_keys $record_based]
+ set data [random_data $data_avg 0 0]
+ set data [chop_data $method $data]
+ set ret [eval {$db put} $txn $pflags \
+ {-nooverwrite $k $data}]
+ set cerr [catch {error_check_good put $ret 0} cret]
+ if {$cerr != 0} {
+ puts $cret
+ return
+ }
+ newpair $k [pad_data $method $data]
+ }
+ add01 {
+ incr bad_adds
+ set k [random_key]
+ set data [random_data $data_avg 0 0]
+ set data [chop_data $method $data]
+ set ret [eval {$db put} $txn $pflags \
+ {-nooverwrite $k $data}]
+ set cerr [catch {error_check_good put $ret 0} cret]
+ if {$cerr != 0} {
+ puts $cret
+ return
+ }
+ # Error case so no change to data state
+ }
+ add10 {
+ incr adds
+ set dbcinfo [random_cursor $curslist]
+ set dbc [lindex $dbcinfo 0]
+ if { [berkdb random_int 1 2] == 1 } {
+ # Add a new key
+ set k [random_data $key_avg 1 a_keys \
+ $record_based]
+ set data [random_data $data_avg 0 0]
+ set data [chop_data $method $data]
+ set ret [eval {$dbc put} $txn \
+ {-keyfirst $k $data}]
+ newpair $k [pad_data $method $data]
+ } else {
+ # Add a new duplicate
+ set dbc [lindex $dbcinfo 0]
+ set k [lindex $dbcinfo 1]
+ set data [random_data $data_avg 0 0]
+
+ set op [pick_cursput]
+ set data [chop_data $method $data]
+ set ret [eval {$dbc put} $txn {$op $k $data}]
+ adddup $k [lindex $dbcinfo 2] $data
+ }
+ }
+ add11 {
+ # TODO
+ incr bad_adds
+ set ret 1
+ }
+ put00 {
+ incr puts
+ set k [random_key]
+ set data [random_data $data_avg 0 0]
+ set data [chop_data $method $data]
+ set ret [eval {$db put} $txn {$k $data}]
+ changepair $k [pad_data $method $data]
+ }
+ put01 {
+ incr bad_puts
+ set k [random_key]
+ set data [random_data $data_avg 0 0]
+ set data [chop_data $method $data]
+ set ret [eval {$db put} $txn $pflags \
+ {-nooverwrite $k $data}]
+ set cerr [catch {error_check_good put $ret 0} cret]
+ if {$cerr != 0} {
+ puts $cret
+ return
+ }
+ # Error case so no change to data state
+ }
+ put10 {
+ incr puts
+ set dbcinfo [random_cursor $curslist]
+ set dbc [lindex $dbcinfo 0]
+ set k [lindex $dbcinfo 1]
+ set data [random_data $data_avg 0 0]
+ set data [chop_data $method $data]
+
+ set ret [eval {$dbc put} $txn {-current $data}]
+ changedup $k [lindex $dbcinfo 2] $data
+ }
+ put11 {
+ incr bad_puts
+ set k [random_key]
+ set data [random_data $data_avg 0 0]
+ set data [chop_data $method $data]
+ set dbc [$db cursor]
+ set ret [eval {$dbc put} $txn {-current $data}]
+ set cerr [catch {error_check_good curs_close \
+ [$dbc close] 0} cret]
+ if {$cerr != 0} {
+ puts $cret
+ return
+ }
+ # Error case so no change to data state
+ }
+ get00 {
+ incr gets
+ set k [random_key]
+ set val [eval {$db get} $txn {$k}]
+ set data [pad_data $method [lindex [lindex $val 0] 1]]
+ if { $data == $a_keys($k) } {
+ set ret 0
+ } else {
+ set ret "FAIL: Error got |$data| expected |$a_keys($k)|"
+ }
+ # Get command requires no state change
+ }
+ get01 {
+ incr bad_gets
+ set k [random_data $key_avg 1 a_keys $record_based]
+ set ret [eval {$db get} $txn {$k}]
+ # Error case so no change to data state
+ }
+ get10 {
+ incr gets
+ set dbcinfo [random_cursor $curslist]
+ if { [llength $dbcinfo] == 3 } {
+  set ret 0
+ } else {
+  set ret 0
+ }
+ # Get command requires no state change
+ }
+ get11 {
+ incr bad_gets
+ set k [random_key]
+ set dbc [$db cursor]
+ if { [berkdb random_int 1 2] == 1 } {
+ set dir -next
+ } else {
+ set dir -prev
+ }
+ set ret [eval {$dbc get} $txn {$dir $k}]
+ set cerr [catch {error_check_good curs_close \
+ [$dbc close] 0} cret]
+ if {$cerr != 0} {
+ puts $cret
+ return
+ }
+ # Error and get case so no change to data state
+ }
+ del00 {
+ incr dels
+ set k [random_key]
+ set ret [eval {$db del} $txn {$k}]
+ rempair $k
+ }
+ del01 {
+ incr bad_dels
+ set k [random_data $key_avg 1 a_keys $record_based]
+ set ret [eval {$db del} $txn {$k}]
+ # Error case so no change to data state
+ }
+ del10 {
+ incr dels
+ set dbcinfo [random_cursor $curslist]
+ set dbc [lindex $dbcinfo 0]
+ set ret [eval {$dbc del} $txn]
+ remdup [lindex $dbcinfo 1] [lindex $dbcinfo 2]
+ }
+ del11 {
+ incr bad_dels
+ set c [$db cursor]
+ set ret [eval {$c del} $txn]
+ set cerr [catch {error_check_good curs_close \
+ [$c close] 0} cret]
+ if {$cerr != 0} {
+ puts $cret
+ return
+ }
+ # Error case so no change to data state
+ }
+ }
+ if { $err == 1 } {
+ # Verify failure.
+ set cerr [catch {error_check_good $op$dups$err:$k \
+ [is_substr Error $ret] 1} cret]
+ if {$cerr != 0} {
+ puts $cret
+ return
+ }
+ } else {
+ # Verify success
+ set cerr [catch {error_check_good $op$dups$err:$k $ret 0} cret]
+ if {$cerr != 0} {
+ puts $cret
+ return
+ }
+ }
+
+ flush stdout
+}
+
+# Close cursors and file
+foreach i $curslist {
+ set r [$i close]
+ set cerr [catch {error_check_good cursor_close:$i $r 0} cret]
+ if {$cerr != 0} {
+ puts $cret
+ return
+ }
+}
+
+set r [$db close]
+set cerr [catch {error_check_good db_close:$db $r 0} cret]
+if {$cerr != 0} {
+ puts $cret
+ return
+}
+
+puts "[timestamp] [pid] Complete"
+puts "Successful ops: $adds adds $gets gets $puts puts $dels dels"
+puts "Error ops: $bad_adds adds $bad_gets gets $bad_puts puts $bad_dels dels"
+flush stdout
+
+filecheck $file $txn
+
+exit
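
Note: a hypothetical invocation matching the ten-argument usage parsed
above: a btree database, 1000 operations, 5 cursors, deletes allowed once
10 keys exist, adds disabled past 1000 keys, ~10-byte keys, ~20-byte data,
no duplicates, 10% deliberate errors. The database must already exist,
since the script opens it without -create:

    exec $tclsh_path dbscript.tcl \
        btree $testdir/rand.db 1000 5 10 1000 10 20 0 10
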
diff --git a/storage/bdb/test/ddoyscript.tcl b/storage/bdb/test/ddoyscript.tcl
new file mode 100644
index 00000000000..5478a1a98e0
--- /dev/null
+++ b/storage/bdb/test/ddoyscript.tcl
@@ -0,0 +1,172 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: ddoyscript.tcl,v 11.6 2002/02/20 16:35:18 sandstro Exp $
+#
+# Deadlock detector script tester.
+# Usage: ddoyscript dir lockerid numprocs oldoryoung myid
+# dir: DBHOME directory
+# lockerid: Lock id for this locker
+# numprocs: Total number of processes running
+# oldoryoung: "o" when oldest-victim detection is under test, "y" for youngest
+# myid: id of this process --
+# the processes are created in the same order in which their
+# lockerids were allocated, so the locker age relationship is
+# isomorphic to the order relationship of the myids.
+
+source ./include.tcl
+source $test_path/test.tcl
+source $test_path/testutils.tcl
+
+set usage "ddoyscript dir lockerid numprocs oldoryoung"
+
+# Verify usage
+if { $argc != 5 } {
+ puts stderr "FAIL:[timestamp] Usage: $usage"
+ exit
+}
+
+# Initialize arguments
+set dir [lindex $argv 0]
+set lockerid [ lindex $argv 1 ]
+set numprocs [ lindex $argv 2 ]
+set old_or_young [lindex $argv 3]
+set myid [lindex $argv 4]
+
+set myenv [berkdb_env -lock -home $dir -create -mode 0644]
+error_check_bad lock_open $myenv NULL
+error_check_good lock_open [is_substr $myenv "env"] 1
+
+# There are two cases here -- oldest/youngest or a ring locker.
+
+if { $myid == 0 || $myid == [expr $numprocs - 1] } {
+ set waitobj NULL
+ set ret 0
+
+ if { $myid == 0 } {
+ set objid 2
+ if { $old_or_young == "o" } {
+ set waitobj [expr $numprocs - 1]
+ }
+ } else {
+ if { $old_or_young == "y" } {
+ set waitobj 0
+ }
+ set objid 4
+ }
+
+ # Acquire own read lock
+ if {[catch {$myenv lock_get read $lockerid $myid} selflock] != 0} {
+ puts $errorInfo
+ } else {
+ error_check_good selfget:$objid [is_substr $selflock $myenv] 1
+ }
+
+ # Acquire read lock
+ if {[catch {$myenv lock_get read $lockerid $objid} lock1] != 0} {
+ puts $errorInfo
+ } else {
+ error_check_good lockget:$objid [is_substr $lock1 $myenv] 1
+ }
+
+ tclsleep 10
+
+ if { $waitobj == "NULL" } {
+ # Sleep for a good long while
+ tclsleep 90
+ } else {
+ # Acquire write lock
+ if {[catch {$myenv lock_get write $lockerid $waitobj} lock2]
+ != 0} {
+ puts $errorInfo
+ set ret ERROR
+ } else {
+ error_check_good lockget:$waitobj \
+ [is_substr $lock2 $myenv] 1
+
+ # Now release it
+ if {[catch {$lock2 put} err] != 0} {
+ puts $errorInfo
+ set ret ERROR
+ } else {
+ error_check_good lockput:oy:$objid $err 0
+ }
+ }
+
+ }
+
+ # Release self lock
+ if {[catch {$selflock put} err] != 0} {
+ puts $errorInfo
+ if { $ret == 0 } {
+ set ret ERROR
+ }
+ } else {
+ error_check_good selfput:oy:$myid $err 0
+ if { $ret == 0 } {
+ set ret 1
+ }
+ }
+
+ # Release first lock
+ if {[catch {$lock1 put} err] != 0} {
+ puts $errorInfo
+ if { $ret == 0 } {
+ set ret ERROR
+ }
+ } else {
+ error_check_good lockput:oy:$objid $err 0
+ if { $ret == 0 } {
+ set ret 1
+ }
+ }
+
+} else {
+ # Make sure that we succeed if we're locking the same object as
+ # oldest or youngest.
+ if { [expr $myid % 2] == 0 } {
+ set mode read
+ } else {
+ set mode write
+ }
+ # Obtain first lock (should always succeed).
+ if {[catch {$myenv lock_get $mode $lockerid $myid} lock1] != 0} {
+ puts $errorInfo
+ } else {
+ error_check_good lockget:$myid [is_substr $lock1 $myenv] 1
+ }
+
+ tclsleep 30
+
+ set nextobj [expr $myid + 1]
+ if { $nextobj == [expr $numprocs - 1] } {
+ set nextobj 1
+ }
+
+ set ret 1
+ if {[catch {$myenv lock_get write $lockerid $nextobj} lock2] != 0} {
+ if {[string match "*DEADLOCK*" $lock2] == 1} {
+ set ret DEADLOCK
+ } else {
+ set ret ERROR
+ }
+ } else {
+ error_check_good lockget:$nextobj [is_substr $lock2 $myenv] 1
+ }
+
+ # Now release the first lock
+ error_check_good lockput:$lock1 [$lock1 put] 0
+
+ if {$ret == 1} {
+ error_check_bad lockget:$nextobj $lock2 NULL
+ error_check_good lockget:$nextobj [is_substr $lock2 $myenv] 1
+ error_check_good lockput:$lock2 [$lock2 put] 0
+ }
+}
+
+puts $ret
+error_check_good lock_id_free [$myenv lock_id_free $lockerid] 0
+error_check_good envclose [$myenv close] 0
+exit
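
Note: the primitive this script revolves around is the environment's
lock_get/put pair. A minimal sketch outside the ring machinery (the
object id 2 is arbitrary):

    source ./include.tcl
    set myenv [berkdb_env -lock -home $testdir -create -mode 0644]
    set lockerid [$myenv lock_id]
    set lock [$myenv lock_get read $lockerid 2]
    error_check_good lockget [is_substr $lock $myenv] 1
    error_check_good lockput [$lock put] 0
    error_check_good lock_id_free [$myenv lock_id_free $lockerid] 0
    error_check_good envclose [$myenv close] 0
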
diff --git a/storage/bdb/test/ddscript.tcl b/storage/bdb/test/ddscript.tcl
new file mode 100644
index 00000000000..621906233a9
--- /dev/null
+++ b/storage/bdb/test/ddscript.tcl
@@ -0,0 +1,44 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: ddscript.tcl,v 11.12 2002/02/20 16:35:18 sandstro Exp $
+#
+# Deadlock detector script tester.
+# Usage: ddscript dir test lockerid objid numprocs
+# dir: DBHOME directory
+# test: Which test to run
+# lockerid: Lock id for this locker
+# objid: Object id to lock.
+# numprocs: Total number of processes running
+
+source ./include.tcl
+source $test_path/test.tcl
+source $test_path/testutils.tcl
+
+set usage "ddscript dir test lockerid objid numprocs"
+
+# Verify usage
+if { $argc != 5 } {
+ puts stderr "FAIL:[timestamp] Usage: $usage"
+ exit
+}
+
+# Initialize arguments
+set dir [lindex $argv 0]
+set tnum [ lindex $argv 1 ]
+set lockerid [ lindex $argv 2 ]
+set objid [ lindex $argv 3 ]
+set numprocs [ lindex $argv 4 ]
+
+set myenv [berkdb_env -lock -home $dir -create -mode 0644 ]
+error_check_bad lock_open $myenv NULL
+error_check_good lock_open [is_substr $myenv "env"] 1
+
+puts [eval $tnum $myenv $lockerid $objid $numprocs]
+
+error_check_good lock_id_free [$myenv lock_id_free $lockerid] 0
+error_check_good envclose [$myenv close] 0
+
+exit
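
Note: the test argument ("ring" or "clump") names a helper proc that the
script evals with the env, locker id, object id, and process count. Run
stand-alone, one ring participant might be launched like this (dead001
below does the same through wrap.tcl; locker id 1001 is made up):

    exec $tclsh_path ddscript.tcl $testdir ring 1001 0 5
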
diff --git a/storage/bdb/test/dead001.tcl b/storage/bdb/test/dead001.tcl
new file mode 100644
index 00000000000..e9853a87e53
--- /dev/null
+++ b/storage/bdb/test/dead001.tcl
@@ -0,0 +1,88 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: dead001.tcl,v 11.33 2002/09/05 17:23:05 sandstro Exp $
+#
+# TEST dead001
+# TEST Use two different configurations to test deadlock detection among a
+# TEST variable number of processes. One configuration has the processes
+# TEST deadlocked in a ring. The other has the processes all deadlocked on
+# TEST a single resource.
+proc dead001 { { procs "2 4 10" } {tests "ring clump" } \
+ {timeout 0} {tnum "001"} } {
+ source ./include.tcl
+ global lock_curid
+ global lock_maxid
+
+ puts "Dead$tnum: Deadlock detector tests"
+
+ env_cleanup $testdir
+
+ # Create the environment.
+ puts "\tDead$tnum.a: creating environment"
+ set env [berkdb_env -create \
+ -mode 0644 -lock -txn_timeout $timeout -home $testdir]
+ error_check_good lock_env:open [is_valid_env $env] TRUE
+
+ foreach t $tests {
+ foreach n $procs {
+ if {$timeout == 0 } {
+ set dpid [exec $util_path/db_deadlock -vw \
+ -h $testdir >& $testdir/dd.out &]
+ } else {
+ set dpid [exec $util_path/db_deadlock -vw \
+ -ae -h $testdir >& $testdir/dd.out &]
+ }
+
+ sentinel_init
+ set pidlist ""
+ set ret [$env lock_id_set $lock_curid $lock_maxid]
+ error_check_good lock_id_set $ret 0
+
+ # Fire off the tests
+ puts "\tDead$tnum: $n procs of test $t"
+ for { set i 0 } { $i < $n } { incr i } {
+ set locker [$env lock_id]
+ puts "$tclsh_path $test_path/wrap.tcl \
+ $testdir/dead$tnum.log.$i \
+ ddscript.tcl $testdir $t $locker $i $n"
+ set p [exec $tclsh_path \
+ $test_path/wrap.tcl \
+ ddscript.tcl $testdir/dead$tnum.log.$i \
+ $testdir $t $locker $i $n &]
+ lappend pidlist $p
+ }
+ watch_procs $pidlist 5
+
+ # Now check output
+ set dead 0
+ set clean 0
+ set other 0
+ for { set i 0 } { $i < $n } { incr i } {
+ set did [open $testdir/dead$tnum.log.$i]
+ while { [gets $did val] != -1 } {
+ switch $val {
+ DEADLOCK { incr dead }
+ 1 { incr clean }
+ default { incr other }
+ }
+ }
+ close $did
+ }
+ tclkill $dpid
+ puts "dead check..."
+ dead_check $t $n $timeout $dead $clean $other
+ }
+ }
+
+ # Windows needs files closed before deleting files, so pause a little
+ tclsleep 3
+ fileremove -f $testdir/dd.out
+ # Remove log files
+ for { set i 0 } { $i < $n } { incr i } {
+ fileremove -f $testdir/dead$tnum.log.$i
+ }
+ error_check_good lock_env:close [$env close] 0
+}
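
Note: a hypothetical direct run with reduced parameters (the defaults are
in the proc header above; timeout 0 selects the external db_deadlock
detector path):

    source ./include.tcl
    source $test_path/test.tcl
    dead001 "2 4" "ring" 0 "001"
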
diff --git a/storage/bdb/test/dead002.tcl b/storage/bdb/test/dead002.tcl
new file mode 100644
index 00000000000..bc19e7127e5
--- /dev/null
+++ b/storage/bdb/test/dead002.tcl
@@ -0,0 +1,75 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: dead002.tcl,v 11.23 2002/09/05 17:23:05 sandstro Exp $
+#
+# TEST dead002
+# TEST Same test as dead001, but use "detect on every collision" instead
+# TEST of separate deadlock detector.
+proc dead002 { { procs "2 4 10" } {tests "ring clump" } \
+ {timeout 0} {tnum 002} } {
+ source ./include.tcl
+
+ puts "Dead$tnum: Deadlock detector tests"
+
+ env_cleanup $testdir
+
+ # Create the environment.
+ puts "\tDead$tnum.a: creating environment"
+ set lmode "default"
+ if { $timeout != 0 } {
+ set lmode "expire"
+ }
+ set env [berkdb_env \
+ -create -mode 0644 -home $testdir \
+ -lock -txn_timeout $timeout -lock_detect $lmode]
+ error_check_good lock_env:open [is_valid_env $env] TRUE
+
+ foreach t $tests {
+ foreach n $procs {
+ set pidlist ""
+ sentinel_init
+
+ # Fire off the tests
+ puts "\tDead$tnum: $n procs of test $t"
+ for { set i 0 } { $i < $n } { incr i } {
+ set locker [$env lock_id]
+ puts "$tclsh_path $test_path/wrap.tcl \
+ $testdir/dead$tnum.log.$i \
+ ddscript.tcl $testdir $t $locker $i $n"
+ set p [exec $tclsh_path \
+ $test_path/wrap.tcl \
+ ddscript.tcl $testdir/dead$tnum.log.$i \
+ $testdir $t $locker $i $n &]
+ lappend pidlist $p
+ }
+ watch_procs $pidlist 5
+
+ # Now check output
+ set dead 0
+ set clean 0
+ set other 0
+ for { set i 0 } { $i < $n } { incr i } {
+ set did [open $testdir/dead$tnum.log.$i]
+ while { [gets $did val] != -1 } {
+ switch $val {
+ DEADLOCK { incr dead }
+ 1 { incr clean }
+ default { incr other }
+ }
+ }
+ close $did
+ }
+ dead_check $t $n $timeout $dead $clean $other
+ }
+ }
+
+ fileremove -f $testdir/dd.out
+ # Remove log files
+ for { set i 0 } { $i < $n } { incr i } {
+ fileremove -f $testdir/dead$tnum.log.$i
+ }
+ error_check_good lock_env:close [$env close] 0
+}
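
Note: the structural difference from dead001 is that no external detector
process runs; the environment is opened with -lock_detect, so detection
happens on each conflicting lock request. The relevant open, isolated
(flags as used above):

    set env [berkdb_env -create -mode 0644 -home $testdir \
        -lock -lock_detect default]
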
diff --git a/storage/bdb/test/dead003.tcl b/storage/bdb/test/dead003.tcl
new file mode 100644
index 00000000000..48088e1427c
--- /dev/null
+++ b/storage/bdb/test/dead003.tcl
@@ -0,0 +1,98 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: dead003.tcl,v 1.17 2002/09/05 17:23:05 sandstro Exp $
+#
+# TEST dead003
+# TEST
+# TEST Same test as dead002, but explicitly specify DB_LOCK_OLDEST and
+# TEST DB_LOCK_YOUNGEST. Verify the correct lock was aborted/granted.
+proc dead003 { { procs "2 4 10" } {tests "ring clump" } } {
+ source ./include.tcl
+ global lock_curid
+ global lock_maxid
+
+ set detects { oldest youngest }
+ puts "Dead003: Deadlock detector tests: $detects"
+
+ # Create the environment.
+ foreach d $detects {
+ env_cleanup $testdir
+ puts "\tDead003.a: creating environment for $d"
+ set env [berkdb_env \
+ -create -mode 0644 -home $testdir -lock -lock_detect $d]
+ error_check_good lock_env:open [is_valid_env $env] TRUE
+
+ foreach t $tests {
+ foreach n $procs {
+ set pidlist ""
+ sentinel_init
+ set ret [$env lock_id_set \
+ $lock_curid $lock_maxid]
+ error_check_good lock_id_set $ret 0
+
+ # Fire off the tests
+ puts "\tDead003: $n procs of test $t"
+ for { set i 0 } { $i < $n } { incr i } {
+ set locker [$env lock_id]
+ puts "$tclsh_path\
+ test_path/ddscript.tcl $testdir \
+ $t $locker $i $n >& \
+ $testdir/dead003.log.$i"
+ set p [exec $tclsh_path \
+ $test_path/wrap.tcl \
+ ddscript.tcl \
+ $testdir/dead003.log.$i $testdir \
+ $t $locker $i $n &]
+ lappend pidlist $p
+ }
+ watch_procs $pidlist 5
+
+ # Now check output
+ set dead 0
+ set clean 0
+ set other 0
+ for { set i 0 } { $i < $n } { incr i } {
+ set did [open $testdir/dead003.log.$i]
+ while { [gets $did val] != -1 } {
+ switch $val {
+ DEADLOCK { incr dead }
+ 1 { incr clean }
+ default { incr other }
+ }
+ }
+ close $did
+ }
+ dead_check $t $n 0 $dead $clean $other
+ #
+ # If we get here we know we have the
+ # correct number of dead/clean procs, as
+ # checked by dead_check above. Now verify
+ # that the right process was the one.
+ puts "\tDead003: Verify $d locks were aborted"
+ set l ""
+ if { $d == "oldest" } {
+ set l [expr $n - 1]
+ }
+ if { $d == "youngest" } {
+ set l 0
+ }
+ set did [open $testdir/dead003.log.$l]
+ while { [gets $did val] != -1 } {
+ error_check_good check_abort \
+ $val 1
+ }
+ close $did
+ }
+ }
+
+ fileremove -f $testdir/dd.out
+ # Remove log files
+ for { set i 0 } { $i < $n } { incr i } {
+ fileremove -f $testdir/dead003.log.$i
+ }
+ error_check_good lock_env:close [$env close] 0
+ }
+}
diff --git a/storage/bdb/test/dead004.tcl b/storage/bdb/test/dead004.tcl
new file mode 100644
index 00000000000..f5306a0d892
--- /dev/null
+++ b/storage/bdb/test/dead004.tcl
@@ -0,0 +1,108 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: dead004.tcl,v 11.11 2002/09/05 17:23:05 sandstro Exp $
+#
+# Deadlock Test 4.
+# This test is designed to make sure that we handle youngest and oldest
+# deadlock detection even when the youngest and oldest transactions in the
+# system are not involved in the deadlock (that is, we want to abort the
+# youngest/oldest which is actually involved in the deadlock, not simply
+# the youngest/oldest in the system).
+# Since this is used for transaction systems, the locker ID is what we
+# use to identify age (smaller number is older).
+#
+# The setup is that we have a total of 6 processes. The oldest (locker 0)
+# and the youngest (locker 5) simply acquire a lock, hold it for a long time
+# and then release it. The rest form a ring, obtaining lock N and requesting
+# a lock on (N+1) mod 4. The deadlock detector ought to pick locker 1 or 4
+# to abort and not 0 or 5.
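+#
+# Roughly, the intended wait-for graph (a sketch; the details live in
+# ddoyscript.tcl):
+#
+#	locker 0:  holds a lock, sleeps, releases   (oldest, not in cycle)
+#	locker 1 -> 2 -> 3 -> 4 -> 1                (the deadlock ring)
+#	locker 5:  holds a lock, sleeps, releases   (youngest, not in cycle)
+#
+# Aborting purely by age would wrongly pick 0 or 5; a correct detector
+# limits its choice to the ring, i.e. locker 1 (oldest in the ring) or
+# locker 4 (youngest in the ring).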
+
+proc dead004 { } {
+ source ./include.tcl
+ global lock_curid
+ global lock_maxid
+
+ foreach a { o y } {
+ puts "Dead004: Deadlock detector test -a $a"
+ env_cleanup $testdir
+
+ # Create the environment.
+ puts "\tDead004.a: creating environment"
+ set env [berkdb_env -create -mode 0644 -lock -home $testdir]
+ error_check_good lock_env:open [is_valid_env $env] TRUE
+
+ set dpid [exec $util_path/db_deadlock -v -t 5 -a $a \
+ -h $testdir >& $testdir/dd.out &]
+
+ set procs 6
+
+ foreach n $procs {
+
+ sentinel_init
+ set pidlist ""
+ set ret [$env lock_id_set $lock_curid $lock_maxid]
+ error_check_good lock_id_set $ret 0
+
+ # Fire off the tests
+ puts "\tDead004: $n procs"
+ for { set i 0 } { $i < $n } { incr i } {
+ set locker [$env lock_id]
+ puts "$tclsh_path $test_path/wrap.tcl \
+ $testdir/dead004.log.$i \
+ ddoyscript.tcl $testdir $locker $n $a $i"
+ set p [exec $tclsh_path \
+ $test_path/wrap.tcl \
+ ddoyscript.tcl $testdir/dead004.log.$i \
+ $testdir $locker $n $a $i &]
+ lappend pidlist $p
+ }
+ watch_procs $pidlist 5
+
+ }
+ # Now check output
+ set dead 0
+ set clean 0
+ set other 0
+ for { set i 0 } { $i < $n } { incr i } {
+ set did [open $testdir/dead004.log.$i]
+ while { [gets $did val] != -1 } {
+ switch $val {
+ DEADLOCK { incr dead }
+ 1 { incr clean }
+ default { incr other }
+ }
+ }
+ close $did
+ }
+ tclkill $dpid
+
+ puts "dead check..."
+ dead_check oldyoung $n 0 $dead $clean $other
+
+		# Now verify that neither the oldest nor the
+		# youngest was the deadlock victim.
+		set did [open $testdir/dead004.log.0]
+		error_check_bad file:old [gets $did val] -1
+		error_check_good read:old $val 1
+		close $did
+
+		set did [open $testdir/dead004.log.[expr $procs - 1]]
+		error_check_bad file:young [gets $did val] -1
+		error_check_good read:young $val 1
+ close $did
+
+ # Windows needs files closed before deleting files,
+ # so pause a little
+ tclsleep 2
+ fileremove -f $testdir/dd.out
+
+ # Remove log files
+ for { set i 0 } { $i < $n } { incr i } {
+ fileremove -f $testdir/dead004.log.$i
+ }
+ error_check_good lock_env:close [$env close] 0
+ }
+}
diff --git a/storage/bdb/test/dead005.tcl b/storage/bdb/test/dead005.tcl
new file mode 100644
index 00000000000..71be8b1713f
--- /dev/null
+++ b/storage/bdb/test/dead005.tcl
@@ -0,0 +1,87 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: dead005.tcl,v 11.10 2002/09/05 17:23:05 sandstro Exp $
+#
+# Deadlock Test 5.
+# Test out the minlocks, maxlocks, and minwrites options
+# to the deadlock detector.
+proc dead005 { { procs "4 6 10" } {tests "maxlocks minwrites minlocks" } } {
+ source ./include.tcl
+
+ puts "Dead005: minlocks, maxlocks, and minwrites deadlock detection tests"
+ foreach t $tests {
+ puts "Dead005.$t: creating environment"
+ env_cleanup $testdir
+
+ # Create the environment.
+ set env [berkdb_env -create -mode 0644 -lock -home $testdir]
+ error_check_good lock_env:open [is_valid_env $env] TRUE
+ case $t {
+ minlocks { set to n }
+ maxlocks { set to m }
+ minwrites { set to w }
+ }
+ foreach n $procs {
+ set dpid [exec $util_path/db_deadlock -vw -h $testdir \
+ -a $to >& $testdir/dd.out &]
+ sentinel_init
+ set pidlist ""
+
+ # Fire off the tests
+ puts "\tDead005: $t test with $n procs"
+ for { set i 0 } { $i < $n } { incr i } {
+ set locker [$env lock_id]
+ puts "$tclsh_path $test_path/wrap.tcl \
+ $testdir/dead005.log.$i \
+ ddscript.tcl $testdir $t $locker $i $n"
+ set p [exec $tclsh_path \
+ $test_path/wrap.tcl \
+ ddscript.tcl $testdir/dead005.log.$i \
+ $testdir $t $locker $i $n &]
+ lappend pidlist $p
+ }
+ watch_procs $pidlist 5
+
+ # Now check output
+ set dead 0
+ set clean 0
+ set other 0
+ for { set i 0 } { $i < $n } { incr i } {
+ set did [open $testdir/dead005.log.$i]
+ while { [gets $did val] != -1 } {
+ switch $val {
+ DEADLOCK { incr dead }
+ 1 { incr clean }
+ default { incr other }
+ }
+ }
+ close $did
+ }
+ tclkill $dpid
+ puts "dead check..."
+ dead_check $t $n 0 $dead $clean $other
+ # Now verify that the correct participant
+ # got deadlocked.
+ switch $t {
+ minlocks {set f 0}
+ minwrites {set f 1}
+ maxlocks {set f [expr $n - 1]}
+ }
+ set did [open $testdir/dead005.log.$f]
+ error_check_bad file:$t [gets $did val] -1
+ error_check_good read($f):$t $val DEADLOCK
+ close $did
+ }
+ error_check_good lock_env:close [$env close] 0
+ # Windows needs files closed before deleting them, so pause
+ tclsleep 2
+ fileremove -f $testdir/dd.out
+ # Remove log files
+ for { set i 0 } { $i < $n } { incr i } {
+			fileremove -f $testdir/dead005.log.$i
+ }
+ }
+}
diff --git a/storage/bdb/test/dead006.tcl b/storage/bdb/test/dead006.tcl
new file mode 100644
index 00000000000..b70e011fb74
--- /dev/null
+++ b/storage/bdb/test/dead006.tcl
@@ -0,0 +1,16 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: dead006.tcl,v 1.4 2002/01/11 15:53:21 bostic Exp $
+#
+# TEST dead006
+# TEST use timeouts rather than the normal dd algorithm.
+proc dead006 { { procs "2 4 10" } {tests "ring clump" } \
+ {timeout 1000} {tnum 006} } {
+ source ./include.tcl
+
+ dead001 $procs $tests $timeout $tnum
+ dead002 $procs $tests $timeout $tnum
+}
diff --git a/storage/bdb/test/dead007.tcl b/storage/bdb/test/dead007.tcl
new file mode 100644
index 00000000000..2b6a78cb4b9
--- /dev/null
+++ b/storage/bdb/test/dead007.tcl
@@ -0,0 +1,34 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: dead007.tcl,v 1.3 2002/01/11 15:53:22 bostic Exp $
+#
+# TEST dead007
+# TEST use timeouts rather than the normal dd algorithm.
+proc dead007 { } {
+ source ./include.tcl
+ global lock_curid
+ global lock_maxid
+
+ set save_curid $lock_curid
+ set save_maxid $lock_maxid
+ puts "Dead007.a -- wrap around"
+ set lock_curid [expr $lock_maxid - 2]
+ dead001 "2 10"
+ ## Oldest/youngest breaks when the id wraps
+ # dead003 "4 10"
+ dead004
+
+ puts "Dead007.b -- extend space"
+ set lock_maxid [expr $lock_maxid - 3]
+ set lock_curid [expr $lock_maxid - 1]
+ dead001 "4 10"
+ ## Oldest/youngest breaks when the id wraps
+ # dead003 "10"
+ dead004
+
+ set lock_curid $save_curid
+ set lock_maxid $save_maxid
+}
diff --git a/storage/bdb/test/env001.tcl b/storage/bdb/test/env001.tcl
new file mode 100644
index 00000000000..781029f6a5c
--- /dev/null
+++ b/storage/bdb/test/env001.tcl
@@ -0,0 +1,154 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: env001.tcl,v 11.26 2002/05/08 19:01:43 margo Exp $
+#
+# TEST env001
+# TEST Test of env remove interface (formerly env_remove).
+proc env001 { } {
+ global errorInfo
+ global errorCode
+
+ source ./include.tcl
+
+ set testfile $testdir/env.db
+ set t1 $testdir/t1
+
+ puts "Env001: Test of environment remove interface."
+ env_cleanup $testdir
+
+	# Trying to open without the create flag should fail.
+ puts "\tEnv001.a: Open without create (should fail)."
+ catch {set env [berkdb_env_noerr -home $testdir]} ret
+ error_check_good env:fail [is_substr $ret "no such file"] 1
+
+ # Now try opening with create
+ puts "\tEnv001.b: Open with create."
+ set env [berkdb_env -create -mode 0644 -home $testdir]
+ error_check_bad env:$testdir $env NULL
+ error_check_good env:$testdir [is_substr $env "env"] 1
+
+ # Make sure that close works.
+ puts "\tEnv001.c: Verify close."
+ error_check_good env:close:$env [$env close] 0
+
+ # Make sure we can reopen -- this doesn't work on Windows
+ # because if there is only one opener, the region disappears
+ # when it is closed. We can't do a second opener, because
+ # that will fail on HP-UX.
+ puts "\tEnv001.d: Remove on closed environments."
+ if { $is_windows_test != 1 } {
+ puts "\t\tEnv001.d.1: Verify re-open."
+ set env [berkdb_env -home $testdir]
+ error_check_bad env:$testdir $env NULL
+ error_check_good env:$testdir [is_substr $env "env"] 1
+
+ # remove environment
+ puts "\t\tEnv001.d.2: Close environment."
+ error_check_good env:close [$env close] 0
+ puts "\t\tEnv001.d.3: Try remove with force (should succeed)."
+ error_check_good \
+ envremove [berkdb envremove -force -home $testdir] 0
+ }
+
+ if { $is_windows_test != 1 && $is_hp_test != 1 } {
+ puts "\tEnv001.e: Remove on open environments."
+ puts "\t\tEnv001.e.1: Env is open by single proc,\
+ remove no force."
+ set env [berkdb_env -create -mode 0644 -home $testdir]
+ error_check_bad env:$testdir $env NULL
+ error_check_good env:$testdir [is_substr $env "env"] 1
+ set stat [catch {berkdb envremove -home $testdir} ret]
+ error_check_good env:remove $stat 1
+ error_check_good env:close [$env close] 0
+ }
+
+ puts \
+ "\t\tEnv001.e.2: Env is open by single proc, remove with force."
+ # Now that envremove doesn't do a close, this won't work on Windows.
+ if { $is_windows_test != 1 && $is_hp_test != 1} {
+ set env [berkdb_env_noerr -create -mode 0644 -home $testdir]
+ error_check_bad env:$testdir $env NULL
+ error_check_good env:$testdir [is_substr $env "env"] 1
+ set stat [catch {berkdb envremove -force -home $testdir} ret]
+ error_check_good env:remove(force) $ret 0
+ #
+ # Even though the underlying env is gone, we need to close
+ # the handle.
+ #
+ set stat [catch {$env close} ret]
+ error_check_bad env:close_after_remove $stat 0
+ error_check_good env:close_after_remove \
+ [is_substr $ret "recovery"] 1
+ }
+
+ puts "\t\tEnv001.e.3: Env is open by 2 procs, remove no force."
+ # should fail
+ set env [berkdb_env -create -mode 0644 -home $testdir]
+ error_check_bad env:$testdir $env NULL
+ error_check_good env:$testdir [is_substr $env "env"] 1
+
+ set f1 [open |$tclsh_path r+]
+ puts $f1 "source $test_path/test.tcl"
+
+ set remote_env [send_cmd $f1 "berkdb_env_noerr -home $testdir"]
+ error_check_good remote:env_open [is_valid_env $remote_env] TRUE
+ # First close our env, but leave remote open
+ error_check_good env:close [$env close] 0
+ catch {berkdb envremove -home $testdir} ret
+ error_check_good envremove:2procs:noforce [is_substr $errorCode EBUSY] 1
+ #
+	# Even though the remove failed, our local handle was already
+	# closed above, so just close the handle in the remote process.
+ set remote_close [send_cmd $f1 "$remote_env close"]
+ error_check_good remote_close $remote_close 0
+
+ # exit remote process
+ set err [catch { close $f1 } result]
+ error_check_good close_remote_process $err 0
+
+ puts "\t\tEnv001.e.4: Env is open by 2 procs, remove with force."
+	# You cannot do this on Windows because you can't remove files that
+	# are open, so we skip this test for Windows.  On UNIX, it should
+	# succeed.
+ if { $is_windows_test != 1 && $is_hp_test != 1 } {
+ set env [berkdb_env_noerr -create -mode 0644 -home $testdir]
+ error_check_bad env:$testdir $env NULL
+ error_check_good env:$testdir [is_substr $env "env"] 1
+ set f1 [open |$tclsh_path r+]
+ puts $f1 "source $test_path/test.tcl"
+
+ set remote_env [send_cmd $f1 "berkdb_env -home $testdir"]
+ error_check_good remote:env_open [is_valid_env $remote_env] TRUE
+
+ catch {berkdb envremove -force -home $testdir} ret
+ error_check_good envremove:2procs:force $ret 0
+ #
+ # We still need to close our handle.
+ #
+ set stat [catch {$env close} ret]
+ error_check_bad env:close_after_error $stat 0
+ error_check_good env:close_after_error \
+ [is_substr $ret recovery] 1
+
+ # Close down remote process
+ set err [catch { close $f1 } result]
+ error_check_good close_remote_process $err 0
+ }
+
+ # Try opening in a different dir
+ puts "\tEnv001.f: Try opening env in another directory."
+ if { [file exists $testdir/NEWDIR] != 1 } {
+ file mkdir $testdir/NEWDIR
+ }
+ set eflags "-create -home $testdir/NEWDIR -mode 0644"
+ set env [eval {berkdb_env} $eflags]
+ error_check_bad env:open $env NULL
+ error_check_good env:close [$env close] 0
+ error_check_good berkdb:envremove \
+ [berkdb envremove -home $testdir/NEWDIR] 0
+
+ puts "\tEnv001 complete."
+}
diff --git a/storage/bdb/test/env002.tcl b/storage/bdb/test/env002.tcl
new file mode 100644
index 00000000000..89c44f63a12
--- /dev/null
+++ b/storage/bdb/test/env002.tcl
@@ -0,0 +1,156 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: env002.tcl,v 11.15 2002/02/20 16:35:20 sandstro Exp $
+#
+# TEST env002
+# TEST Test of DB_LOG_DIR and env name resolution.
+# TEST With an environment path specified using -home, and then again
+# TEST with it specified by the environment variable DB_HOME:
+# TEST 1) Make sure that the set_lg_dir option is respected
+# TEST a) as a relative pathname.
+# TEST b) as an absolute pathname.
+# TEST 2) Make sure that the DB_LOG_DIR db_config argument is respected,
+# TEST again as relative and absolute pathnames.
+# TEST 3) Make sure that if -both- db_config and a file are present,
+# TEST only the file is respected (see doc/env/naming.html).
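+#
+# For illustration (not part of the test): a DB_CONFIG file in the
+# environment home containing the line
+#
+#	set_lg_dir logs_in_here
+#
+# sends log files such as log.0000000001 into $home/logs_in_here, which
+# is exactly what env002_run_test checks for below.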
+proc env002 { } {
+ # env002 is essentially just a small driver that runs
+	# env002_body--formerly the entire test--twice: the first time
+	# it supplies a "home" argument to use with environment opens,
+	# and the second time it sets DB_HOME instead.
+ # Note that env002_body itself calls env002_run_test to run
+ # the body of the actual test and check for the presence
+ # of logs. The nesting, I hope, makes this test's structure simpler.
+
+ global env
+ source ./include.tcl
+
+ puts "Env002: set_lg_dir test."
+
+ puts "\tEnv002: Running with -home argument to berkdb_env."
+ env002_body "-home $testdir"
+
+ puts "\tEnv002: Running with environment variable DB_HOME set."
+ set env(DB_HOME) $testdir
+ env002_body "-use_environ"
+
+ unset env(DB_HOME)
+
+ puts "\tEnv002: Running with both DB_HOME and -home set."
+ # Should respect -only- -home, so we give it a bogus
+ # environment variable setting.
+ set env(DB_HOME) $testdir/bogus_home
+ env002_body "-use_environ -home $testdir"
+ unset env(DB_HOME)
+
+}
+
+proc env002_body { home_arg } {
+ source ./include.tcl
+
+ env_cleanup $testdir
+ set logdir "logs_in_here"
+
+ file mkdir $testdir/$logdir
+
+ # Set up full path to $logdir for when we test absolute paths.
+ set curdir [pwd]
+ cd $testdir/$logdir
+ set fulllogdir [pwd]
+ cd $curdir
+
+ env002_make_config $logdir
+
+ # Run the meat of the test.
+ env002_run_test a 1 "relative path, config file" $home_arg \
+ $testdir/$logdir
+
+ env_cleanup $testdir
+
+ file mkdir $fulllogdir
+ env002_make_config $fulllogdir
+
+ # Run the test again
+ env002_run_test a 2 "absolute path, config file" $home_arg \
+ $fulllogdir
+
+ env_cleanup $testdir
+
+ # Now we try without a config file, but instead with db_config
+ # relative paths
+ file mkdir $testdir/$logdir
+ env002_run_test b 1 "relative path, db_config" "$home_arg \
+ -log_dir $logdir -data_dir ." \
+ $testdir/$logdir
+
+ env_cleanup $testdir
+
+ # absolute
+ file mkdir $fulllogdir
+ env002_run_test b 2 "absolute path, db_config" "$home_arg \
+ -log_dir $fulllogdir -data_dir ." \
+ $fulllogdir
+
+ env_cleanup $testdir
+
+	# Now, set db_config -and- have a DB_CONFIG file, and make
+ # sure only the latter is honored.
+
+ file mkdir $testdir/$logdir
+ env002_make_config $logdir
+
+ # note that we supply a -nonexistent- log dir to db_config
+ env002_run_test c 1 "relative path, both db_config and file" \
+ "$home_arg -log_dir $testdir/bogus \
+ -data_dir ." $testdir/$logdir
+ env_cleanup $testdir
+
+ file mkdir $fulllogdir
+ env002_make_config $fulllogdir
+
+ # note that we supply a -nonexistent- log dir to db_config
+	env002_run_test c 2 "absolute path, both db_config and file" \
+ "$home_arg -log_dir $fulllogdir/bogus \
+ -data_dir ." $fulllogdir
+}
+
+proc env002_run_test { major minor msg env_args log_path} {
+ global testdir
+ set testfile "env002.db"
+
+ puts "\t\tEnv002.$major.$minor: $msg"
+
+ # Create an environment, with logging, and scribble some
+ # stuff in a [btree] database in it.
+ # puts [concat {berkdb_env -create -log -private} $env_args]
+ set dbenv [eval {berkdb_env -create -log -private} $env_args]
+ error_check_good env_open [is_valid_env $dbenv] TRUE
+ set db [berkdb_open -env $dbenv -create -btree -mode 0644 $testfile]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ set key "some_key"
+ set data "some_data"
+
+ error_check_good db_put \
+ [$db put $key [chop_data btree $data]] 0
+
+ error_check_good db_close [$db close] 0
+ error_check_good env_close [$dbenv close] 0
+
+ # Now make sure the log file is where we want it to be.
+ error_check_good db_exists [file exists $testdir/$testfile] 1
+ error_check_good log_exists \
+ [file exists $log_path/log.0000000001] 1
+}
+
+proc env002_make_config { logdir } {
+ global testdir
+
+ set cid [open $testdir/DB_CONFIG w]
+ puts $cid "set_data_dir ."
+ puts $cid "set_lg_dir $logdir"
+ close $cid
+}
diff --git a/storage/bdb/test/env003.tcl b/storage/bdb/test/env003.tcl
new file mode 100644
index 00000000000..c16b54dd5e0
--- /dev/null
+++ b/storage/bdb/test/env003.tcl
@@ -0,0 +1,149 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: env003.tcl,v 11.21 2002/08/08 15:38:06 bostic Exp $
+#
+# TEST env003
+# TEST Test DB_TMP_DIR and env name resolution
+# TEST With an environment path specified using -home, and then again
+# TEST with it specified by the environment variable DB_HOME:
+# TEST 1) Make sure that the DB_TMP_DIR config file option is respected
+# TEST a) as a relative pathname.
+# TEST b) as an absolute pathname.
+# TEST 2) Make sure that the -tmp_dir config option is respected,
+# TEST again as relative and absolute pathnames.
+# TEST 3) Make sure that if -both- -tmp_dir and a file are present,
+# TEST only the file is respected (see doc/env/naming.html).
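+#
+# For illustration: with a DB_CONFIG line such as
+#
+#	set_tmp_dir tmpfiles_in_here
+#
+# the temp file that the cache spills to on overflow (named BDB*) shows
+# up under $home/tmpfiles_in_here, which is what env003_run_test globs
+# for below.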
+proc env003 { } {
+ # env003 is essentially just a small driver that runs
+ # env003_body twice. First, it supplies a "home" argument
+ # to use with environment opens, and the second time it sets
+ # DB_HOME instead.
+ # Note that env003_body itself calls env003_run_test to run
+ # the body of the actual test.
+
+ global env
+ source ./include.tcl
+
+ puts "Env003: DB_TMP_DIR test."
+
+ puts "\tEnv003: Running with -home argument to berkdb_env."
+ env003_body "-home $testdir"
+
+ puts "\tEnv003: Running with environment variable DB_HOME set."
+ set env(DB_HOME) $testdir
+ env003_body "-use_environ"
+
+ unset env(DB_HOME)
+
+ puts "\tEnv003: Running with both DB_HOME and -home set."
+ # Should respect -only- -home, so we give it a bogus
+ # environment variable setting.
+ set env(DB_HOME) $testdir/bogus_home
+ env003_body "-use_environ -home $testdir"
+ unset env(DB_HOME)
+}
+
+proc env003_body { home_arg } {
+ source ./include.tcl
+
+ env_cleanup $testdir
+ set tmpdir "tmpfiles_in_here"
+ file mkdir $testdir/$tmpdir
+
+ # Set up full path to $tmpdir for when we test absolute paths.
+ set curdir [pwd]
+ cd $testdir/$tmpdir
+ set fulltmpdir [pwd]
+ cd $curdir
+
+ # Create DB_CONFIG
+ env003_make_config $tmpdir
+
+ # Run the meat of the test.
+ env003_run_test a 1 "relative path, config file" $home_arg \
+ $testdir/$tmpdir
+
+ env003_make_config $fulltmpdir
+
+ # Run the test again
+ env003_run_test a 2 "absolute path, config file" $home_arg \
+ $fulltmpdir
+
+ # Now we try without a config file, but instead with db_config
+ # relative paths
+ env003_run_test b 1 "relative path, db_config" "$home_arg \
+ -tmp_dir $tmpdir -data_dir ." \
+ $testdir/$tmpdir
+
+ # absolute paths
+ env003_run_test b 2 "absolute path, db_config" "$home_arg \
+ -tmp_dir $fulltmpdir -data_dir ." \
+ $fulltmpdir
+
+	# Now, set db_config -and- have a DB_CONFIG file, and make
+ # sure only the latter is honored.
+
+ file mkdir $testdir/bogus
+ env003_make_config $tmpdir
+
+ env003_run_test c 1 "relative path, both db_config and file" \
+ "$home_arg -tmp_dir $testdir/bogus -data_dir ." \
+ $testdir/$tmpdir
+
+ file mkdir $fulltmpdir/bogus
+ env003_make_config $fulltmpdir
+
+ env003_run_test c 2 "absolute path, both db_config and file" \
+ "$home_arg -tmp_dir $fulltmpdir/bogus -data_dir ." \
+ $fulltmpdir
+}
+
+proc env003_run_test { major minor msg env_args tmp_path} {
+ global testdir
+ global alphabet
+ global errorCode
+
+ puts "\t\tEnv003.$major.$minor: $msg"
+
+	# Create an environment with a small cache, and an in-memory
+	# database to use.
+ set dbenv [eval {berkdb_env -create -home $testdir} $env_args \
+ {-cachesize {0 50000 1}}]
+ error_check_good env_open [is_valid_env $dbenv] TRUE
+
+ set db [berkdb_open -env $dbenv -create -btree]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+	# Fill the database with more data than its cache can hold.
+ #
+ # When CONFIG_TEST is defined, the tempfile is left linked so
+ # we can check for its existence. Size the data to overfill
+ # the cache--the temp file is created lazily, so it is created
+ # when the cache overflows.
+ #
+ set key "key"
+ set data [repeat $alphabet 2000]
+ error_check_good db_put [$db put $key $data] 0
+
+ # Check for exactly one temp file.
+ set ret [glob -nocomplain $tmp_path/BDB*]
+ error_check_good temp_file_exists [llength $ret] 1
+
+ # Can't remove temp file until db is closed on Windows.
+ error_check_good db_close [$db close] 0
+ fileremove -f $ret
+ error_check_good env_close [$dbenv close] 0
+
+}
+
+proc env003_make_config { tmpdir } {
+ global testdir
+
+ set cid [open $testdir/DB_CONFIG w]
+ puts $cid "set_data_dir ."
+ puts $cid "set_tmp_dir $tmpdir"
+ close $cid
+}
diff --git a/storage/bdb/test/env004.tcl b/storage/bdb/test/env004.tcl
new file mode 100644
index 00000000000..e93a0d95308
--- /dev/null
+++ b/storage/bdb/test/env004.tcl
@@ -0,0 +1,103 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: env004.tcl,v 11.18 2002/02/20 17:08:21 sandstro Exp $
+#
+# TEST env004
+# TEST Test multiple data directories. Do a bunch of different opens
+# TEST to make sure that the files are detected in different directories.
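+#
+# For illustration: with DB_CONFIG lines such as
+#
+#	set_data_dir .
+#	set_data_dir data1
+#
+# a database created as "data1/datafile1.db" lives under $home/data1;
+# ddir_test below creates files this way and then reopens them by full
+# path to make sure name resolution finds the same files.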
+proc env004 { } {
+ source ./include.tcl
+
+ set method "hash"
+ set omethod [convert_method $method]
+ set args [convert_args $method ""]
+
+ puts "Env004: Multiple data directory test."
+
+ env_cleanup $testdir
+ file mkdir $testdir/data1
+ file mkdir $testdir/data2
+ file mkdir $testdir/data3
+
+ puts "\tEnv004.a: Multiple data directories in DB_CONFIG file"
+
+ # Create a config file
+ set cid [open $testdir/DB_CONFIG w]
+ puts $cid "set_data_dir ."
+ puts $cid "set_data_dir data1"
+ puts $cid "set_data_dir data2"
+ puts $cid "set_data_dir data3"
+ close $cid
+
+ # Now get pathnames
+ set curdir [pwd]
+ cd $testdir
+ set fulldir [pwd]
+ cd $curdir
+
+ set e [berkdb_env -create -private -home $testdir]
+ error_check_good dbenv [is_valid_env $e] TRUE
+ ddir_test $fulldir $method $e $args
+ error_check_good env_close [$e close] 0
+
+ puts "\tEnv004.b: Multiple data directories in berkdb_env call."
+ env_cleanup $testdir
+ file mkdir $testdir/data1
+ file mkdir $testdir/data2
+ file mkdir $testdir/data3
+
+ # Now call dbenv with config specified
+ set e [berkdb_env -create -private \
+ -data_dir . -data_dir data1 -data_dir data2 \
+ -data_dir data3 -home $testdir]
+ error_check_good dbenv [is_valid_env $e] TRUE
+ ddir_test $fulldir $method $e $args
+ error_check_good env_close [$e close] 0
+
+ env_cleanup $testdir
+}
+
+proc ddir_test { fulldir method e args } {
+ source ./include.tcl
+
+ set args [convert_args $args]
+ set omethod [convert_method $method]
+
+ # Now create one file in each directory
+ set db1 [eval {berkdb_open -create \
+ -truncate -mode 0644 $omethod -env $e} $args {data1/datafile1.db}]
+ error_check_good dbopen1 [is_valid_db $db1] TRUE
+
+ set db2 [eval {berkdb_open -create \
+ -truncate -mode 0644 $omethod -env $e} $args {data2/datafile2.db}]
+ error_check_good dbopen2 [is_valid_db $db2] TRUE
+
+ set db3 [eval {berkdb_open -create \
+ -truncate -mode 0644 $omethod -env $e} $args {data3/datafile3.db}]
+ error_check_good dbopen3 [is_valid_db $db3] TRUE
+
+ # Close the files
+ error_check_good db_close1 [$db1 close] 0
+ error_check_good db_close2 [$db2 close] 0
+ error_check_good db_close3 [$db3 close] 0
+
+	# Now, reopen the files with complete pathnames and make
+	# sure that we find them.
+
+ set db1 [berkdb_open -env $e $fulldir/data1/datafile1.db]
+ error_check_good dbopen1 [is_valid_db $db1] TRUE
+
+ set db2 [berkdb_open -env $e $fulldir/data2/datafile2.db]
+ error_check_good dbopen2 [is_valid_db $db2] TRUE
+
+ set db3 [berkdb_open -env $e $fulldir/data3/datafile3.db]
+ error_check_good dbopen3 [is_valid_db $db3] TRUE
+
+ # Finally close all the files
+ error_check_good db_close1 [$db1 close] 0
+ error_check_good db_close2 [$db2 close] 0
+ error_check_good db_close3 [$db3 close] 0
+}
diff --git a/storage/bdb/test/env005.tcl b/storage/bdb/test/env005.tcl
new file mode 100644
index 00000000000..03bb1b40b34
--- /dev/null
+++ b/storage/bdb/test/env005.tcl
@@ -0,0 +1,53 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: env005.tcl,v 11.15 2002/02/22 14:28:37 sandstro Exp $
+#
+# TEST env005
+# TEST Test that using subsystems without first initializing them
+# TEST returns an error. Cannot test mpool, because it is assumed in
+# TEST the Tcl code.
+proc env005 { } {
+ source ./include.tcl
+
+ puts "Env005: Uninitialized env subsystems test."
+
+ env_cleanup $testdir
+ puts "\tEnv005.a: Creating env with no subsystems."
+
+ set e [berkdb_env_noerr -create -home $testdir]
+ error_check_good dbenv [is_valid_env $e] TRUE
+ set db [berkdb_open -create -btree $testdir/env005.db]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set rlist {
+ { "lock_detect" "Env005.b0"}
+ { "lock_get read 1 1" "Env005.b1"}
+ { "lock_id" "Env005.b2"}
+ { "lock_stat" "Env005.b3"}
+ { "lock_timeout 100" "Env005.b4"}
+ { "log_archive" "Env005.c0"}
+ { "log_cursor" "Env005.c1"}
+ { "log_file {1 1}" "Env005.c2"}
+ { "log_flush" "Env005.c3"}
+ { "log_put record" "Env005.c4"}
+ { "log_stat" "Env005.c5"}
+ { "txn" "Env005.d0"}
+ { "txn_checkpoint" "Env005.d1"}
+ { "txn_stat" "Env005.d2"}
+ { "txn_timeout 100" "Env005.d3"}
+ }
+
+ foreach pair $rlist {
+ set cmd [lindex $pair 0]
+ set msg [lindex $pair 1]
+ puts "\t$msg: $cmd"
+ set stat [catch {eval $e $cmd} ret]
+ error_check_good $cmd $stat 1
+ error_check_good $cmd.err [is_substr $ret invalid] 1
+ }
+ error_check_good dbclose [$db close] 0
+ error_check_good envclose [$e close] 0
+}
diff --git a/storage/bdb/test/env006.tcl b/storage/bdb/test/env006.tcl
new file mode 100644
index 00000000000..48fc6982772
--- /dev/null
+++ b/storage/bdb/test/env006.tcl
@@ -0,0 +1,42 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: env006.tcl,v 11.8 2002/01/11 15:53:23 bostic Exp $
+#
+# TEST env006
+# TEST Make sure that all the utilities exist and run.
+proc env006 { } {
+ source ./include.tcl
+
+ puts "Env006: Run underlying utilities."
+
+ set rlist {
+ { "db_archive" "Env006.a"}
+ { "db_checkpoint" "Env006.b"}
+ { "db_deadlock" "Env006.c"}
+ { "db_dump" "Env006.d"}
+ { "db_load" "Env006.e"}
+ { "db_printlog" "Env006.f"}
+ { "db_recover" "Env006.g"}
+ { "db_stat" "Env006.h"}
+ { "db_upgrade" "Env006.h"}
+ { "db_verify" "Env006.h"}
+ }
+ foreach pair $rlist {
+ set cmd [lindex $pair 0]
+ set msg [lindex $pair 1]
+
+ puts "\t$msg: $cmd"
+
+ set stat [catch {exec $util_path/$cmd -?} ret]
+ error_check_good $cmd $stat 1
+
+ #
+ # Check for "usage", but only check "sage" so that
+ # we can handle either Usage or usage.
+ #
+ error_check_good $cmd.err [is_substr $ret sage] 1
+ }
+}
diff --git a/storage/bdb/test/env007.tcl b/storage/bdb/test/env007.tcl
new file mode 100644
index 00000000000..5748d2dbc89
--- /dev/null
+++ b/storage/bdb/test/env007.tcl
@@ -0,0 +1,223 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: env007.tcl,v 11.21 2002/08/12 20:49:36 sandstro Exp $
+#
+# TEST env007
+# TEST Test various DB_CONFIG config file options.
+# TEST 1) Make sure command line option is respected
+# TEST 2) Make sure that config file option is respected
+# TEST 3) Make sure that if -both- DB_CONFIG and the set_<whatever>
+# TEST method is used, only the file is respected.
+# TEST Then test all known config options.
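+#
+# A concrete instance of the precedence rule (sketch only): opening with
+#
+#	berkdb_env -create -txn -home $testdir -txn_max 19
+#
+# while $testdir/DB_CONFIG contains "set_tx_max 31" must report a
+# "Max Txns" of 31 in txn_stat -- the file wins, as the third check in
+# the loop below verifies.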
+proc env007 { } {
+ global errorInfo
+
+ # env007 is essentially just a small driver that runs
+ # env007_body twice. First, it supplies a "set" argument
+ # to use with environment opens, and the second time it sets
+ # DB_CONFIG instead.
+ # Note that env007_body itself calls env007_run_test to run
+ # the body of the actual test.
+
+ source ./include.tcl
+
+ puts "Env007: DB_CONFIG test."
+
+ #
+ # Test only those options we can easily check via stat
+ #
+ set rlist {
+ { " -txn_max " "set_tx_max" "19" "31" "Env007.a: Txn Max"
+ "txn_stat" "Max Txns"}
+ { " -lock_max_locks " "set_lk_max_locks" "17" "29" "Env007.b: Lock Max"
+ "lock_stat" "Maximum locks"}
+ { " -lock_max_lockers " "set_lk_max_lockers" "1500" "2000"
+ "Env007.c: Max Lockers" "lock_stat" "Maximum lockers"}
+ { " -lock_max_objects " "set_lk_max_objects" "1500" "2000"
+ "Env007.d: Max Objects" "lock_stat" "Maximum objects"}
+ { " -log_buffer " "set_lg_bsize" "65536" "131072" "Env007.e: Log Bsize"
+ "log_stat" "Log record cache size"}
+ { " -log_max " "set_lg_max" "8388608" "9437184" "Env007.f: Log Max"
+ "log_stat" "Current log file size"}
+ }
+
+ set e "berkdb_env -create -mode 0644 -home $testdir -log -lock -txn "
+ foreach item $rlist {
+ set envarg [lindex $item 0]
+ set configarg [lindex $item 1]
+ set envval [lindex $item 2]
+ set configval [lindex $item 3]
+ set msg [lindex $item 4]
+ set statcmd [lindex $item 5]
+ set statstr [lindex $item 6]
+
+ env_cleanup $testdir
+ # First verify using just env args
+ puts "\t$msg Environment argument only"
+ set env [eval $e $envarg $envval]
+ error_check_good envopen:0 [is_valid_env $env] TRUE
+ env007_check $env $statcmd $statstr $envval
+ error_check_good envclose:0 [$env close] 0
+
+ env_cleanup $testdir
+ env007_make_config $configarg $configval
+
+ # verify using just config file
+ puts "\t$msg Config file only"
+ set env [eval $e]
+ error_check_good envopen:1 [is_valid_env $env] TRUE
+ env007_check $env $statcmd $statstr $configval
+ error_check_good envclose:1 [$env close] 0
+
+		# Finally, verify with both the env arg and the config file
+ puts "\t$msg Environment arg and config file"
+ set env [eval $e $envarg $envval]
+ error_check_good envopen:2 [is_valid_env $env] TRUE
+ env007_check $env $statcmd $statstr $configval
+ error_check_good envclose:2 [$env close] 0
+ }
+
+ #
+ # Test all options. For all config options, write it out
+ # to the file and make sure we can open the env. We cannot
+ # necessarily check via stat that it worked but this execs
+ # the config file code itself.
+ #
+ set cfglist {
+ { "set_cachesize" "0 1048576 0" }
+ { "set_data_dir" "." }
+ { "set_flags" "db_cdb_alldb" }
+ { "set_flags" "db_direct_db" }
+ { "set_flags" "db_direct_log" }
+ { "set_flags" "db_nolocking" }
+ { "set_flags" "db_nommap" }
+ { "set_flags" "db_nopanic" }
+ { "set_flags" "db_overwrite" }
+ { "set_flags" "db_region_init" }
+ { "set_flags" "db_txn_nosync" }
+ { "set_flags" "db_txn_write_nosync" }
+ { "set_flags" "db_yieldcpu" }
+ { "set_lg_bsize" "65536" }
+ { "set_lg_dir" "." }
+ { "set_lg_max" "8388608" }
+ { "set_lg_regionmax" "65536" }
+ { "set_lk_detect" "db_lock_default" }
+ { "set_lk_detect" "db_lock_expire" }
+ { "set_lk_detect" "db_lock_maxlocks" }
+ { "set_lk_detect" "db_lock_minlocks" }
+ { "set_lk_detect" "db_lock_minwrite" }
+ { "set_lk_detect" "db_lock_oldest" }
+ { "set_lk_detect" "db_lock_random" }
+ { "set_lk_detect" "db_lock_youngest" }
+ { "set_lk_max" "50" }
+ { "set_lk_max_lockers" "1500" }
+ { "set_lk_max_locks" "29" }
+ { "set_lk_max_objects" "1500" }
+ { "set_lock_timeout" "100" }
+ { "set_mp_mmapsize" "12582912" }
+ { "set_region_init" "1" }
+ { "set_shm_key" "15" }
+ { "set_tas_spins" "15" }
+ { "set_tmp_dir" "." }
+ { "set_tx_max" "31" }
+ { "set_txn_timeout" "100" }
+ { "set_verbose" "db_verb_chkpoint" }
+ { "set_verbose" "db_verb_deadlock" }
+ { "set_verbose" "db_verb_recovery" }
+ { "set_verbose" "db_verb_waitsfor" }
+ }
+
+ puts "\tEnv007.g: Config file settings"
+ set e "berkdb_env -create -mode 0644 -home $testdir -log -lock -txn "
+ foreach item $cfglist {
+ env_cleanup $testdir
+ set configarg [lindex $item 0]
+ set configval [lindex $item 1]
+
+ env007_make_config $configarg $configval
+
+ # verify using just config file
+ puts "\t\t $configarg $configval"
+ set env [eval $e]
+ error_check_good envvalid:1 [is_valid_env $env] TRUE
+ error_check_good envclose:1 [$env close] 0
+ }
+
+ set cfglist {
+ { "set_cachesize" "1048576" }
+ { "set_flags" "db_xxx" }
+ { "set_flags" "1" }
+ { "set_flags" "db_txn_nosync x" }
+ { "set_lg_bsize" "db_xxx" }
+ { "set_lg_max" "db_xxx" }
+ { "set_lg_regionmax" "db_xxx" }
+ { "set_lk_detect" "db_xxx" }
+ { "set_lk_detect" "1" }
+ { "set_lk_detect" "db_lock_youngest x" }
+ { "set_lk_max" "db_xxx" }
+ { "set_lk_max_locks" "db_xxx" }
+ { "set_lk_max_lockers" "db_xxx" }
+ { "set_lk_max_objects" "db_xxx" }
+ { "set_mp_mmapsize" "db_xxx" }
+ { "set_region_init" "db_xxx" }
+ { "set_shm_key" "db_xxx" }
+ { "set_tas_spins" "db_xxx" }
+ { "set_tx_max" "db_xxx" }
+ { "set_verbose" "db_xxx" }
+ { "set_verbose" "1" }
+ { "set_verbose" "db_verb_recovery x" }
+ }
+ puts "\tEnv007.h: Config value errors"
+ set e "berkdb_env_noerr -create -mode 0644 \
+ -home $testdir -log -lock -txn "
+ foreach item $cfglist {
+ set configarg [lindex $item 0]
+ set configval [lindex $item 1]
+
+ env007_make_config $configarg $configval
+
+ # verify using just config file
+ puts "\t\t $configarg $configval"
+ set stat [catch {eval $e} ret]
+ error_check_good envopen $stat 1
+ error_check_good error [is_substr $errorInfo \
+ "incorrect arguments for name-value pair"] 1
+ }
+
+ puts "\tEnv007.i: Config name error set_xxx"
+ set e "berkdb_env_noerr -create -mode 0644 \
+ -home $testdir -log -lock -txn "
+ env007_make_config "set_xxx" 1
+ set stat [catch {eval $e} ret]
+ error_check_good envopen $stat 1
+ error_check_good error [is_substr $errorInfo \
+ "unrecognized name-value pair"] 1
+}
+
+proc env007_check { env statcmd statstr testval } {
+ set stat [$env $statcmd]
+ set checked 0
+ foreach statpair $stat {
+ if {$checked == 1} {
+ break
+ }
+ set statmsg [lindex $statpair 0]
+ set statval [lindex $statpair 1]
+ if {[is_substr $statmsg $statstr] != 0} {
+ set checked 1
+ error_check_good $statstr:ck $statval $testval
+ }
+ }
+ error_check_good $statstr:test $checked 1
+}
+
+proc env007_make_config { carg cval } {
+ global testdir
+
+ set cid [open $testdir/DB_CONFIG w]
+ puts $cid "$carg $cval"
+ close $cid
+}
diff --git a/storage/bdb/test/env008.tcl b/storage/bdb/test/env008.tcl
new file mode 100644
index 00000000000..dccdb41f612
--- /dev/null
+++ b/storage/bdb/test/env008.tcl
@@ -0,0 +1,73 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: env008.tcl,v 11.6 2002/02/22 14:29:34 sandstro Exp $
+#
+# TEST env008
+# TEST Test environments and subdirectories.
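+#
+# The database names used here are relative paths such as 1/1/env.db;
+# they resolve against the environment home, so the remove and rename
+# operations move databases between $testdir/1/1 and $testdir/1/2
+# without using absolute paths.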
+proc env008 { } {
+ global errorInfo
+ global errorCode
+
+ source ./include.tcl
+
+ env_cleanup $testdir
+
+ set subdir 1/1
+ set subdir1 1/2
+ file mkdir $testdir/$subdir $testdir/$subdir1
+ set testfile $subdir/env.db
+
+ puts "Env008: Test of environments and subdirectories."
+
+ puts "\tEnv008.a: Create env and db."
+ set env [berkdb_env -create -mode 0644 -home $testdir -txn]
+ error_check_good env [is_valid_env $env] TRUE
+
+ puts "\tEnv008.b: Remove db in subdir."
+ env008_db $env $testfile
+ error_check_good dbremove:$testfile \
+ [berkdb dbremove -env $env $testfile] 0
+
+ #
+	# Rather than remaking the db for each rename, just keep
+	# renaming the same file to a new name each time.
+ #
+ puts "\tEnv008.c: Rename db in subdir."
+ env008_db $env $testfile
+ set newfile $subdir/new.db
+ error_check_good dbrename:$testfile/.. \
+ [berkdb dbrename -env $env $testfile $newfile] 0
+ set testfile $newfile
+
+ puts "\tEnv008.d: Rename db to parent dir."
+ set newfile $subdir/../new.db
+ error_check_good dbrename:$testfile/.. \
+ [berkdb dbrename -env $env $testfile $newfile] 0
+ set testfile $newfile
+
+ puts "\tEnv008.e: Rename db to child dir."
+ set newfile $subdir/env.db
+ error_check_good dbrename:$testfile/.. \
+ [berkdb dbrename -env $env $testfile $newfile] 0
+ set testfile $newfile
+
+ puts "\tEnv008.f: Rename db to another dir."
+ set newfile $subdir1/env.db
+ error_check_good dbrename:$testfile/.. \
+ [berkdb dbrename -env $env $testfile $newfile] 0
+
+ error_check_good envclose [$env close] 0
+ puts "\tEnv008 complete."
+}
+
+proc env008_db { env testfile } {
+ set db [berkdb_open -env $env -create -btree $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set ret [$db put key data]
+ error_check_good dbput $ret 0
+ error_check_good dbclose [$db close] 0
+}
diff --git a/storage/bdb/test/env009.tcl b/storage/bdb/test/env009.tcl
new file mode 100644
index 00000000000..264d5e2dfec
--- /dev/null
+++ b/storage/bdb/test/env009.tcl
@@ -0,0 +1,57 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: env009.tcl,v 11.5 2002/08/12 20:40:36 sandstro Exp $
+#
+# TEST env009
+# TEST Test calls to all the various stat functions. We have several
+# TEST sprinkled throughout the test suite, but this will ensure that
+# TEST we run all of them at least once.
+proc env009 { } {
+ source ./include.tcl
+
+ puts "Env009: Various stat function test."
+
+ env_cleanup $testdir
+ puts "\tEnv009.a: Setting up env and a database."
+
+ set e [berkdb_env -create -home $testdir -txn]
+ error_check_good dbenv [is_valid_env $e] TRUE
+ set dbbt [berkdb_open -create -btree $testdir/env009bt.db]
+ error_check_good dbopen [is_valid_db $dbbt] TRUE
+ set dbh [berkdb_open -create -hash $testdir/env009h.db]
+ error_check_good dbopen [is_valid_db $dbh] TRUE
+	set dbq [berkdb_open -create -queue $testdir/env009q.db]
+ error_check_good dbopen [is_valid_db $dbq] TRUE
+
+ set rlist {
+ { "lock_stat" "Maximum locks" "Env009.b"}
+ { "log_stat" "Magic" "Env009.c"}
+ { "mpool_stat" "Number of caches" "Env009.d"}
+ { "txn_stat" "Max Txns" "Env009.e"}
+ }
+
+ foreach pair $rlist {
+ set cmd [lindex $pair 0]
+ set str [lindex $pair 1]
+ set msg [lindex $pair 2]
+ puts "\t$msg: $cmd"
+ set ret [$e $cmd]
+ error_check_good $cmd [is_substr $ret $str] 1
+ }
+	puts "\tEnv009.f: btree stats"
+	set ret [$dbbt stat]
+	error_check_good btree_stat [is_substr $ret "Magic"] 1
+	puts "\tEnv009.g: hash stats"
+	set ret [$dbh stat]
+	error_check_good hash_stat [is_substr $ret "Magic"] 1
+	puts "\tEnv009.h: queue stats"
+	set ret [$dbq stat]
+	error_check_good queue_stat [is_substr $ret "Magic"] 1
+ error_check_good dbclose [$dbbt close] 0
+ error_check_good dbclose [$dbh close] 0
+ error_check_good dbclose [$dbq close] 0
+ error_check_good envclose [$e close] 0
+}
diff --git a/storage/bdb/test/env010.tcl b/storage/bdb/test/env010.tcl
new file mode 100644
index 00000000000..4444e34e439
--- /dev/null
+++ b/storage/bdb/test/env010.tcl
@@ -0,0 +1,49 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: env010.tcl,v 1.4 2002/02/20 17:08:21 sandstro Exp $
+#
+# TEST env010
+# TEST Run recovery in an empty directory, and then make sure we can still
+# TEST create a database in that directory.
+proc env010 { } {
+ source ./include.tcl
+
+ puts "Env010: Test of recovery in an empty directory."
+
+ # Create a new directory used only for this test
+
+ if { [file exists $testdir/EMPTYDIR] != 1 } {
+ file mkdir $testdir/EMPTYDIR
+ } else {
+ puts "\nDirectory already exists."
+ }
+
+ # Do the test twice, for regular recovery and catastrophic
+ # Open environment and recover, but don't create a database
+
+ foreach rmethod {recover recover_fatal} {
+
+ puts "\tEnv010: Creating env for $rmethod test."
+ env_cleanup $testdir/EMPTYDIR
+ set e [berkdb_env -create -home $testdir/EMPTYDIR -$rmethod]
+ error_check_good dbenv [is_valid_env $e] TRUE
+
+ # Open and close a database
+		# The method doesn't matter, so we picked btree arbitrarily.
+
+ set db [eval {berkdb_open -env $e \
+ -btree -create -mode 0644} ]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ error_check_good db_close [$db close] 0
+
+ # Close environment
+
+ error_check_good envclose [$e close] 0
+ error_check_good berkdb:envremove \
+ [berkdb envremove -home $testdir/EMPTYDIR] 0
+ }
+ puts "\tEnv010 complete."
+}
diff --git a/storage/bdb/test/env011.tcl b/storage/bdb/test/env011.tcl
new file mode 100644
index 00000000000..4061bb3fe51
--- /dev/null
+++ b/storage/bdb/test/env011.tcl
@@ -0,0 +1,39 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: env011.tcl,v 1.2 2002/02/20 17:08:21 sandstro Exp $
+#
+# TEST env011
+# TEST Run with region overwrite flag.
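+#
+# As we understand the flag, -overwrite (DB_OVERWRITE) makes Berkeley DB
+# physically overwrite file contents before removing them, for cases
+# where data must not be recoverable from disk; this test just confirms
+# that open, recovery, and removal all accept the flag.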
+proc env011 { } {
+ source ./include.tcl
+
+ puts "Env011: Test of region overwriting."
+ env_cleanup $testdir
+
+ puts "\tEnv011: Creating/closing env for open test."
+ set e [berkdb_env -create -overwrite -home $testdir -txn]
+ error_check_good dbenv [is_valid_env $e] TRUE
+ set db [eval \
+ {berkdb_open -auto_commit -env $e -btree -create -mode 0644} ]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set ret [eval {$db put} -auto_commit "aaa" "data"]
+ error_check_good put $ret 0
+ set ret [eval {$db put} -auto_commit "bbb" "data"]
+ error_check_good put $ret 0
+ error_check_good db_close [$db close] 0
+ error_check_good envclose [$e close] 0
+
+ puts "\tEnv011: Opening the environment with overwrite set."
+ set e [berkdb_env -create -overwrite -home $testdir -txn -recover]
+ error_check_good dbenv [is_valid_env $e] TRUE
+ error_check_good envclose [$e close] 0
+
+ puts "\tEnv011: Removing the environment with overwrite set."
+ error_check_good berkdb:envremove \
+ [berkdb envremove -home $testdir -overwrite] 0
+
+ puts "\tEnv011 complete."
+}
diff --git a/storage/bdb/test/hsearch.tcl b/storage/bdb/test/hsearch.tcl
new file mode 100644
index 00000000000..afeed93f74e
--- /dev/null
+++ b/storage/bdb/test/hsearch.tcl
@@ -0,0 +1,51 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: hsearch.tcl,v 11.9 2002/01/11 15:53:24 bostic Exp $
+#
+# Historic Hsearch interface test.
+# Use the first 1000 entries from the dictionary.
+# Insert each with self as key and data; retrieve each.
+# After all are entered, retrieve all; compare output to original.
+# Then reopen the file, re-retrieve everything.
+# Finally, delete everything.
+proc hsearch { { nentries 1000 } } {
+ source ./include.tcl
+
+ puts "HSEARCH interfaces test: $nentries"
+
+ # Create the database and open the dictionary
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ cleanup $testdir NULL
+
+ error_check_good hcreate [berkdb hcreate $nentries] 0
+ set did [open $dict]
+ set count 0
+
+ puts "\tHSEARCH.a: put/get loop"
+ # Here is the loop where we put and get each key/data pair
+ while { [gets $did str] != -1 && $count < $nentries } {
+ set ret [berkdb hsearch $str $str enter]
+ error_check_good hsearch:enter $ret 0
+
+ set d [berkdb hsearch $str 0 find]
+ error_check_good hsearch:find $d $str
+ incr count
+ }
+ close $did
+
+ puts "\tHSEARCH.b: re-get loop"
+	set did [open $dict]
+	# Reset the counter so the retrieval loop actually runs.
+	set count 0
+	# Here is the loop where we retrieve each key
+ while { [gets $did str] != -1 && $count < $nentries } {
+ set d [berkdb hsearch $str 0 find]
+ error_check_good hsearch:find $d $str
+ incr count
+ }
+ close $did
+ error_check_good hdestroy [berkdb hdestroy] 0
+}
diff --git a/storage/bdb/test/join.tcl b/storage/bdb/test/join.tcl
new file mode 100644
index 00000000000..87b0d1fae58
--- /dev/null
+++ b/storage/bdb/test/join.tcl
@@ -0,0 +1,455 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: join.tcl,v 11.21 2002/02/20 17:08:22 sandstro Exp $
+#
+# TEST jointest
+# TEST Test duplicate assisted joins. Executes 1, 2, 3 and 4-way joins
+# TEST with differing index orders and selectivity.
+# TEST
+# TEST We'll test 2-way, 3-way, and 4-way joins and figure that if those
+# TEST work, everything else does as well. We'll create test databases
+# TEST called join1.db, join2.db, join3.db, and join4.db. The number on
+# TEST the database describes the duplication -- duplicates are of the
+# TEST form 0, N, 2N, 3N, ... where N is the number of the database.
+# TEST Primary.db is the primary database, and null.db is the database
+# TEST that has no matching duplicates.
+# TEST
+# TEST We should test this on all btrees, all hash, and a combination thereof
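+#
+# For example: join2.db stores the duplicate set 0, 2, 4, ... and
+# join3.db stores 0, 3, 6, ..., so a join across the two should return
+# only the multiples of 6; the expected-count arithmetic in do_join
+# below encodes exactly this.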
+proc jointest { {psize 8192} {with_dup_dups 0} {flags 0} } {
+ global testdir
+ global rand_init
+ source ./include.tcl
+
+ env_cleanup $testdir
+ berkdb srand $rand_init
+
+ # Use one environment for all database opens so we don't
+ # need oodles of regions.
+ set env [berkdb_env -create -home $testdir]
+ error_check_good env_open [is_valid_env $env] TRUE
+
+ # With the new offpage duplicate code, we don't support
+ # duplicate duplicates in sorted dup sets. Thus, if with_dup_dups
+ # is greater than one, run only with "-dup".
+ if { $with_dup_dups > 1 } {
+ set doptarray {"-dup"}
+ } else {
+ set doptarray {"-dup -dupsort" "-dup" RANDOMMIX RANDOMMIX }
+ }
+
+ # NB: these flags are internal only, ok
+ foreach m "DB_BTREE DB_HASH DB_BOTH" {
+ # run with two different random mixes.
+ foreach dopt $doptarray {
+ set opt [list "-env" $env $dopt]
+
+ puts "Join test: ($m $dopt) psize $psize,\
+ $with_dup_dups dup\
+ dups, flags $flags."
+
+ build_all $m $psize $opt oa $with_dup_dups
+
+			# null.db is built fifth (by build_all) but is
+			# referenced by index zero; set up the option
+			# array appropriately.
+ set oa(0) $oa(5)
+
+ # Build the primary
+ puts "\tBuilding the primary database $m"
+ set oflags "-create -truncate -mode 0644 -env $env\
+ [conv $m [berkdb random_int 1 2]]"
+ set db [eval {berkdb_open} $oflags primary.db]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ for { set i 0 } { $i < 1000 } { incr i } {
+ set key [format "%04d" $i]
+ set ret [$db put $key stub]
+ error_check_good "primary put" $ret 0
+ }
+ error_check_good "primary close" [$db close] 0
+ set did [open $dict]
+ gets $did str
+ do_join primary.db "1 0" $str oa $flags\
+ $with_dup_dups
+ gets $did str
+ do_join primary.db "2 0" $str oa $flags\
+ $with_dup_dups
+ gets $did str
+ do_join primary.db "3 0" $str oa $flags\
+ $with_dup_dups
+ gets $did str
+ do_join primary.db "4 0" $str oa $flags\
+ $with_dup_dups
+ gets $did str
+ do_join primary.db "1" $str oa $flags $with_dup_dups
+ gets $did str
+ do_join primary.db "2" $str oa $flags $with_dup_dups
+ gets $did str
+ do_join primary.db "3" $str oa $flags $with_dup_dups
+ gets $did str
+ do_join primary.db "4" $str oa $flags $with_dup_dups
+ gets $did str
+ do_join primary.db "1 2" $str oa $flags\
+ $with_dup_dups
+ gets $did str
+ do_join primary.db "1 2 3" $str oa $flags\
+ $with_dup_dups
+ gets $did str
+ do_join primary.db "1 2 3 4" $str oa $flags\
+ $with_dup_dups
+ gets $did str
+ do_join primary.db "2 1" $str oa $flags\
+ $with_dup_dups
+ gets $did str
+ do_join primary.db "3 2 1" $str oa $flags\
+ $with_dup_dups
+ gets $did str
+ do_join primary.db "4 3 2 1" $str oa $flags\
+ $with_dup_dups
+ gets $did str
+ do_join primary.db "1 3" $str oa $flags $with_dup_dups
+ gets $did str
+ do_join primary.db "3 1" $str oa $flags $with_dup_dups
+ gets $did str
+ do_join primary.db "1 4" $str oa $flags $with_dup_dups
+ gets $did str
+ do_join primary.db "4 1" $str oa $flags $with_dup_dups
+ gets $did str
+ do_join primary.db "2 3" $str oa $flags $with_dup_dups
+ gets $did str
+ do_join primary.db "3 2" $str oa $flags $with_dup_dups
+ gets $did str
+ do_join primary.db "2 4" $str oa $flags $with_dup_dups
+ gets $did str
+ do_join primary.db "4 2" $str oa $flags $with_dup_dups
+ gets $did str
+ do_join primary.db "3 4" $str oa $flags $with_dup_dups
+ gets $did str
+ do_join primary.db "4 3" $str oa $flags $with_dup_dups
+ gets $did str
+ do_join primary.db "2 3 4" $str oa $flags\
+ $with_dup_dups
+ gets $did str
+ do_join primary.db "3 4 1" $str oa $flags\
+ $with_dup_dups
+ gets $did str
+ do_join primary.db "4 2 1" $str oa $flags\
+ $with_dup_dups
+ gets $did str
+ do_join primary.db "0 2 1" $str oa $flags\
+ $with_dup_dups
+ gets $did str
+ do_join primary.db "3 2 0" $str oa $flags\
+ $with_dup_dups
+ gets $did str
+ do_join primary.db "4 3 2 1" $str oa $flags\
+ $with_dup_dups
+ gets $did str
+ do_join primary.db "4 3 0 1" $str oa $flags\
+ $with_dup_dups
+ gets $did str
+ do_join primary.db "3 3 3" $str oa $flags\
+ $with_dup_dups
+ gets $did str
+ do_join primary.db "2 2 3 3" $str oa $flags\
+ $with_dup_dups
+ gets $did str2
+ gets $did str
+ do_join primary.db "1 2" $str oa $flags\
+ $with_dup_dups "3" $str2
+
+ # You really don't want to run this section
+ # with $with_dup_dups > 2.
+ if { $with_dup_dups <= 2 } {
+ gets $did str2
+ gets $did str
+ do_join primary.db "1 2 3" $str\
+ oa $flags $with_dup_dups "3 3 1" $str2
+ gets $did str2
+ gets $did str
+ do_join primary.db "4 0 2" $str\
+ oa $flags $with_dup_dups "4 3 3" $str2
+ gets $did str2
+ gets $did str
+ do_join primary.db "3 2 1" $str\
+ oa $flags $with_dup_dups "0 2" $str2
+ gets $did str2
+ gets $did str
+ do_join primary.db "2 2 3 3" $str\
+ oa $flags $with_dup_dups "1 4 4" $str2
+ gets $did str2
+ gets $did str
+ do_join primary.db "2 2 3 3" $str\
+ oa $flags $with_dup_dups "0 0 4 4" $str2
+ gets $did str2
+ gets $did str
+ do_join primary.db "2 2 3 3" $str2\
+ oa $flags $with_dup_dups "2 4 4" $str
+ gets $did str2
+ gets $did str
+ do_join primary.db "2 2 3 3" $str2\
+ oa $flags $with_dup_dups "0 0 4 4" $str
+ }
+ close $did
+ }
+ }
+
+ error_check_good env_close [$env close] 0
+}
+
+proc build_all { method psize opt oaname with_dup_dups {nentries 100} } {
+ global testdir
+ db_build join1.db $nentries 50 1 [conv $method 1]\
+ $psize $opt $oaname $with_dup_dups
+ db_build join2.db $nentries 25 2 [conv $method 2]\
+ $psize $opt $oaname $with_dup_dups
+ db_build join3.db $nentries 16 3 [conv $method 3]\
+ $psize $opt $oaname $with_dup_dups
+ db_build join4.db $nentries 12 4 [conv $method 4]\
+ $psize $opt $oaname $with_dup_dups
+ db_build null.db $nentries 0 5 [conv $method 5]\
+ $psize $opt $oaname $with_dup_dups
+}
+
+proc conv { m i } {
+ switch -- $m {
+ DB_HASH { return "-hash"}
+ "-hash" { return "-hash"}
+ DB_BTREE { return "-btree"}
+ "-btree" { return "-btree"}
+ DB_BOTH {
+ if { [expr $i % 2] == 0 } {
+ return "-hash";
+ } else {
+ return "-btree";
+ }
+ }
+ }
+}
+
+proc random_opts { } {
+ set j [berkdb random_int 0 1]
+ if { $j == 0 } {
+ return " -dup"
+ } else {
+ return " -dup -dupsort"
+ }
+}
+
+proc db_build { name nkeys ndups dup_interval method psize lopt oaname \
+ with_dup_dups } {
+ source ./include.tcl
+
+ # Get array of arg names (from two levels up the call stack)
+ upvar 2 $oaname oa
+
+	# Search for "RANDOMMIX" in $lopt, and if present, replace
+ # with " -dup" or " -dup -dupsort" at random.
+ set i [lsearch $lopt RANDOMMIX]
+ if { $i != -1 } {
+ set lopt [lreplace $lopt $i $i [random_opts]]
+ }
+
+ # Save off db_open arguments for this database.
+ set opt [eval concat $lopt]
+ set oa($dup_interval) $opt
+
+ # Create the database and open the dictionary
+ set oflags "-create -truncate -mode 0644 $method\
+ -pagesize $psize"
+ set db [eval {berkdb_open} $oflags $opt $name]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set did [open $dict]
+ set count 0
+ puts -nonewline "\tBuilding $name: $nkeys keys "
+ puts -nonewline "with $ndups duplicates at interval of $dup_interval"
+ if { $with_dup_dups > 0 } {
+ puts ""
+ puts "\t\tand $with_dup_dups duplicate duplicates."
+ } else {
+ puts "."
+ }
+ for { set count 0 } { [gets $did str] != -1 && $count < $nkeys } {
+ incr count} {
+ set str $str$name
+ # We need to make sure that the dups are inserted in a
+ # random, or near random, order. Do this by generating
+ # them and putting each in a list, then sorting the list
+ # at random.
+ set duplist {}
+ for { set i 0 } { $i < $ndups } { incr i } {
+ set data [format "%04d" [expr $i * $dup_interval]]
+ lappend duplist $data
+ }
+		# Shuffle the list.  This is a deterministic near-shuffle
+		# that swaps adjacent pairs; a truly random swap is left
+		# commented out below.
+ for { set i 0 } { $i < $ndups } {incr i } {
+ # set j [berkdb random_int $i [expr $ndups - 1]]
+ set j [expr ($i % 2) + $i]
+ if { $j >= $ndups } { set j $i }
+ set dupi [lindex $duplist $i]
+ set dupj [lindex $duplist $j]
+ set duplist [lreplace $duplist $i $i $dupj]
+ set duplist [lreplace $duplist $j $j $dupi]
+ }
+ foreach data $duplist {
+ if { $with_dup_dups != 0 } {
+ for { set j 0 }\
+ { $j < $with_dup_dups }\
+ {incr j} {
+ set ret [$db put $str $data]
+ error_check_good put$j $ret 0
+ }
+ } else {
+ set ret [$db put $str $data]
+ error_check_good put $ret 0
+ }
+ }
+
+ if { $ndups == 0 } {
+ set ret [$db put $str NODUP]
+ error_check_good put $ret 0
+ }
+ }
+ close $did
+ error_check_good close:$name [$db close] 0
+}
+
+proc do_join { primary dbs key oanm flags with_dup_dups {dbs2 ""} {key2 ""} } {
+ global testdir
+ source ./include.tcl
+
+ upvar $oanm oa
+
+ puts -nonewline "\tJoining: $dbs on $key"
+ if { $dbs2 == "" } {
+ puts ""
+ } else {
+ puts " with $dbs2 on $key2"
+ }
+
+ # Open all the databases
+ set p [berkdb_open -unknown $testdir/$primary]
+ error_check_good "primary open" [is_valid_db $p] TRUE
+
+ set dblist ""
+ set curslist ""
+
+ set ndx [llength $dbs]
+
+ foreach i [concat $dbs $dbs2] {
+ set opt $oa($i)
+ set db [eval {berkdb_open -unknown} $opt [n_to_name $i]]
+ error_check_good "[n_to_name $i] open" [is_valid_db $db] TRUE
+ set curs [$db cursor]
+ error_check_good "$db cursor" \
+ [is_substr $curs "$db.c"] 1
+ lappend dblist $db
+ lappend curslist $curs
+
+ if { $ndx > 0 } {
+ set realkey [concat $key[n_to_name $i]]
+ } else {
+ set realkey [concat $key2[n_to_name $i]]
+ }
+
+ set pair [$curs get -set $realkey]
+ error_check_good cursor_set:$realkey:$pair \
+ [llength [lindex $pair 0]] 2
+
+ incr ndx -1
+ }
+
+ set join_curs [eval {$p join} $curslist]
+ error_check_good join_cursor \
+ [is_substr $join_curs "$p.c"] 1
+
+	# Calculate how many dups we expect.
+	# We go through the list of indices.  If we find a 0, then we
+	# expect 0 dups.  For everything else, we look at pairs of numbers:
+	# if one divides the other, we take the number of times the larger
+	# goes into 50; otherwise we compute their least common multiple
+	# and figure out how many times that goes into 50.
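+	#
+	# Worked example (matching the databases built above): for dbs
+	# "2 3", neither divides the other, so last becomes lcm(2,3) = 6
+	# and expected = 50 / 6 = 8 -- the multiples of 6 common to
+	# join2.db (0,2,...,48) and join3.db (0,3,...,45).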
+ set expected 50
+ set last 1
+ foreach n [concat $dbs $dbs2] {
+ if { $n == 0 } {
+ set expected 0
+ break
+ }
+ if { $last == $n } {
+ continue
+ }
+
+ if { [expr $last % $n] == 0 || [expr $n % $last] == 0 } {
+ if { $n > $last } {
+ set last $n
+ set expected [expr 50 / $last]
+ }
+ } else {
+ set last [expr $n * $last / [gcd $n $last]]
+ set expected [expr 50 / $last]
+ }
+ }
+
+ # If $with_dup_dups is greater than zero, each datum has
+ # been inserted $with_dup_dups times. So we expect the number
+ # of dups to go up by a factor of ($with_dup_dups)^(number of databases)
+
+ if { $with_dup_dups > 0 } {
+ foreach n [concat $dbs $dbs2] {
+ set expected [expr $expected * $with_dup_dups]
+ }
+ }
+
+ set ndups 0
+ if { $flags == " -join_item"} {
+ set l 1
+ } else {
+ set flags ""
+ set l 2
+ }
+ for { set pair [eval {$join_curs get} $flags] } { \
+ [llength [lindex $pair 0]] == $l } {
+ set pair [eval {$join_curs get} $flags] } {
+ set k [lindex [lindex $pair 0] 0]
+ foreach i $dbs {
+ error_check_bad valid_dup:$i:$dbs $i 0
+ set kval [string trimleft $k 0]
+ if { [string length $kval] == 0 } {
+ set kval 0
+ }
+ error_check_good valid_dup:$i:$dbs [expr $kval % $i] 0
+ }
+ incr ndups
+ }
+ error_check_good number_of_dups:$dbs $ndups $expected
+
+ error_check_good close_primary [$p close] 0
+ foreach i $curslist {
+ error_check_good close_cursor:$i [$i close] 0
+ }
+ foreach i $dblist {
+ error_check_good close_index:$i [$i close] 0
+ }
+}
+
+proc n_to_name { n } {
+	global testdir
+ if { $n == 0 } {
+ return null.db;
+ } else {
+ return join$n.db;
+ }
+}
+
+proc gcd { a b } {
+ set g 1
+
+ for { set i 2 } { $i <= $a } { incr i } {
+ if { [expr $a % $i] == 0 && [expr $b % $i] == 0 } {
+ set g $i
+ }
+ }
+ return $g
+}
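+
+# A small usage sketch: [gcd 12 18] returns 6, so do_join above computes
+# lcm(12, 18) as 12 * 18 / 6 = 36.  Trial division is slow in general but
+# fine for the small index numbers used here.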
diff --git a/storage/bdb/test/lock001.tcl b/storage/bdb/test/lock001.tcl
new file mode 100644
index 00000000000..1afcc471fc1
--- /dev/null
+++ b/storage/bdb/test/lock001.tcl
@@ -0,0 +1,122 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: lock001.tcl,v 11.19 2002/04/25 19:30:28 sue Exp $
+#
+
+# TEST lock001
+# TEST Make sure that the basic lock tests work. Do some simple gets
+# TEST and puts for a single locker.
+proc lock001 { {iterations 1000} {maxlocks 1000} } {
+ source ./include.tcl
+ global lock_curid
+ global lock_maxid
+
+ set save_curid $lock_curid
+ set save_maxid $lock_maxid
+
+ # Set defaults
+ # Adjusted to make exact match of isqrt
+ #set conflicts { 3 0 0 0 0 0 1 0 1 1}
+ #set conflicts { 3 0 0 0 0 1 0 1 1}
+
+ set conflicts { 0 0 0 0 0 1 0 1 1}
+ set nmodes [isqrt [llength $conflicts]]
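+	# The list reads as an nmodes x nmodes matrix in row-major order
+	# (an assumption based on the -lock_conflict argument below):
+	# with 9 entries and 3 modes, the only conflicts set are
+	# read/write and write/write.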
+
+ # Cleanup
+ env_cleanup $testdir
+
+ # Open the region we'll use for testing.
+ set eflags "-create -lock -home $testdir -mode 0644 \
+ -lock_max $maxlocks -lock_conflict {$nmodes {$conflicts}}"
+ set env [eval {berkdb_env} $eflags]
+ error_check_good env [is_valid_env $env] TRUE
+ error_check_good lock_id_set \
+ [$env lock_id_set $lock_curid $lock_maxid] 0
+
+ puts "Lock001: test basic lock operations"
+ set locker [$env lock_id]
+ # Get and release each type of lock
+ puts "\tLock001.a: get and release each type of lock"
+ foreach m {ng write read} {
+ set obj obj$m
+ set lockp [$env lock_get $m $locker $obj]
+ error_check_good lock_get:a [is_blocked $lockp] 0
+ error_check_good lock_get:a [is_substr $lockp $env] 1
+ set ret [ $lockp put ]
+ error_check_good lock_put $ret 0
+ }
+
+ # Get a bunch of locks for the same locker; these should work
+ set obj OBJECT
+ puts "\tLock001.b: Get a bunch of locks for the same locker"
+ foreach m {ng write read} {
+ set lockp [$env lock_get $m $locker $obj ]
+ lappend locklist $lockp
+ error_check_good lock_get:b [is_blocked $lockp] 0
+ error_check_good lock_get:b [is_substr $lockp $env] 1
+ }
+ release_list $locklist
+
+ set locklist {}
+ # Check that reference counted locks work
+ puts "\tLock001.c: reference counted locks."
+ for {set i 0} { $i < 10 } {incr i} {
+ set lockp [$env lock_get -nowait write $locker $obj]
+ error_check_good lock_get:c [is_blocked $lockp] 0
+ error_check_good lock_get:c [is_substr $lockp $env] 1
+ lappend locklist $lockp
+ }
+ release_list $locklist
+
+ # Finally try some failing locks
+ set locklist {}
+ foreach i {ng write read} {
+ set lockp [$env lock_get $i $locker $obj]
+ lappend locklist $lockp
+ error_check_good lock_get:d [is_blocked $lockp] 0
+ error_check_good lock_get:d [is_substr $lockp $env] 1
+ }
+
+ # Change the locker
+ set locker [$env lock_id]
+ set blocklist {}
+ # Skip NO_LOCK lock.
+ puts "\tLock001.d: Change the locker, acquire read and write."
+ foreach i {write read} {
+ catch {$env lock_get -nowait $i $locker $obj} ret
+ error_check_good lock_get:e [is_substr $ret "not granted"] 1
+ #error_check_good lock_get:e [is_substr $lockp $env] 1
+ #error_check_good lock_get:e [is_blocked $lockp] 0
+ }
+ # Now release original locks
+ release_list $locklist
+
+ # Now re-acquire blocking locks
+ set locklist {}
+ puts "\tLock001.e: Re-acquire blocking locks."
+ foreach i {write read} {
+ set lockp [$env lock_get -nowait $i $locker $obj ]
+ error_check_good lock_get:f [is_substr $lockp $env] 1
+ error_check_good lock_get:f [is_blocked $lockp] 0
+ lappend locklist $lockp
+ }
+
+ # Now release new locks
+ release_list $locklist
+ error_check_good free_id [$env lock_id_free $locker] 0
+
+ error_check_good envclose [$env close] 0
+
+}
+
+# Blocked locks appear as lockmgrN.lockM\nBLOCKED
+proc is_blocked { l } {
+ if { [string compare $l BLOCKED ] == 0 } {
+ return 1
+ } else {
+ return 0
+ }
+}
diff --git a/storage/bdb/test/lock002.tcl b/storage/bdb/test/lock002.tcl
new file mode 100644
index 00000000000..a1ad8760c9d
--- /dev/null
+++ b/storage/bdb/test/lock002.tcl
@@ -0,0 +1,157 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: lock002.tcl,v 11.19 2002/04/25 19:30:29 sue Exp $
+#
+# TEST lock002
+# TEST Exercise basic multi-process aspects of lock.
+proc lock002 { {maxlocks 1000} {conflicts {0 0 0 0 0 1 0 1 1} } } {
+ source ./include.tcl
+
+ puts "Lock002: Basic multi-process lock tests."
+
+ env_cleanup $testdir
+
+ set nmodes [isqrt [llength $conflicts]]
+
+ # Open the lock
+ mlock_open $maxlocks $nmodes $conflicts
+ mlock_wait
+}
+
+# Make sure that we can create a region; destroy it, attach to it,
+# detach from it, etc.
+proc mlock_open { maxl nmodes conflicts } {
+ source ./include.tcl
+ global lock_curid
+ global lock_maxid
+
+ puts "\tLock002.a multi-process open/close test"
+
+ # Open/Create region here. Then close it and try to open from
+ # other test process.
+ set env_cmd [concat "berkdb_env -create -mode 0644 \
+ -lock -lock_max $maxl -lock_conflict" \
+ [list [list $nmodes $conflicts]] "-home $testdir"]
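+	# (The nested [list ...] keeps the {nmodes {conflicts}} pair
+	# together as the single -lock_conflict argument when the
+	# command string is eval'd.)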
+ set local_env [eval $env_cmd]
+ $local_env lock_id_set $lock_curid $lock_maxid
+ error_check_good env_open [is_valid_env $local_env] TRUE
+
+ set ret [$local_env close]
+ error_check_good env_close $ret 0
+
+ # Open from other test process
+ set env_cmd "berkdb_env -mode 0644 -home $testdir"
+
+ set f1 [open |$tclsh_path r+]
+ puts $f1 "source $test_path/test.tcl"
+
+ set remote_env [send_cmd $f1 $env_cmd]
+ error_check_good remote:env_open [is_valid_env $remote_env] TRUE
+
+ # Now make sure that we can reopen the region.
+ set local_env [eval $env_cmd]
+ error_check_good env_open [is_valid_env $local_env] TRUE
+ set ret [$local_env close]
+ error_check_good env_close $ret 0
+
+ # Try closing the remote region
+ set ret [send_cmd $f1 "$remote_env close"]
+ error_check_good remote:lock_close $ret 0
+
+	# Try opening with -create; this succeeds because the region
+	# already exists.
+ set env_cmd [concat "berkdb_env -create -mode 0644 \
+ -lock -lock_max $maxl -lock_conflict" \
+ [list [list $nmodes $conflicts]] "-home $testdir"]
+ set local_env [eval $env_cmd]
+ error_check_good remote:env_open [is_valid_env $local_env] TRUE
+
+ # close locally
+ reset_env $local_env
+
+ # Close and exit remote
+ set ret [send_cmd $f1 "reset_env $remote_env"]
+
+ catch { close $f1 } result
+}
+
+proc mlock_wait { } {
+ source ./include.tcl
+
+ puts "\tLock002.b multi-process get/put wait test"
+
+ # Open region locally
+ set env_cmd "berkdb_env -lock -home $testdir"
+ set local_env [eval $env_cmd]
+ error_check_good env_open [is_valid_env $local_env] TRUE
+
+ # Open region remotely
+ set f1 [open |$tclsh_path r+]
+
+ puts $f1 "source $test_path/test.tcl"
+
+ set remote_env [send_cmd $f1 $env_cmd]
+ error_check_good remote:env_open [is_valid_env $remote_env] TRUE
+
+ # Get a write lock locally; try for the read lock
+ # remotely. We hold the locks for several seconds
+ # so that we can use timestamps to figure out if the
+ # other process waited.
+ set locker1 [$local_env lock_id]
+ set local_lock [$local_env lock_get write $locker1 object1]
+ error_check_good lock_get [is_valid_lock $local_lock $local_env] TRUE
+
+ # Now request a lock that we expect to hang; generate
+ # timestamps so we can tell if it actually hangs.
+ set locker2 [send_cmd $f1 "$remote_env lock_id"]
+ set remote_lock [send_timed_cmd $f1 1 \
+ "set lock \[$remote_env lock_get write $locker2 object1\]"]
+
+ # Now sleep before releasing lock
+ tclsleep 5
+ set result [$local_lock put]
+ error_check_good lock_put $result 0
+
+ # Now get the result from the other script
+ set result [rcv_result $f1]
+ error_check_good lock_get:remote_time [expr $result > 4] 1
+
+ # Now get the remote lock
+ set remote_lock [send_cmd $f1 "puts \$lock"]
+ error_check_good remote:lock_get \
+ [is_valid_lock $remote_lock $remote_env] TRUE
+
+	# Now make the other process wait 5 seconds and then release
+	# its lock while we try to get a write lock on it.
+ set start [timestamp -r]
+
+ set ret [send_cmd $f1 "tclsleep 5"]
+
+ set ret [send_cmd $f1 "$remote_lock put"]
+
+ set local_lock [$local_env lock_get write $locker1 object1]
+ error_check_good lock_get:time \
+ [expr [expr [timestamp -r] - $start] > 2] 1
+ error_check_good lock_get:local \
+ [is_valid_lock $local_lock $local_env] TRUE
+
+ # Now check remote's result
+ set result [rcv_result $f1]
+ error_check_good lock_put:remote $result 0
+
+ # Clean up remote
+ set result [send_cmd $f1 "$remote_env lock_id_free $locker2" ]
+ error_check_good remote_free_id $result 0
+ set ret [send_cmd $f1 "reset_env $remote_env"]
+
+ close $f1
+
+ # Now close up locally
+ set ret [$local_lock put]
+ error_check_good lock_put $ret 0
+ error_check_good lock_id_free [$local_env lock_id_free $locker1] 0
+
+ reset_env $local_env
+}
diff --git a/storage/bdb/test/lock003.tcl b/storage/bdb/test/lock003.tcl
new file mode 100644
index 00000000000..91a8a2e90f6
--- /dev/null
+++ b/storage/bdb/test/lock003.tcl
@@ -0,0 +1,99 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: lock003.tcl,v 11.25 2002/09/05 17:23:06 sandstro Exp $
+#
+# TEST lock003
+# TEST Exercise multi-process aspects of lock. Generate a bunch of parallel
+# TEST testers that try to randomly obtain locks; make sure that the locks
+# TEST correctly protect corresponding objects.
+proc lock003 { {iter 500} {max 1000} {procs 5} } {
+ source ./include.tcl
+ global lock_curid
+ global lock_maxid
+
+ set ldegree 5
+ set objs 75
+ set reads 65
+ set wait 1
+ set conflicts { 0 0 0 0 0 1 0 1 1}
+ set seeds {}
+
+ puts "Lock003: Multi-process random lock test"
+
+ # Clean up after previous runs
+ env_cleanup $testdir
+
+ # Open/create the lock region
+ puts "\tLock003.a: Create environment"
+ set e [berkdb_env -create -lock -home $testdir]
+ error_check_good env_open [is_substr $e env] 1
+ $e lock_id_set $lock_curid $lock_maxid
+
+ error_check_good env_close [$e close] 0
+
+ # Now spawn off processes
+ set pidlist {}
+
+ for { set i 0 } {$i < $procs} {incr i} {
+ if { [llength $seeds] == $procs } {
+ set s [lindex $seeds $i]
+ }
+# puts "$tclsh_path\
+# $test_path/wrap.tcl \
+# lockscript.tcl $testdir/$i.lockout\
+# $testdir $iter $objs $wait $ldegree $reads &"
+ set p [exec $tclsh_path $test_path/wrap.tcl \
+ lockscript.tcl $testdir/lock003.$i.out \
+ $testdir $iter $objs $wait $ldegree $reads &]
+ lappend pidlist $p
+ }
+
+ puts "\tLock003.b: $procs independent processes now running"
+ watch_procs $pidlist 30 10800
+
+ # Check for test failure
+ set e [eval findfail [glob $testdir/lock003.*.out]]
+ error_check_good "FAIL: error message(s) in log files" $e 0
+
+ # Remove log files
+ for { set i 0 } {$i < $procs} {incr i} {
+ fileremove -f $testdir/lock003.$i.out
+ }
+}
+
+# Create and destroy flag files to show we have an object locked, and
+# verify that the correct files exist or don't exist given that we've
+# just read or write locked a file.
+proc lock003_create { rw obj } {
+ source ./include.tcl
+
+ set pref $testdir/L3FLAG
+ set f [open $pref.$rw.[pid].$obj w]
+ close $f
+}
+
+proc lock003_destroy { obj } {
+ source ./include.tcl
+
+ set pref $testdir/L3FLAG
+ set f [glob -nocomplain $pref.*.[pid].$obj]
+ error_check_good l3_destroy [llength $f] 1
+ fileremove $f
+}
+
+proc lock003_vrfy { rw obj } {
+ source ./include.tcl
+
+ set pref $testdir/L3FLAG
+ if { [string compare $rw "write"] == 0 } {
+ set fs [glob -nocomplain $pref.*.*.$obj]
+ error_check_good "number of other locks on $obj" [llength $fs] 0
+ } else {
+ set fs [glob -nocomplain $pref.write.*.$obj]
+ error_check_good "number of write locks on $obj" [llength $fs] 0
+ }
+}
+
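+# A typical locker cycle, as lockscript.tcl uses these below (sketch):
+#	lock003_vrfy write $obj		;# no other flags may exist
+#	lock003_create write $obj	;# flag our own lock
+#	...critical section...
+#	lock003_destroy $obj		;# remove our flag
+#	lock003_vrfy write $obj		;# reverify once our flag is gone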
diff --git a/storage/bdb/test/lock004.tcl b/storage/bdb/test/lock004.tcl
new file mode 100644
index 00000000000..7fd51ee42f2
--- /dev/null
+++ b/storage/bdb/test/lock004.tcl
@@ -0,0 +1,29 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: lock004.tcl,v 11.5 2002/04/25 19:30:30 sue Exp $
+#
+# TEST lock004
+# TEST Test locker ids wrapping around.
+
+proc lock004 {} {
+ source ./include.tcl
+ global lock_curid
+ global lock_maxid
+
+ set save_curid $lock_curid
+ set save_maxid $lock_maxid
+
+ set lock_curid [expr $lock_maxid - 1]
+ puts "Lock004: Locker id wraparound test"
+ puts "\tLock004.a: repeat lock001-lock003 with wraparound lockids"
+
+ lock001
+ lock002
+ lock003
+
+ set lock_curid $save_curid
+ set lock_maxid $save_maxid
+}
diff --git a/storage/bdb/test/lock005.tcl b/storage/bdb/test/lock005.tcl
new file mode 100644
index 00000000000..5afe7344d36
--- /dev/null
+++ b/storage/bdb/test/lock005.tcl
@@ -0,0 +1,177 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2001
+# Sleepycat Software. All rights reserved.
+#
+# $Id: lock005.tcl,v 1.7 2002/08/08 15:38:07 bostic Exp $
+#
+# TEST lock005
+# TEST Check that page locks are being released properly.
+
+proc lock005 { } {
+ source ./include.tcl
+
+ puts "Lock005: Page lock release test"
+
+ # Clean up after previous runs
+ env_cleanup $testdir
+
+ # Open/create the lock region
+ set e [berkdb_env -create -lock -home $testdir -txn -log]
+ error_check_good env_open [is_valid_env $e] TRUE
+
+ # Open/create the database
+ set db [berkdb open -create -auto_commit -env $e -len 10 -queue q.db]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+	# Check that records are being locked by trying to
+	# fetch a record from the wrong transaction.
+ puts "\tLock005.a: Verify that we are locking"
+
+ # Start the first transaction
+ set txn1 [$e txn -nowait]
+ error_check_good txn_begin [is_valid_txn $txn1 $e] TRUE
+ set ret [catch {$db put -txn $txn1 -append record1} recno1]
+ error_check_good dbput_txn1 $ret 0
+
+ # Start second txn while the first is still running ...
+ set txn2 [$e txn -nowait]
+ error_check_good txn_begin [is_valid_txn $txn2 $e] TRUE
+
+ # ... and try to get a record from the first txn (should fail)
+ set ret [catch {$db get -txn $txn2 $recno1} res]
+ error_check_good dbget_wrong_record \
+ [is_substr $res "Lock not granted"] 1
+
+ # End transactions
+ error_check_good txn1commit [$txn1 commit] 0
+ how_many_locks 1 $e
+ error_check_good txn2commit [$txn2 commit] 0
+ # The number of locks stays the same here because the first
+ # lock is released and the second lock was never granted.
+ how_many_locks 1 $e
+
+ # Test lock behavior for both abort and commit
+ puts "\tLock005.b: Verify locks after abort or commit"
+ foreach endorder {forward reverse} {
+ end_order_test $db $e commit abort $endorder
+ end_order_test $db $e abort commit $endorder
+ end_order_test $db $e commit commit $endorder
+ end_order_test $db $e abort abort $endorder
+ }
+
+ # Clean up
+ error_check_good db_close [$db close] 0
+ error_check_good env_close [$e close] 0
+}
+
+proc end_order_test { db e txn1end txn2end endorder } {
+ # Start one transaction
+ set txn1 [$e txn -nowait]
+ error_check_good txn_begin [is_valid_txn $txn1 $e] TRUE
+ set ret [catch {$db put -txn $txn1 -append record1} recno1]
+ error_check_good dbput_txn1 $ret 0
+
+ # Check number of locks
+ how_many_locks 2 $e
+
+ # Start a second transaction while first is still running
+ set txn2 [$e txn -nowait]
+ error_check_good txn_begin [is_valid_txn $txn2 $e] TRUE
+ set ret [catch {$db put -txn $txn2 -append record2} recno2]
+ error_check_good dbput_txn2 $ret 0
+ how_many_locks 3 $e
+
+ # Now commit or abort one txn and make sure the other is okay
+ if {$endorder == "forward"} {
+ # End transaction 1 first
+ puts "\tLock005.b.1: $txn1end txn1 then $txn2end txn2"
+ error_check_good txn_$txn1end [$txn1 $txn1end] 0
+ how_many_locks 2 $e
+
+ # txn1 is now ended, but txn2 is still running
+ set ret1 [catch {$db get -txn $txn2 $recno1} res1]
+ set ret2 [catch {$db get -txn $txn2 $recno2} res2]
+ if { $txn1end == "commit" } {
+ error_check_good txn2_sees_txn1 $ret1 0
+ error_check_good txn2_sees_txn2 $ret2 0
+ } else {
+ # transaction 1 was aborted
+ error_check_good txn2_cantsee_txn1 [llength $res1] 0
+ }
+
+ # End transaction 2 second
+ error_check_good txn_$txn2end [$txn2 $txn2end] 0
+ how_many_locks 1 $e
+
+ # txn1 and txn2 should both now be invalid
+ # The get no longer needs to be transactional
+ set ret3 [catch {$db get $recno1} res3]
+ set ret4 [catch {$db get $recno2} res4]
+
+ if { $txn2end == "commit" } {
+ error_check_good txn2_sees_txn1 $ret3 0
+ error_check_good txn2_sees_txn2 $ret4 0
+ error_check_good txn2_has_record2 \
+ [is_substr $res4 "record2"] 1
+ } else {
+ # transaction 2 was aborted
+ error_check_good txn2_cantsee_txn1 $ret3 0
+ error_check_good txn2_aborted [llength $res4] 0
+ }
+
+ } elseif { $endorder == "reverse" } {
+ # End transaction 2 first
+ puts "\tLock005.b.2: $txn2end txn2 then $txn1end txn1"
+ error_check_good txn_$txn2end [$txn2 $txn2end] 0
+ how_many_locks 2 $e
+
+ # txn2 is ended, but txn1 is still running
+ set ret1 [catch {$db get -txn $txn1 $recno1} res1]
+ set ret2 [catch {$db get -txn $txn1 $recno2} res2]
+ if { $txn2end == "commit" } {
+ error_check_good txn1_sees_txn1 $ret1 0
+ error_check_good txn1_sees_txn2 $ret2 0
+ } else {
+ # transaction 2 was aborted
+ error_check_good txn1_cantsee_txn2 [llength $res2] 0
+ }
+
+ # End transaction 1 second
+ error_check_good txn_$txn1end [$txn1 $txn1end] 0
+ how_many_locks 1 $e
+
+ # txn1 and txn2 should both now be invalid
+ # The get no longer needs to be transactional
+ set ret3 [catch {$db get $recno1} res3]
+ set ret4 [catch {$db get $recno2} res4]
+
+ if { $txn1end == "commit" } {
+ error_check_good txn1_sees_txn1 $ret3 0
+ error_check_good txn1_sees_txn2 $ret4 0
+ error_check_good txn1_has_record1 \
+ [is_substr $res3 "record1"] 1
+ } else {
+ # transaction 1 was aborted
+ error_check_good txn1_cantsee_txn2 $ret4 0
+ error_check_good txn1_aborted [llength $res3] 0
+ }
+ }
+}
+
+proc how_many_locks { expected env } {
+ set stat [$env lock_stat]
+ set str "Current number of locks"
+ set checked 0
+ foreach statpair $stat {
+ if { $checked == 1 } {
+ break
+ }
+ if { [is_substr [lindex $statpair 0] $str] != 0} {
+ set checked 1
+ set nlocks [lindex $statpair 1]
+ error_check_good expected_nlocks $nlocks $expected
+ }
+ }
+ error_check_good checked $checked 1
+}
diff --git a/storage/bdb/test/lockscript.tcl b/storage/bdb/test/lockscript.tcl
new file mode 100644
index 00000000000..812339a4a70
--- /dev/null
+++ b/storage/bdb/test/lockscript.tcl
@@ -0,0 +1,117 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: lockscript.tcl,v 11.17 2002/02/20 17:08:23 sandstro Exp $
+#
+# Random lock tester.
+# Usage: lockscript dir numiters numobjs sleepint degree readratio
+# dir: lock directory.
+# numiters: Total number of iterations.
+# numobjs: Number of objects on which to lock.
+# sleepint: Maximum sleep interval.
+# degree: Maximum number of locks to acquire at once
+# readratio: Percent of locks that should be reads.
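+#
+# Example invocation (matching the parameters lock003 passes):
+#	tclsh lockscript.tcl TESTDIR 500 75 1 5 65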
+
+source ./include.tcl
+source $test_path/test.tcl
+
+set usage "lockscript dir numiters numobjs sleepint degree readratio"
+
+# Verify usage
+if { $argc != 6 } {
+ puts stderr "FAIL:[timestamp] Usage: $usage"
+ exit
+}
+
+# Initialize arguments
+set dir [lindex $argv 0]
+set numiters [ lindex $argv 1 ]
+set numobjs [ lindex $argv 2 ]
+set sleepint [ lindex $argv 3 ]
+set degree [ lindex $argv 4 ]
+set readratio [ lindex $argv 5 ]
+
+# Initialize random number generator
+global rand_init
+berkdb srand $rand_init
+
+
+catch { berkdb_env -create -lock -home $dir } e
+error_check_good env_open [is_substr $e env] 1
+catch { $e lock_id } locker
+error_check_good locker [is_valid_locker $locker] TRUE
+
+puts -nonewline "Beginning execution for $locker: $numiters $numobjs "
+puts "$sleepint $degree $readratio"
+flush stdout
+
+for { set iter 0 } { $iter < $numiters } { incr iter } {
+ set nlocks [berkdb random_int 1 $degree]
+ # We will always lock objects in ascending order to avoid
+ # deadlocks.
+ set lastobj 1
+ set locklist {}
+ set objlist {}
+ for { set lnum 0 } { $lnum < $nlocks } { incr lnum } {
+ # Pick lock parameters
+ set obj [berkdb random_int $lastobj $numobjs]
+ set lastobj [expr $obj + 1]
+ set x [berkdb random_int 1 100 ]
+ if { $x <= $readratio } {
+ set rw read
+ } else {
+ set rw write
+ }
+ puts "[timestamp -c] $locker $lnum: $rw $obj"
+
+ # Do get; add to list
+ catch {$e lock_get $rw $locker $obj} lockp
+ error_check_good lock_get [is_valid_lock $lockp $e] TRUE
+
+ # Create a file to flag that we've a lock of the given
+ # type, after making sure only other read locks exist
+ # (if we're read locking) or no other locks exist (if
+ # we're writing).
+ lock003_vrfy $rw $obj
+ lock003_create $rw $obj
+ lappend objlist [list $obj $rw]
+
+ lappend locklist $lockp
+ if {$lastobj > $numobjs} {
+ break
+ }
+ }
+ # Pick sleep interval
+ puts "[timestamp -c] $locker sleeping"
+ # We used to sleep 1 to $sleepint seconds. This makes the test
+ # run for hours. Instead, make it sleep for 10 to $sleepint * 100
+ # milliseconds, for a maximum sleep time of 0.5 s.
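+	# (With the wait value of 1 that lock003 passes, that is a nap
+	# of 10 to 100 ms.)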
+ after [berkdb random_int 10 [expr $sleepint * 100]]
+ puts "[timestamp -c] $locker awake"
+
+ # Now release locks
+	puts "[timestamp -c] $locker releasing locks"
+
+ # Delete our locking flag files, then reverify. (Note that the
+ # locking flag verification function assumes that our own lock
+ # is not currently flagged.)
+ foreach pair $objlist {
+ set obj [lindex $pair 0]
+ set rw [lindex $pair 1]
+ lock003_destroy $obj
+ lock003_vrfy $rw $obj
+ }
+
+ release_list $locklist
+ flush stdout
+}
+
+set ret [$e close]
+error_check_good env_close $ret 0
+
+puts "[timestamp -c] $locker Complete"
+flush stdout
+
+exit
diff --git a/storage/bdb/test/log001.tcl b/storage/bdb/test/log001.tcl
new file mode 100644
index 00000000000..87df780cb5a
--- /dev/null
+++ b/storage/bdb/test/log001.tcl
@@ -0,0 +1,120 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: log001.tcl,v 11.29 2002/04/30 20:27:56 sue Exp $
+#
+
+# TEST log001
+# TEST Read/write log records.
+proc log001 { } {
+ global passwd
+ global rand_init
+
+ berkdb srand $rand_init
+ set iter 1000
+ set max [expr 1024 * 128]
+ log001_body $max $iter 1
+ log001_body $max $iter 0
+ log001_body $max $iter 1 "-encryptaes $passwd"
+ log001_body $max $iter 0 "-encryptaes $passwd"
+ log001_body $max [expr $iter * 15] 1
+ log001_body $max [expr $iter * 15] 0
+ log001_body $max [expr $iter * 15] 1 "-encryptaes $passwd"
+ log001_body $max [expr $iter * 15] 0 "-encryptaes $passwd"
+}
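+
+# (log001_body is exercised eight ways above: fixed- and variable-length
+# records, plain and AES-encrypted, with 1000 and 15000 records each.)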
+
+proc log001_body { max nrecs fixedlength {encargs ""} } {
+ source ./include.tcl
+
+ puts -nonewline "Log001: Basic put/get log records "
+ if { $fixedlength == 1 } {
+ puts "(fixed-length $encargs)"
+ } else {
+ puts "(variable-length $encargs)"
+ }
+
+ env_cleanup $testdir
+
+ set env [eval {berkdb_env -log -create -home $testdir -mode 0644} \
+ $encargs -log_max $max]
+ error_check_good envopen [is_valid_env $env] TRUE
+
+ # We will write records to the log and make sure we can
+ # read them back correctly. We'll use a standard pattern
+ # repeated some number of times for each record.
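+	# For example, record 12 is "12:logrec:12" repeated
+	# (12 % 10) + 1 = 3 times, plus random padding in the
+	# variable-length case.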
+ set lsn_list {}
+ set rec_list {}
+ puts "\tLog001.a: Writing $nrecs log records"
+ for { set i 0 } { $i < $nrecs } { incr i } {
+ set rec ""
+ for { set j 0 } { $j < [expr $i % 10 + 1] } {incr j} {
+ set rec $rec$i:logrec:$i
+ }
+ if { $fixedlength != 1 } {
+ set rec $rec:[random_data 237 0 0]
+ }
+ set lsn [$env log_put $rec]
+ error_check_bad log_put [is_substr $lsn log_cmd] 1
+ lappend lsn_list $lsn
+ lappend rec_list $rec
+ }
+
+ # Open a log cursor.
+ set logc [$env log_cursor]
+ error_check_good logc [is_valid_logc $logc $env] TRUE
+
+ puts "\tLog001.b: Retrieving log records sequentially (forward)"
+ set i 0
+ for { set grec [$logc get -first] } { [llength $grec] != 0 } {
+ set grec [$logc get -next]} {
+ error_check_good log_get:seq [lindex $grec 1] \
+ [lindex $rec_list $i]
+ incr i
+ }
+
+ puts "\tLog001.c: Retrieving log records sequentially (backward)"
+ set i [llength $rec_list]
+ for { set grec [$logc get -last] } { [llength $grec] != 0 } {
+ set grec [$logc get -prev] } {
+ incr i -1
+ error_check_good \
+ log_get:seq [lindex $grec 1] [lindex $rec_list $i]
+ }
+
+ puts "\tLog001.d: Retrieving log records sequentially by LSN"
+ set i 0
+ foreach lsn $lsn_list {
+ set grec [$logc get -set $lsn]
+ error_check_good \
+ log_get:seq [lindex $grec 1] [lindex $rec_list $i]
+ incr i
+ }
+
+ puts "\tLog001.e: Retrieving log records randomly by LSN"
+ set m [expr [llength $lsn_list] - 1]
+ for { set i 0 } { $i < $nrecs } { incr i } {
+ set recno [berkdb random_int 0 $m ]
+ set lsn [lindex $lsn_list $recno]
+ set grec [$logc get -set $lsn]
+ error_check_good \
+ log_get:seq [lindex $grec 1] [lindex $rec_list $recno]
+ }
+
+ puts "\tLog001.f: Retrieving first/current, last/current log record"
+ set grec [$logc get -first]
+ error_check_good log_get:seq [lindex $grec 1] [lindex $rec_list 0]
+ set grec [$logc get -current]
+ error_check_good log_get:seq [lindex $grec 1] [lindex $rec_list 0]
+ set i [expr [llength $rec_list] - 1]
+ set grec [$logc get -last]
+ error_check_good log_get:seq [lindex $grec 1] [lindex $rec_list $i]
+ set grec [$logc get -current]
+ error_check_good log_get:seq [lindex $grec 1] [lindex $rec_list $i]
+
+ # Close and unlink the file
+ error_check_good log_cursor:close:$logc [$logc close] 0
+ error_check_good env:close [$env close] 0
+ error_check_good envremove [berkdb envremove -home $testdir] 0
+}
diff --git a/storage/bdb/test/log002.tcl b/storage/bdb/test/log002.tcl
new file mode 100644
index 00000000000..6e91f55398f
--- /dev/null
+++ b/storage/bdb/test/log002.tcl
@@ -0,0 +1,85 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: log002.tcl,v 11.28 2002/04/29 20:07:54 sue Exp $
+#
+
+# TEST log002
+# TEST Tests multiple logs
+# TEST Log truncation
+# TEST LSN comparison and file functionality.
+proc log002 { } {
+ source ./include.tcl
+
+ puts "Log002: Multiple log test w/trunc, file, compare functionality"
+
+ env_cleanup $testdir
+
+ set max [expr 1024 * 128]
+ set env [berkdb_env -create -home $testdir -mode 0644 \
+ -log -log_max $max]
+ error_check_good envopen [is_valid_env $env] TRUE
+
+	# We'll record every hundredth record for later use
+ set info_list {}
+
+ puts "\tLog002.a: Writing log records"
+ set i 0
+ for {set s 0} { $s < [expr 3 * $max] } { incr s $len } {
+ set rec [random_data 120 0 0]
+ set len [string length $rec]
+ set lsn [$env log_put $rec]
+
+ if { [expr $i % 100 ] == 0 } {
+ lappend info_list [list $lsn $rec]
+ }
+ incr i
+ }
+
+ puts "\tLog002.b: Checking log_compare"
+ set last {0 0}
+ foreach p $info_list {
+ set l [lindex $p 0]
+ if { [llength $last] != 0 } {
+ error_check_good \
+ log_compare [$env log_compare $l $last] 1
+ error_check_good \
+ log_compare [$env log_compare $last $l] -1
+ error_check_good \
+ log_compare [$env log_compare $l $l] 0
+ }
+ set last $l
+ }
+
+ puts "\tLog002.c: Checking log_file"
+ set flist [glob $testdir/log*]
+ foreach p $info_list {
+
+ set lsn [lindex $p 0]
+ set f [$env log_file $lsn]
+
+ # Change all backslash separators on Windows to forward slash
+ # separators, which is what the rest of the test suite expects.
+ regsub -all {\\} $f {/} f
+
+ error_check_bad log_file:$f [lsearch $flist $f] -1
+ }
+
+ puts "\tLog002.d: Verifying records"
+
+ set logc [$env log_cursor]
+ error_check_good log_cursor [is_valid_logc $logc $env] TRUE
+
+ for {set i [expr [llength $info_list] - 1] } { $i >= 0 } { incr i -1} {
+ set p [lindex $info_list $i]
+ set grec [$logc get -set [lindex $p 0]]
+ error_check_good log_get:$env [lindex $grec 1] [lindex $p 1]
+ }
+
+ # Close and unlink the file
+ error_check_good log_cursor:close:$logc [$logc close] 0
+ error_check_good env:close [$env close] 0
+ error_check_good envremove [berkdb envremove -home $testdir] 0
+}
diff --git a/storage/bdb/test/log003.tcl b/storage/bdb/test/log003.tcl
new file mode 100644
index 00000000000..11297b59d50
--- /dev/null
+++ b/storage/bdb/test/log003.tcl
@@ -0,0 +1,118 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: log003.tcl,v 11.28 2002/04/29 20:07:54 sue Exp $
+#
+
+# TEST log003
+# TEST Verify that log_flush is flushing records correctly.
+proc log003 { } {
+ source ./include.tcl
+
+ puts "Log003: Verify log_flush behavior"
+
+ set max [expr 1024 * 128]
+ env_cleanup $testdir
+ set short_rec "abcdefghijklmnopqrstuvwxyz"
+ set long_rec [repeat $short_rec 200]
+ set very_long_rec [repeat $long_rec 4]
+
+ foreach rec "$short_rec $long_rec $very_long_rec" {
+ puts "\tLog003.a: Verify flush on [string length $rec] byte rec"
+
+ set env [berkdb_env -log -home $testdir \
+ -create -mode 0644 -log_max $max]
+ error_check_good envopen [is_valid_env $env] TRUE
+
+ set lsn [$env log_put $rec]
+ error_check_bad log_put [lindex $lsn 0] "ERROR:"
+ set ret [$env log_flush $lsn]
+ error_check_good log_flush $ret 0
+
+ # Now, we want to crash the region and recheck. Closing the
+ # log does not flush any records, so we'll use a close to
+ # do the "crash"
+ set ret [$env close]
+ error_check_good log_env:close $ret 0
+
+ # Now, remove the log region
+ #set ret [berkdb envremove -home $testdir]
+ #error_check_good env:remove $ret 0
+
+ # Re-open the log and try to read the record.
+ set env [berkdb_env -create -home $testdir \
+ -log -mode 0644 -log_max $max]
+ error_check_good envopen [is_valid_env $env] TRUE
+
+ set logc [$env log_cursor]
+ error_check_good log_cursor [is_valid_logc $logc $env] TRUE
+
+ set gotrec [$logc get -first]
+ error_check_good lp_get [lindex $gotrec 1] $rec
+
+ # Close and unlink the file
+ error_check_good log_cursor:close:$logc [$logc close] 0
+ error_check_good env:close:$env [$env close] 0
+ error_check_good envremove [berkdb envremove -home $testdir] 0
+ log_cleanup $testdir
+ }
+
+ foreach rec "$short_rec $long_rec $very_long_rec" {
+ puts "\tLog003.b: \
+ Verify flush on non-last record [string length $rec]"
+ set env [berkdb_env \
+ -create -log -home $testdir -mode 0644 -log_max $max]
+ error_check_good envopen [is_valid_env $env] TRUE
+
+ # Put 10 random records
+ for { set i 0 } { $i < 10 } { incr i} {
+ set r [random_data 450 0 0]
+ set lsn [$env log_put $r]
+ error_check_bad log_put [lindex $lsn 0] "ERROR:"
+ }
+
+ # Put the record we are interested in
+ set save_lsn [$env log_put $rec]
+ error_check_bad log_put [lindex $save_lsn 0] "ERROR:"
+
+ # Put 10 more random records
+ for { set i 0 } { $i < 10 } { incr i} {
+ set r [random_data 450 0 0]
+ set lsn [$env log_put $r]
+ error_check_bad log_put [lindex $lsn 0] "ERROR:"
+ }
+
+ # Now check the flush
+ set ret [$env log_flush $save_lsn]
+ error_check_good log_flush $ret 0
+
+ # Now, we want to crash the region and recheck. Closing the
+ # log does not flush any records, so we'll use a close to
+ # do the "crash"
+
+ #
+ # Now, close and remove the log region
+ error_check_good env:close:$env [$env close] 0
+ set ret [berkdb envremove -home $testdir]
+ error_check_good env:remove $ret 0
+
+ # Re-open the log and try to read the record.
+ set env [berkdb_env \
+ -home $testdir -create -log -mode 0644 -log_max $max]
+ error_check_good envopen [is_valid_env $env] TRUE
+
+ set logc [$env log_cursor]
+ error_check_good log_cursor [is_valid_logc $logc $env] TRUE
+
+ set gotrec [$logc get -set $save_lsn]
+ error_check_good lp_get [lindex $gotrec 1] $rec
+
+ # Close and unlink the file
+ error_check_good log_cursor:close:$logc [$logc close] 0
+ error_check_good env:close:$env [$env close] 0
+ error_check_good envremove [berkdb envremove -home $testdir] 0
+ log_cleanup $testdir
+ }
+}
diff --git a/storage/bdb/test/log004.tcl b/storage/bdb/test/log004.tcl
new file mode 100644
index 00000000000..66968a8c1b4
--- /dev/null
+++ b/storage/bdb/test/log004.tcl
@@ -0,0 +1,46 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: log004.tcl,v 11.28 2002/04/29 20:07:54 sue Exp $
+#
+
+# TEST log004
+# TEST Make sure that if we do PREVs on a log, but the beginning of the
+# TEST log has been truncated, we do the right thing.
+proc log004 { } {
+ source ./include.tcl
+
+ puts "Log004: Prev on log when beginning of log has been truncated."
+ # Use archive test to populate log
+ env_cleanup $testdir
+ puts "\tLog004.a: Call archive to populate log."
+ archive
+
+ # Delete all log files under 100
+ puts "\tLog004.b: Delete all log files under 100."
+ set ret [catch { glob $testdir/log.00000000* } result]
+ if { $ret == 0 } {
+ eval fileremove -f $result
+ }
+
+ # Now open the log and get the first record and try a prev
+ puts "\tLog004.c: Open truncated log, attempt to access missing portion."
+ set env [berkdb_env -create -log -home $testdir]
+ error_check_good envopen [is_valid_env $env] TRUE
+
+ set logc [$env log_cursor]
+ error_check_good log_cursor [is_valid_logc $logc $env] TRUE
+
+ set ret [$logc get -first]
+ error_check_bad log_get [llength $ret] 0
+
+ # This should give DB_NOTFOUND which is a ret of length 0
+ catch {$logc get -prev} ret
+ error_check_good log_get_prev [string length $ret] 0
+
+ puts "\tLog004.d: Close log and environment."
+ error_check_good log_cursor_close [$logc close] 0
+ error_check_good log_close [$env close] 0
+}
diff --git a/storage/bdb/test/log005.tcl b/storage/bdb/test/log005.tcl
new file mode 100644
index 00000000000..ab2ad703c55
--- /dev/null
+++ b/storage/bdb/test/log005.tcl
@@ -0,0 +1,89 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: log005.tcl,v 11.1 2002/05/30 22:16:49 bostic Exp $
+#
+# TEST log005
+# TEST Check that log file sizes can change on the fly.
+proc log005 { } {
+ source ./include.tcl
+
+ puts "Log005: Check that log file sizes can change."
+ env_cleanup $testdir
+
+ # Open the environment, set and check the log file size.
+ puts "\tLog005.a: open, set and check the log file size."
+ set env [berkdb_env \
+ -create -home $testdir -log_buffer 10000 -log_max 1000000 -txn]
+ error_check_good envopen [is_valid_env $env] TRUE
+ set db [berkdb_open \
+ -env $env -create -mode 0644 -btree -auto_commit a.db]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Get the current log file maximum.
+ set max [log005_stat $env "Current log file size"]
+ error_check_good max_set $max 1000000
+
+ # Reset the log file size using a second open, and make sure
+ # it changes.
+ puts "\tLog005.b: reset during open, check the log file size."
+ set envtmp [berkdb_env -home $testdir -log_max 900000 -txn]
+ error_check_good envtmp_open [is_valid_env $envtmp] TRUE
+ error_check_good envtmp_close [$envtmp close] 0
+
+ set tmp [log005_stat $env "Current log file size"]
+ error_check_good max_changed 900000 $tmp
+
+ puts "\tLog005.c: fill in the current log file size."
+ # Fill in the current log file.
+ set new_lsn 0
+ set data [repeat "a" 1024]
+ for { set i 1 } \
+ { [log005_stat $env "Current log file number"] != 2 } \
+ { incr i } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set ret [$db put -txn $t $i $data]
+ error_check_good put $ret 0
+ error_check_good txn [$t commit] 0
+
+ set last_lsn $new_lsn
+ set new_lsn [log005_stat $env "Current log file offset"]
+ }
+
+ # The last LSN in the first file should be more than our new
+ # file size.
+ error_check_good "lsn check < 900000" [expr 900000 < $last_lsn] 1
+
+ # Close down the environment.
+ error_check_good db_close [$db close] 0
+ error_check_good env_close [$env close] 0
+
+ puts "\tLog005.d: check the log file size is unchanged after recovery."
+ # Open again, running recovery. Verify the log file size is as we
+ # left it.
+ set env [berkdb_env -create -home $testdir -recover -txn]
+ error_check_good env_open [is_valid_env $env] TRUE
+
+ set tmp [log005_stat $env "Current log file size"]
+ error_check_good after_recovery 900000 $tmp
+
+ error_check_good env_close [$env close] 0
+}
+
+# log005_stat --
+# Return the current log statistics.
+proc log005_stat { env s } {
+ set stat [$env log_stat]
+ foreach statpair $stat {
+ set statmsg [lindex $statpair 0]
+ set statval [lindex $statpair 1]
+ if {[is_substr $statmsg $s] != 0} {
+ return $statval
+ }
+ }
+ puts "FAIL: log005: stat string $s not found"
+ return 0
+}
diff --git a/storage/bdb/test/logtrack.tcl b/storage/bdb/test/logtrack.tcl
new file mode 100644
index 00000000000..ad6b480b4e3
--- /dev/null
+++ b/storage/bdb/test/logtrack.tcl
@@ -0,0 +1,137 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: logtrack.tcl,v 11.11 2002/09/03 16:44:37 sue Exp $
+#
+# logtrack.tcl: A collection of routines, formerly implemented in Perl
+# as log.pl, to track which log record types the test suite hits.
+
+set ltsname "logtrack_seen.db"
+set ltlist $test_path/logtrack.list
+set tmpname "logtrack_tmp"
+
+proc logtrack_clean { } {
+ global ltsname
+
+ file delete -force $ltsname
+
+ return
+}
+
+proc logtrack_init { } {
+ global ltsname
+
+ logtrack_clean
+
+ # Create an empty tracking database.
+ [berkdb_open -create -truncate -btree $ltsname] close
+
+ return
+}
+
+# Dump the logs for directory dirname and record which log
+# records were seen.
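+#
+# A db_printlog line is assumed to look roughly like
+#	[1][28]__db_addrem: rec: 41 txnid 80000001 ...
+# and the regexp below extracts the record-type name between the
+# second "]" and the ":".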
+proc logtrack_read { dirname } {
+ global ltsname tmpname util_path
+ global encrypt passwd
+
+ set seendb [berkdb_open $ltsname]
+ error_check_good seendb_open [is_valid_db $seendb] TRUE
+
+ file delete -force $tmpname
+ set pargs " -N -h $dirname "
+ if { $encrypt > 0 } {
+ append pargs " -P $passwd "
+ }
+ set ret [catch {eval exec $util_path/db_printlog $pargs > $tmpname} res]
+ error_check_good printlog $ret 0
+ error_check_good tmpfile_exists [file exists $tmpname] 1
+
+ set f [open $tmpname r]
+ while { [gets $f record] >= 0 } {
+ set r [regexp {\[[^\]]*\]\[[^\]]*\]([^\:]*)\:} $record whl name]
+ if { $r == 1 } {
+ error_check_good seendb_put [$seendb put $name ""] 0
+ }
+ }
+ close $f
+ file delete -force $tmpname
+
+ error_check_good seendb_close [$seendb close] 0
+}
+
+# Print the log record types that were seen but should not have been
+# seen and the log record types that were not seen but should have been seen.
+proc logtrack_summary { } {
+ global ltsname ltlist testdir
+
+ set seendb [berkdb_open $ltsname]
+ error_check_good seendb_open [is_valid_db $seendb] TRUE
+ set existdb [berkdb_open -create -btree]
+ error_check_good existdb_open [is_valid_db $existdb] TRUE
+ set deprecdb [berkdb_open -create -btree]
+ error_check_good deprecdb_open [is_valid_db $deprecdb] TRUE
+
+ error_check_good ltlist_exists [file exists $ltlist] 1
+ set f [open $ltlist r]
+ set pref ""
+ while { [gets $f line] >= 0 } {
+ # Get the keyword, the first thing on the line:
+ # BEGIN/DEPRECATED/IGNORED/PREFIX
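+		# e.g. "PREFIX __db", "BEGIN addrem 41", "DEPRECATED
+		# split" (sketch); only the first two fields matter here.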
+ set keyword [lindex $line 0]
+
+ if { [string compare $keyword PREFIX] == 0 } {
+ # New prefix.
+ set pref [lindex $line 1]
+ } elseif { [string compare $keyword BEGIN] == 0 } {
+ # A log type we care about; put it on our list.
+
+ # Skip noop and debug.
+ if { [string compare [lindex $line 1] noop] == 0 } {
+ continue
+ }
+ if { [string compare [lindex $line 1] debug] == 0 } {
+ continue
+ }
+
+ error_check_good exist_put [$existdb put \
+ ${pref}_[lindex $line 1] ""] 0
+ } elseif { [string compare $keyword DEPRECATED] == 0 ||
+ [string compare $keyword IGNORED] == 0 } {
+ error_check_good deprec_put [$deprecdb put \
+ ${pref}_[lindex $line 1] ""] 0
+ }
+ }
+
+ error_check_good exist_curs \
+ [is_valid_cursor [set ec [$existdb cursor]] $existdb] TRUE
+ while { [llength [set dbt [$ec get -next]]] != 0 } {
+ set rec [lindex [lindex $dbt 0] 0]
+ if { [$seendb count $rec] == 0 } {
+ puts "FAIL: log record type $rec not seen"
+ }
+ }
+ error_check_good exist_curs_close [$ec close] 0
+
+	# Traverse the seen database, looking for record types we saw
+	# but either don't know about or consider deprecated.  (The
+	# cursor must be on seendb, not existdb, for the checks below
+	# to ever fire.)
+	error_check_good seen_curs \
+	    [is_valid_cursor [set sc [$seendb cursor]] $seendb] TRUE
+ while { [llength [set dbt [$sc get -next]]] != 0 } {
+ set rec [lindex [lindex $dbt 0] 0]
+ if { [$existdb count $rec] == 0 } {
+ if { [$deprecdb count $rec] == 0 } {
+ puts "FAIL: unknown log record type $rec seen"
+ } else {
+ puts "FAIL: deprecated log record type $rec seen"
+ }
+ }
+ }
+ error_check_good seen_curs_close [$sc close] 0
+
+ error_check_good seendb_close [$seendb close] 0
+ error_check_good existdb_close [$existdb close] 0
+ error_check_good deprecdb_close [$deprecdb close] 0
+
+ logtrack_clean
+}
diff --git a/storage/bdb/test/mdbscript.tcl b/storage/bdb/test/mdbscript.tcl
new file mode 100644
index 00000000000..9f3c971ee3c
--- /dev/null
+++ b/storage/bdb/test/mdbscript.tcl
@@ -0,0 +1,384 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: mdbscript.tcl,v 11.29 2002/03/22 21:43:06 krinsky Exp $
+#
+# Process script for the multi-process db tester.
+
+source ./include.tcl
+source $test_path/test.tcl
+source $test_path/testutils.tcl
+
+global dbenv
+global klock
+global l_keys
+global procid
+global alphabet
+
+# In Tcl, when there are multiple catch handlers, *all* handlers
+# are called, so we have to resort to this hack.
+#
+global exception_handled
+
+set exception_handled 0
+
+set datastr $alphabet$alphabet
+
+# Usage: mdbscript dir file nentries iter procid procs seed
+# dir: DBHOME directory
+# file: db file on which to operate
+# nentries: number of entries taken from dictionary
+# iter: number of operations to run
+# procid: this processes' id number
+# procs: total number of processes running
+set usage "mdbscript method dir file nentries iter procid procs"
+
+# Verify usage
+if { $argc != 7 } {
+ puts "FAIL:[timestamp] test042: Usage: $usage"
+ exit
+}
+
+# Initialize arguments
+set method [lindex $argv 0]
+set dir [lindex $argv 1]
+set file [lindex $argv 2]
+set nentries [ lindex $argv 3 ]
+set iter [ lindex $argv 4 ]
+set procid [ lindex $argv 5 ]
+set procs [ lindex $argv 6 ]
+
+set pflags ""
+set gflags ""
+set txn ""
+
+set renum [is_rrecno $method]
+set omethod [convert_method $method]
+
+if { [is_record_based $method] == 1 } {
+ append gflags " -recno"
+}
+
+# Initialize seed
+global rand_init
+
+# We want repeatable results, but we also want each instance of mdbscript
+# to do something different. So we add the procid to the fixed seed.
+# (Note that this is a serial number given by the caller, not a pid.)
+berkdb srand [expr $rand_init + $procid]
+
+puts "Beginning execution for [pid] $method"
+puts "$dir db_home"
+puts "$file database"
+puts "$nentries data elements"
+puts "$iter iterations"
+puts "$procid process id"
+puts "$procs processes"
+
+set klock NOLOCK
+
+# Note: all I/O operations, and especially flush, are expensive
+# on Win2000 at least with Tcl version 8.3.2. So we'll avoid
+# flushes in the main part of the loop below.
+flush stdout
+
+set dbenv [berkdb_env -create -cdb -home $dir]
+#set dbenv [berkdb_env -create -cdb -log -home $dir]
+error_check_good dbenv [is_valid_env $dbenv] TRUE
+
+set locker [ $dbenv lock_id ]
+
+set db [berkdb_open -env $dbenv -create -mode 0644 $omethod $file]
+error_check_good dbopen [is_valid_db $db] TRUE
+
+# Init globals (no data)
+set nkeys [db_init $db 0]
+puts "Initial number of keys: $nkeys"
+error_check_good db_init $nkeys $nentries
+tclsleep 5
+
+proc get_lock { k } {
+ global dbenv
+ global procid
+ global locker
+ global klock
+ global DB_LOCK_WRITE
+ global DB_LOCK_NOWAIT
+ global errorInfo
+ global exception_handled
+ # Make sure that the key isn't in the middle of
+ # a delete operation
+ if {[catch {$dbenv lock_get -nowait write $locker $k} klock] != 0 } {
+ set exception_handled 1
+
+ error_check_good \
+ get_lock [is_substr $errorInfo "DB_LOCK_NOTGRANTED"] 1
+ puts "Warning: key $k locked"
+ set klock NOLOCK
+ return 1
+ } else {
+ error_check_good get_lock [is_valid_lock $klock $dbenv] TRUE
+ }
+ return 0
+}
+
+# On each iteration we're going to randomly pick a key and perform
+# one of six operations, matching the switch values below:
+# 0. Get it (verifying that its contents are reasonable).
+# 1. Put it (using an overwrite to make the data be datastr:ID).
+# 2. Get it and do a put through the cursor, tacking our ID onto the end.
+# 3. Get it, read forward some random number of keys.
+# 4. Get it, read forward some random number of keys and do a put (replace).
+# 5. Get it, read forward some random number of keys and do a del.  And then
+# do a put of the key.
+set gets 0
+set getput 0
+set overwrite 0
+set seqread 0
+set seqput 0
+set seqdel 0
+set dlen [string length $datastr]
+
+for { set i 0 } { $i < $iter } { incr i } {
+ set op [berkdb random_int 0 5]
+ puts "iteration $i operation $op"
+ set close_cursor 0
+ if {[catch {
+ switch $op {
+ 0 {
+ incr gets
+ set k [rand_key $method $nkeys $renum $procs]
+ if {[is_record_based $method] == 1} {
+ set key $k
+ } else {
+ set key [lindex $l_keys $k]
+ }
+
+ if { [get_lock $key] == 1 } {
+ incr i -1
+ continue;
+ }
+
+ set rec [eval {$db get} $txn $gflags {$key}]
+ error_check_bad "$db get $key" [llength $rec] 0
+ set partial [string range \
+ [lindex [lindex $rec 0] 1] 0 [expr $dlen - 1]]
+ error_check_good \
+ "$db get $key" $partial [pad_data $method $datastr]
+ }
+ 1 {
+ incr overwrite
+ set k [rand_key $method $nkeys $renum $procs]
+ if {[is_record_based $method] == 1} {
+ set key $k
+ } else {
+ set key [lindex $l_keys $k]
+ }
+
+ set data $datastr:$procid
+ set ret [eval {$db put} \
+ $txn $pflags {$key [chop_data $method $data]}]
+ error_check_good "$db put $key" $ret 0
+ }
+ 2 {
+ incr getput
+ set dbc [$db cursor -update]
+ error_check_good "$db cursor" \
+ [is_valid_cursor $dbc $db] TRUE
+ set close_cursor 1
+ set k [rand_key $method $nkeys $renum $procs]
+ if {[is_record_based $method] == 1} {
+ set key $k
+ } else {
+ set key [lindex $l_keys $k]
+ }
+
+ if { [get_lock $key] == 1 } {
+ incr i -1
+ error_check_good "$dbc close" \
+ [$dbc close] 0
+ set close_cursor 0
+ continue;
+ }
+
+ set ret [$dbc get -set $key]
+ error_check_good \
+ "$dbc get $key" [llength [lindex $ret 0]] 2
+ set rec [lindex [lindex $ret 0] 1]
+ set partial [string range $rec 0 [expr $dlen - 1]]
+ error_check_good \
+ "$dbc get $key" $partial [pad_data $method $datastr]
+ append rec ":$procid"
+ set ret [$dbc put \
+ -current [chop_data $method $rec]]
+ error_check_good "$dbc put $key" $ret 0
+ error_check_good "$dbc close" [$dbc close] 0
+ set close_cursor 0
+ }
+ 3 -
+ 4 -
+ 5 {
+ if { $op == 3 } {
+ set flags ""
+ } else {
+ set flags -update
+ }
+ set dbc [eval {$db cursor} $flags]
+ error_check_good "$db cursor" \
+ [is_valid_cursor $dbc $db] TRUE
+ set close_cursor 1
+ set k [rand_key $method $nkeys $renum $procs]
+ if {[is_record_based $method] == 1} {
+ set key $k
+ } else {
+ set key [lindex $l_keys $k]
+ }
+
+ if { [get_lock $key] == 1 } {
+ incr i -1
+ error_check_good "$dbc close" \
+ [$dbc close] 0
+ set close_cursor 0
+ continue;
+ }
+
+ set ret [$dbc get -set $key]
+ error_check_good \
+ "$dbc get $key" [llength [lindex $ret 0]] 2
+
+ # Now read a few keys sequentially
+ set nloop [berkdb random_int 0 10]
+ if { [berkdb random_int 0 1] == 0 } {
+ set flags -next
+ } else {
+ set flags -prev
+ }
+ while { $nloop > 0 } {
+ set lastret $ret
+ set ret [eval {$dbc get} $flags]
+ # Might read beginning/end of file
+ if { [llength $ret] == 0} {
+ set ret $lastret
+ break
+ }
+ incr nloop -1
+ }
+ switch $op {
+ 3 {
+ incr seqread
+ }
+ 4 {
+ incr seqput
+ set rec [lindex [lindex $ret 0] 1]
+ set partial [string range $rec 0 \
+ [expr $dlen - 1]]
+ error_check_good "$dbc get $key" \
+ $partial [pad_data $method $datastr]
+ append rec ":$procid"
+ set ret [$dbc put -current \
+ [chop_data $method $rec]]
+ error_check_good \
+ "$dbc put $key" $ret 0
+ }
+ 5 {
+ incr seqdel
+ set k [lindex [lindex $ret 0] 0]
+ # We need to lock the item we're
+ # deleting so that someone else can't
+ # try to do a get while we're
+ # deleting
+ error_check_good "$klock put" \
+ [$klock put] 0
+ set klock NOLOCK
+ set cur [$dbc get -current]
+ error_check_bad get_current \
+ [llength $cur] 0
+ set key [lindex [lindex $cur 0] 0]
+ if { [get_lock $key] == 1 } {
+ incr i -1
+ error_check_good "$dbc close" \
+ [$dbc close] 0
+ set close_cursor 0
+ continue
+ }
+ set ret [$dbc del]
+ error_check_good "$dbc del" $ret 0
+ set rec $datastr
+ append rec ":$procid"
+ if { $renum == 1 } {
+ set ret [$dbc put -before \
+ [chop_data $method $rec]]
+ error_check_good \
+ "$dbc put $k" $ret $k
+ } elseif { \
+ [is_record_based $method] == 1 } {
+ error_check_good "$dbc close" \
+ [$dbc close] 0
+ set close_cursor 0
+ set ret [$db put $k \
+ [chop_data $method $rec]]
+ error_check_good \
+ "$db put $k" $ret 0
+ } else {
+ set ret [$dbc put -keylast $k \
+ [chop_data $method $rec]]
+ error_check_good \
+ "$dbc put $k" $ret 0
+ }
+ }
+ }
+ if { $close_cursor == 1 } {
+ error_check_good \
+ "$dbc close" [$dbc close] 0
+ set close_cursor 0
+ }
+ }
+ }
+ } res] != 0} {
+ global errorInfo;
+ global exception_handled;
+
+ puts $errorInfo
+
+ set fnl [string first "\n" $errorInfo]
+ set theError [string range $errorInfo 0 [expr $fnl - 1]]
+
+ if { [string compare $klock NOLOCK] != 0 } {
+ catch {$klock put}
+ }
+ if {$close_cursor == 1} {
+ catch {$dbc close}
+ set close_cursor 0
+ }
+
+ if {[string first FAIL $theError] == 0 && \
+ $exception_handled != 1} {
+ flush stdout
+ error "FAIL:[timestamp] test042: key $k: $theError"
+ }
+ set exception_handled 0
+ } else {
+ if { [string compare $klock NOLOCK] != 0 } {
+ error_check_good "$klock put" [$klock put] 0
+ set klock NOLOCK
+ }
+ }
+}
+
+error_check_good db_close_catch [catch {$db close} ret] 0
+error_check_good db_close $ret 0
+error_check_good dbenv_close [$dbenv close] 0
+
+puts "[timestamp] [pid] Complete"
+puts "Successful ops: "
+puts "\t$gets gets"
+puts "\t$overwrite overwrites"
+puts "\t$getput getputs"
+puts "\t$seqread seqread"
+puts "\t$seqput seqput"
+puts "\t$seqdel seqdel"
+flush stdout
+
+exit
diff --git a/storage/bdb/test/memp001.tcl b/storage/bdb/test/memp001.tcl
new file mode 100644
index 00000000000..c4bbf99b9b2
--- /dev/null
+++ b/storage/bdb/test/memp001.tcl
@@ -0,0 +1,199 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: memp001.tcl,v 11.50 2002/08/07 16:46:28 bostic Exp $
+#
+
+# TEST memp001
+# TEST Randomly updates pages.
+proc memp001 { } {
+
+ memp001_body 1 ""
+ memp001_body 3 ""
+ memp001_body 1 -private
+ memp001_body 3 -private
+ memp001_body 1 "-system_mem -shm_key 1"
+ memp001_body 3 "-system_mem -shm_key 1"
+
+}
+
+proc memp001_body { ncache flags } {
+ source ./include.tcl
+ global rand_init
+
+ set nfiles 5
+ set iter 500
+ set psize 512
+ set cachearg "-cachesize {0 400000 $ncache}"
+
+ puts \
+"Memp001: { $flags } random update $iter iterations on $nfiles files."
+ #
+ # Check if this platform supports this set of flags
+ #
+ if { [mem_chk $flags] == 1 } {
+ return
+ }
+
+ env_cleanup $testdir
+ puts "\tMemp001.a: Create env with $ncache caches"
+ set env [eval {berkdb_env -create -mode 0644} \
+ $cachearg {-home $testdir} $flags]
+ error_check_good env_open [is_valid_env $env] TRUE
+
+ #
+ # Do a simple mpool_stat call to verify the number of caches
+ # just to exercise the stat code.
+ set stat [$env mpool_stat]
+ set str "Number of caches"
+ set checked 0
+ foreach statpair $stat {
+ if { $checked == 1 } {
+ break
+ }
+ if { [is_substr [lindex $statpair 0] $str] != 0} {
+ set checked 1
+ error_check_good ncache [lindex $statpair 1] $ncache
+ }
+ }
+ error_check_good checked $checked 1
+
+ # Open N memp files
+ puts "\tMemp001.b: Create $nfiles mpool files"
+ for {set i 1} {$i <= $nfiles} {incr i} {
+ set fname "data_file.$i"
+ file_create $testdir/$fname 50 $psize
+
+ set mpools($i) \
+ [$env mpool -create -pagesize $psize -mode 0644 $fname]
+ error_check_good mp_open [is_substr $mpools($i) $env.mp] 1
+ }
+
+ # Now, loop, picking files at random
+ berkdb srand $rand_init
+ puts "\tMemp001.c: Random page replacement loop"
+ for {set i 0} {$i < $iter} {incr i} {
+ set mpool $mpools([berkdb random_int 1 $nfiles])
+ set p(1) [get_range $mpool 10]
+ set p(2) [get_range $mpool 10]
+ set p(3) [get_range $mpool 10]
+ set p(1) [replace $mpool $p(1)]
+ set p(3) [replace $mpool $p(3)]
+ set p(4) [get_range $mpool 20]
+ set p(4) [replace $mpool $p(4)]
+ set p(5) [get_range $mpool 10]
+ set p(6) [get_range $mpool 20]
+ set p(7) [get_range $mpool 10]
+ set p(8) [get_range $mpool 20]
+ set p(5) [replace $mpool $p(5)]
+ set p(6) [replace $mpool $p(6)]
+ set p(9) [get_range $mpool 40]
+ set p(9) [replace $mpool $p(9)]
+ set p(10) [get_range $mpool 40]
+ set p(7) [replace $mpool $p(7)]
+ set p(8) [replace $mpool $p(8)]
+ set p(9) [replace $mpool $p(9)]
+ set p(10) [replace $mpool $p(10)]
+ #
+ # We now need to put all the pages we have here or
+ # else they end up pinned.
+ #
+ for {set x 1} { $x <= 10} {incr x} {
+ error_check_good pgput [$p($x) put] 0
+ }
+ }
+
+ # Close N memp files, close the environment.
+ puts "\tMemp001.d: Close mpools"
+ for {set i 1} {$i <= $nfiles} {incr i} {
+ error_check_good memp_close:$mpools($i) [$mpools($i) close] 0
+ }
+ error_check_good envclose [$env close] 0
+
+ for {set i 1} {$i <= $nfiles} {incr i} {
+ fileremove -f $testdir/data_file.$i
+ }
+}
+
+proc file_create { fname nblocks blocksize } {
+ set fid [open $fname w]
+ for {set i 0} {$i < $nblocks} {incr i} {
+ seek $fid [expr $i * $blocksize] start
+ puts -nonewline $fid $i
+ }
+ seek $fid [expr $nblocks * $blocksize - 1]
+
+ # We don't end the file with a newline, because some platforms (like
+ # Windows) emit CR/NL. There does not appear to be a BINARY open flag
+ # that prevents this.
+ puts -nonewline $fid "Z"
+ close $fid
+
+ # Make sure it worked
+ if { [file size $fname] != $nblocks * $blocksize } {
+ error "FAIL: file_create could not create correct file size"
+ }
+}
+
+proc get_range { mpool max } {
+ set pno [berkdb random_int 0 $max]
+ set p [$mpool get $pno]
+ error_check_good page [is_valid_page $p $mpool] TRUE
+ set got [$p pgnum]
+ if { $got != $pno } {
+ puts "Get_range: Page mismatch page |$pno| val |$got|"
+ }
+ set ret [$p init "Page is pinned by [pid]"]
+ error_check_good page_init $ret 0
+
+ return $p
+}
+
+proc replace { mpool p } {
+ set pgno [$p pgnum]
+
+ set ret [$p init "Page is unpinned by [pid]"]
+ error_check_good page_init $ret 0
+
+ set ret [$p put -dirty]
+ error_check_good page_put $ret 0
+
+ set p2 [$mpool get $pgno]
+ error_check_good page [is_valid_page $p2 $mpool] TRUE
+
+ return $p2
+}
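+
+# A typical pin/unpin cycle from the memp001 loop above (sketch):
+#	set p [get_range $mpool 10]	;# get pins the page
+#	set p [replace $mpool $p]	;# put -dirty unpins it, get re-pins
+#	$p put				;# the final put releases the pin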
+
+proc mem_chk { flags } {
+ source ./include.tcl
+ global errorCode
+
+ # Open the memp with region init specified
+ env_cleanup $testdir
+
+ set cachearg " -cachesize {0 400000 3}"
+ set ret [catch {eval {berkdb_env -create -mode 0644}\
+ $cachearg {-region_init -home $testdir} $flags} env]
+ if { $ret != 0 } {
+ # If the env open failed, it may be because we're on a platform
+ # such as HP-UX 10 that won't support mutexes in shmget memory.
+ # Or QNX, which doesn't support system memory at all.
+ # Verify that the return value was EINVAL or EOPNOTSUPP
+ # and bail gracefully.
+ error_check_good is_shm_test [is_substr $flags -system_mem] 1
+ error_check_good returned_error [expr \
+ [is_substr $errorCode EINVAL] || \
+ [is_substr $errorCode EOPNOTSUPP]] 1
+ puts "Warning:\
+ platform does not support mutexes in shmget memory."
+ puts "Skipping shared memory mpool test."
+ return 1
+ }
+ error_check_good env_open [is_valid_env $env] TRUE
+ error_check_good env_close [$env close] 0
+ env_cleanup $testdir
+
+ return 0
+}
diff --git a/storage/bdb/test/memp002.tcl b/storage/bdb/test/memp002.tcl
new file mode 100644
index 00000000000..d55f2987f06
--- /dev/null
+++ b/storage/bdb/test/memp002.tcl
@@ -0,0 +1,62 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: memp002.tcl,v 11.47 2002/09/05 17:23:06 sandstro Exp $
+#
+
+# TEST memp002
+# TEST Tests multiple processes accessing and modifying the same files.
+proc memp002 { } {
+ #
+ # Multiple processes not supported by private memory so don't
+ # run memp002_body with -private.
+ #
+ memp002_body ""
+ memp002_body "-system_mem -shm_key 1"
+}
+
+proc memp002_body { flags } {
+ source ./include.tcl
+
+ puts "Memp002: {$flags} Multiprocess mpool tester"
+
+ set procs 4
+ set psizes "512 1024 2048 4096 8192"
+ set iterations 500
+ set npages 100
+
+ # Check if this combination of flags is supported by this arch.
+ if { [mem_chk $flags] == 1 } {
+ return
+ }
+
+ set iter [expr $iterations / $procs]
+
+ # Clean up old stuff and create new.
+ env_cleanup $testdir
+
+ for { set i 0 } { $i < [llength $psizes] } { incr i } {
+ fileremove -f $testdir/file$i
+ }
+ set e [eval {berkdb_env -create -lock -home $testdir} $flags]
+ error_check_good dbenv [is_valid_env $e] TRUE
+
+ set pidlist {}
+ for { set i 0 } { $i < $procs } {incr i} {
+
+ puts "$tclsh_path\
+ $test_path/mpoolscript.tcl $testdir $i $procs \
+ $iter $psizes $npages 3 $flags > \
+ $testdir/memp002.$i.out &"
+ set p [exec $tclsh_path $test_path/wrap.tcl \
+ mpoolscript.tcl $testdir/memp002.$i.out $testdir $i $procs \
+ $iter $psizes $npages 3 $flags &]
+ lappend pidlist $p
+ }
+ puts "Memp002: $procs independent processes now running"
+ watch_procs $pidlist
+
+ reset_env $e
+}
diff --git a/storage/bdb/test/memp003.tcl b/storage/bdb/test/memp003.tcl
new file mode 100644
index 00000000000..31eb55b757c
--- /dev/null
+++ b/storage/bdb/test/memp003.tcl
@@ -0,0 +1,153 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: memp003.tcl,v 11.46 2002/04/30 17:26:06 sue Exp $
+#
+
+# TEST memp003
+# TEST Test reader-only/writer process combinations; we use the access methods
+# TEST for testing.
+proc memp003 { } {
+ #
+	# Multiple processes are not supported with private memory, so
+	# don't run memp003_body with -private.
+ #
+ memp003_body ""
+ memp003_body "-system_mem -shm_key 1"
+}
+
+proc memp003_body { flags } {
+ global alphabet
+ source ./include.tcl
+
+ puts "Memp003: {$flags} Reader/Writer tests"
+
+ if { [mem_chk $flags] == 1 } {
+ return
+ }
+
+ env_cleanup $testdir
+ set psize 1024
+ set nentries 500
+ set testfile mpool.db
+ set t1 $testdir/t1
+
+ # Create an environment that the two processes can share, with
+ # 20 pages per cache.
+ set c [list 0 [expr $psize * 20 * 3] 3]
+ set dbenv [eval {berkdb_env \
+ -create -lock -home $testdir -cachesize $c} $flags]
+ error_check_good dbenv [is_valid_env $dbenv] TRUE
+
+ # First open and create the file.
+ set db [berkdb_open -env $dbenv -create -truncate \
+ -mode 0644 -pagesize $psize -btree $testfile]
+ error_check_good dbopen/RW [is_valid_db $db] TRUE
+
+ set did [open $dict]
+ set txn ""
+ set count 0
+
+ puts "\tMemp003.a: create database"
+ set keys ""
+ # Here is the loop where we put and get each key/data pair
+ while { [gets $did str] != -1 && $count < $nentries } {
+ lappend keys $str
+
+ set ret [eval {$db put} $txn {$str $str}]
+ error_check_good put $ret 0
+
+ set ret [eval {$db get} $txn {$str}]
+ error_check_good get $ret [list [list $str $str]]
+
+ incr count
+ }
+ close $did
+ error_check_good close [$db close] 0
+
+ # Now open the file for read-only
+ set db [berkdb_open -env $dbenv -rdonly $testfile]
+ error_check_good dbopen/RO [is_substr $db db] 1
+
+ puts "\tMemp003.b: verify a few keys"
+ # Read and verify a couple of keys; saving them to check later
+ set testset ""
+ for { set i 0 } { $i < 10 } { incr i } {
+ set ndx [berkdb random_int 0 [expr $nentries - 1]]
+ set key [lindex $keys $ndx]
+ if { [lsearch $testset $key] != -1 } {
+ incr i -1
+ continue;
+ }
+
+ # The remote process stuff is unhappy with
+ # zero-length keys; make sure we don't pick one.
+ if { [llength $key] == 0 } {
+ incr i -1
+ continue
+ }
+
+ lappend testset $key
+
+ set ret [eval {$db get} $txn {$key}]
+ error_check_good get/RO $ret [list [list $key $key]]
+ }
+
+ puts "\tMemp003.c: retrieve and modify keys in remote process"
+ # Now open remote process where we will open the file RW
+ set f1 [open |$tclsh_path r+]
+ puts $f1 "source $test_path/test.tcl"
+ puts $f1 "flush stdout"
+ flush $f1
+
+ set c [concat "{" [list 0 [expr $psize * 20 * 3] 3] "}" ]
+ set remote_env [send_cmd $f1 \
+ "berkdb_env -create -lock -home $testdir -cachesize $c $flags"]
+ error_check_good remote_dbenv [is_valid_env $remote_env] TRUE
+
+ set remote_db [send_cmd $f1 "berkdb_open -env $remote_env $testfile"]
+ error_check_good remote_dbopen [is_valid_db $remote_db] TRUE
+
+ foreach k $testset {
+ # Get the key
+ set ret [send_cmd $f1 "$remote_db get $k"]
+ error_check_good remote_get $ret [list [list $k $k]]
+
+ # Now replace the key
+ set ret [send_cmd $f1 "$remote_db put $k $k$k"]
+ error_check_good remote_put $ret 0
+ }
+
+ puts "\tMemp003.d: verify changes in local process"
+ foreach k $testset {
+		set ret [eval {$db get} $txn {$k}]
+		error_check_good get_verify/RO $ret [list [list $k $k$k]]
+ }
+
+ puts "\tMemp003.e: Fill up the cache with dirty buffers"
+ foreach k $testset {
+ # Now rewrite the keys with BIG data
+ set data [replicate $alphabet 32]
+ set ret [send_cmd $f1 "$remote_db put $k $data"]
+ error_check_good remote_put $ret 0
+ }
+
+ puts "\tMemp003.f: Get more pages for the read-only file"
+ dump_file $db $txn $t1 nop
+
+ puts "\tMemp003.g: Sync from the read-only file"
+ error_check_good db_sync [$db sync] 0
+ error_check_good db_close [$db close] 0
+
+ set ret [send_cmd $f1 "$remote_db close"]
+	error_check_good remote_close $ret 0
+
+ # Close the environment both remotely and locally.
+ set ret [send_cmd $f1 "$remote_env close"]
+ error_check_good remote:env_close $ret 0
+ close $f1
+
+ reset_env $dbenv
+}
diff --git a/storage/bdb/test/mpoolscript.tcl b/storage/bdb/test/mpoolscript.tcl
new file mode 100644
index 00000000000..c13f70eb945
--- /dev/null
+++ b/storage/bdb/test/mpoolscript.tcl
@@ -0,0 +1,171 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: mpoolscript.tcl,v 11.16 2002/04/29 14:47:16 sandstro Exp $
+#
+# Random multiple process mpool tester.
+# Usage: mpoolscript dir id maxprocs numiters pgsizes numpages sleepint flags
+# dir: lock directory.
+# id: Unique identifier for this process.
+# maxprocs: Number of procs in this test.
+# numiters: Total number of iterations.
+# pgsizes: Pagesizes for the different files. Length of this item indicates
+# how many files to use.
+# numpages: Number of pages per file.
+# sleepint: Maximum sleep interval.
+# flags: Flags for env open
+
+source ./include.tcl
+source $test_path/test.tcl
+source $test_path/testutils.tcl
+
+set usage \
+ "mpoolscript dir id maxprocs numiters pgsizes numpages sleepint flags"
+
+# Verify usage
+if { $argc != 8 } {
+ puts stderr "FAIL:[timestamp] Usage: $usage"
+ puts $argc
+ exit
+}
+
+# Initialize arguments
+set dir [lindex $argv 0]
+set id [lindex $argv 1]
+set maxprocs [lindex $argv 2]
+set numiters [ lindex $argv 3 ]
+set pgsizes [ lindex $argv 4 ]
+set numpages [ lindex $argv 5 ]
+set sleepint [ lindex $argv 6 ]
+set flags [ lindex $argv 7]
+
+# Initialize seed
+global rand_init
+berkdb srand $rand_init
+
+# Give time for all processes to start up.
+tclsleep 10
+
+puts -nonewline "Beginning execution for $id: $maxprocs $dir $numiters"
+puts " $pgsizes $numpages $sleepint"
+flush stdout
+
+# Figure out how small/large to make the cache
+set max 0
+foreach i $pgsizes {
+ if { $i > $max } {
+ set max $i
+ }
+}
+
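+# Size the cache for one long-term pinned page of file 0 per process
+# plus one page of the largest size each; the trailing 1 asks for a
+# single cache region.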
+set cache [list 0 [expr $maxprocs * ([lindex $pgsizes 0] + $max)] 1]
+set env_cmd {berkdb_env -lock -cachesize $cache -home $dir}
+set e [eval $env_cmd $flags]
+error_check_good env_open [is_valid_env $e] TRUE
+
+# Now open files
+set mpools {}
+set nfiles 0
+foreach psize $pgsizes {
+ set mp [$e mpool -create -mode 0644 -pagesize $psize file$nfiles]
+ error_check_good memp_fopen:$nfiles [is_valid_mpool $mp $e] TRUE
+ lappend mpools $mp
+ incr nfiles
+}
+
+puts "Establishing long-term pin on file 0 page $id for process $id"
+
+# Set up the long-pin page
+set locker [$e lock_id]
+set lock [$e lock_get write $locker 0:$id]
+error_check_good lock_get [is_valid_lock $lock $e] TRUE
+
+set mp [lindex $mpools 0]
+set master_page [$mp get -create $id]
+error_check_good mp_get:$master_page [is_valid_page $master_page $mp] TRUE
+
+set r [$master_page init MASTER$id]
+error_check_good page_init $r 0
+
+# Release the lock but keep the page pinned
+set r [$lock put]
+error_check_good lock_put $r 0
+
+# Main loop. On each iteration, we'll check every page in each
+# of the files. On any file, if we see the appropriate tag in the
+# field, we'll rewrite the page, else we won't. Keep track of
+# how many pages we actually process.
+set pages 0
+for { set iter 0 } { $iter < $numiters } { incr iter } {
+ puts "[timestamp]: iteration $iter, $pages pages set so far"
+ flush stdout
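+	# Even-numbered files chase the predecessor's tag and
+	# odd-numbered files the successor's, so updates should
+	# propagate around the process ring in both directions.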
+ for { set fnum 1 } { $fnum < $nfiles } { incr fnum } {
+ if { [expr $fnum % 2 ] == 0 } {
+ set pred [expr ($id + $maxprocs - 1) % $maxprocs]
+ } else {
+ set pred [expr ($id + $maxprocs + 1) % $maxprocs]
+ }
+
+ set mpf [lindex $mpools $fnum]
+ for { set p 0 } { $p < $numpages } { incr p } {
+ set lock [$e lock_get write $locker $fnum:$p]
+ error_check_good lock_get:$fnum:$p \
+ [is_valid_lock $lock $e] TRUE
+
+ # Now, get the page
+ set pp [$mpf get -create $p]
+ error_check_good page_get:$fnum:$p \
+ [is_valid_page $pp $mpf] TRUE
+
+ if { [$pp is_setto $pred] == 0 || [$pp is_setto 0] == 0 } {
+ # Set page to self.
+ set r [$pp init $id]
+ error_check_good page_init:$fnum:$p $r 0
+ incr pages
+ set r [$pp put -dirty]
+ error_check_good page_put:$fnum:$p $r 0
+ } else {
+ error_check_good page_put:$fnum:$p [$pp put] 0
+ }
+ error_check_good lock_put:$fnum:$p [$lock put] 0
+ }
+ }
+ tclsleep [berkdb random_int 1 $sleepint]
+}
+
+# Now verify your master page, release its pin, then verify everyone else's
+puts "$id: End of run verification of master page"
+set r [$master_page is_setto MASTER$id]
+error_check_good page_check $r 1
+set r [$master_page put -dirty]
+error_check_good page_put $r 0
+
+set i [expr ($id + 1) % $maxprocs]
+set mpf [lindex $mpools 0]
+
+while { $i != $id } {
+ set p [$mpf get -create $i]
+ error_check_good mp_get [is_valid_page $p $mpf] TRUE
+
+ if { [$p is_setto MASTER$i] != 1 } {
+ puts "Warning: Master page $i not set."
+ }
+ error_check_good page_put:$p [$p put] 0
+
+ set i [expr ($i + 1) % $maxprocs]
+}
+
+# Close files
+foreach i $mpools {
+ set r [$i close]
+ error_check_good mpf_close $r 0
+}
+
+# Close environment system
+set r [$e close]
+error_check_good env_close $r 0
+
+puts "[timestamp] $id Complete"
+flush stdout
diff --git a/storage/bdb/test/mutex001.tcl b/storage/bdb/test/mutex001.tcl
new file mode 100644
index 00000000000..93f858993a5
--- /dev/null
+++ b/storage/bdb/test/mutex001.tcl
@@ -0,0 +1,51 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: mutex001.tcl,v 11.23 2002/04/30 19:37:36 sue Exp $
+#
+
+# TEST mutex001
+# TEST Test basic mutex functionality
+proc mutex001 { } {
+ source ./include.tcl
+
+ puts "Mutex001: Basic functionality"
+ env_cleanup $testdir
+ set nlocks 20
+
+ # Test open w/out create; should fail
+ error_check_bad \
+ env_open [catch {berkdb_env -lock -home $testdir} env] 0
+
+ puts "\tMutex001.a: Create lock env"
+ # Now open for real
+ set env [berkdb_env -create -mode 0644 -lock -home $testdir]
+ error_check_good env_open [is_valid_env $env] TRUE
+
+ puts "\tMutex001.b: Create $nlocks mutexes"
+ set m [$env mutex 0644 $nlocks]
+ error_check_good mutex_init [is_valid_mutex $m $env] TRUE
+
+	# Get and set each mutex; sleep; then read each value back and release.
+ puts "\tMutex001.c: Get/set loop"
+ for { set i 0 } { $i < $nlocks } { incr i } {
+ set r [$m get $i ]
+ error_check_good mutex_get $r 0
+
+ set r [$m setval $i $i]
+ error_check_good mutex_setval $r 0
+ }
+ tclsleep 5
+ for { set i 0 } { $i < $nlocks } { incr i } {
+ set r [$m getval $i]
+ error_check_good mutex_getval $r $i
+
+ set r [$m release $i ]
+		error_check_good mutex_release $r 0
+ }
+
+ error_check_good mutex_close [$m close] 0
+ error_check_good env_close [$env close] 0
+}
diff --git a/storage/bdb/test/mutex002.tcl b/storage/bdb/test/mutex002.tcl
new file mode 100644
index 00000000000..193e600fe8b
--- /dev/null
+++ b/storage/bdb/test/mutex002.tcl
@@ -0,0 +1,94 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: mutex002.tcl,v 11.23 2002/04/30 19:37:36 sue Exp $
+#
+
+# TEST mutex002
+# TEST Test basic mutex synchronization
+proc mutex002 { } {
+ source ./include.tcl
+
+ puts "Mutex002: Basic synchronization"
+ env_cleanup $testdir
+ set nlocks 20
+
+ # Fork off child before we open any files.
+ set f1 [open |$tclsh_path r+]
+ puts $f1 "source $test_path/test.tcl"
+ flush $f1
+
+ # Open the environment and the mutex locally
+ puts "\tMutex002.a: Open local and remote env"
+ set local_env [berkdb_env -create -mode 0644 -lock -home $testdir]
+ error_check_good env_open [is_valid_env $local_env] TRUE
+
+ set local_mutex [$local_env mutex 0644 $nlocks]
+ error_check_good \
+ mutex_init [is_valid_mutex $local_mutex $local_env] TRUE
+
+ # Open the environment and the mutex remotely
+ set remote_env [send_cmd $f1 "berkdb_env -lock -home $testdir"]
+ error_check_good remote:env_open [is_valid_env $remote_env] TRUE
+
+ set remote_mutex [send_cmd $f1 "$remote_env mutex 0644 $nlocks"]
+ error_check_good \
+ mutex_init [is_valid_mutex $remote_mutex $remote_env] TRUE
+
+ # Do a get here, then set the value to be pid.
+ # On the remote side fire off a get and getval.
+ puts "\tMutex002.b: Local and remote get/set"
+ set r [$local_mutex get 1]
+ error_check_good lock_get $r 0
+
+ set r [$local_mutex setval 1 [pid]]
+	error_check_good mutex_setval $r 0
+
+ # Now have the remote side request the lock and check its
+ # value. Then wait 5 seconds, release the mutex and see
+ # what the remote side returned.
+ send_timed_cmd $f1 1 "$remote_mutex get 1"
+ send_timed_cmd $f1 1 "set ret \[$remote_mutex getval 1\]"
+
+ # Now sleep before resetting and releasing lock
+ tclsleep 5
+ set newv [expr [pid] - 1]
+ set r [$local_mutex setval 1 $newv]
+ error_check_good mutex_setval $r 0
+
+ set r [$local_mutex release 1]
+ error_check_good mutex_release $r 0
+
+ # Now get the result from the other script
+ # Timestamp
+ set result [rcv_result $f1]
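+	# The remote get should have blocked until the release above, so
+	# its elapsed time must exceed the 5-second sleep (> 4 allows for
+	# timer granularity).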
+ error_check_good lock_get:remote_time [expr $result > 4] 1
+
+ # Timestamp
+ set result [rcv_result $f1]
+
+ # Mutex value
+ set result [send_cmd $f1 "puts \$ret"]
+ error_check_good lock_get:remote_getval $result $newv
+
+ # Close down the remote
+ puts "\tMutex002.c: Close remote"
+ set ret [send_cmd $f1 "$remote_mutex close" 5]
+	# Not sure why we need this, but we do... an extra blank line
+	# gets output somewhere.
+ gets $f1 ret
+ error_check_good remote:mutex_close $ret 0
+
+ set ret [send_cmd $f1 "$remote_env close"]
+ error_check_good remote:env_close $ret 0
+
+ catch { close $f1 } result
+
+ set ret [$local_mutex close]
+ error_check_good local:mutex_close $ret 0
+
+ set ret [$local_env close]
+ error_check_good local:env_close $ret 0
+}
diff --git a/storage/bdb/test/mutex003.tcl b/storage/bdb/test/mutex003.tcl
new file mode 100644
index 00000000000..da35ac0d115
--- /dev/null
+++ b/storage/bdb/test/mutex003.tcl
@@ -0,0 +1,52 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: mutex003.tcl,v 11.24 2002/09/05 17:23:06 sandstro Exp $
+#
+
+# TEST mutex003
+# TEST Generate a bunch of parallel testers that try to randomly obtain locks.
+proc mutex003 { } {
+ source ./include.tcl
+
+ set nmutex 20
+ set iter 500
+ set procs 5
+ set mdegree 3
+ set wait 2
+ puts "Mutex003: Multi-process random mutex test"
+
+ env_cleanup $testdir
+
+ puts "\tMutex003.a: Create environment"
+ # Now open the region we'll use for multiprocess testing.
+ set env [berkdb_env -create -mode 0644 -lock -home $testdir]
+ error_check_good env_open [is_valid_env $env] TRUE
+
+ set mutex [$env mutex 0644 $nmutex]
+ error_check_good mutex_init [is_valid_mutex $mutex $env] TRUE
+
+ error_check_good mutex_close [$mutex close] 0
+
+ # Now spawn off processes
+ puts "\tMutex003.b: Create $procs processes"
+ set pidlist {}
+ for { set i 0 } {$i < $procs} {incr i} {
+ puts "$tclsh_path\
+ $test_path/mutexscript.tcl $testdir\
+ $iter $nmutex $wait $mdegree > $testdir/$i.mutexout &"
+ set p [exec $tclsh_path $test_path/wrap.tcl \
+ mutexscript.tcl $testdir/$i.mutexout $testdir\
+ $iter $nmutex $wait $mdegree &]
+ lappend pidlist $p
+ }
+ puts "\tMutex003.c: $procs independent processes now running"
+ watch_procs $pidlist
+ error_check_good env_close [$env close] 0
+ # Remove output files
+ for { set i 0 } {$i < $procs} {incr i} {
+ fileremove -f $testdir/$i.mutexout
+ }
+}
diff --git a/storage/bdb/test/mutexscript.tcl b/storage/bdb/test/mutexscript.tcl
new file mode 100644
index 00000000000..bc410f2716d
--- /dev/null
+++ b/storage/bdb/test/mutexscript.tcl
@@ -0,0 +1,91 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: mutexscript.tcl,v 11.16 2002/04/29 14:58:16 sandstro Exp $
+#
+# Random mutex tester.
+# Usage: mutexscript dir numiters nmutex sleepint degree
+# dir: dir in which all the mutexes live.
+# numiters: Total number of iterations.
+# nmutex: Total number of mutexes.
+# sleepint: Maximum sleep interval.
+# degree: Maximum number of locks to acquire at once
+
+source ./include.tcl
+source $test_path/test.tcl
+source $test_path/testutils.tcl
+
+set usage "mutexscript dir numiters nmutex sleepint degree"
+
+# Verify usage
+if { $argc != 5 } {
+ puts stderr "FAIL:[timestamp] Usage: $usage"
+ exit
+}
+
+# Initialize arguments
+set dir [lindex $argv 0]
+set numiters [ lindex $argv 1 ]
+set nmutex [ lindex $argv 2 ]
+set sleepint [ lindex $argv 3 ]
+set degree [ lindex $argv 4 ]
+set locker [pid]
+set mypid [sanitized_pid]
+
+# Initialize seed
+global rand_init
+berkdb srand $rand_init
+
+puts -nonewline "Mutexscript: Beginning execution for $locker:"
+puts " $numiters $nmutex $sleepint $degree"
+flush stdout
+
+# Open the environment and the mutex
+set e [berkdb_env -create -mode 0644 -lock -home $dir]
+error_check_good env_open [is_valid_env $e] TRUE
+
+set mutex [$e mutex 0644 $nmutex]
+error_check_good mutex_init [is_valid_mutex $mutex $e] TRUE
+
+# Sleep for awhile to make sure that everyone has gotten in
+tclsleep 5
+
+for { set iter 0 } { $iter < $numiters } { incr iter } {
+ set nlocks [berkdb random_int 1 $degree]
+ # We will always lock objects in ascending order to avoid
+ # deadlocks.
+ set lastobj 1
+ set mlist {}
+ for { set lnum 0 } { $lnum < $nlocks } { incr lnum } {
+ # Pick lock parameters
+ set obj [berkdb random_int $lastobj [expr $nmutex - 1]]
+ set lastobj [expr $obj + 1]
+ puts "[timestamp] $locker $lnum: $obj"
+
+ # Do get, set its val to own pid, and then add to list
+ error_check_good mutex_get:$obj [$mutex get $obj] 0
+ error_check_good mutex_setval:$obj [$mutex setval $obj $mypid] 0
+ lappend mlist $obj
+ if {$lastobj >= $nmutex} {
+ break
+ }
+ }
+
+ # Sleep for 10 to (100*$sleepint) ms.
+ after [berkdb random_int 10 [expr $sleepint * 100]]
+
+ # Now release locks
+ foreach i $mlist {
+ error_check_good mutex_getval:$i [$mutex getval $i] $mypid
+ error_check_good mutex_setval:$i \
+ [$mutex setval $i [expr 0 - $mypid]] 0
+ error_check_good mutex_release:$i [$mutex release $i] 0
+ }
+ puts "[timestamp] $locker released mutexes"
+ flush stdout
+}
+
+puts "[timestamp] $locker Complete"
+flush stdout
diff --git a/storage/bdb/test/ndbm.tcl b/storage/bdb/test/ndbm.tcl
new file mode 100644
index 00000000000..0bf8e0cc87c
--- /dev/null
+++ b/storage/bdb/test/ndbm.tcl
@@ -0,0 +1,144 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: ndbm.tcl,v 11.16 2002/07/08 13:11:30 mjc Exp $
+#
+# Historic NDBM interface test.
+# Use the first 1000 entries from the dictionary.
+# Insert each with self as key and data; retrieve each.
+# After all are entered, retrieve all; compare output to original.
+# Then reopen the file, re-retrieve everything.
+# Finally, delete everything.
+proc ndbm { { nentries 1000 } } {
+ source ./include.tcl
+
+ puts "NDBM interfaces test: $nentries"
+
+ # Create the database and open the dictionary
+ set testfile $testdir/ndbmtest
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ cleanup $testdir NULL
+
+ set db [berkdb ndbm_open -create -truncate -mode 0644 $testfile]
+ error_check_good ndbm_open [is_substr $db ndbm] 1
+ set did [open $dict]
+
+ error_check_good rdonly_false [$db rdonly] 0
+
+ set flags 0
+ set txn 0
+ set count 0
+ set skippednullkey 0
+
+ puts "\tNDBM.a: put/get loop"
+ # Here is the loop where we put and get each key/data pair
+ while { [gets $did str] != -1 && $count < $nentries } {
+ # NDBM can't handle zero-length keys
+ if { [string length $str] == 0 } {
+ set skippednullkey 1
+ continue
+ }
+
+ set ret [$db store $str $str insert]
+ error_check_good ndbm_store $ret 0
+
+ set d [$db fetch $str]
+ error_check_good ndbm_fetch $d $str
+ incr count
+ }
+ close $did
+
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tNDBM.b: dump file"
+ set oid [open $t1 w]
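+	# firstkey/nextkey is the historic NDBM sequential-scan interface;
+	# nextkey returns -1 once the scan is exhausted.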
+ for { set key [$db firstkey] } { $key != -1 } {
+ set key [$db nextkey] } {
+ puts $oid $key
+ set d [$db fetch $key]
+ error_check_good ndbm_refetch $d $key
+ }
+
+ # If we had to skip a zero-length key, juggle things to cover up
+ # this fact in the dump.
+ if { $skippednullkey == 1 } {
+ puts $oid ""
+ incr nentries 1
+ }
+ close $oid
+
+ # Now compare the keys to see if they match the dictionary (or ints)
+ set q q
+ filehead $nentries $dict $t3
+ filesort $t3 $t2
+ filesort $t1 $t3
+
+ error_check_good NDBM:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ # File descriptors tests won't work under Windows.
+ if { $is_windows_test != 1 } {
+ puts "\tNDBM.c: pagf/dirf test"
+ set fd [$db pagfno]
+ error_check_bad pagf $fd -1
+ set fd [$db dirfno]
+ error_check_bad dirf $fd -1
+ }
+
+ puts "\tNDBM.d: close, open, and dump file"
+
+ # Now, reopen the file and run the last test again.
+ error_check_good ndbm_close [$db close] 0
+ set db [berkdb ndbm_open -rdonly $testfile]
+ error_check_good ndbm_open2 [is_substr $db ndbm] 1
+ set oid [open $t1 w]
+
+ error_check_good rdonly_true [$db rdonly] "rdonly:not owner"
+
+ for { set key [$db firstkey] } { $key != -1 } {
+ set key [$db nextkey] } {
+ puts $oid $key
+ set d [$db fetch $key]
+ error_check_good ndbm_refetch2 $d $key
+ }
+ if { $skippednullkey == 1 } {
+ puts $oid ""
+ }
+ close $oid
+
+ # Now compare the keys to see if they match the dictionary (or ints)
+ filesort $t1 $t3
+
+ error_check_good NDBM:diff($t2,$t3) \
+ [filecmp $t2 $t3] 0
+
+ # Now, reopen the file and delete each entry
+ puts "\tNDBM.e: sequential scan and delete"
+
+ error_check_good ndbm_close [$db close] 0
+ set db [berkdb ndbm_open $testfile]
+ error_check_good ndbm_open3 [is_substr $db ndbm] 1
+ set oid [open $t1 w]
+
+ for { set key [$db firstkey] } { $key != -1 } {
+ set key [$db nextkey] } {
+ puts $oid $key
+ set ret [$db delete $key]
+ error_check_good ndbm_delete $ret 0
+ }
+ if { $skippednullkey == 1 } {
+ puts $oid ""
+ }
+ close $oid
+
+ # Now compare the keys to see if they match the dictionary (or ints)
+ filesort $t1 $t3
+
+ error_check_good NDBM:diff($t2,$t3) \
+ [filecmp $t2 $t3] 0
+ error_check_good ndbm_close [$db close] 0
+}
diff --git a/storage/bdb/test/parallel.tcl b/storage/bdb/test/parallel.tcl
new file mode 100644
index 00000000000..4e101c088cb
--- /dev/null
+++ b/storage/bdb/test/parallel.tcl
@@ -0,0 +1,295 @@
+# Code to load up the tests into the queue database
+# $Id: parallel.tcl,v 11.28 2002/09/05 17:23:06 sandstro Exp $
+proc load_queue { file {dbdir RUNQUEUE} nitems } {
+
+ puts -nonewline "Loading run queue with $nitems items..."
+ flush stdout
+
+ set env [berkdb_env -create -lock -home $dbdir]
+ error_check_good dbenv [is_valid_env $env] TRUE
+
+ set db [eval {berkdb_open -env $env -create -truncate \
+ -mode 0644 -len 120 -queue queue.db} ]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set fid [open $file]
+
+ set count 0
+
+ while { [gets $fid str] != -1 } {
+ set testarr($count) $str
+ incr count
+ }
+
+ # Randomize array of tests.
+ set rseed [pid]
+ berkdb srand $rseed
+ puts -nonewline "randomizing..."
+ flush stdout
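+	# Fisher-Yates shuffle: swap each element with a randomly chosen
+	# element at or after its own position.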
+ for { set i 0 } { $i < $count } { incr i } {
+ set j [berkdb random_int $i [expr $count - 1]]
+
+ set tmp $testarr($i)
+ set testarr($i) $testarr($j)
+ set testarr($j) $tmp
+ }
+
+ if { [string compare ALL $nitems] != 0 } {
+ set maxload $nitems
+ } else {
+ set maxload $count
+ }
+
+ puts "loading..."
+ flush stdout
+ for { set i 0 } { $i < $maxload } { incr i } {
+ set str $testarr($i)
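+		# -append allocates the next record number and returns it,
+		# so the put of item i should come back as record i + 1.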
+ set ret [eval {$db put -append $str} ]
+ error_check_good put:$db $ret [expr $i + 1]
+ }
+
+ puts "Loaded $maxload records (out of $count)."
+ close $fid
+ $db close
+ $env close
+}
+
+proc init_runqueue { {dbdir RUNQUEUE} nitems list} {
+
+ if { [file exists $dbdir] != 1 } {
+ file mkdir $dbdir
+ }
+ puts "Creating test list..."
+ $list -n
+ load_queue ALL.OUT $dbdir $nitems
+ file delete TEST.LIST
+ file rename ALL.OUT TEST.LIST
+# file delete ALL.OUT
+}
+
+proc run_parallel { nprocs {list run_all} {nitems ALL} } {
+ set basename ./PARALLEL_TESTDIR
+ set queuedir ./RUNQUEUE
+ source ./include.tcl
+
+ mkparalleldirs $nprocs $basename $queuedir
+
+ init_runqueue $queuedir $nitems $list
+
+ set basedir [pwd]
+ set pidlist {}
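+	# Rewrite queuedir as a path that still resolves from inside each
+	# per-process directory, which sits two levels below the build
+	# directory.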
+ set queuedir ../../[string range $basedir \
+ [string last "/" $basedir] end]/$queuedir
+
+ for { set i 1 } { $i <= $nprocs } { incr i } {
+ fileremove -f ALL.OUT.$i
+ set ret [catch {
+ set p [exec $tclsh_path << \
+ "source $test_path/test.tcl;\
+ run_queue $i $basename.$i $queuedir $nitems" &]
+ lappend pidlist $p
+ set f [open $testdir/begin.$p w]
+ close $f
+ } res]
+ }
+ watch_procs $pidlist 300 360000
+
+ set failed 0
+ for { set i 1 } { $i <= $nprocs } { incr i } {
+ if { [check_failed_run ALL.OUT.$i] != 0 } {
+ set failed 1
+ puts "Regression tests failed in process $i."
+ }
+ }
+ if { $failed == 0 } {
+ puts "Regression tests succeeded."
+ }
+}
+
+proc run_queue { i rundir queuedir nitems } {
+ set builddir [pwd]
+ file delete $builddir/ALL.OUT.$i
+ cd $rundir
+
+ puts "Parallel run_queue process $i (pid [pid]) starting."
+
+ source ./include.tcl
+ global env
+
+ set dbenv [berkdb_env -create -lock -home $queuedir]
+ error_check_good dbenv [is_valid_env $dbenv] TRUE
+
+ set db [eval {berkdb_open -env $dbenv \
+ -mode 0644 -len 120 -queue queue.db} ]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set dbc [eval $db cursor]
+ error_check_good cursor [is_valid_cursor $dbc $db] TRUE
+
+ set count 0
+ set waitcnt 0
+
+ while { $waitcnt < 5 } {
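+		# -consume atomically retrieves and deletes the head of
+		# the queue, so concurrent run_queue processes never
+		# execute the same record; give up once the queue has
+		# been empty for 5 consecutive polls.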
+ set line [$db get -consume]
+ if { [ llength $line ] > 0 } {
+ set cmd [lindex [lindex $line 0] 1]
+ set num [lindex [lindex $line 0] 0]
+ set o [open $builddir/ALL.OUT.$i a]
+ puts $o "\nExecuting record $num ([timestamp -w]):\n"
+ set tdir "TESTDIR.$i"
+ regsub {TESTDIR} $cmd $tdir cmd
+ puts $o $cmd
+ close $o
+ if { [expr {$num % 10} == 0] } {
+ puts "Starting test $num of $nitems"
+ }
+ #puts "Process $i, record $num:\n$cmd"
+ set env(PURIFYOPTIONS) \
+ "-log-file=./test$num.%p -follow-child-processes -messages=first"
+ set env(PURECOVOPTIONS) \
+ "-counts-file=./cov.pcv -log-file=./cov.log -follow-child-processes"
+ if [catch {exec $tclsh_path \
+ << "source $test_path/test.tcl; $cmd" \
+ >>& $builddir/ALL.OUT.$i } res] {
+ set o [open $builddir/ALL.OUT.$i a]
+ puts $o "FAIL: '$cmd': $res"
+ close $o
+ }
+ env_cleanup $testdir
+ set o [open $builddir/ALL.OUT.$i a]
+ puts $o "\nEnding record $num ([timestamp])\n"
+ close $o
+ incr count
+ } else {
+ incr waitcnt
+ tclsleep 1
+ }
+ }
+
+ puts "Process $i: $count commands executed"
+
+ $dbc close
+ $db close
+ $dbenv close
+
+ #
+ # We need to put the pid file in the builddir's idea
+ # of testdir, not this child process' local testdir.
+ # Therefore source builddir's include.tcl to get its
+ # testdir.
+ # !!! This resets testdir, so don't do anything else
+ # local to the child after this.
+ source $builddir/include.tcl
+
+ set f [open $builddir/$testdir/end.[pid] w]
+ close $f
+}
+
+proc mkparalleldirs { nprocs basename queuedir } {
+ source ./include.tcl
+ set dir [pwd]
+
+ if { $is_windows_test != 1 } {
+ set EXE ""
+ } else {
+ set EXE ".exe"
+ }
+ for { set i 1 } { $i <= $nprocs } { incr i } {
+ set destdir $basename.$i
+ catch {file mkdir $destdir}
+ puts "Created $destdir"
+ if { $is_windows_test == 1 } {
+ catch {file mkdir $destdir/Debug}
+ catch {eval file copy \
+ [eval glob {$dir/Debug/*.dll}] $destdir/Debug}
+ }
+ catch {eval file copy \
+ [eval glob {$dir/{.libs,include.tcl}}] $destdir}
+ # catch {eval file copy $dir/$queuedir $destdir}
+ catch {eval file copy \
+ [eval glob {$dir/db_{checkpoint,deadlock}$EXE} \
+ {$dir/db_{dump,load,printlog,recover,stat,upgrade}$EXE} \
+ {$dir/db_{archive,verify}$EXE}] \
+ $destdir}
+
+ # Create modified copies of include.tcl in parallel
+ # directories so paths still work.
+
+ set infile [open ./include.tcl r]
+ set d [read $infile]
+ close $infile
+
+ regsub {test_path } $d {test_path ../} d
+ regsub {src_root } $d {src_root ../} d
+ set tdir "TESTDIR.$i"
+ regsub -all {TESTDIR} $d $tdir d
+ regsub {KILL \.} $d {KILL ..} d
+ set outfile [open $destdir/include.tcl w]
+ puts $outfile $d
+ close $outfile
+
+ global svc_list
+ foreach svc_exe $svc_list {
+ if { [file exists $dir/$svc_exe] } {
+ catch {eval file copy $dir/$svc_exe $destdir}
+ }
+ }
+ }
+}
+
+proc run_ptest { nprocs test args } {
+ global parms
+ set basename ./PARALLEL_TESTDIR
+ set queuedir NULL
+ source ./include.tcl
+
+ mkparalleldirs $nprocs $basename $queuedir
+
+ if { [info exists parms($test)] } {
+ foreach method \
+ "hash queue queueext recno rbtree frecno rrecno btree" {
+ if { [eval exec_ptest $nprocs $basename \
+ $test $method $args] != 0 } {
+ break
+ }
+ }
+ } else {
+ eval exec_ptest $nprocs $basename $test $args
+ }
+}
+
+proc exec_ptest { nprocs basename test args } {
+ source ./include.tcl
+
+ set basedir [pwd]
+ set pidlist {}
+ puts "Running $nprocs parallel runs of $test"
+ for { set i 1 } { $i <= $nprocs } { incr i } {
+ set outf ALL.OUT.$i
+ fileremove -f $outf
+ set ret [catch {
+ set p [exec $tclsh_path << \
+ "cd $basename.$i;\
+ source ../$test_path/test.tcl;\
+ $test $args" >& $outf &]
+ lappend pidlist $p
+ set f [open $testdir/begin.$p w]
+ close $f
+ } res]
+ }
+ watch_procs $pidlist 30 36000
+ set failed 0
+ for { set i 1 } { $i <= $nprocs } { incr i } {
+ if { [check_failed_run ALL.OUT.$i] != 0 } {
+ set failed 1
+ puts "Test $test failed in process $i."
+ }
+ }
+ if { $failed == 0 } {
+ puts "Test $test succeeded all processes"
+ return 0
+ } else {
+ puts "Test failed: stopping"
+ return 1
+ }
+}
diff --git a/storage/bdb/test/recd001.tcl b/storage/bdb/test/recd001.tcl
new file mode 100644
index 00000000000..bc7ac6d896a
--- /dev/null
+++ b/storage/bdb/test/recd001.tcl
@@ -0,0 +1,242 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: recd001.tcl,v 11.40 2002/05/08 19:36:18 sandstro Exp $
+#
+# TEST recd001
+# TEST Per-operation recovery tests for non-duplicate, non-split
+# TEST messages. Makes sure that we exercise redo, undo, and do-nothing
+# TEST conditions. Any test that appears with the message (change state)
+# TEST indicates that we've already run the particular test, but we are
+# TEST running it again so that we can change the state of the data base
+# TEST to prepare for the next test (this applies to all other recovery
+# TEST tests as well).
+# TEST
+# TEST These are the most basic recovery tests. We do individual recovery
+# TEST tests for each operation in the access method interface. First we
+# TEST create a file and capture the state of the database (i.e., we copy
+# TEST it). Then we run a transaction containing a single operation. In
+# TEST one test, we abort the transaction and compare the outcome to the
+# TEST original copy of the file. In the second test, we restore the
+# TEST original copy of the database and then run recovery and compare
+# TEST this against the actual database.
+proc recd001 { method {select 0} args} {
+ global fixed_len
+ source ./include.tcl
+
+ set orig_fixed_len $fixed_len
+ set opts [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Recd001: $method operation/transaction tests"
+
+ # Create the database and environment.
+ env_cleanup $testdir
+
+ # The recovery tests were originally written to
+ # do a command, abort, do it again, commit, and then
+ # repeat the sequence with another command. Each command
+ # tends to require that the previous command succeeded and
+ # left the database a certain way. To avoid cluttering up the
+ # op_recover interface as well as the test code, we create two
+ # databases; one does abort and then commit for each op, the
+	# other does prepare-abort, prepare-discard, and prepare-commit
+	# for each op. If all goes well, this allows each command to
+	# depend on exactly one successful iteration of the previous
+	# command.
+ set testfile recd001.db
+ set testfile2 recd001-2.db
+
+ set flags "-create -txn -home $testdir"
+
+ puts "\tRecd001.a.0: creating environment"
+ set env_cmd "berkdb_env $flags"
+ set dbenv [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $dbenv] TRUE
+
+ #
+ # We need to create a database to get the pagesize (either
+ # the default or whatever might have been specified).
+ # Then remove it so we can compute fixed_len and create the
+ # real database.
+ set oflags "-create $omethod -mode 0644 \
+ -env $dbenv $opts $testfile"
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+ set stat [$db stat]
+ #
+ # Compute the fixed_len based on the pagesize being used.
+ # We want the fixed_len to be 1/4 the pagesize.
+ #
+ set pg [get_pagesize $stat]
+ error_check_bad get_pagesize $pg -1
+ set fixed_len [expr $pg / 4]
+ error_check_good db_close [$db close] 0
+ error_check_good dbremove [berkdb dbremove -env $dbenv $testfile] 0
+
+ # Convert the args again because fixed_len is now real.
+ # Create the databases and close the environment.
+ # cannot specify db truncate in txn protected env!!!
+ set opts [convert_args $method ""]
+ set omethod [convert_method $method]
+ set oflags "-create $omethod -mode 0644 \
+ -env $dbenv $opts $testfile"
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+ error_check_good db_close [$db close] 0
+
+ set oflags "-create $omethod -mode 0644 \
+ -env $dbenv $opts $testfile2"
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+ error_check_good db_close [$db close] 0
+
+ error_check_good env_close [$dbenv close] 0
+
+ puts "\tRecd001.a.1: Verify db_printlog can read logfile"
+ set tmpfile $testdir/printlog.out
+ set stat [catch {exec $util_path/db_printlog -h $testdir \
+ > $tmpfile} ret]
+ error_check_good db_printlog $stat 0
+ fileremove $tmpfile
+
+ # List of recovery tests: {CMD MSG} pairs.
+ set rlist {
+ { {DB put -txn TXNID $key $data} "Recd001.b: put"}
+ { {DB del -txn TXNID $key} "Recd001.c: delete"}
+ { {DB put -txn TXNID $bigkey $data} "Recd001.d: big key put"}
+ { {DB del -txn TXNID $bigkey} "Recd001.e: big key delete"}
+ { {DB put -txn TXNID $key $bigdata} "Recd001.f: big data put"}
+ { {DB del -txn TXNID $key} "Recd001.g: big data delete"}
+ { {DB put -txn TXNID $key $data} "Recd001.h: put (change state)"}
+ { {DB put -txn TXNID $key $newdata} "Recd001.i: overwrite"}
+ { {DB put -txn TXNID -partial {$off $len} $key $partial_grow}
+ "Recd001.j: partial put growing"}
+ { {DB put -txn TXNID $key $newdata} "Recd001.k: overwrite (fix)"}
+ { {DB put -txn TXNID -partial {$off $len} $key $partial_shrink}
+ "Recd001.l: partial put shrinking"}
+ { {DB put -txn TXNID -append $data} "Recd001.m: put -append"}
+ { {DB get -txn TXNID -consume} "Recd001.n: db get -consume"}
+ }
+
+ # These are all the data values that we're going to need to read
+ # through the operation table and run the recovery tests.
+
+ if { [is_record_based $method] == 1 } {
+ set key 1
+ } else {
+ set key recd001_key
+ }
+ set data recd001_data
+ set newdata NEWrecd001_dataNEW
+ set off 3
+ set len 12
+
+ set partial_grow replacement_record_grow
+ set partial_shrink xxx
+ if { [is_fixed_length $method] == 1 } {
+ set len [string length $partial_grow]
+ set partial_shrink $partial_grow
+ }
+ set bigdata [replicate $key $fixed_len]
+ if { [is_record_based $method] == 1 } {
+ set bigkey $fixed_len
+ } else {
+ set bigkey [replicate $key $fixed_len]
+ }
+
+ foreach pair $rlist {
+ set cmd [subst [lindex $pair 0]]
+ set msg [lindex $pair 1]
+ if { $select != 0 } {
+ set tag [lindex $msg 0]
+ set tail [expr [string length $tag] - 2]
+ set tag [string range $tag $tail $tail]
+ if { [lsearch $select $tag] == -1 } {
+ continue
+ }
+ }
+
+ if { [is_queue $method] != 1 } {
+ if { [string first append $cmd] != -1 } {
+ continue
+ }
+ if { [string first consume $cmd] != -1 } {
+ continue
+ }
+ }
+
+# if { [is_fixed_length $method] == 1 } {
+# if { [string first partial $cmd] != -1 } {
+# continue
+# }
+# }
+ op_recover abort $testdir $env_cmd $testfile $cmd $msg
+ op_recover commit $testdir $env_cmd $testfile $cmd $msg
+ #
+ # Note that since prepare-discard ultimately aborts
+ # the txn, it must come before prepare-commit.
+ #
+ op_recover prepare-abort $testdir $env_cmd $testfile2 \
+ $cmd $msg
+ op_recover prepare-discard $testdir $env_cmd $testfile2 \
+ $cmd $msg
+ op_recover prepare-commit $testdir $env_cmd $testfile2 \
+ $cmd $msg
+ }
+ set fixed_len $orig_fixed_len
+
+ if { [is_fixed_length $method] == 1 } {
+ puts "Skipping remainder of test for fixed length methods"
+ return
+ }
+
+ #
+ # Check partial extensions. If we add a key/data to the database
+ # and then expand it using -partial, then recover, recovery was
+ # failing in #3944. Check that scenario here.
+ #
+ # !!!
+ # We loop here because on each iteration, we need to clean up
+ # the old env (i.e. this test does not depend on earlier runs).
+	# If we run it without cleaning up the env in between, we do not
+ # test the scenario of #3944.
+ #
+ set len [string length $data]
+ set len2 256
+ set part_data [replicate "abcdefgh" 32]
+ set p [list 0 $len]
+ set cmd [subst \
+ {DB put -txn TXNID -partial {$len $len2} $key $part_data}]
+ set msg "Recd001.o: partial put prepopulated/expanding"
+ foreach op {abort commit prepare-abort prepare-discard prepare-commit} {
+ env_cleanup $testdir
+
+ set dbenv [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $dbenv] TRUE
+ set t [$dbenv txn]
+ error_check_good txn_begin [is_valid_txn $t $dbenv] TRUE
+ set oflags "-create $omethod -mode 0644 \
+ -env $dbenv -txn $t $opts $testfile"
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+ set oflags "-create $omethod -mode 0644 \
+ -env $dbenv -txn $t $opts $testfile2"
+ set db2 [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db2] TRUE
+
+ set ret [$db put -txn $t -partial $p $key $data]
+ error_check_good dbput $ret 0
+
+ set ret [$db2 put -txn $t -partial $p $key $data]
+ error_check_good dbput $ret 0
+ error_check_good txncommit [$t commit] 0
+ error_check_good dbclose [$db close] 0
+ error_check_good dbclose [$db2 close] 0
+ error_check_good dbenvclose [$dbenv close] 0
+
+ op_recover $op $testdir $env_cmd $testfile $cmd $msg
+ }
+ return
+}
diff --git a/storage/bdb/test/recd002.tcl b/storage/bdb/test/recd002.tcl
new file mode 100644
index 00000000000..ed579291283
--- /dev/null
+++ b/storage/bdb/test/recd002.tcl
@@ -0,0 +1,103 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: recd002.tcl,v 11.30 2002/02/25 16:44:24 sandstro Exp $
+#
+# TEST recd002
+# TEST Split recovery tests. For every known split log message, makes sure
+# TEST that we exercise redo, undo, and do-nothing conditions.
+proc recd002 { method {select 0} args} {
+ source ./include.tcl
+ global rand_init
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Recd002: skipping for specific pagesizes"
+ return
+ }
+ berkdb srand $rand_init
+
+	# Queues don't do splits, so we don't really need the small page
+	# size; worse, the small page size is smaller than the record,
+	# which is a problem.
+ if { [string compare $omethod "-queue"] == 0 } {
+ set pagesize 4096
+ } else {
+ set pagesize 512
+ }
+ puts "Recd002: $method split recovery tests"
+
+ env_cleanup $testdir
+ set testfile recd002.db
+ set testfile2 recd002-2.db
+ set eflags \
+ "-create -txn -lock_max 2000 -home $testdir"
+
+ puts "\tRecd002.a: creating environment"
+ set env_cmd "berkdb_env $eflags"
+ set dbenv [eval $env_cmd]
+ error_check_bad dbenv $dbenv NULL
+
+ # Create the databases. We will use a small page size so that splits
+ # happen fairly quickly.
+ set oflags "-create $args $omethod -mode 0644 -env $dbenv\
+ -pagesize $pagesize $testfile"
+ set db [eval {berkdb_open} $oflags]
+ error_check_bad db_open $db NULL
+ error_check_good db_open [is_substr $db db] 1
+ error_check_good db_close [$db close] 0
+ set oflags "-create $args $omethod -mode 0644 -env $dbenv\
+ -pagesize $pagesize $testfile2"
+ set db [eval {berkdb_open} $oflags]
+ error_check_bad db_open $db NULL
+ error_check_good db_open [is_substr $db db] 1
+ error_check_good db_close [$db close] 0
+ reset_env $dbenv
+
+ # List of recovery tests: {CMD MSG} pairs
+ set slist {
+ { {populate DB $omethod TXNID $n 0 0} "Recd002.b: splits"}
+ { {unpopulate DB TXNID $r} "Recd002.c: Remove keys"}
+ }
+
+ # If pages are 512 bytes, then adding 512 key/data pairs
+ # should be more than sufficient.
+ set n 512
+ set r [expr $n / 2 ]
+ foreach pair $slist {
+ set cmd [subst [lindex $pair 0]]
+ set msg [lindex $pair 1]
+ if { $select != 0 } {
+ set tag [lindex $msg 0]
+ set tail [expr [string length $tag] - 2]
+ set tag [string range $tag $tail $tail]
+ if { [lsearch $select $tag] == -1 } {
+ continue
+ }
+ }
+ op_recover abort $testdir $env_cmd $testfile $cmd $msg
+ op_recover commit $testdir $env_cmd $testfile $cmd $msg
+ #
+ # Note that since prepare-discard ultimately aborts
+ # the txn, it must come before prepare-commit.
+ #
+ op_recover prepare-abort $testdir $env_cmd $testfile2 \
+ $cmd $msg
+ op_recover prepare-discard $testdir $env_cmd $testfile2 \
+ $cmd $msg
+ op_recover prepare-commit $testdir $env_cmd $testfile2 \
+ $cmd $msg
+ }
+
+ puts "\tRecd002.d: Verify db_printlog can read logfile"
+ set tmpfile $testdir/printlog.out
+ set stat [catch {exec $util_path/db_printlog -h $testdir \
+ > $tmpfile} ret]
+ error_check_good db_printlog $stat 0
+ fileremove $tmpfile
+}
diff --git a/storage/bdb/test/recd003.tcl b/storage/bdb/test/recd003.tcl
new file mode 100644
index 00000000000..0fd054832ce
--- /dev/null
+++ b/storage/bdb/test/recd003.tcl
@@ -0,0 +1,119 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: recd003.tcl,v 11.30 2002/02/25 16:44:24 sandstro Exp $
+#
+# TEST recd003
+# TEST Duplicate recovery tests. For every known duplicate log message,
+# TEST makes sure that we exercise redo, undo, and do-nothing conditions.
+# TEST
+# TEST Test all the duplicate log messages and recovery operations. We make
+# TEST sure that we exercise all possible recovery actions: redo, undo, undo
+# TEST but no fix necessary, and redo but no fix necessary.
+proc recd003 { method {select 0} args } {
+ source ./include.tcl
+ global rand_init
+
+ set largs [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_record_based $method] == 1 || [is_rbtree $method] == 1 } {
+ puts "Recd003 skipping for method $method"
+ return
+ }
+ puts "Recd003: $method duplicate recovery tests"
+
+ berkdb srand $rand_init
+
+ env_cleanup $testdir
+ # See comment in recd001.tcl for why there are two database files...
+ set testfile recd003.db
+ set testfile2 recd003-2.db
+ set eflags "-create -txn -home $testdir"
+
+ puts "\tRecd003.a: creating environment"
+ set env_cmd "berkdb_env $eflags"
+ set dbenv [eval $env_cmd]
+ error_check_bad dbenv $dbenv NULL
+
+ # Create the databases.
+ set oflags \
+ "-create $largs -mode 0644 $omethod -dup -env $dbenv $testfile"
+ set db [eval {berkdb_open} $oflags]
+ error_check_bad db_open $db NULL
+ error_check_good db_open [is_substr $db db] 1
+ error_check_good db_close [$db close] 0
+ set oflags \
+ "-create $largs -mode 0644 $omethod -dup -env $dbenv $testfile2"
+ set db [eval {berkdb_open} $oflags]
+ error_check_bad db_open $db NULL
+ error_check_good db_open [is_substr $db db] 1
+ error_check_good db_close [$db close] 0
+ reset_env $dbenv
+
+ # These are all the data values that we're going to need to read
+ # through the operation table and run the recovery tests.
+ set n 10
+ set dupn 2000
+ set bign 500
+
+ # List of recovery tests: {CMD MSG} pairs
+ set dlist {
+ { {populate DB $omethod TXNID $n 1 0}
+ "Recd003.b: add dups"}
+ { {DB del -txn TXNID duplicate_key}
+ "Recd003.c: remove dups all at once"}
+ { {populate DB $omethod TXNID $n 1 0}
+ "Recd003.d: add dups (change state)"}
+ { {unpopulate DB TXNID 0}
+ "Recd003.e: remove dups 1 at a time"}
+ { {populate DB $omethod TXNID $dupn 1 0}
+ "Recd003.f: dup split"}
+ { {DB del -txn TXNID duplicate_key}
+ "Recd003.g: remove dups (change state)"}
+ { {populate DB $omethod TXNID $n 1 1}
+ "Recd003.h: add big dup"}
+ { {DB del -txn TXNID duplicate_key}
+ "Recd003.i: remove big dup all at once"}
+ { {populate DB $omethod TXNID $n 1 1}
+ "Recd003.j: add big dup (change state)"}
+ { {unpopulate DB TXNID 0}
+ "Recd003.k: remove big dup 1 at a time"}
+ { {populate DB $omethod TXNID $bign 1 1}
+ "Recd003.l: split big dup"}
+ }
+
+ foreach pair $dlist {
+ set cmd [subst [lindex $pair 0]]
+ set msg [lindex $pair 1]
+ if { $select != 0 } {
+ set tag [lindex $msg 0]
+ set tail [expr [string length $tag] - 2]
+ set tag [string range $tag $tail $tail]
+ if { [lsearch $select $tag] == -1 } {
+ continue
+ }
+ }
+ op_recover abort $testdir $env_cmd $testfile $cmd $msg
+ op_recover commit $testdir $env_cmd $testfile $cmd $msg
+ #
+ # Note that since prepare-discard ultimately aborts
+ # the txn, it must come before prepare-commit.
+ #
+ op_recover prepare-abort $testdir $env_cmd $testfile2 \
+ $cmd $msg
+ op_recover prepare-discard $testdir $env_cmd $testfile2 \
+ $cmd $msg
+ op_recover prepare-commit $testdir $env_cmd $testfile2 \
+ $cmd $msg
+ }
+
+ puts "\tRecd003.m: Verify db_printlog can read logfile"
+ set tmpfile $testdir/printlog.out
+ set stat [catch {exec $util_path/db_printlog -h $testdir \
+ > $tmpfile} ret]
+ error_check_good db_printlog $stat 0
+ fileremove $tmpfile
+}
diff --git a/storage/bdb/test/recd004.tcl b/storage/bdb/test/recd004.tcl
new file mode 100644
index 00000000000..74504ac3cd7
--- /dev/null
+++ b/storage/bdb/test/recd004.tcl
@@ -0,0 +1,95 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: recd004.tcl,v 11.29 2002/02/25 16:44:25 sandstro Exp $
+#
+# TEST recd004
+# TEST Big key test where big key gets elevated to internal page.
+proc recd004 { method {select 0} args} {
+ source ./include.tcl
+ global rand_init
+
+ set opts [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Recd004: skipping for specific pagesizes"
+ return
+ }
+ if { [is_record_based $method] == 1 } {
+ puts "Recd004 skipping for method $method"
+ return
+ }
+ puts "Recd004: $method big-key on internal page recovery tests"
+
+ berkdb srand $rand_init
+
+ env_cleanup $testdir
+ set testfile recd004.db
+ set testfile2 recd004-2.db
+ set eflags "-create -txn -home $testdir"
+ puts "\tRecd004.a: creating environment"
+ set env_cmd "berkdb_env $eflags"
+ set dbenv [eval $env_cmd]
+ error_check_bad dbenv $dbenv NULL
+
+ # Create the databases. We will use a small page size so that we
+ # elevate quickly
+ set oflags "-create -mode 0644 \
+ $omethod -env $dbenv $opts -pagesize 512 $testfile"
+ set db [eval {berkdb_open} $oflags]
+ error_check_bad db_open $db NULL
+ error_check_good db_open [is_substr $db db] 1
+ error_check_good db_close [$db close] 0
+ set oflags "-create -mode 0644 \
+ $omethod -env $dbenv $opts -pagesize 512 $testfile2"
+ set db [eval {berkdb_open} $oflags]
+ error_check_bad db_open $db NULL
+ error_check_good db_open [is_substr $db db] 1
+ error_check_good db_close [$db close] 0
+ reset_env $dbenv
+
+ # List of recovery tests: {CMD MSG} pairs
+ set slist {
+ { {big_populate DB TXNID $n} "Recd004.b: big key elevation"}
+ { {unpopulate DB TXNID 0} "Recd004.c: Remove keys"}
+ }
+
+ # If pages are 512 bytes, then adding 512 key/data pairs
+ # should be more than sufficient.
+ set n 512
+ foreach pair $slist {
+ set cmd [subst [lindex $pair 0]]
+ set msg [lindex $pair 1]
+ if { $select != 0 } {
+ set tag [lindex $msg 0]
+ set tail [expr [string length $tag] - 2]
+ set tag [string range $tag $tail $tail]
+ if { [lsearch $select $tag] == -1 } {
+ continue
+ }
+ }
+ op_recover abort $testdir $env_cmd $testfile $cmd $msg
+ op_recover commit $testdir $env_cmd $testfile $cmd $msg
+ #
+ # Note that since prepare-discard ultimately aborts
+ # the txn, it must come before prepare-commit.
+ #
+ op_recover prepare-abort $testdir $env_cmd $testfile2 \
+ $cmd $msg
+ op_recover prepare-discard $testdir $env_cmd $testfile2 \
+ $cmd $msg
+ op_recover prepare-commit $testdir $env_cmd $testfile2 \
+ $cmd $msg
+ }
+
+ puts "\tRecd004.d: Verify db_printlog can read logfile"
+ set tmpfile $testdir/printlog.out
+ set stat [catch {exec $util_path/db_printlog -h $testdir \
+ > $tmpfile} ret]
+ error_check_good db_printlog $stat 0
+ fileremove $tmpfile
+}
diff --git a/storage/bdb/test/recd005.tcl b/storage/bdb/test/recd005.tcl
new file mode 100644
index 00000000000..7668c9e3be3
--- /dev/null
+++ b/storage/bdb/test/recd005.tcl
@@ -0,0 +1,230 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: recd005.tcl,v 11.34 2002/05/22 15:42:39 sue Exp $
+#
+# TEST recd005
+# TEST Verify reuse of file ids works on catastrophic recovery.
+# TEST
+# TEST Make sure that we can do catastrophic recovery even if we open
+# TEST files using the same log file id.
+proc recd005 { method args} {
+ source ./include.tcl
+ global rand_init
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Recd005: $method catastrophic recovery"
+
+ berkdb srand $rand_init
+
+ set testfile1 recd005.1.db
+ set testfile2 recd005.2.db
+ set eflags \
+ "-create -txn -lock_max 2000 -lock_max_objects 2000 -home $testdir"
+
+ set tnum 0
+ foreach sizes "{1000 10} {10 1000}" {
+ foreach ops "{abort abort} {abort commit} {commit abort} \
+ {commit commit}" {
+ env_cleanup $testdir
+ incr tnum
+
+ set s1 [lindex $sizes 0]
+ set s2 [lindex $sizes 1]
+ set op1 [lindex $ops 0]
+ set op2 [lindex $ops 1]
+ puts "\tRecd005.$tnum: $s1 $s2 $op1 $op2"
+
+ puts "\tRecd005.$tnum.a: creating environment"
+ set env_cmd "berkdb_env $eflags"
+ set dbenv [eval $env_cmd]
+ error_check_bad dbenv $dbenv NULL
+
+ # Create the two databases.
+ set oflags \
+ "-create -mode 0644 -env $dbenv $args $omethod"
+ set db1 [eval {berkdb_open} $oflags $testfile1]
+ error_check_bad db_open $db1 NULL
+ error_check_good db_open [is_substr $db1 db] 1
+ error_check_good db_close [$db1 close] 0
+
+ set db2 [eval {berkdb_open} $oflags $testfile2]
+ error_check_bad db_open $db2 NULL
+ error_check_good db_open [is_substr $db2 db] 1
+ error_check_good db_close [$db2 close] 0
+ $dbenv close
+
+ set dbenv [eval $env_cmd]
+ puts "\tRecd005.$tnum.b: Populating databases"
+ do_one_file \
+ $testdir $method $dbenv $env_cmd $testfile1 $s1 $op1
+ do_one_file \
+ $testdir $method $dbenv $env_cmd $testfile2 $s2 $op2
+
+ puts "\tRecd005.$tnum.c: Verifying initial population"
+ check_file $testdir $env_cmd $testfile1 $op1
+ check_file $testdir $env_cmd $testfile2 $op2
+
+ # Now, close the environment (so that recovery will work
+ # on NT which won't allow delete of an open file).
+ reset_env $dbenv
+
+ berkdb debug_check
+ puts -nonewline \
+ "\tRecd005.$tnum.d: About to run recovery ... "
+ flush stdout
+
+ set stat [catch \
+ {exec $util_path/db_recover -h $testdir -c} \
+ result]
+ if { $stat == 1 } {
+ error "Recovery error: $result."
+ }
+ puts "complete"
+
+ # Substitute a file that will need recovery and try
+ # running recovery again.
+ if { $op1 == "abort" } {
+ file copy -force $testdir/$testfile1.afterop \
+ $testdir/$testfile1
+ move_file_extent $testdir $testfile1 \
+ afterop copy
+ } else {
+ file copy -force $testdir/$testfile1.init \
+ $testdir/$testfile1
+ move_file_extent $testdir $testfile1 init copy
+ }
+ if { $op2 == "abort" } {
+ file copy -force $testdir/$testfile2.afterop \
+ $testdir/$testfile2
+ move_file_extent $testdir $testfile2 \
+ afterop copy
+ } else {
+ file copy -force $testdir/$testfile2.init \
+ $testdir/$testfile2
+ move_file_extent $testdir $testfile2 init copy
+ }
+
+ berkdb debug_check
+ puts -nonewline "\tRecd005.$tnum.e:\
+ About to run recovery on pre-op database ... "
+ flush stdout
+
+ set stat \
+ [catch {exec $util_path/db_recover \
+ -h $testdir -c} result]
+ if { $stat == 1 } {
+ error "Recovery error: $result."
+ }
+ puts "complete"
+
+ set dbenv [eval $env_cmd]
+ check_file $testdir $env_cmd $testfile1 $op1
+ check_file $testdir $env_cmd $testfile2 $op2
+ reset_env $dbenv
+
+ puts "\tRecd005.$tnum.f:\
+ Verify db_printlog can read logfile"
+ set tmpfile $testdir/printlog.out
+ set stat [catch \
+ {exec $util_path/db_printlog -h $testdir \
+ > $tmpfile} ret]
+ error_check_good db_printlog $stat 0
+ fileremove $tmpfile
+ }
+ }
+}
+
+proc do_one_file { dir method env env_cmd filename num op } {
+ source ./include.tcl
+
+ set init_file $dir/$filename.t1
+ set afterop_file $dir/$filename.t2
+ set final_file $dir/$filename.t3
+
+ # Save the initial file and open the environment and the first file
+ file copy -force $dir/$filename $dir/$filename.init
+ copy_extent_file $dir $filename init
+ set oflags "-auto_commit -unknown -env $env"
+ set db [eval {berkdb_open} $oflags $filename]
+
+ # Dump out file contents for initial case
+ open_and_dump_file $filename $env $init_file nop \
+ dump_file_direction "-first" "-next"
+
+ set txn [$env txn]
+ error_check_bad txn_begin $txn NULL
+ error_check_good txn_begin [is_substr $txn $env] 1
+
+ # Now fill in the db and the txnid in the command
+ populate $db $method $txn $num 0 0
+
+ # Sync the file so that we can capture a snapshot to test
+ # recovery.
+ error_check_good sync:$db [$db sync] 0
+ file copy -force $dir/$filename $dir/$filename.afterop
+ copy_extent_file $dir $filename afterop
+ open_and_dump_file $testdir/$filename.afterop NULL \
+ $afterop_file nop dump_file_direction "-first" "-next"
+ error_check_good txn_$op:$txn [$txn $op] 0
+
+ if { $op == "commit" } {
+ puts "\t\tFile $filename executed and committed."
+ } else {
+ puts "\t\tFile $filename executed and aborted."
+ }
+
+ # Dump out file and save a copy.
+ error_check_good sync:$db [$db sync] 0
+ open_and_dump_file $testdir/$filename NULL $final_file nop \
+ dump_file_direction "-first" "-next"
+ file copy -force $dir/$filename $dir/$filename.final
+ copy_extent_file $dir $filename final
+
+ # If this is an abort, it should match the original file.
+ # If this was a commit, then this file should match the
+ # afterop file.
+ if { $op == "abort" } {
+ filesort $init_file $init_file.sort
+ filesort $final_file $final_file.sort
+ error_check_good \
+ diff(initial,post-$op):diff($init_file,$final_file) \
+ [filecmp $init_file.sort $final_file.sort] 0
+ } else {
+ filesort $afterop_file $afterop_file.sort
+ filesort $final_file $final_file.sort
+ error_check_good \
+ diff(post-$op,pre-commit):diff($afterop_file,$final_file) \
+ [filecmp $afterop_file.sort $final_file.sort] 0
+ }
+
+ error_check_good close:$db [$db close] 0
+}
+
+proc check_file { dir env_cmd filename op } {
+ source ./include.tcl
+
+ set init_file $dir/$filename.t1
+ set afterop_file $dir/$filename.t2
+ set final_file $dir/$filename.t3
+
+ open_and_dump_file $testdir/$filename NULL $final_file nop \
+ dump_file_direction "-first" "-next"
+ if { $op == "abort" } {
+ filesort $init_file $init_file.sort
+ filesort $final_file $final_file.sort
+ error_check_good \
+ diff(initial,post-$op):diff($init_file,$final_file) \
+ [filecmp $init_file.sort $final_file.sort] 0
+ } else {
+ filesort $afterop_file $afterop_file.sort
+ filesort $final_file $final_file.sort
+ error_check_good \
+ diff(pre-commit,post-$op):diff($afterop_file,$final_file) \
+ [filecmp $afterop_file.sort $final_file.sort] 0
+ }
+}
diff --git a/storage/bdb/test/recd006.tcl b/storage/bdb/test/recd006.tcl
new file mode 100644
index 00000000000..fc35e755b08
--- /dev/null
+++ b/storage/bdb/test/recd006.tcl
@@ -0,0 +1,262 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: recd006.tcl,v 11.26 2002/03/15 16:30:53 sue Exp $
+#
+# TEST recd006
+# TEST Nested transactions.
+proc recd006 { method {select 0} args} {
+ global kvals
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_record_based $method] == 1 || [is_rbtree $method] == 1 } {
+ puts "Recd006 skipping for method $method"
+ return
+ }
+ puts "Recd006: $method nested transactions"
+
+ # Create the database and environment.
+ env_cleanup $testdir
+
+ set dbfile recd006.db
+ set testfile $testdir/$dbfile
+
+ puts "\tRecd006.a: create database"
+ set oflags "-create $args $omethod $testfile"
+ set db [eval {berkdb_open} $oflags]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Make sure that we have enough entries to span a couple of
+ # different pages.
+ set did [open $dict]
+ set count 0
+ while { [gets $did str] != -1 && $count < 1000 } {
+ if { [string compare $omethod "-recno"] == 0 } {
+ set key [expr $count + 1]
+ } else {
+ set key $str
+ }
+
+ set ret [$db put -nooverwrite $key $str]
+ error_check_good put $ret 0
+
+ incr count
+ }
+ close $did
+
+ # Variables used below:
+ # p1: a pair of keys that are likely to be on the same page.
+ # p2: a pair of keys that are likely to be on the same page,
+ # but on a page different than those in p1.
+ set dbc [$db cursor]
+ error_check_good dbc [is_substr $dbc $db] 1
+
+ set ret [$dbc get -first]
+ error_check_bad dbc_get:DB_FIRST [llength $ret] 0
+ set p1 [lindex [lindex $ret 0] 0]
+ set kvals($p1) [lindex [lindex $ret 0] 1]
+
+ set ret [$dbc get -next]
+ error_check_bad dbc_get:DB_NEXT [llength $ret] 0
+ lappend p1 [lindex [lindex $ret 0] 0]
+ set kvals([lindex [lindex $ret 0] 0]) [lindex [lindex $ret 0] 1]
+
+ set ret [$dbc get -last]
+ error_check_bad dbc_get:DB_LAST [llength $ret] 0
+ set p2 [lindex [lindex $ret 0] 0]
+ set kvals($p2) [lindex [lindex $ret 0] 1]
+
+ set ret [$dbc get -prev]
+ error_check_bad dbc_get:DB_PREV [llength $ret] 0
+ lappend p2 [lindex [lindex $ret 0] 0]
+ set kvals([lindex [lindex $ret 0] 0]) [lindex [lindex $ret 0] 1]
+
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good db_close [$db close] 0
+
+ # Now create the full transaction environment.
+ set eflags "-create -txn -home $testdir"
+
+ puts "\tRecd006.b: creating environment"
+ set env_cmd "berkdb_env $eflags"
+ set dbenv [eval $env_cmd]
+ error_check_bad dbenv $dbenv NULL
+
+ # Reset the environment.
+ reset_env $dbenv
+
+ set p1 [list $p1]
+ set p2 [list $p2]
+
+ # List of recovery tests: {CMD MSG} pairs
+ set rlist {
+ { {nesttest DB TXNID ENV 1 $p1 $p2 commit commit}
+ "Recd006.c: children (commit commit)"}
+ { {nesttest DB TXNID ENV 0 $p1 $p2 commit commit}
+ "Recd006.d: children (commit commit)"}
+ { {nesttest DB TXNID ENV 1 $p1 $p2 commit abort}
+ "Recd006.e: children (commit abort)"}
+ { {nesttest DB TXNID ENV 0 $p1 $p2 commit abort}
+ "Recd006.f: children (commit abort)"}
+ { {nesttest DB TXNID ENV 1 $p1 $p2 abort abort}
+ "Recd006.g: children (abort abort)"}
+ { {nesttest DB TXNID ENV 0 $p1 $p2 abort abort}
+ "Recd006.h: children (abort abort)"}
+ { {nesttest DB TXNID ENV 1 $p1 $p2 abort commit}
+ "Recd006.i: children (abort commit)"}
+ { {nesttest DB TXNID ENV 0 $p1 $p2 abort commit}
+ "Recd006.j: children (abort commit)"}
+ }
+
+ foreach pair $rlist {
+ set cmd [subst [lindex $pair 0]]
+ set msg [lindex $pair 1]
+ if { $select != 0 } {
+ set tag [lindex $msg 0]
+ set tail [expr [string length $tag] - 2]
+ set tag [string range $tag $tail $tail]
+ if { [lsearch $select $tag] == -1 } {
+ continue
+ }
+ }
+ op_recover abort $testdir $env_cmd $dbfile $cmd $msg
+ op_recover commit $testdir $env_cmd $dbfile $cmd $msg
+ }
+
+ puts "\tRecd006.k: Verify db_printlog can read logfile"
+ set tmpfile $testdir/printlog.out
+ set stat [catch {exec $util_path/db_printlog -h $testdir \
+ > $tmpfile} ret]
+ error_check_good db_printlog $stat 0
+ fileremove $tmpfile
+}
+
+# Do the nested transaction test.
+# We want to make sure that children inherit properly from their
+# parents, that locks are properly handed back to parents, and
+# that the right thing happens on commit/abort.
+# In particular:
+# Write lock on parent, properly acquired by child.
+# Committed operation on child gives lock to parent so that
+# other child can also get the lock.
+# Aborted op by child releases lock so other child can get it.
+# Correct database state if child commits.
+# Correct database state if child aborts.
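+#
+# A rough sketch of the parent/child pattern under test (not executed
+# here; env, db, key and val are placeholders):
+#
+#     set parent [$env txn]
+#     set child [$env txn -parent $parent]
+#     # Child may update data the parent holds a write lock on.
+#     $db put -txn $child $key $val
+#     # Commit hands the child's locks to the parent; abort releases
+#     # them. Either way a sibling can now lock the same data.
+#     $child commit
+#     set sibling [$env txn -parent $parent]
+#     $db put -txn $sibling $key $val
+#     $sibling commit
+#     $parent commit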
+proc nesttest { db parent env do p1 p2 child1 child2} {
+ global kvals
+ source ./include.tcl
+
+ if { $do == 1 } {
+ set func toupper
+ } else {
+ set func tolower
+ }
+
+ # Do an RMW on the parent to get a write lock.
+ set p10 [lindex $p1 0]
+ set p11 [lindex $p1 1]
+ set p20 [lindex $p2 0]
+ set p21 [lindex $p2 1]
+
+ set ret [$db get -rmw -txn $parent $p10]
+ set res $ret
+ set Dret [lindex [lindex $ret 0] 1]
+ if { [string compare $Dret $kvals($p10)] == 0 ||
+ [string compare $Dret [string toupper $kvals($p10)]] == 0 } {
+ set val 0
+ } else {
+ set val $Dret
+ }
+ error_check_good get_parent_RMW $val 0
+
+ # OK, do child 1
+ set kid1 [$env txn -parent $parent]
+ error_check_good kid1 [is_valid_txn $kid1 $env] TRUE
+
+ # Reading write-locked parent object should be OK
+ #puts "\tRead write-locked parent object for kid1."
+ set ret [$db get -txn $kid1 $p10]
+ error_check_good kid1_get10 $ret $res
+
+ # Now update this child
+ set data [lindex [lindex [string $func $ret] 0] 1]
+ set ret [$db put -txn $kid1 $p10 $data]
+ error_check_good kid1_put10 $ret 0
+
+ #puts "\tKid1 successful put."
+
+ # Now start child2
+ #puts "\tBegin txn for kid2."
+ set kid2 [$env txn -parent $parent]
+ error_check_good kid2 [is_valid_txn $kid2 $env] TRUE
+
+ # Getting anything in the p1 set should deadlock, so let's
+ # work on the p2 set.
+ set data [string $func $kvals($p20)]
+ #puts "\tPut data for kid2."
+ set ret [$db put -txn $kid2 $p20 $data]
+ error_check_good kid2_put20 $ret 0
+
+ #puts "\tKid2 data put successful."
+
+ # Now let's do the right thing to kid1
+ puts -nonewline "\tKid1 $child1..."
+ if { [string compare $child1 "commit"] == 0 } {
+ error_check_good kid1_commit [$kid1 commit] 0
+ } else {
+ error_check_good kid1_abort [$kid1 abort] 0
+ }
+ puts "complete"
+
+ # In either case, child2 should now be able to get the
+ # lock, either because it is inherited by the parent
+ # (commit) or because it was released (abort).
+ set data [string $func $kvals($p11)]
+ set ret [$db put -txn $kid2 $p11 $data]
+ error_check_good kid2_put11 $ret 0
+
+ # Now let's do the right thing to kid2
+ puts -nonewline "\tKid2 $child2..."
+ if { [string compare $child2 "commit"] == 0 } {
+ error_check_good kid2_commit [$kid2 commit] 0
+ } else {
+ error_check_good kid2_abort [$kid2 abort] 0
+ }
+ puts "complete"
+
+ # Now, let parent check that the right things happened.
+ # First get all four values
+ set p10_check [lindex [lindex [$db get -txn $parent $p10] 0] 0]
+ set p11_check [lindex [lindex [$db get -txn $parent $p11] 0] 0]
+ set p20_check [lindex [lindex [$db get -txn $parent $p20] 0] 0]
+ set p21_check [lindex [lindex [$db get -txn $parent $p21] 0] 0]
+
+ if { [string compare $child1 "commit"] == 0 } {
+ error_check_good parent_kid1 $p10_check \
+ [string tolower [string $func $kvals($p10)]]
+ } else {
+ error_check_good \
+ parent_kid1 $p10_check [string tolower $kvals($p10)]
+ }
+ if { [string compare $child2 "commit"] == 0 } {
+ error_check_good parent_kid2 $p11_check \
+ [string tolower [string $func $kvals($p11)]]
+ error_check_good parent_kid2 $p20_check \
+ [string tolower [string $func $kvals($p20)]]
+ } else {
+ error_check_good parent_kid2 $p11_check $kvals($p11)
+ error_check_good parent_kid2 $p20_check $kvals($p20)
+ }
+
+ # Now do a write on the parent for p21, whose lock it should
+ # either already hold or be able to acquire.
+ set ret [$db put -txn $parent $p21 [string $func $kvals($p21)]]
+ error_check_good parent_put21 $ret 0
+
+ return 0
+}
diff --git a/storage/bdb/test/recd007.tcl b/storage/bdb/test/recd007.tcl
new file mode 100644
index 00000000000..aeac3bea2c1
--- /dev/null
+++ b/storage/bdb/test/recd007.tcl
@@ -0,0 +1,886 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: recd007.tcl,v 11.60 2002/08/08 15:38:07 bostic Exp $
+#
+# TEST recd007
+# TEST File create/delete tests.
+# TEST
+# TEST This is a recovery test for create/delete of databases. We have
+# TEST hooks in the database so that we can abort the process at various
+# TEST points and make sure that the transaction doesn't commit. We
+# TEST then need to recover and make sure the file correctly exists
+# TEST or not, as the case may be.
+proc recd007 { method args} {
+ global fixed_len
+ source ./include.tcl
+
+ set orig_fixed_len $fixed_len
+ set opts [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Recd007: $method operation/transaction tests"
+
+ # Create the database and environment.
+ env_cleanup $testdir
+
+ set testfile recd007.db
+ set flags "-create -txn -home $testdir"
+
+ puts "\tRecd007.a: creating environment"
+ set env_cmd "berkdb_env $flags"
+
+ set env [eval $env_cmd]
+
+ # We need to create a database to get the pagesize (either
+ # the default or whatever might have been specified).
+ # Then remove it so we can compute fixed_len and create the
+ # real database.
+ set oflags "-create $omethod -mode 0644 -env $env $opts $testfile"
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+ set stat [$db stat]
+ #
+ # Compute the fixed_len based on the pagesize being used.
+ # We want the fixed_len to be 1/4 the pagesize.
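+ # For example, a 4096-byte page yields a fixed_len of 1024.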
+ #
+ set pg [get_pagesize $stat]
+ error_check_bad get_pagesize $pg -1
+ set fixed_len [expr $pg / 4]
+ error_check_good db_close [$db close] 0
+ error_check_good dbremove [berkdb dbremove -env $env $testfile] 0
+ error_check_good envclose [$env close] 0
+
+ # Convert the args again because fixed_len is now real.
+ set opts [convert_args $method ""]
+
+ # List of recovery tests: {HOOKS MSG} pairs
+ # Where each HOOK is a list of {COPY ABORT}
+ #
+ set rlist {
+ { {"none" "preopen"} "Recd007.b0: none/preopen"}
+ { {"none" "postopen"} "Recd007.b1: none/postopen"}
+ { {"none" "postlogmeta"} "Recd007.b2: none/postlogmeta"}
+ { {"none" "postlog"} "Recd007.b3: none/postlog"}
+ { {"none" "postsync"} "Recd007.b4: none/postsync"}
+ { {"postopen" "none"} "Recd007.c0: postopen/none"}
+ { {"postlogmeta" "none"} "Recd007.c1: postlogmeta/none"}
+ { {"postlog" "none"} "Recd007.c2: postlog/none"}
+ { {"postsync" "none"} "Recd007.c3: postsync/none"}
+ { {"postopen" "postopen"} "Recd007.d: postopen/postopen"}
+ { {"postopen" "postlogmeta"} "Recd007.e: postopen/postlogmeta"}
+ { {"postopen" "postlog"} "Recd007.f: postopen/postlog"}
+ { {"postlog" "postlog"} "Recd007.g: postlog/postlog"}
+ { {"postlogmeta" "postlogmeta"} "Recd007.h: postlogmeta/postlogmeta"}
+ { {"postlogmeta" "postlog"} "Recd007.i: postlogmeta/postlog"}
+ { {"postlog" "postsync"} "Recd007.j: postlog/postsync"}
+ { {"postsync" "postsync"} "Recd007.k: postsync/postsync"}
+ }
+
+ # Read through the operation table and run the recovery tests
+ # for each entry.
+
+ foreach pair $rlist {
+ set cmd [lindex $pair 0]
+ set msg [lindex $pair 1]
+ file_recover_create $testdir $env_cmd $omethod \
+ $opts $testfile $cmd $msg
+ }
+
+ set rlist {
+ { {"none" "predestroy"} "Recd007.l0: none/predestroy"}
+ { {"none" "postdestroy"} "Recd007.l1: none/postdestroy"}
+ { {"predestroy" "none"} "Recd007.m0: predestroy/none"}
+ { {"postdestroy" "none"} "Recd007.m1: postdestroy/none"}
+ { {"predestroy" "predestroy"} "Recd007.n: predestroy/predestroy"}
+ { {"predestroy" "postdestroy"} "Recd007.o: predestroy/postdestroy"}
+ { {"postdestroy" "postdestroy"} "Recd007.p: postdestroy/postdestroy"}
+ }
+ foreach op { dbremove dbrename dbtruncate } {
+ foreach pair $rlist {
+ set cmd [lindex $pair 0]
+ set msg [lindex $pair 1]
+ file_recover_delete $testdir $env_cmd $omethod \
+ $opts $testfile $cmd $msg $op
+ }
+ }
+
+ if { $is_windows_test != 1 } {
+ set env_cmd "berkdb_env_noerr $flags"
+ do_file_recover_delmk $testdir $env_cmd $method $opts $testfile
+ }
+
+ puts "\tRecd007.r: Verify db_printlog can read logfile"
+ set tmpfile $testdir/printlog.out
+ set stat [catch {exec $util_path/db_printlog -h $testdir \
+ > $tmpfile} ret]
+ error_check_good db_printlog $stat 0
+ fileremove $tmpfile
+}
+
+proc file_recover_create { dir env_cmd method opts dbfile cmd msg } {
+ #
+ # We run this test on each of these scenarios:
+ # 1. Creating just a database
+ # 2. Creating a database with a subdb
+ # 3. Creating a 2nd subdb in a database
+ puts "\t$msg create with a database"
+ do_file_recover_create $dir $env_cmd $method $opts $dbfile \
+ 0 $cmd $msg
+ if { [is_queue $method] == 1 } {
+ puts "\tSkipping subdatabase tests for method $method"
+ return
+ }
+ puts "\t$msg create with a database and subdb"
+ do_file_recover_create $dir $env_cmd $method $opts $dbfile \
+ 1 $cmd $msg
+ puts "\t$msg create with a database and 2nd subdb"
+ do_file_recover_create $dir $env_cmd $method $opts $dbfile \
+ 2 $cmd $msg
+
+}
+
+proc do_file_recover_create { dir env_cmd method opts dbfile sub cmd msg } {
+ global log_log_record_types
+ source ./include.tcl
+
+ # Keep track of the log types we've seen
+ if { $log_log_record_types == 1} {
+ logtrack_read $dir
+ }
+
+ env_cleanup $dir
+ set dflags "-dar"
+ # Open the environment and set the copy/abort locations
+ set env [eval $env_cmd]
+ set copy [lindex $cmd 0]
+ set abort [lindex $cmd 1]
+ error_check_good copy_location [is_valid_create_loc $copy] 1
+ error_check_good abort_location [is_valid_create_loc $abort] 1
+
+ if {([string first "logmeta" $copy] != -1 || \
+ [string first "logmeta" $abort] != -1) && \
+ [is_btree $method] == 0 } {
+ puts "\tSkipping for method $method"
+ $env test copy none
+ $env test abort none
+ error_check_good env_close [$env close] 0
+ return
+ }
+
+ # Basically non-existence is our initial state. When we
+ # abort, it is also our final state.
+ #
+ switch $sub {
+ 0 {
+ set oflags "-create $method -auto_commit -mode 0644 \
+ -env $env $opts $dbfile"
+ }
+ 1 {
+ set oflags "-create $method -auto_commit -mode 0644 \
+ -env $env $opts $dbfile sub0"
+ }
+ 2 {
+ #
+ # If we are aborting here, then we need to
+ # create a first subdb, then create a second.
+ #
+ set oflags "-create $method -auto_commit -mode 0644 \
+ -env $env $opts $dbfile sub0"
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+ error_check_good db_close [$db close] 0
+ set init_file $dir/$dbfile.init
+ catch { file copy -force $dir/$dbfile $init_file } res
+ set oflags "-create $method -auto_commit -mode 0644 \
+ -env $env $opts $dbfile sub1"
+ }
+ default {
+ puts "\tBad value $sub for sub"
+ return
+ }
+ }
+ #
+ # Set our locations to copy and abort
+ #
+ set ret [eval $env test copy $copy]
+ error_check_good test_copy $ret 0
+ set ret [eval $env test abort $abort]
+ error_check_good test_abort $ret 0
+
+ puts "\t\tExecuting command"
+ set ret [catch {eval {berkdb_open} $oflags} db]
+
+ # Sync the mpool so any changes to the file that are
+ # in mpool get written to the disk file before the
+ # diff.
+ $env mpool_sync
+
+ #
+ # If we don't abort, then we expect success.
+ # If we abort, we expect no file created.
+ #
+ if {[string first "none" $abort] == -1} {
+ #
+ # Operation was aborted, verify it does
+ # not exist.
+ #
+ puts "\t\tCommand executed and aborted."
+ error_check_bad db_open $ret 0
+
+ #
+ # Check that the file does not exist. Final state.
+ #
+ if { $sub != 2 } {
+ error_check_good db_open:exists \
+ [file exists $dir/$dbfile] 0
+ } else {
+ error_check_good \
+ diff(init,postcreate):diff($init_file,$dir/$dbfile)\
+ [dbdump_diff $dflags $init_file $dir $dbfile] 0
+ }
+ } else {
+ #
+ # Operation was committed, verify it exists.
+ #
+ puts "\t\tCommand executed and committed."
+ error_check_good db_open [is_valid_db $db] TRUE
+ error_check_good db_close [$db close] 0
+
+ #
+ # Check that the file exists.
+ #
+ error_check_good db_open [file exists $dir/$dbfile] 1
+ set init_file $dir/$dbfile.init
+ catch { file copy -force $dir/$dbfile $init_file } res
+
+ if { [is_queue $method] == 1 } {
+ copy_extent_file $dir $dbfile init
+ }
+ }
+ error_check_good env_close [$env close] 0
+
+ #
+ # Run recovery here. Should be a no-op. Verify that
+ # the file still doesn't exist or change (depending on sub)
+ # when we are done.
+ #
+ berkdb debug_check
+ puts -nonewline "\t\tAbout to run recovery ... "
+ flush stdout
+
+ set stat [catch {exec $util_path/db_recover -h $dir -c} result]
+ if { $stat == 1 } {
+ error "FAIL: Recovery error: $result."
+ return
+ }
+ puts "complete"
+ if { $sub != 2 && [string first "none" $abort] == -1} {
+ #
+ # Operation was aborted, verify it still does
+ # not exist. Only done with file creations.
+ #
+ error_check_good after_recover1 [file exists $dir/$dbfile] 0
+ } else {
+ #
+ # Operation was committed or just a subdb was aborted.
+ # Verify it did not change.
+ #
+ error_check_good \
+ diff(initial,post-recover1):diff($init_file,$dir/$dbfile) \
+ [dbdump_diff $dflags $init_file $dir $dbfile] 0
+ #
+ # Need a new copy to get the right LSN into the file.
+ #
+ catch { file copy -force $dir/$dbfile $init_file } res
+
+ if { [is_queue $method] == 1 } {
+ copy_extent_file $dir $dbfile init
+ }
+ }
+
+ # If we didn't make a copy, then we are done.
+ #
+ if {[string first "none" $copy] != -1} {
+ return
+ }
+
+ #
+ # Now restore the .afterop file(s) to their original names
+ # and run recovery again.
+ #
+ copy_afterop $dir
+
+ berkdb debug_check
+ puts -nonewline "\t\tAbout to run recovery ... "
+ flush stdout
+
+ set stat [catch {exec $util_path/db_recover -h $dir -c} result]
+ if { $stat == 1 } {
+ error "FAIL: Recovery error: $result."
+ return
+ }
+ puts "complete"
+ if { $sub != 2 && [string first "none" $abort] == -1} {
+ #
+ # Operation was aborted, verify it still does
+ # not exist. Only done with file creations.
+ #
+ error_check_good after_recover2 [file exists $dir/$dbfile] 0
+ } else {
+ #
+ # Operation was committed or just a subdb was aborted.
+ # Verify it did not change.
+ #
+ error_check_good \
+ diff(initial,post-recover2):diff($init_file,$dir/$dbfile) \
+ [dbdump_diff $dflags $init_file $dir $dbfile] 0
+ }
+
+}
+
+proc file_recover_delete { dir env_cmd method opts dbfile cmd msg op } {
+ #
+ # We run this test on each of these scenarios:
+ # 1. Deleting/Renaming just a database
+ # 2. Deleting/Renaming a database with a subdb
+ # 3. Deleting/Renaming a 2nd subdb in a database
+ puts "\t$msg $op with a database"
+ do_file_recover_delete $dir $env_cmd $method $opts $dbfile \
+ 0 $cmd $msg $op
+ if { [is_queue $method] == 1 } {
+ puts "\tSkipping subdatabase tests for method $method"
+ return
+ }
+ puts "\t$msg $op with a database and subdb"
+ do_file_recover_delete $dir $env_cmd $method $opts $dbfile \
+ 1 $cmd $msg $op
+ puts "\t$msg $op with a database and 2nd subdb"
+ do_file_recover_delete $dir $env_cmd $method $opts $dbfile \
+ 2 $cmd $msg $op
+
+}
+
+proc do_file_recover_delete { dir env_cmd method opts dbfile sub cmd msg op } {
+ global log_log_record_types
+ source ./include.tcl
+
+ # Keep track of the log types we've seen
+ if { $log_log_record_types == 1} {
+ logtrack_read $dir
+ }
+
+ env_cleanup $dir
+ # Open the environment and set the copy/abort locations
+ set env [eval $env_cmd]
+ set copy [lindex $cmd 0]
+ set abort [lindex $cmd 1]
+ error_check_good copy_location [is_valid_delete_loc $copy] 1
+ error_check_good abort_location [is_valid_delete_loc $abort] 1
+
+ if { [is_record_based $method] == 1 } {
+ set key1 1
+ set key2 2
+ } else {
+ set key1 recd007_key1
+ set key2 recd007_key2
+ }
+ set data1 recd007_data0
+ set data2 recd007_data1
+ set data3 NEWrecd007_data2
+
+ #
+ # Depending on what sort of subdb we want, if any, our
+ # args to the open call will be different (and if we
+ # want a 2nd subdb, we create the first here).
+ #
+ # XXX
+ # For dbtruncate, we want oflags to have "$env" in it,
+ # not have the value currently in 'env'. That is why
+ # the '$' is protected below. Later on we use oflags
+ # but with a new $env we just opened.
+ #
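+ #
+ # A minimal illustration of the quoting trick (hypothetical
+ # values, not executed here):
+ #     set env A
+ #     set oflags "-env \$env"  ;# oflags holds the literal "$env"
+ #     set env B
+ #     eval berkdb_open $oflags ;# substituted at eval time: "-env B"
+ #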
+ switch $sub {
+ 0 {
+ set subdb ""
+ set new $dbfile.new
+ set dflags "-dar"
+ set oflags "-create $method -auto_commit -mode 0644 \
+ -env \$env $opts $dbfile"
+ }
+ 1 {
+ set subdb sub0
+ set new $subdb.new
+ set dflags ""
+ set oflags "-create $method -auto_commit -mode 0644 \
+ -env \$env $opts $dbfile $subdb"
+ }
+ 2 {
+ #
+ # If we are aborting here, then we need to
+ # create a first subdb, then create a second.
+ #
+ set subdb sub1
+ set new $subdb.new
+ set dflags ""
+ set oflags "-create $method -auto_commit -mode 0644 \
+ -env \$env $opts $dbfile sub0"
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+ set txn [$env txn]
+ set ret [$db put -txn $txn $key1 $data1]
+ error_check_good db_put $ret 0
+ error_check_good commit [$txn commit] 0
+ error_check_good db_close [$db close] 0
+ set oflags "-create $method -auto_commit -mode 0644 \
+ -env \$env $opts $dbfile $subdb"
+ }
+ default {
+ puts "\tBad value $sub for sub"
+ return
+ }
+ }
+
+ #
+ # Set our locations to copy and abort
+ #
+ set ret [eval $env test copy $copy]
+ error_check_good test_copy $ret 0
+ set ret [eval $env test abort $abort]
+ error_check_good test_abort $ret 0
+
+ #
+ # Open our db, add some data, close and copy as our
+ # init file.
+ #
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+ set txn [$env txn]
+ set ret [$db put -txn $txn $key1 $data1]
+ error_check_good db_put $ret 0
+ set ret [$db put -txn $txn $key2 $data2]
+ error_check_good db_put $ret 0
+ error_check_good commit [$txn commit] 0
+ error_check_good db_close [$db close] 0
+
+ $env mpool_sync
+
+ set init_file $dir/$dbfile.init
+ catch { file copy -force $dir/$dbfile $init_file } res
+
+ if { [is_queue $method] == 1} {
+ copy_extent_file $dir $dbfile init
+ }
+
+ #
+ # If we don't abort, then we expect success.
+ # If we abort, we expect no file removed.
+ #
+ switch $op {
+ "dbrename" {
+ set ret [catch { eval {berkdb} $op -env $env -auto_commit \
+ $dbfile $subdb $new } remret]
+ }
+ "dbremove" {
+ set ret [catch { eval {berkdb} $op -env $env -auto_commit \
+ $dbfile $subdb } remret]
+ }
+ "dbtruncate" {
+ set txn [$env txn]
+ set db [eval {berkdb_open_noerr -env} \
+ $env -auto_commit $dbfile $subdb]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ error_check_good txnbegin [is_valid_txn $txn $env] TRUE
+ set ret [catch {$db truncate -txn $txn} remret]
+ }
+ }
+ $env mpool_sync
+ if { $abort == "none" } {
+ if { $op == "dbtruncate" } {
+ error_check_good txncommit [$txn commit] 0
+ error_check_good dbclose [$db close] 0
+ }
+ #
+ # Operation was committed, verify it.
+ #
+ puts "\t\tCommand executed and committed."
+ error_check_good $op $ret 0
+ #
+ # If a dbtruncate, check that truncate returned the number
+ # of items previously in the database.
+ #
+ if { [string compare $op "dbtruncate"] == 0 } {
+ error_check_good remret $remret 2
+ }
+ recd007_check $op $sub $dir $dbfile $subdb $new $env $oflags
+ } else {
+ #
+ # Operation was aborted, verify it did not change.
+ #
+ if { $op == "dbtruncate" } {
+ error_check_good txnabort [$txn abort] 0
+ error_check_good dbclose [$db close] 0
+ }
+ puts "\t\tCommand executed and aborted."
+ error_check_good $op $ret 1
+
+ #
+ # Check that the file exists. Final state.
+ # Compare against initial file.
+ #
+ error_check_good post$op.1 [file exists $dir/$dbfile] 1
+ error_check_good \
+ diff(init,post$op.2):diff($init_file,$dir/$dbfile)\
+ [dbdump_diff $dflags $init_file $dir $dbfile] 0
+ }
+ $env mpool_sync
+ error_check_good env_close [$env close] 0
+ catch { file copy -force $dir/$dbfile $init_file } res
+ if { [is_queue $method] == 1} {
+ copy_extent_file $dir $dbfile init
+ }
+
+ #
+ # Run recovery here. Should be a no-op. Verify that
+ # the file still doesn't exist or change (depending on abort)
+ # when we are done.
+ #
+ berkdb debug_check
+ puts -nonewline "\t\tAbout to run recovery ... "
+ flush stdout
+
+ set stat [catch {exec $util_path/db_recover -h $dir -c} result]
+ if { $stat == 1 } {
+ error "FAIL: Recovery error: $result."
+ return
+ }
+
+ puts "complete"
+
+ if { $abort == "none" } {
+ #
+ # Operation was committed.
+ #
+ set env [eval $env_cmd]
+ recd007_check $op $sub $dir $dbfile $subdb $new $env $oflags
+ error_check_good env_close [$env close] 0
+ } else {
+ #
+ # Operation was aborted, verify it did not change.
+ #
+ berkdb debug_check
+ error_check_good \
+ diff(initial,post-recover1):diff($init_file,$dir/$dbfile) \
+ [dbdump_diff $dflags $init_file $dir $dbfile] 0
+ }
+
+ #
+ # If we didn't make a copy, then we are done.
+ #
+ if {[string first "none" $copy] != -1} {
+ return
+ }
+
+ #
+ # Now restore the .afterop file(s) to their original name.
+ # Run recovery again.
+ #
+ copy_afterop $dir
+
+ berkdb debug_check
+ puts -nonewline "\t\tAbout to run recovery ... "
+ flush stdout
+
+ set stat [catch {exec $util_path/db_recover -h $dir -c} result]
+ if { $stat == 1 } {
+ error "FAIL: Recovery error: $result."
+ return
+ }
+ puts "complete"
+
+ if { [string first "none" $abort] != -1} {
+ set env [eval $env_cmd]
+ recd007_check $op $sub $dir $dbfile $subdb $new $env $oflags
+ error_check_good env_close [$env close] 0
+ } else {
+ #
+ # Operation was aborted, verify it did not change.
+ #
+ error_check_good \
+ diff(initial,post-recover2):diff($init_file,$dir/$dbfile) \
+ [dbdump_diff $dflags $init_file $dir $dbfile] 0
+ }
+
+}
+
+#
+# This function tests a specific case of recovering after a db removal.
+# This is for SR #2538. Basically we want to:
+# - Make an env.
+# - Make/close a db.
+# - Remove the db.
+# - Create another db of same name.
+# - Sync db but leave open.
+# - Run recovery.
+# - Verify no recovery errors and that new db is there.
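+#
+# In outline (a sketch only; the proc below is the real test, and
+# env_cmd, dbfile and dir stand in for its actual arguments):
+#     set env [eval $env_cmd]
+#     set db [berkdb_open -create -btree -auto_commit -env $env $dbfile]
+#     $db close
+#     berkdb dbremove -env $env -auto_commit $dbfile
+#     set db [berkdb_open -create -btree -auto_commit -env $env $dbfile]
+#     $db sync
+#     exec $util_path/db_recover -h $dir -c
+#     # Expect no errors and a $dir/$dbfile containing the new data.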
+proc do_file_recover_delmk { dir env_cmd method opts dbfile } {
+ global log_log_record_types
+ source ./include.tcl
+
+ # Keep track of the log types we've seen
+ if { $log_log_record_types == 1} {
+ logtrack_read $dir
+ }
+ set omethod [convert_method $method]
+
+ puts "\tRecd007.q: Delete and recreate a database"
+ env_cleanup $dir
+ # Open the environment and set the copy/abort locations
+ set env [eval $env_cmd]
+ error_check_good env_open [is_valid_env $env] TRUE
+
+ if { [is_record_based $method] == 1 } {
+ set key 1
+ } else {
+ set key recd007_key
+ }
+ set data1 recd007_data
+ set data2 NEWrecd007_data2
+
+ set oflags \
+ "-create $omethod -auto_commit -mode 0644 $opts $dbfile"
+
+ #
+ # Open our db, add some data, close and copy as our
+ # init file.
+ #
+ set db [eval {berkdb_open_noerr} -env $env $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+ set txn [$env txn]
+ set ret [$db put -txn $txn $key $data1]
+ error_check_good db_put $ret 0
+ error_check_good commit [$txn commit] 0
+ error_check_good db_close [$db close] 0
+
+ set ret \
+ [catch { berkdb dbremove -env $env -auto_commit $dbfile } remret]
+
+ #
+ # Operation was committed, verify it does
+ # not exist.
+ #
+ puts "\t\tCommand executed and committed."
+ error_check_good dbremove $ret 0
+ error_check_good dbremove.1 [file exists $dir/$dbfile] 0
+
+ #
+ # Now create a new db with the same name.
+ #
+ set db [eval {berkdb_open_noerr} -env $env $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+ set txn [$env txn]
+ set ret [$db put -txn $txn $key [chop_data $method $data2]]
+ error_check_good db_put $ret 0
+ error_check_good commit [$txn commit] 0
+ error_check_good db_sync [$db sync] 0
+
+ berkdb debug_check
+ puts -nonewline "\t\tAbout to run recovery ... "
+ flush stdout
+
+ set stat [catch {exec $util_path/db_recover -h $dir -c} result]
+ if { $stat == 1 } {
+ error "FAIL: Recovery error: $result."
+ return
+ }
+ puts "complete"
+ error_check_good db_recover $stat 0
+ error_check_good db_recover.1 [file exists $dir/$dbfile] 1
+ #
+ # Since we ran recovery on the open db/env, we need to
+ # catch these calls. Basically they are there to clean
+ # up the Tcl widgets.
+ #
+ set stat [catch {$db close} ret]
+ error_check_bad dbclose_after_remove $stat 0
+ error_check_good dbclose_after_remove [is_substr $ret recovery] 1
+ set stat [catch {$env close} ret]
+ error_check_bad envclose_after_remove $stat 0
+ error_check_good envclose_after_remove [is_substr $ret recovery] 1
+
+ #
+ # Reopen env and db and verify 2nd database is there.
+ #
+ set env [eval $env_cmd]
+ error_check_good env_open [is_valid_env $env] TRUE
+ set db [eval {berkdb_open} -env $env $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+ set ret [$db get $key]
+ error_check_good dbget [llength $ret] 1
+ set kd [lindex $ret 0]
+ error_check_good key [lindex $kd 0] $key
+ error_check_good data2 [lindex $kd 1] [pad_data $method $data2]
+
+ error_check_good dbclose [$db close] 0
+ error_check_good envclose [$env close] 0
+}
+
+proc is_valid_create_loc { loc } {
+ switch $loc {
+ none -
+ preopen -
+ postopen -
+ postlogmeta -
+ postlog -
+ postsync
+ { return 1 }
+ default
+ { return 0 }
+ }
+}
+
+proc is_valid_delete_loc { loc } {
+ switch $loc {
+ none -
+ predestroy -
+ postdestroy -
+ postremcall
+ { return 1 }
+ default
+ { return 0 }
+ }
+}
+
+# Do a logical diff on the db dump files. We expect that either
+# the files are identical, or if they differ, that the difference
+# is exactly a free/invalid page.
+# Return 1 if they are different, 0 if logically the same (or identical).
+#
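+# A typical call, mirroring its use above, expects no logical diff:
+#     error_check_good diff [dbdump_diff "-dar" $init_file $dir $dbfile] 0
+#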
+proc dbdump_diff { flags initfile dir dbfile } {
+ source ./include.tcl
+
+ set initdump $initfile.dump
+ set dbdump $dbfile.dump
+
+ set stat [catch {eval {exec $util_path/db_dump} $flags -f $initdump \
+ $initfile} ret]
+ error_check_good dbdump.init $stat 0
+
+ # Do a dump without the freelist which should eliminate any
+ # recovery differences.
+ set stat [catch {eval {exec $util_path/db_dump} $flags -f $dir/$dbdump \
+ $dir/$dbfile} ret]
+ error_check_good dbdump.db $stat 0
+
+ set stat [filecmp $dir/$dbdump $initdump]
+
+ if {$stat == 0} {
+ return 0
+ }
+ puts "diff: $dbdump $initdump gives:\n$ret"
+ return 1
+}
+
+proc recd007_check { op sub dir dbfile subdb new env oflags } {
+ #
+ # No matter how many subdbs we have, dbtruncate will always
+ # have a file, and if we open our particular db, it should
+ # have no entries.
+ #
+ if { $sub == 0 } {
+ if { $op == "dbremove" } {
+ error_check_good $op:not-exist \
+ [file exists $dir/$dbfile] 0
+ } elseif { $op == "dbrename"} {
+ error_check_good $op:exist \
+ [file exists $dir/$dbfile] 0
+ error_check_good $op:exist2 \
+ [file exists $dir/$dbfile.new] 1
+ } else {
+ error_check_good $op:exist \
+ [file exists $dir/$dbfile] 1
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+ set dbc [$db cursor]
+ error_check_good dbc_open \
+ [is_valid_cursor $dbc $db] TRUE
+ set ret [$dbc get -first]
+ error_check_good dbget1 [llength $ret] 0
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good db_close [$db close] 0
+ }
+ return
+ } else {
+ set t1 $dir/t1
+ #
+ # If we have subdbs, check that all but the last one
+ # are there, and the last one is correctly operated on.
+ #
+ set db [berkdb_open -rdonly -env $env $dbfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set c [eval {$db cursor}]
+ error_check_good db_cursor [is_valid_cursor $c $db] TRUE
+ set d [$c get -last]
+ if { $op == "dbremove" } {
+ if { $sub == 1 } {
+ error_check_good subdb:rem [llength $d] 0
+ } else {
+ error_check_bad subdb:rem [llength $d] 0
+ set sdb [lindex [lindex $d 0] 0]
+ error_check_bad subdb:rem1 $sdb $subdb
+ }
+ } elseif { $op == "dbrename"} {
+ set sdb [lindex [lindex $d 0] 0]
+ error_check_good subdb:ren $sdb $new
+ if { $sub != 1 } {
+ set d [$c get -prev]
+ error_check_bad subdb:ren [llength $d] 0
+ set sdb [lindex [lindex $d 0] 0]
+ error_check_good subdb:ren1 \
+ [is_substr "new" $sdb] 0
+ }
+ } else {
+ set sdb [lindex [lindex $d 0] 0]
+ set dbt [berkdb_open -rdonly -env $env $dbfile $sdb]
+ error_check_good db_open [is_valid_db $dbt] TRUE
+ set dbc [$dbt cursor]
+ error_check_good dbc_open \
+ [is_valid_cursor $dbc $dbt] TRUE
+ set ret [$dbc get -first]
+ error_check_good dbget2 [llength $ret] 0
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good db_close [$dbt close] 0
+ if { $sub != 1 } {
+ set d [$c get -prev]
+ error_check_bad subdb:ren [llength $d] 0
+ set sdb [lindex [lindex $d 0] 0]
+ set dbt [berkdb_open -rdonly -env $env \
+ $dbfile $sdb]
+ error_check_good db_open [is_valid_db $dbt] TRUE
+ set dbc [$dbt cursor]
+ error_check_good dbc_open \
+ [is_valid_cursor $dbc $dbt] TRUE
+ set ret [$dbc get -first]
+ error_check_bad dbget3 [llength $ret] 0
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good db_close [$dbt close] 0
+ }
+ }
+ error_check_good dbcclose [$c close] 0
+ error_check_good db_close [$db close] 0
+ }
+}
+
+proc copy_afterop { dir } {
+ set r [catch { set filecopy [glob $dir/*.afterop] } res]
+ if { $r == 1 } {
+ return
+ }
+ foreach f $filecopy {
+ set orig [string range $f 0 \
+ [expr [string last "." $f] - 1]]
+ catch { file rename -force $f $orig} res
+ }
+}
diff --git a/storage/bdb/test/recd008.tcl b/storage/bdb/test/recd008.tcl
new file mode 100644
index 00000000000..548813a403b
--- /dev/null
+++ b/storage/bdb/test/recd008.tcl
@@ -0,0 +1,227 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: recd008.tcl,v 1.26 2002/02/25 16:44:26 sandstro Exp $
+#
+# TEST recd008
+# TEST Test deeply nested transactions and many-child transactions.
+proc recd008 { method {breadth 4} {depth 4} args} {
+ global kvals
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_record_based $method] == 1 } {
+ puts "Recd008 skipping for method $method"
+ return
+ }
+ puts "Recd008: $method $breadth X $depth deeply nested transactions"
+
+ # Create the database and environment.
+ env_cleanup $testdir
+
+ set dbfile recd008.db
+
+ puts "\tRecd008.a: create database"
+ set db [eval {berkdb_open -create} $args $omethod $testdir/$dbfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Make sure that we have enough entries to span a couple of
+ # different pages.
+ set did [open $dict]
+ set count 0
+ while { [gets $did str] != -1 && $count < 1000 } {
+ if { [string compare $omethod "-recno"] == 0 } {
+ set key [expr $count + 1]
+ } else {
+ set key $str
+ }
+ if { $count == 500} {
+ set p1 $key
+ set kvals($p1) $str
+ }
+ set ret [$db put $key $str]
+ error_check_good put $ret 0
+
+ incr count
+ }
+ close $did
+ error_check_good db_close [$db close] 0
+
+ set txn_max [expr int(pow($breadth,$depth))]
+ if { $txn_max < 20 } {
+ set txn_max 20
+ }
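+ # With the default breadth and depth of 4, this is pow(4,4),
+ # or 256 transactions.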
+ puts "\tRecd008.b: create environment for $txn_max transactions"
+
+ set eflags "-mode 0644 -create -txn_max $txn_max \
+ -txn -home $testdir"
+ set env_cmd "berkdb_env $eflags"
+ set dbenv [eval $env_cmd]
+ error_check_good env_open [is_valid_env $dbenv] TRUE
+
+ reset_env $dbenv
+
+ set rlist {
+ { {recd008_parent abort ENV DB $p1 TXNID 1 1 $breadth $depth}
+ "Recd008.c: child abort parent" }
+ { {recd008_parent commit ENV DB $p1 TXNID 1 1 $breadth $depth}
+ "Recd008.d: child commit parent" }
+ }
+ foreach pair $rlist {
+ set cmd [subst [lindex $pair 0]]
+ set msg [lindex $pair 1]
+ op_recover abort $testdir $env_cmd $dbfile $cmd $msg
+ recd008_setkval $dbfile $p1
+ op_recover commit $testdir $env_cmd $dbfile $cmd $msg
+ recd008_setkval $dbfile $p1
+ }
+
+ puts "\tRecd008.e: Verify db_printlog can read logfile"
+ set tmpfile $testdir/printlog.out
+ set stat [catch {exec $util_path/db_printlog -h $testdir \
+ > $tmpfile} ret]
+ error_check_good db_printlog $stat 0
+ fileremove $tmpfile
+}
+
+proc recd008_setkval { dbfile p1 } {
+ global kvals
+ source ./include.tcl
+
+ set db [berkdb_open $testdir/$dbfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set ret [$db get $p1]
+ set kvals($p1) [lindex [lindex $ret 0] 1]
+}
+
+# This is a lot like the op_recover procedure. We cannot use that
+# because it was not meant to be called recursively. This proc
+# knows about depth/breadth and file naming so that recursive calls
+# don't overwrite various initial and afterop files, etc.
+#
+# The basic flow of this is:
+# (Initial file)
+# Parent begin transaction (in op_recover)
+# Parent starts children
+# Recursively call recd008_recover
+# (children modify p1)
+# Parent modifies p1
+# (Afterop file)
+# Parent commit/abort (in op_recover)
+# (Final file)
+# Recovery test (in op_recover)
+proc recd008_parent { op env db p1key parent b0 d0 breadth depth } {
+ global kvals
+ source ./include.tcl
+
+ #
+ # Save copy of original data
+ # Acquire lock on data
+ #
+ set olddata $kvals($p1key)
+ set ret [$db get -rmw -txn $parent $p1key]
+ set Dret [lindex [lindex $ret 0] 1]
+ error_check_good get_parent_RMW $Dret $olddata
+
+ #
+ # Parent spawns off children
+ #
+ set ret [recd008_txn $op $env $db $p1key $parent \
+ $b0 $d0 $breadth $depth]
+
+ puts "Child runs complete. Parent modifies data."
+
+ #
+ # Parent modifies p1
+ #
+ set newdata $olddata.parent
+ set ret [$db put -txn $parent $p1key $newdata]
+ error_check_good db_put $ret 0
+
+ #
+ # Save value in kvals for later comparison
+ #
+ switch $op {
+ "commit" {
+ set kvals($p1key) $newdata
+ }
+ "abort" {
+ set kvals($p1key) $olddata
+ }
+ }
+ return 0
+}
+
+proc recd008_txn { op env db p1key parent b0 d0 breadth depth } {
+ global log_log_record_types
+ global kvals
+ source ./include.tcl
+
+ for {set d 1} {$d < $d0} {incr d} {
+ puts -nonewline "\t"
+ }
+ puts "Recd008_txn: $op parent:$parent $breadth $depth ($b0 $d0)"
+
+ # Save the initial file and open the environment and the file
+ for {set b $b0} {$b <= $breadth} {incr b} {
+ #
+ # Begin child transaction
+ #
+ set t [$env txn -parent $parent]
+ error_check_bad txn_begin $t NULL
+ error_check_good txn_begin [is_valid_txn $t $env] TRUE
+ set startd [expr $d0 + 1]
+ set child $b:$startd:$t
+ set olddata $kvals($p1key)
+ set newdata $olddata.$child
+ set ret [$db get -rmw -txn $t $p1key]
+ set Dret [lindex [lindex $ret 0] 1]
+ error_check_good get_parent_RMW $Dret $olddata
+
+ #
+ # Recursively call to set up nested transactions/children
+ #
+ for {set d $startd} {$d <= $depth} {incr d} {
+ set ret [recd008_txn commit $env $db $p1key $t \
+ $b $d $breadth $depth]
+ set ret [recd008_txn abort $env $db $p1key $t \
+ $b $d $breadth $depth]
+ }
+ #
+ # Modifies p1.
+ #
+ set ret [$db put -txn $t $p1key $newdata]
+ error_check_good db_put $ret 0
+
+ #
+ # Commit or abort
+ #
+ for {set d 1} {$d < $startd} {incr d} {
+ puts -nonewline "\t"
+ }
+ puts "Executing txn_$op:$t"
+ error_check_good txn_$op:$t [$t $op] 0
+ for {set d 1} {$d < $startd} {incr d} {
+ puts -nonewline "\t"
+ }
+ set ret [$db get -rmw -txn $parent $p1key]
+ set Dret [lindex [lindex $ret 0] 1]
+ switch $op {
+ "commit" {
+ puts "Command executed and committed."
+ error_check_good get_parent_RMW $Dret $newdata
+ set kvals($p1key) $newdata
+ }
+ "abort" {
+ puts "Command executed and aborted."
+ error_check_good get_parent_RMW $Dret $olddata
+ set kvals($p1key) $olddata
+ }
+ }
+ }
+ return 0
+}
diff --git a/storage/bdb/test/recd009.tcl b/storage/bdb/test/recd009.tcl
new file mode 100644
index 00000000000..5538d2d7652
--- /dev/null
+++ b/storage/bdb/test/recd009.tcl
@@ -0,0 +1,180 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: recd009.tcl,v 1.18 2002/04/01 20:11:44 krinsky Exp $
+#
+# TEST recd009
+# TEST Verify record numbering across split/reverse splits and recovery.
+proc recd009 { method {select 0} args} {
+ global fixed_len
+ source ./include.tcl
+
+ if { [is_rbtree $method] != 1 && [is_rrecno $method] != 1} {
+ puts "Recd009 skipping for method $method."
+ return
+ }
+
+ set opts [convert_args $method $args]
+ set method [convert_method $method]
+
+ puts "\tRecd009: Test record numbers across splits and recovery"
+
+ set testfile recd009.db
+ env_cleanup $testdir
+ set mkeys 1000
+ set nkeys 5
+ set data "data"
+
+ puts "\tRecd009.a: Create $method environment and database."
+ set flags "-create -txn -home $testdir"
+
+ set env_cmd "berkdb_env $flags"
+ set dbenv [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $dbenv] TRUE
+
+ set oflags "-env $dbenv -pagesize 8192 -create -mode 0644 $opts $method"
+ set db [eval {berkdb_open} $oflags $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Fill page with small key/data pairs. Keep at leaf.
+ puts "\tRecd009.b: Fill page with $nkeys small key/data pairs."
+ for { set i 1 } { $i <= $nkeys } { incr i } {
+ if { [is_recno $method] == 1 } {
+ set key $i
+ } else {
+ set key key000$i
+ }
+ set ret [$db put $key $data$i]
+ error_check_good dbput $ret 0
+ }
+ error_check_good db_close [$db close] 0
+ error_check_good env_close [$dbenv close] 0
+
+ set newnkeys [expr $nkeys + 1]
+ # List of recovery tests: {CMD MSG} pairs.
+ set rlist {
+ { {recd009_split DB TXNID 1 $method $newnkeys $mkeys}
+ "Recd009.c: split"}
+ { {recd009_split DB TXNID 0 $method $newnkeys $mkeys}
+ "Recd009.d: reverse split"}
+ }
+
+ foreach pair $rlist {
+ set cmd [subst [lindex $pair 0]]
+ set msg [lindex $pair 1]
+ if { $select != 0 } {
+ set tag [lindex $msg 0]
+ set tail [expr [string length $tag] - 2]
+ set tag [string range $tag $tail $tail]
+ if { [lsearch $select $tag] == -1 } {
+ continue
+ }
+ }
+ set reverse [string first "reverse" $msg]
+ if { $reverse == -1 } {
+ set abortkeys $nkeys
+ set commitkeys $mkeys
+ set abortpg 0
+ set commitpg 1
+ } else {
+ set abortkeys $mkeys
+ set commitkeys $nkeys
+ set abortpg 1
+ set commitpg 0
+ }
+ op_recover abort $testdir $env_cmd $testfile $cmd $msg
+ recd009_recnocheck $testdir $testfile $opts $abortkeys $abortpg
+ op_recover commit $testdir $env_cmd $testfile $cmd $msg
+ recd009_recnocheck $testdir $testfile $opts \
+ $commitkeys $commitpg
+ }
+ puts "\tRecd009.e: Verify db_printlog can read logfile"
+ set tmpfile $testdir/printlog.out
+ set stat [catch {exec $util_path/db_printlog -h $testdir \
+ > $tmpfile} ret]
+ error_check_good db_printlog $stat 0
+ fileremove $tmpfile
+}
+
+#
+# This procedure verifies that the database has exactly numkeys
+# keys and that they are in order.
+#
+proc recd009_recnocheck { tdir testfile opts numkeys numpg} {
+ source ./include.tcl
+
+ set db [eval {berkdb_open} $opts $tdir/$testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ puts "\tRecd009_recnocheck: Verify page count of $numpg on split."
+ set stat [$db stat]
+ error_check_bad stat:check-split [is_substr $stat \
+ "{{Internal pages} 0}"] $numpg
+
+ set type [$db get_type]
+ set dbc [$db cursor]
+ error_check_good dbcursor [is_valid_cursor $dbc $db] TRUE
+ set i 1
+ puts "\tRecd009_recnocheck: Checking $numkeys record numbers."
+ for {set d [$dbc get -first]} { [llength $d] != 0 } {
+ set d [$dbc get -next]} {
+ if { [is_btree $type] } {
+ set thisi [$dbc get -get_recno]
+ } else {
+ set thisi [lindex [lindex $d 0] 0]
+ }
+ error_check_good recno_check $i $thisi
+ error_check_good record_count [expr $i <= $numkeys] 1
+ incr i
+ }
+ error_check_good curs_close [$dbc close] 0
+ error_check_good db_close [$db close] 0
+}
+
+proc recd009_split { db txn split method nkeys mkeys } {
+ global errorCode
+ source ./include.tcl
+
+ set data "data"
+
+ set isrecno [is_recno $method]
+ # if mkeys is above 1000, need to adjust below for lexical order
+ if { $split == 1 } {
+ puts "\tRecd009_split: Add $mkeys pairs to force split."
+ for {set i $nkeys} { $i <= $mkeys } { incr i } {
+ if { $isrecno == 1 } {
+ set key $i
+ } else {
+ if { $i >= 100 } {
+ set key key0$i
+ } elseif { $i >= 10 } {
+ set key key00$i
+ } else {
+ set key key000$i
+ }
+ }
+ set ret [$db put -txn $txn $key $data$i]
+ error_check_good dbput:more $ret 0
+ }
+ } else {
+ puts "\tRecd009_split: Delete added keys to force reverse split."
+ # Since rrecno renumbers, we delete downward.
+ for {set i $mkeys} { $i >= $nkeys } { set i [expr $i - 1] } {
+ if { $isrecno == 1 } {
+ set key $i
+ } else {
+ if { $i >= 100 } {
+ set key key0$i
+ } elseif { $i >= 10 } {
+ set key key00$i
+ } else {
+ set key key000$i
+ }
+ }
+ error_check_good db_del:$i [$db del -txn $txn $key] 0
+ }
+ }
+ return 0
+}
diff --git a/storage/bdb/test/recd010.tcl b/storage/bdb/test/recd010.tcl
new file mode 100644
index 00000000000..2549e03a2c0
--- /dev/null
+++ b/storage/bdb/test/recd010.tcl
@@ -0,0 +1,257 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: recd010.tcl,v 1.19 2002/03/15 19:05:07 sue Exp $
+#
+# TEST recd010
+# TEST Test stability of btree duplicates across btree off-page dup splits
+# TEST and reverse splits and across recovery.
+proc recd010 { method {select 0} args} {
+ if { [is_btree $method] != 1 } {
+ puts "Recd010 skipping for method $method."
+ return
+ }
+
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Recd010: skipping for specific pagesizes"
+ return
+ }
+ set largs $args
+ append largs " -dup "
+ recd010_main $method $select $largs
+ append largs " -dupsort "
+ recd010_main $method $select $largs
+}
+
+proc recd010_main { method select largs } {
+ global fixed_len
+ global kvals
+ global kvals_dups
+ source ./include.tcl
+
+ set opts [convert_args $method $largs]
+ set method [convert_method $method]
+
+ puts "Recd010 ($opts): Test duplicates across splits and recovery"
+
+ set testfile recd010.db
+ env_cleanup $testdir
+ #
+ # Set pagesize small to generate lots of off-page dups
+ #
+ set page 512
+ set mkeys 1000
+ set firstkeys 5
+ set data "data"
+ set key "recd010_key"
+
+ puts "\tRecd010.a: Create environment and database."
+ set flags "-create -txn -home $testdir"
+
+ set env_cmd "berkdb_env $flags"
+ set dbenv [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $dbenv] TRUE
+
+ set oflags "-env $dbenv -create -mode 0644 $opts $method"
+ set db [eval {berkdb_open} -pagesize $page $oflags $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Fill page with small key/data pairs. Keep at leaf.
+ puts "\tRecd010.b: Fill page with $firstkeys small dups."
+ for { set i 1 } { $i <= $firstkeys } { incr i } {
+ set ret [$db put $key $data$i]
+ error_check_good dbput $ret 0
+ }
+ set kvals 1
+ set kvals_dups $firstkeys
+ error_check_good db_close [$db close] 0
+ error_check_good env_close [$dbenv close] 0
+
+ # List of recovery tests: {CMD MSG} pairs.
+ if { $mkeys < 100 } {
+ puts "Recd010 mkeys of $mkeys too small"
+ return
+ }
+ set rlist {
+ { {recd010_split DB TXNID 1 2 $mkeys}
+ "Recd010.c: btree split 2 large dups"}
+ { {recd010_split DB TXNID 0 2 $mkeys}
+ "Recd010.d: btree reverse split 2 large dups"}
+ { {recd010_split DB TXNID 1 10 $mkeys}
+ "Recd010.e: btree split 10 dups"}
+ { {recd010_split DB TXNID 0 10 $mkeys}
+ "Recd010.f: btree reverse split 10 dups"}
+ { {recd010_split DB TXNID 1 100 $mkeys}
+ "Recd010.g: btree split 100 dups"}
+ { {recd010_split DB TXNID 0 100 $mkeys}
+ "Recd010.h: btree reverse split 100 dups"}
+ }
+
+ foreach pair $rlist {
+ set cmd [subst [lindex $pair 0]]
+ set msg [lindex $pair 1]
+ if { $select != 0 } {
+ set tag [lindex $msg 0]
+ set tail [expr [string length $tag] - 2]
+ set tag [string range $tag $tail $tail]
+ if { [lsearch $select $tag] == -1 } {
+ continue
+ }
+ }
+ set reverse [string first "reverse" $msg]
+ op_recover abort $testdir $env_cmd $testfile $cmd $msg
+ recd010_check $testdir $testfile $opts abort $reverse $firstkeys
+ op_recover commit $testdir $env_cmd $testfile $cmd $msg
+ recd010_check $testdir $testfile $opts commit $reverse $firstkeys
+ }
+ puts "\tRecd010.i: Verify db_printlog can read logfile"
+ set tmpfile $testdir/printlog.out
+ set stat [catch {exec $util_path/db_printlog -h $testdir \
+ > $tmpfile} ret]
+ error_check_good db_printlog $stat 0
+ fileremove $tmpfile
+}
+
+#
+# This procedure verifies that the database has exactly the expected
+# keys and that each carries the expected duplicates.
+#
+proc recd010_check { tdir testfile opts op reverse origdups } {
+ global kvals
+ global kvals_dups
+ source ./include.tcl
+
+ set db [eval {berkdb_open} $opts $tdir/$testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set data "data"
+
+ if { $reverse == -1 } {
+ puts "\tRecd010_check: Verify split after $op"
+ } else {
+ puts "\tRecd010_check: Verify reverse split after $op"
+ }
+
+ set stat [$db stat]
+ if { [expr ([string compare $op "abort"] == 0 && $reverse == -1) || \
+ ([string compare $op "commit"] == 0 && $reverse != -1)]} {
+ set numkeys 0
+ set allkeys [expr $numkeys + 1]
+ set numdups $origdups
+ #
+ # If we abort the adding of dups, or commit
+ # the removal of dups, either way check that
+ # we are back at the beginning. Check that:
+ # - We have 0 internal pages.
+ # - We have only 1 key (the original we primed the db
+ # with at the beginning of the test).
+ # - We have only the original number of dups we primed
+ # the db with at the beginning of the test.
+ #
+ error_check_good stat:orig0 [is_substr $stat \
+ "{{Internal pages} 0}"] 1
+ error_check_good stat:orig1 [is_substr $stat \
+ "{{Number of keys} 1}"] 1
+ error_check_good stat:orig2 [is_substr $stat \
+ "{{Number of records} $origdups}"] 1
+ } else {
+ set numkeys $kvals
+ set allkeys [expr $numkeys + 1]
+ set numdups $kvals_dups
+ #
+ # If we abort the removal of dups, or commit the
+ # addition of dups, check that:
+ # - We have > 0 internal pages.
+ # - We have the number of keys.
+ #
+ error_check_bad stat:new0 [is_substr $stat \
+ "{{Internal pages} 0}"] 1
+ error_check_good stat:new1 [is_substr $stat \
+ "{{Number of keys} $allkeys}"] 1
+ }
+
+ set dbc [$db cursor]
+ error_check_good dbcursor [is_valid_cursor $dbc $db] TRUE
+ puts "\tRecd010_check: Checking key and duplicate values"
+ set key "recd010_key"
+ #
+ # Check dups are there as they should be.
+ #
+ for {set ki 0} {$ki < $numkeys} {incr ki} {
+ set datacnt 0
+ for {set d [$dbc get -set $key$ki]} { [llength $d] != 0 } {
+ set d [$dbc get -nextdup]} {
+ set thisdata [lindex [lindex $d 0] 1]
+ if { $datacnt < 10 } {
+ set pdata $data.$ki.00$datacnt
+ } elseif { $datacnt < 100 } {
+ set pdata $data.$ki.0$datacnt
+ } else {
+ set pdata $data.$ki.$datacnt
+ }
+ error_check_good dup_check $thisdata $pdata
+ incr datacnt
+ }
+ error_check_good dup_count $datacnt $numdups
+ }
+ #
+ # Check that the number of expected keys (allkeys) are
+ # all of the ones that exist in the database.
+ #
+ set dupkeys 0
+ set lastkey ""
+ for {set d [$dbc get -first]} { [llength $d] != 0 } {
+ set d [$dbc get -next]} {
+ set thiskey [lindex [lindex $d 0] 0]
+ if { [string compare $lastkey $thiskey] != 0 } {
+ incr dupkeys
+ }
+ set lastkey $thiskey
+ }
+ error_check_good key_check $allkeys $dupkeys
+ error_check_good curs_close [$dbc close] 0
+ error_check_good db_close [$db close] 0
+}
+
+proc recd010_split { db txn split nkeys mkeys } {
+ global errorCode
+ global kvals
+ global kvals_dups
+ source ./include.tcl
+
+ set data "data"
+ set key "recd010_key"
+
+ set numdups [expr $mkeys / $nkeys]
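+ # For example, the nkeys 10 runs above use 1000/10 = 100
+ # duplicates per key.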
+
+ set kvals $nkeys
+ set kvals_dups $numdups
+ if { $split == 1 } {
+ puts \
+"\tRecd010_split: Add $nkeys keys, with $numdups duplicates each to force split."
+ for {set k 0} { $k < $nkeys } { incr k } {
+ for {set i 0} { $i < $numdups } { incr i } {
+ if { $i < 10 } {
+ set pdata $data.$k.00$i
+ } elseif { $i < 100 } {
+ set pdata $data.$k.0$i
+ } else {
+ set pdata $data.$k.$i
+ }
+ set ret [$db put -txn $txn $key$k $pdata]
+ error_check_good dbput:more $ret 0
+ }
+ }
+ } else {
+ puts \
+"\tRecd010_split: Delete $nkeys keys to force reverse split."
+ for {set k 0} { $k < $nkeys } { incr k } {
+ error_check_good db_del:$k [$db del -txn $txn $key$k] 0
+ }
+ }
+ return 0
+}
diff --git a/storage/bdb/test/recd011.tcl b/storage/bdb/test/recd011.tcl
new file mode 100644
index 00000000000..74108a30650
--- /dev/null
+++ b/storage/bdb/test/recd011.tcl
@@ -0,0 +1,116 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: recd011.tcl,v 11.19 2002/02/25 16:44:26 sandstro Exp $
+#
+# TEST recd011
+# TEST Verify that recovery to a specific timestamp works.
+proc recd011 { method {niter 200} {ckpt_freq 15} {sleep_time 1} args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+ set tnum 11
+
+ puts "Recd0$tnum ($args): Test recovery to a specific timestamp."
+
+ set testfile recd0$tnum.db
+ env_cleanup $testdir
+
+ set i 0
+ if { [is_record_based $method] == 1 } {
+ set key 1
+ } else {
+ set key KEY
+ }
+
+ puts "\tRecd0$tnum.a: Create environment and database."
+ set flags "-create -txn -home $testdir"
+
+ set env_cmd "berkdb_env $flags"
+ set dbenv [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $dbenv] TRUE
+
+ set oflags "-auto_commit -env $dbenv -create -mode 0644 $args $omethod"
+ set db [eval {berkdb_open} $oflags $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Main loop: every second or so, increment the db in a txn.
+ puts "\t\tInitial Checkpoint"
+ error_check_good "Initial Checkpoint" [$dbenv txn_checkpoint] 0
+
+ puts "\tRecd0$tnum.b ($niter iterations):\
+ Transaction-protected increment loop."
+ for { set i 0 } { $i <= $niter } { incr i } {
+ set data $i
+
+ # Put, in a txn.
+ set txn [$dbenv txn]
+ error_check_good txn_begin [is_valid_txn $txn $dbenv] TRUE
+ error_check_good db_put \
+ [$db put -txn $txn $key [chop_data $method $data]] 0
+ error_check_good txn_commit [$txn commit] 0
+
+ set timeof($i) [timestamp -r]
+
+ # If an appropriate period has elapsed, checkpoint.
+ if { $i % $ckpt_freq == $ckpt_freq - 1 } {
+ puts "\t\tIteration $i: Checkpointing."
+ error_check_good ckpt($i) [$dbenv txn_checkpoint] 0
+ }
+
+ # Sleep for $sleep_time seconds.
+ tclsleep $sleep_time
+ }
+ error_check_good db_close [$db close] 0
+ error_check_good env_close [$dbenv close] 0
+
+ # Now, loop through and recover to each timestamp, verifying the
+ # expected increment.
+ puts "\tRecd0$tnum.c: Recover to each timestamp and check."
+ for { set i $niter } { $i >= 0 } { incr i -1 } {
+
+ # Run db_recover.
+ set t [clock format $timeof($i) -format "%y%m%d%H%M.%S"]
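+ # For example, 2002-03-15 16:30:53 formats as "0203151630.53".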
+ berkdb debug_check
+ set ret [catch {exec $util_path/db_recover -h $testdir -t $t} r]
+ error_check_good db_recover($i,$t) $ret 0
+
+ # Now open the db and check the timestamp.
+ set db [eval {berkdb_open} $testdir/$testfile]
+ error_check_good db_open($i) [is_valid_db $db] TRUE
+
+ set dbt [$db get $key]
+ set datum [lindex [lindex $dbt 0] 1]
+ error_check_good timestamp_recover $datum [pad_data $method $i]
+
+ error_check_good db_close [$db close] 0
+ }
+
+ # Finally, recover to a time well before the first timestamp
+ # and well after the last timestamp. The latter should
+ # behave just like recovery to the last timestamp tested;
+ # the former should fail.
+ puts "\tRecd0$tnum.d: Recover to before the first timestamp."
+ set t [clock format [expr $timeof(0) - 1000] -format "%y%m%d%H%M.%S"]
+ set ret [catch {exec $util_path/db_recover -h $testdir -t $t} r]
+ error_check_bad db_recover(before,$t) $ret 0
+
+ puts "\tRecd0$tnum.e: Recover to after the last timestamp."
+ set t [clock format \
+ [expr $timeof($niter) + 1000] -format "%y%m%d%H%M.%S"]
+ set ret [catch {exec $util_path/db_recover -h $testdir -t $t} r]
+ error_check_good db_recover(after,$t) $ret 0
+
+ # Now open the db and check the timestamp.
+ set db [eval {berkdb_open} $testdir/$testfile]
+ error_check_good db_open(after) [is_valid_db $db] TRUE
+
+ set dbt [$db get $key]
+ set datum2 [lindex [lindex $dbt 0] 1]
+
+ error_check_good timestamp_recover $datum2 $datum
+ error_check_good db_close [$db close] 0
+}
diff --git a/storage/bdb/test/recd012.tcl b/storage/bdb/test/recd012.tcl
new file mode 100644
index 00000000000..8231e648588
--- /dev/null
+++ b/storage/bdb/test/recd012.tcl
@@ -0,0 +1,432 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: recd012.tcl,v 11.27 2002/05/10 00:48:07 margo Exp $
+#
+# TEST recd012
+# TEST Test of log file ID management. [#2288]
+# TEST Test recovery handling of file opens and closes.
+proc recd012 { method {start 0} \
+ {niter 49} {noutiter 25} {niniter 100} {ndbs 5} args } {
+ source ./include.tcl
+
+ set tnum 12
+ set pagesize 512
+
+ if { $is_qnx_test } {
+ set niter 40
+ }
+
+ puts "Recd0$tnum $method ($args): Test recovery file management."
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Recd012: skipping for specific pagesizes"
+ return
+ }
+
+ for { set i $start } { $i <= $niter } { incr i } {
+ env_cleanup $testdir
+
+ # For repeatability, we pass in the iteration number
+ # as a parameter and use that in recd012_body to seed
+ # the random number generator to randomize our operations.
+ # This lets us re-run a potentially failing iteration
+ # without having to start from the beginning and work
+ # our way to it.
+ #
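+		# For example, to re-run only iteration 37 with the
+		# default parameters, one could invoke:
+		#	recd012 btree 37 37
+		#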
+		# The number of databases would range from 4 to 8 as
+		# a function of the iteration number, but this is
+		# currently disabled in favor of the $ndbs parameter:
+		# set ndbs [expr ($i % 5) + 4]
+
+ recd012_body \
+ $method $ndbs $i $noutiter $niniter $pagesize $tnum $args
+ }
+}
+
+proc recd012_body { method {ndbs 5} iter noutiter niniter psz tnum {largs ""} } {
+ global alphabet rand_init fixed_len recd012_ofkey recd012_ofckptkey
+ source ./include.tcl
+
+ set largs [convert_args $method $largs]
+ set omethod [convert_method $method]
+
+ puts "\tRecd0$tnum $method ($largs): Iteration $iter"
+ puts "\t\tRecd0$tnum.a: Create environment and $ndbs databases."
+
+ # We run out of lockers during some of the recovery runs, so
+ # we need to make sure that we specify a DB_CONFIG that will
+ # give us enough lockers.
+ set f [open $testdir/DB_CONFIG w]
+ puts $f "set_lk_max_lockers 5000"
+ close $f
+
+ set flags "-create -txn -home $testdir"
+ set env_cmd "berkdb_env $flags"
+ error_check_good env_remove [berkdb envremove -home $testdir] 0
+ set dbenv [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $dbenv] TRUE
+
+ # Initialize random number generator based on $iter.
+ berkdb srand [expr $iter + $rand_init]
+
+ # Initialize database that keeps track of number of open files (so
+ # we don't run out of descriptors).
+ set ofname of.db
+ set txn [$dbenv txn]
+ error_check_good open_txn_begin [is_valid_txn $txn $dbenv] TRUE
+ set ofdb [berkdb_open -env $dbenv -txn $txn\
+ -create -dup -mode 0644 -btree -pagesize 512 $ofname]
+ error_check_good of_open [is_valid_db $ofdb] TRUE
+ error_check_good open_txn_commit [$txn commit] 0
+ set oftxn [$dbenv txn]
+ error_check_good of_txn [is_valid_txn $oftxn $dbenv] TRUE
+ error_check_good of_put [$ofdb put -txn $oftxn $recd012_ofkey 1] 0
+ error_check_good of_put2 [$ofdb put -txn $oftxn $recd012_ofckptkey 0] 0
+ error_check_good of_put3 [$ofdb put -txn $oftxn $recd012_ofckptkey 0] 0
+ error_check_good of_txn_commit [$oftxn commit] 0
+ error_check_good of_close [$ofdb close] 0
+
+ # Create ndbs databases to work in, and a file listing db names to
+ # pick from.
+ set f [open $testdir/dblist w]
+
+ set oflags "-auto_commit -env $dbenv \
+ -create -mode 0644 -pagesize $psz $largs $omethod"
+ for { set i 0 } { $i < $ndbs } { incr i } {
+ # 50-50 chance of being a subdb, unless we're a queue.
+ if { [berkdb random_int 0 1] || [is_queue $method] } {
+ # not a subdb
+ set dbname recd0$tnum-$i.db
+ } else {
+ # subdb
+ set dbname "recd0$tnum-subdb.db s$i"
+ }
+ puts $f $dbname
+ set db [eval berkdb_open $oflags $dbname]
+ error_check_good db($i) [is_valid_db $db] TRUE
+ error_check_good db($i)_close [$db close] 0
+ }
+ close $f
+ error_check_good env_close [$dbenv close] 0
+
+	# Now we get to the meat of things.  Our goal is to do some number
+	# of opens, closes, updates, and shutdowns (simulated here by a
+	# close of all open handles and a close/reopen of the environment,
+	# with or without an envremove), matching the regular expression
+	#
+	#	((O[OUC]+S)+R+V)
+	#
+	# where O is an open, U an update, C a close, S a simulated
+	# shutdown, R a recovery run, and V a verify pass.
+	#
+	# We'll repeat the inner + a random number of times up to $niniter,
+	# and the outer + a random number of times up to $noutiter.
+	#
+	# In order to simulate shutdowns, we'll perform the opens, closes,
+	# and updates in a separate process, which we'll exit without
+	# closing all handles properly.  The environment will be left
+	# lying around before we run recovery 50% of the time.
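+	#
+	# One outer iteration might thus perform, e.g., the sequence
+	#	O O U C U S R R V
+	# i.e. a child that opens twice, updates, closes, updates, and
+	# "crashes", followed by two recovery runs and a verify.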
+ set out [berkdb random_int 1 $noutiter]
+ puts \
+ "\t\tRecd0$tnum.b: Performing $out recoveries of up to $niniter ops."
+ for { set i 0 } { $i < $out } { incr i } {
+ set child [open "|$tclsh_path" w]
+
+ # For performance, don't source everything,
+ # just what we'll need.
+ puts $child "load $tcllib"
+ puts $child "set fixed_len $fixed_len"
+ puts $child "source $src_root/test/testutils.tcl"
+ puts $child "source $src_root/test/recd0$tnum.tcl"
+
+ set rnd [expr $iter * 10000 + $i * 100 + $rand_init]
+
+ # Go.
+ berkdb debug_check
+ puts $child "recd012_dochild {$env_cmd} $rnd $i $niniter\
+ $ndbs $tnum $method $ofname $largs"
+ close $child
+
+ # Run recovery 0-3 times.
+ set nrecs [berkdb random_int 0 3]
+ for { set j 0 } { $j < $nrecs } { incr j } {
+ berkdb debug_check
+ set ret [catch {exec $util_path/db_recover \
+ -h $testdir} res]
+ if { $ret != 0 } {
+ puts "FAIL: db_recover returned with nonzero\
+ exit status, output as follows:"
+ file mkdir /tmp/12out
+ set fd [open /tmp/12out/[pid] w]
+ puts $fd $res
+ close $fd
+ }
+ error_check_good recover($j) $ret 0
+ }
+ }
+
+ # Run recovery one final time; it doesn't make sense to
+ # check integrity if we do not.
+ set ret [catch {exec $util_path/db_recover -h $testdir} res]
+ if { $ret != 0 } {
+ puts "FAIL: db_recover returned with nonzero\
+ exit status, output as follows:"
+ puts $res
+ }
+
+ # Make sure each datum is the correct filename.
+ puts "\t\tRecd0$tnum.c: Checking data integrity."
+ set dbenv [berkdb_env -create -private -home $testdir]
+ error_check_good env_open_integrity [is_valid_env $dbenv] TRUE
+ set f [open $testdir/dblist r]
+ set i 0
+ while { [gets $f dbinfo] > 0 } {
+ set db [eval berkdb_open -env $dbenv $dbinfo]
+ error_check_good dbopen($dbinfo) [is_valid_db $db] TRUE
+
+ set dbc [$db cursor]
+ error_check_good cursor [is_valid_cursor $dbc $db] TRUE
+
+ for { set dbt [$dbc get -first] } { [llength $dbt] > 0 } \
+ { set dbt [$dbc get -next] } {
+ error_check_good integrity [lindex [lindex $dbt 0] 1] \
+ [pad_data $method $dbinfo]
+ }
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good db_close [$db close] 0
+ }
+ close $f
+ error_check_good env_close_integrity [$dbenv close] 0
+
+ # Verify
+ error_check_good verify \
+ [verify_dir $testdir "\t\tRecd0$tnum.d: " 0 0 1] 0
+}
+
+proc recd012_dochild { env_cmd rnd outiter niniter ndbs tnum method\
+ ofname args } {
+ global recd012_ofkey
+ source ./include.tcl
+ if { [is_record_based $method] } {
+ set keybase ""
+ } else {
+ set keybase .[repeat abcdefghijklmnopqrstuvwxyz 4]
+ }
+
+ # Initialize our random number generator, repeatably based on an arg.
+ berkdb srand $rnd
+
+ # Open our env.
+ set dbenv [eval $env_cmd]
+ error_check_good env_open [is_valid_env $dbenv] TRUE
+
+ # Find out how many databases appear to be open in the log--we
+ # don't want recovery to run out of filehandles.
+ set txn [$dbenv txn]
+ error_check_good child_txn_begin [is_valid_txn $txn $dbenv] TRUE
+ set ofdb [berkdb_open -env $dbenv -txn $txn $ofname]
+ error_check_good child_txn_commit [$txn commit] 0
+
+ set oftxn [$dbenv txn]
+ error_check_good of_txn [is_valid_txn $oftxn $dbenv] TRUE
+ set dbt [$ofdb get -txn $oftxn $recd012_ofkey]
+ error_check_good of_get [lindex [lindex $dbt 0] 0] $recd012_ofkey
+ set nopenfiles [lindex [lindex $dbt 0] 1]
+
+ error_check_good of_commit [$oftxn commit] 0
+
+ # Read our dbnames
+ set f [open $testdir/dblist r]
+ set i 0
+ while { [gets $f dbname($i)] > 0 } {
+ incr i
+ }
+ close $f
+
+	# We now have $ndbs extant databases; we'll track the ones we
+	# open in $opendbs.  The first open happens in the loop below.
+ set opendbs {}
+ set oflags "-env $dbenv $args"
+
+ # Start a transaction, just to get us started.
+ set curtxn [$dbenv txn]
+ error_check_good txn [is_valid_txn $curtxn $dbenv] TRUE
+
+ # Inner loop. Do $in iterations of a random open, close, or
+ # update, where $in is between 1 and $niniter.
+ set in [berkdb random_int 1 $niniter]
+ for { set j 0 } { $j < $in } { incr j } {
+ set op [berkdb random_int 0 2]
+ switch $op {
+ 0 {
+ # Open.
+ recd012_open
+ }
+ 1 {
+ # Update. Put random-number$keybase as key,
+ # filename as data, into random database.
+ set num_open [llength $opendbs]
+			if { $num_open == 0 } {
+				# If none are open, do an open first, then
+				# recount; otherwise the random_int below
+				# would be called with an empty range.
+				recd012_open
+				set num_open [llength $opendbs]
+			}
+			set n [berkdb random_int 0 [expr $num_open - 1]]
+ set pair [lindex $opendbs $n]
+ set udb [lindex $pair 0]
+ set uname [lindex $pair 1]
+
+ set key [berkdb random_int 1000 1999]$keybase
+ set data [chop_data $method $uname]
+ error_check_good put($uname,$udb,$key,$data) \
+ [$udb put -txn $curtxn $key $data] 0
+
+			# One time in four, commit the transaction
+			# (currently disabled by the "&& 0" below).
+			if { [berkdb random_int 0 3] == 0 && 0 } {
+ error_check_good txn_recommit \
+ [$curtxn commit] 0
+ set curtxn [$dbenv txn]
+ error_check_good txn_reopen \
+ [is_valid_txn $curtxn $dbenv] TRUE
+ }
+ }
+ 2 {
+ # Close.
+ if { [llength $opendbs] == 0 } {
+ # If none are open, open instead of closing.
+ recd012_open
+ continue
+ }
+
+ # Commit curtxn first, lest we self-deadlock.
+ error_check_good txn_recommit [$curtxn commit] 0
+
+ # Do it.
+ set which [berkdb random_int 0 \
+ [expr [llength $opendbs] - 1]]
+
+ set db [lindex [lindex $opendbs $which] 0]
+ error_check_good db_choice [is_valid_db $db] TRUE
+ global errorCode errorInfo
+
+ error_check_good db_close \
+ [[lindex [lindex $opendbs $which] 0] close] 0
+
+ set opendbs [lreplace $opendbs $which $which]
+ incr nopenfiles -1
+
+ # Reopen txn.
+ set curtxn [$dbenv txn]
+ error_check_good txn_reopen \
+ [is_valid_txn $curtxn $dbenv] TRUE
+ }
+ }
+
+ # One time in two hundred, checkpoint.
+ if { [berkdb random_int 0 199] == 0 } {
+ puts "\t\t\tRecd0$tnum:\
+ Random checkpoint after operation $outiter.$j."
+ error_check_good txn_ckpt \
+ [$dbenv txn_checkpoint] 0
+ set nopenfiles \
+ [recd012_nopenfiles_ckpt $dbenv $ofdb $nopenfiles]
+ }
+ }
+
+ # We have to commit curtxn. It'd be kind of nice not to, but
+ # if we start in again without running recovery, we may block
+ # ourselves.
+ error_check_good curtxn_commit [$curtxn commit] 0
+
+ # Put back the new number of open files.
+ set oftxn [$dbenv txn]
+ error_check_good of_txn [is_valid_txn $oftxn $dbenv] TRUE
+ error_check_good of_del [$ofdb del -txn $oftxn $recd012_ofkey] 0
+ error_check_good of_put \
+ [$ofdb put -txn $oftxn $recd012_ofkey $nopenfiles] 0
+ error_check_good of_commit [$oftxn commit] 0
+ error_check_good ofdb_close [$ofdb close] 0
+}
+
+proc recd012_open { } {
+	# This is basically inlined into its caller and has to modify
+	# the caller's curtxn, so use upvars.
+ upvar curtxn curtxn
+ upvar ndbs ndbs
+ upvar dbname dbname
+ upvar dbenv dbenv
+ upvar oflags oflags
+ upvar opendbs opendbs
+ upvar nopenfiles nopenfiles
+
+ # Return without an open if we've already opened too many files--
+ # we don't want to make recovery run out of filehandles.
+ if { $nopenfiles > 30 } {
+ #puts "skipping--too many open files"
+ return -code break
+ }
+
+ # Commit curtxn first, lest we self-deadlock.
+ error_check_good txn_recommit \
+ [$curtxn commit] 0
+
+ # Do it.
+ set which [berkdb random_int 0 [expr $ndbs - 1]]
+
+ set db [eval berkdb_open -auto_commit $oflags $dbname($which)]
+
+ lappend opendbs [list $db $dbname($which)]
+
+ # Reopen txn.
+ set curtxn [$dbenv txn]
+ error_check_good txn_reopen [is_valid_txn $curtxn $dbenv] TRUE
+
+ incr nopenfiles
+}
+
+# Update the database containing the number of files that db_recover has
+# to contend with--we want to avoid letting it run out of file descriptors.
+# We do this by keeping track of the number of unclosed opens since the
+# checkpoint before last.
+# $recd012_ofkey stores this current value; the two dups available
+# at $recd012_ofckptkey store the number of opens since the last checkpoint
+# previous.
+# Thus, if the current value is 17 when we do a checkpoint, and the
+# stored values are 3 and 8, the new current value (which we return)
+# is 14, and the new stored values are 8 and 6.
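+#
+# A minimal sketch of that bookkeeping, using the values above
+# (17 current, stored dups 3 and 8):
+#	set nopenfiles 17
+#	set nopenfiles [expr $nopenfiles - 3]	;# discard first dup -> 14
+#	set sincelast [expr $nopenfiles - 8]	;# since last ckpt -> 6
+#	# stored dups are now {8 6}; we return 14.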
+proc recd012_nopenfiles_ckpt { env db nopenfiles } {
+ global recd012_ofckptkey
+ set txn [$env txn]
+ error_check_good nopenfiles_ckpt_txn [is_valid_txn $txn $env] TRUE
+
+ set dbc [$db cursor -txn $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+
+ # Get the first ckpt value and delete it.
+ set dbt [$dbc get -set $recd012_ofckptkey]
+ error_check_good set [llength $dbt] 1
+
+ set discard [lindex [lindex $dbt 0] 1]
+ error_check_good del [$dbc del] 0
+
+ set nopenfiles [expr $nopenfiles - $discard]
+
+ # Get the next ckpt value
+ set dbt [$dbc get -nextdup]
+ error_check_good set2 [llength $dbt] 1
+
+	# Calculate how many opens we've had since the last checkpoint.
+ set onlast [lindex [lindex $dbt 0] 1]
+ set sincelast [expr $nopenfiles - $onlast]
+
+ # Put this new number at the end of the dup set.
+ error_check_good put [$dbc put -keylast $recd012_ofckptkey $sincelast] 0
+
+ # We should never deadlock since we're the only one in this db.
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good txn_commit [$txn commit] 0
+
+ return $nopenfiles
+}
+
+# globals -- it's not worth passing these around, as they're constants
+set recd012_ofkey OPENFILES
+set recd012_ofckptkey CKPTS
diff --git a/storage/bdb/test/recd013.tcl b/storage/bdb/test/recd013.tcl
new file mode 100644
index 00000000000..e08654f34e0
--- /dev/null
+++ b/storage/bdb/test/recd013.tcl
@@ -0,0 +1,287 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: recd013.tcl,v 11.18 2002/02/25 16:44:27 sandstro Exp $
+#
+# TEST recd013
+# TEST Test of cursor adjustment on child transaction aborts. [#2373]
+#
+# XXX
+# Other tests that cover more specific variants of the same issue
+# are in the access method tests for now.  This is probably wrong; we
+# put this one here because those variants are closely based on, and
+# intertwined with, other non-transactional cursor stability tests
+# among the access method tests, and because we need at least one test
+# under recd to keep logtrack from complaining.  We'll sort out the
+# mess later; the important thing, for now, is that everything that
+# needs to get tested does.  (This really shouldn't be under recd at
+# all, since it doesn't run recovery!)
+proc recd013 { method { nitems 100 } args } {
+ source ./include.tcl
+ global alphabet log_log_record_types
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+ set tnum 13
+ set pgsz 512
+
+ puts "Recd0$tnum $method ($args): Test of aborted cursor adjustments."
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Recd013: skipping for specific pagesizes"
+ return
+ }
+
+ set testfile recd0$tnum.db
+ env_cleanup $testdir
+
+ set i 0
+ if { [is_record_based $method] == 1 } {
+ set keybase ""
+ } else {
+ set keybase "key"
+ }
+
+ puts "\tRecd0$tnum.a:\
+ Create environment, database, and parent transaction."
+ set flags "-create -txn -home $testdir"
+
+ set env_cmd "berkdb_env $flags"
+ set env [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $env] TRUE
+
+ set oflags \
+ "-auto_commit -env $env -create -mode 0644 -pagesize $pgsz $args $omethod"
+ set db [eval {berkdb_open} $oflags $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Create a database containing $nitems items, numbered with odds.
+ # We'll then put the even numbers during the body of the test.
+ set txn [$env txn]
+ error_check_good init_txn [is_valid_txn $txn $env] TRUE
+ for { set i 1 } { $i <= 2 * $nitems } { incr i 2 } {
+ set key $keybase$i
+ set data [chop_data $method $i$alphabet]
+
+		# First, try to put the item in a child transaction,
+		# then abort, and verify all the cursors we've created
+		# up until now.
+ set ctxn [$env txn -parent $txn]
+ error_check_good child_txn($i) [is_valid_txn $ctxn $env] TRUE
+ error_check_good fake_put($i) [$db put -txn $ctxn $key $data] 0
+ error_check_good ctxn_abort($i) [$ctxn abort] 0
+ for { set j 1 } { $j < $i } { incr j 2 } {
+ error_check_good dbc_get($j) [$dbc($j) get -current] \
+ [list [list $keybase$j \
+ [pad_data $method $j$alphabet]]]
+ }
+
+ # Then put for real.
+ error_check_good init_put($i) [$db put -txn $txn $key $data] 0
+
+ # Set a cursor of the parent txn to each item.
+ set dbc($i) [$db cursor -txn $txn]
+ error_check_good dbc_getset($i) \
+ [$dbc($i) get -set $key] \
+ [list [list $keybase$i [pad_data $method $i$alphabet]]]
+
+ # And verify all the cursors, including the one we just
+ # created.
+ for { set j 1 } { $j <= $i } { incr j 2 } {
+ error_check_good dbc_get($j) [$dbc($j) get -current] \
+ [list [list $keybase$j \
+ [pad_data $method $j$alphabet]]]
+ }
+ }
+
+ puts "\t\tRecd0$tnum.a.1: Verify cursor stability after init."
+ for { set i 1 } { $i <= 2 * $nitems } { incr i 2 } {
+ error_check_good dbc_get($i) [$dbc($i) get -current] \
+ [list [list $keybase$i [pad_data $method $i$alphabet]]]
+ }
+
+ puts "\tRecd0$tnum.b: Put test."
+ puts "\t\tRecd0$tnum.b.1: Put items."
+ set ctxn [$env txn -parent $txn]
+ error_check_good txn [is_valid_txn $ctxn $env] TRUE
+ for { set i 2 } { $i <= 2 * $nitems } { incr i 2 } {
+ set key $keybase$i
+ set data [chop_data $method $i$alphabet]
+ error_check_good child_put($i) [$db put -txn $ctxn $key $data] 0
+
+ # If we're a renumbering recno, this is uninteresting.
+ # Stir things up by putting a few additional records at
+ # the beginning.
+ if { [is_rrecno $method] == 1 } {
+ set curs [$db cursor -txn $ctxn]
+ error_check_bad llength_get_first \
+ [llength [$curs get -first]] 0
+ error_check_good cursor [is_valid_cursor $curs $db] TRUE
+ # expect a recno!
+ error_check_good rrecno_put($i) \
+ [$curs put -before ADDITIONAL.$i] 1
+ error_check_good curs_close [$curs close] 0
+ }
+ }
+
+ puts "\t\tRecd0$tnum.b.2: Verify cursor stability after abort."
+ error_check_good ctxn_abort [$ctxn abort] 0
+
+ for { set i 1 } { $i <= 2 * $nitems } { incr i 2 } {
+ error_check_good dbc_get($i) [$dbc($i) get -current] \
+ [list [list $keybase$i [pad_data $method $i$alphabet]]]
+ }
+
+ # Clean up cursors.
+ for { set i 1 } { $i <= 2 * $nitems } { incr i 2 } {
+ error_check_good dbc($i)_close [$dbc($i) close] 0
+ }
+
+ # Sync and verify.
+ error_check_good txn_commit [$txn commit] 0
+ set txn [$env txn]
+ error_check_good txn [is_valid_txn $txn $env] TRUE
+
+ error_check_good db_sync [$db sync] 0
+ error_check_good db_verify \
+ [verify_dir $testdir "\t\tRecd0$tnum.b.3: "] 0
+
+ # Now put back all the even records, this time in the parent.
+ # Commit and re-begin the transaction so we can abort and
+ # get back to a nice full database.
+ for { set i 2 } { $i <= 2 * $nitems } { incr i 2 } {
+ set key $keybase$i
+ set data [chop_data $method $i$alphabet]
+ error_check_good child_put($i) [$db put -txn $txn $key $data] 0
+ }
+ error_check_good txn_commit [$txn commit] 0
+ set txn [$env txn]
+ error_check_good txn [is_valid_txn $txn $env] TRUE
+
+ # Delete test. Set a cursor to each record. Delete the even ones
+ # in the parent and check cursor stability. Then open a child
+ # transaction, and delete the odd ones. Verify that the database
+ # is empty.
+ puts "\tRecd0$tnum.c: Delete test."
+ unset dbc
+
+ # Create cursors pointing at each item.
+ for { set i 1 } { $i <= 2 * $nitems } { incr i } {
+ set dbc($i) [$db cursor -txn $txn]
+ error_check_good dbc($i)_create [is_valid_cursor $dbc($i) $db] \
+ TRUE
+ error_check_good dbc_getset($i) [$dbc($i) get -set $keybase$i] \
+ [list [list $keybase$i [pad_data $method $i$alphabet]]]
+ }
+
+ puts "\t\tRecd0$tnum.c.1: Delete even items in child txn and abort."
+
+ if { [is_rrecno $method] != 1 } {
+ set init 2
+ set bound [expr 2 * $nitems]
+ set step 2
+ } else {
+ # In rrecno, deletes will renumber the items, so we have
+ # to take that into account when we delete by recno.
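+		# For instance, with 2 * $nitems == 6 records present,
+		# deleting recnos 2, 3, 4 in order removes the original
+		# items 2, 4, and 6, since each delete renumbers the
+		# remaining records downward.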
+ set init 2
+ set bound [expr $nitems + 1]
+ set step 1
+ }
+
+ set ctxn [$env txn -parent $txn]
+ for { set i $init } { $i <= $bound } { incr i $step } {
+ error_check_good del($i) [$db del -txn $ctxn $keybase$i] 0
+ }
+ error_check_good ctxn_abort [$ctxn abort] 0
+
+ # Verify that no items are deleted.
+ for { set i 1 } { $i <= 2 * $nitems } { incr i } {
+ error_check_good dbc_get($i) [$dbc($i) get -current] \
+ [list [list $keybase$i [pad_data $method $i$alphabet]]]
+ }
+
+ puts "\t\tRecd0$tnum.c.2: Delete even items in child txn and commit."
+ set ctxn [$env txn -parent $txn]
+ for { set i $init } { $i <= $bound } { incr i $step } {
+ error_check_good del($i) [$db del -txn $ctxn $keybase$i] 0
+ }
+ error_check_good ctxn_commit [$ctxn commit] 0
+
+ # Verify that even items are deleted and odd items are not.
+ for { set i 1 } { $i <= 2 * $nitems } { incr i 2 } {
+ if { [is_rrecno $method] != 1 } {
+ set j $i
+ } else {
+ set j [expr ($i - 1) / 2 + 1]
+ }
+ error_check_good dbc_get($i) [$dbc($i) get -current] \
+ [list [list $keybase$j [pad_data $method $i$alphabet]]]
+ }
+ for { set i 2 } { $i <= 2 * $nitems } { incr i 2 } {
+ error_check_good dbc_get($i) [$dbc($i) get -current] \
+ [list [list "" ""]]
+ }
+
+ puts "\t\tRecd0$tnum.c.3: Delete odd items in child txn."
+
+ set ctxn [$env txn -parent $txn]
+
+ for { set i 1 } { $i <= 2 * $nitems } { incr i 2 } {
+ if { [is_rrecno $method] != 1 } {
+ set j $i
+ } else {
+ # If this is an rrecno, just delete the first
+ # item repeatedly--the renumbering will make
+ # that delete everything.
+ set j 1
+ }
+ error_check_good del($i) [$db del -txn $ctxn $keybase$j] 0
+ }
+
+ # Verify that everyone's deleted.
+ for { set i 1 } { $i <= 2 * $nitems } { incr i } {
+ error_check_good get_deleted($i) \
+ [llength [$db get -txn $ctxn $keybase$i]] 0
+ }
+
+ puts "\t\tRecd0$tnum.c.4: Verify cursor stability after abort."
+ error_check_good ctxn_abort [$ctxn abort] 0
+
+ # Verify that even items are deleted and odd items are not.
+ for { set i 1 } { $i <= 2 * $nitems } { incr i 2 } {
+ if { [is_rrecno $method] != 1 } {
+ set j $i
+ } else {
+ set j [expr ($i - 1) / 2 + 1]
+ }
+ error_check_good dbc_get($i) [$dbc($i) get -current] \
+ [list [list $keybase$j [pad_data $method $i$alphabet]]]
+ }
+ for { set i 2 } { $i <= 2 * $nitems } { incr i 2 } {
+ error_check_good dbc_get($i) [$dbc($i) get -current] \
+ [list [list "" ""]]
+ }
+
+ # Clean up cursors.
+ for { set i 1 } { $i <= 2 * $nitems } { incr i } {
+ error_check_good dbc($i)_close [$dbc($i) close] 0
+ }
+
+ # Sync and verify.
+ error_check_good db_sync [$db sync] 0
+ error_check_good db_verify \
+ [verify_dir $testdir "\t\tRecd0$tnum.c.5: "] 0
+
+ puts "\tRecd0$tnum.d: Clean up."
+ error_check_good txn_commit [$txn commit] 0
+ error_check_good db_close [$db close] 0
+ error_check_good env_close [$env close] 0
+ error_check_good verify_dir \
+ [verify_dir $testdir "\t\tRecd0$tnum.d.1: "] 0
+
+ if { $log_log_record_types == 1 } {
+ logtrack_read $testdir
+ }
+}
diff --git a/storage/bdb/test/recd014.tcl b/storage/bdb/test/recd014.tcl
new file mode 100644
index 00000000000..6796341dca2
--- /dev/null
+++ b/storage/bdb/test/recd014.tcl
@@ -0,0 +1,445 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: recd014.tcl,v 1.19 2002/08/15 19:21:24 sandstro Exp $
+#
+# TEST recd014
+# TEST	This is a recovery test for create/delete of queue extents.  We
+# TEST	then recover and make sure the file correctly exists or does
+# TEST	not, as the case may be.
+proc recd014 { method args} {
+ global fixed_len
+ source ./include.tcl
+
+	if { [is_queueext $method] == 0 } {
+ puts "Recd014: Skipping for method $method"
+ return
+ }
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Recd014: skipping for specific pagesizes"
+ return
+ }
+
+ set orig_fixed_len $fixed_len
+ #
+ # We will use 512-byte pages, to be able to control
+ # when extents get created/removed.
+ #
+ set fixed_len 300
+
+ set opts [convert_args $method $args]
+ set omethod [convert_method $method]
+ #
+ # We want to set -extent 1 instead of what
+ # convert_args gave us.
+ #
+ set exti [lsearch -exact $opts "-extent"]
+ incr exti
+ set opts [lreplace $opts $exti $exti 1]
+
+ puts "Recd014: $method extent creation/deletion tests"
+
+ # Create the database and environment.
+ env_cleanup $testdir
+
+ set testfile recd014.db
+ set flags "-create -txn -home $testdir"
+
+ puts "\tRecd014.a: creating environment"
+ set env_cmd "berkdb_env $flags"
+
+ puts "\tRecd014.b: Create test commit"
+ ext_recover_create $testdir $env_cmd $omethod \
+ $opts $testfile commit
+ puts "\tRecd014.b: Create test abort"
+ ext_recover_create $testdir $env_cmd $omethod \
+ $opts $testfile abort
+
+ puts "\tRecd014.c: Consume test commit"
+ ext_recover_consume $testdir $env_cmd $omethod \
+ $opts $testfile commit
+ puts "\tRecd014.c: Consume test abort"
+ ext_recover_consume $testdir $env_cmd $omethod \
+ $opts $testfile abort
+
+ set fixed_len $orig_fixed_len
+ puts "\tRecd014.d: Verify db_printlog can read logfile"
+ set tmpfile $testdir/printlog.out
+ set stat [catch {exec $util_path/db_printlog -h $testdir \
+ > $tmpfile} ret]
+ error_check_good db_printlog $stat 0
+ fileremove $tmpfile
+}
+
+proc ext_recover_create { dir env_cmd method opts dbfile txncmd } {
+ global log_log_record_types
+ global fixed_len
+ global alphabet
+ source ./include.tcl
+
+ # Keep track of the log types we've seen
+ if { $log_log_record_types == 1} {
+ logtrack_read $dir
+ }
+
+ env_cleanup $dir
+ # Open the environment and set the copy/abort locations
+ set env [eval $env_cmd]
+
+ set init_file $dir/$dbfile.init
+ set noenvflags "-create $method -mode 0644 -pagesize 512 $opts $dbfile"
+ set oflags "-env $env $noenvflags"
+
+ set t [$env txn]
+ error_check_good txn_begin [is_valid_txn $t $env] TRUE
+
+ set ret [catch {eval {berkdb_open} -txn $t $oflags} db]
+ error_check_good txn_commit [$t commit] 0
+
+ set t [$env txn]
+ error_check_good txn_begin [is_valid_txn $t $env] TRUE
+
+ #
+ # The command to execute to create an extent is a put.
+ # We are just creating the first one, so our extnum is 0.
+ #
+ set extnum 0
+ set data [chop_data $method [replicate $alphabet 512]]
+ puts "\t\tExecuting command"
+ set putrecno [$db put -txn $t -append $data]
+ error_check_good db_put $putrecno 1
+
+ # Sync the db so any changes to the file that are
+ # in mpool get written to the disk file before the
+ # diff.
+ puts "\t\tSyncing"
+ error_check_good db_sync [$db sync] 0
+
+ catch { file copy -force $dir/$dbfile $dir/$dbfile.afterop } res
+ copy_extent_file $dir $dbfile afterop
+
+ error_check_good txn_$txncmd:$t [$t $txncmd] 0
+	#
+	# Whether we commit or abort, the extent file itself should
+	# exist on disk at this point; if we aborted, our entry should
+	# simply not be in it.
+	#
+ set dbq [make_ext_filename $dir $dbfile $extnum]
+ error_check_good extput:exists1 [file exists $dbq] 1
+ set ret [$db get $putrecno]
+ if {$txncmd == "abort"} {
+ #
+ # Operation was aborted. Verify our entry is not there.
+ #
+ puts "\t\tCommand executed and aborted."
+ error_check_good db_get [llength $ret] 0
+ } else {
+ #
+ # Operation was committed, verify it exists.
+ #
+ puts "\t\tCommand executed and committed."
+ error_check_good db_get [llength $ret] 1
+ catch { file copy -force $dir/$dbfile $init_file } res
+ copy_extent_file $dir $dbfile init
+ }
+ set t [$env txn]
+ error_check_good txn_begin [is_valid_txn $t $env] TRUE
+ error_check_good db_close [$db close] 0
+ error_check_good txn_commit [$t commit] 0
+ error_check_good env_close [$env close] 0
+
+ #
+ # Run recovery here. Should be a no-op. Verify that
+	# the file still does (or doesn't) exist when we are done.
+ #
+ berkdb debug_check
+ puts -nonewline "\t\tAbout to run recovery (no-op) ... "
+ flush stdout
+
+ set stat [catch {exec $util_path/db_recover -h $dir -c} result]
+ if { $stat == 1 } {
+ error "FAIL: Recovery error: $result."
+ return
+ }
+ puts "complete"
+ #
+ # Verify it did not change.
+ #
+ error_check_good extput:exists2 [file exists $dbq] 1
+ ext_create_check $dir $txncmd $init_file $dbfile $noenvflags $putrecno
+
+ #
+ # Need a new copy to get the right LSN into the file.
+ #
+ catch { file copy -force $dir/$dbfile $init_file } res
+ copy_extent_file $dir $dbfile init
+
+ #
+ # Undo.
+ # Now move the .afterop file to $dbfile. Run recovery again.
+ #
+ file copy -force $dir/$dbfile.afterop $dir/$dbfile
+ move_file_extent $dir $dbfile afterop copy
+
+ berkdb debug_check
+ puts -nonewline "\t\tAbout to run recovery (afterop) ... "
+ flush stdout
+
+ set stat [catch {exec $util_path/db_recover -h $dir -c} result]
+ if { $stat == 1 } {
+ error "FAIL: Recovery error: $result."
+ return
+ }
+ puts "complete"
+ ext_create_check $dir $txncmd $init_file $dbfile $noenvflags $putrecno
+
+ #
+ # To redo, remove the dbfiles. Run recovery again.
+ #
+ catch { file rename -force $dir/$dbfile $dir/$dbfile.renamed } res
+ copy_extent_file $dir $dbfile renamed rename
+
+ berkdb debug_check
+ puts -nonewline "\t\tAbout to run recovery (init) ... "
+ flush stdout
+
+ set stat [catch {exec $util_path/db_recover -h $dir -c} result]
+ #
+ # !!!
+ # Even though db_recover exits with status 0, it should print out
+ # a warning because the file didn't exist. Db_recover writes this
+ # to stderr. Tcl assumes that ANYTHING written to stderr is an
+ # error, so even though we exit with 0 status, we still get an
+ # error back from 'catch'. Look for the warning.
+ #
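+	# For example, a (hypothetical) command that exits 0 but writes
+	# to stderr still trips catch:
+	#	set stat [catch {exec sh -c {echo oops >&2; exit 0}} r]
+	#	# => $stat == 1, $r == "oops"
+	#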
+ if { $stat == 1 && [is_substr $result "warning"] == 0 } {
+ error "FAIL: Recovery error: $result."
+ return
+ }
+ puts "complete"
+
+ #
+ # Verify it was redone. However, since we removed the files
+ # to begin with, recovery with abort will not recreate the
+ # extent. Recovery with commit will.
+ #
+ if {$txncmd == "abort"} {
+ error_check_good extput:exists3 [file exists $dbq] 0
+ } else {
+ error_check_good extput:exists3 [file exists $dbq] 1
+ }
+}
+
+proc ext_create_check { dir txncmd init_file dbfile oflags putrecno } {
+ if { $txncmd == "commit" } {
+ #
+ # Operation was committed. Verify it did not change.
+ #
+ error_check_good \
+ diff(initial,post-recover2):diff($init_file,$dir/$dbfile) \
+ [dbdump_diff "-dar" $init_file $dir $dbfile] 0
+ } else {
+ #
+ # Operation aborted. The file is there, but make
+ # sure the item is not.
+ #
+ set xdb [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $xdb] TRUE
+ set ret [$xdb get $putrecno]
+ error_check_good db_get [llength $ret] 0
+ error_check_good db_close [$xdb close] 0
+ }
+}
+
+proc ext_recover_consume { dir env_cmd method opts dbfile txncmd} {
+ global log_log_record_types
+ global alphabet
+ source ./include.tcl
+
+ # Keep track of the log types we've seen
+ if { $log_log_record_types == 1} {
+ logtrack_read $dir
+ }
+
+ env_cleanup $dir
+ # Open the environment and set the copy/abort locations
+ set env [eval $env_cmd]
+
+ set oflags "-create -auto_commit $method -mode 0644 -pagesize 512 \
+ -env $env $opts $dbfile"
+
+ #
+ # Open our db, add some data, close and copy as our
+ # init file.
+ #
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ set extnum 0
+ set data [chop_data $method [replicate $alphabet 512]]
+
+ set txn [$env txn]
+ error_check_good txn_begin [is_valid_txn $txn $env] TRUE
+ set putrecno [$db put -txn $txn -append $data]
+ error_check_good db_put $putrecno 1
+ error_check_good commit [$txn commit] 0
+ error_check_good db_close [$db close] 0
+
+ puts "\t\tExecuting command"
+
+ set init_file $dir/$dbfile.init
+ catch { file copy -force $dir/$dbfile $init_file } res
+ copy_extent_file $dir $dbfile init
+
+ #
+ # If we don't abort, then we expect success.
+ # If we abort, we expect no file removed until recovery is run.
+ #
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ set t [$env txn]
+ error_check_good txn_begin [is_valid_txn $t $env] TRUE
+
+ set dbcmd "$db get -txn $t -consume"
+ set ret [eval $dbcmd]
+ error_check_good db_sync [$db sync] 0
+
+ catch { file copy -force $dir/$dbfile $dir/$dbfile.afterop } res
+ copy_extent_file $dir $dbfile afterop
+
+ error_check_good txn_$txncmd:$t [$t $txncmd] 0
+ error_check_good db_sync [$db sync] 0
+ set dbq [make_ext_filename $dir $dbfile $extnum]
+ if {$txncmd == "abort"} {
+ #
+ # Operation was aborted, verify ext did not change.
+ #
+ puts "\t\tCommand executed and aborted."
+
+ #
+ # Check that the file exists. Final state.
+ # Since we aborted the txn, we should be able
+ # to get to our original entry.
+ #
+ error_check_good postconsume.1 [file exists $dbq] 1
+ error_check_good \
+ diff(init,postconsume.2):diff($init_file,$dir/$dbfile)\
+ [dbdump_diff "-dar" $init_file $dir $dbfile] 0
+ } else {
+ #
+ # Operation was committed, verify it does
+ # not exist.
+ #
+ puts "\t\tCommand executed and committed."
+ #
+ # Check file existence. Consume operations remove
+ # the extent when we move off, which we should have
+ # done.
+ error_check_good consume_exists [file exists $dbq] 0
+ }
+ error_check_good db_close [$db close] 0
+ error_check_good env_close [$env close] 0
+
+ #
+ # Run recovery here on what we ended up with. Should be a no-op.
+ #
+ berkdb debug_check
+ puts -nonewline "\t\tAbout to run recovery (no-op) ... "
+ flush stdout
+
+ set stat [catch {exec $util_path/db_recover -h $dir -c} result]
+ if { $stat == 1 } {
+ error "FAIL: Recovery error: $result."
+ return
+ }
+ puts "complete"
+ if { $txncmd == "abort"} {
+ #
+ # Operation was aborted, verify it did not change.
+ #
+ error_check_good \
+ diff(initial,post-recover1):diff($init_file,$dir/$dbfile) \
+ [dbdump_diff "-dar" $init_file $dir $dbfile] 0
+ } else {
+ #
+ # Operation was committed, verify it does
+ # not exist. Both operations should result
+ # in no file existing now that we've run recovery.
+ #
+ error_check_good after_recover1 [file exists $dbq] 0
+ }
+
+ #
+ # Run recovery here. Re-do the operation.
+ # Verify that the file doesn't exist
+ # (if we committed) or change (if we aborted)
+ # when we are done.
+ #
+ catch { file copy -force $dir/$dbfile $init_file } res
+ copy_extent_file $dir $dbfile init
+ berkdb debug_check
+ puts -nonewline "\t\tAbout to run recovery (init) ... "
+ flush stdout
+
+ set stat [catch {exec $util_path/db_recover -h $dir -c} result]
+ if { $stat == 1 } {
+ error "FAIL: Recovery error: $result."
+ return
+ }
+ puts "complete"
+ if { $txncmd == "abort"} {
+ #
+ # Operation was aborted, verify it did not change.
+ #
+ error_check_good \
+ diff(initial,post-recover1):diff($init_file,$dir/$dbfile) \
+ [dbdump_diff "-dar" $init_file $dir $dbfile] 0
+ } else {
+ #
+ # Operation was committed, verify it does
+ # not exist. Both operations should result
+ # in no file existing now that we've run recovery.
+ #
+ error_check_good after_recover2 [file exists $dbq] 0
+ }
+
+ #
+ # Now move the .afterop file to $dbfile. Run recovery again.
+ #
+ set filecopy [glob $dir/*.afterop]
+ set afterop [lindex $filecopy 0]
+ file rename -force $afterop $dir/$dbfile
+ set afterop [string range $afterop \
+ [expr [string last "/" $afterop] + 1] \
+ [string last "." $afterop]]
+ move_file_extent $dir $dbfile afterop rename
+
+ berkdb debug_check
+ puts -nonewline "\t\tAbout to run recovery (afterop) ... "
+ flush stdout
+
+ set stat [catch {exec $util_path/db_recover -h $dir -c} result]
+ if { $stat == 1 } {
+ error "FAIL: Recovery error: $result."
+ return
+ }
+ puts "complete"
+
+ if { $txncmd == "abort"} {
+ #
+ # Operation was aborted, verify it did not change.
+ #
+ error_check_good \
+ diff(initial,post-recover2):diff($init_file,$dir/$dbfile) \
+ [dbdump_diff "-dar" $init_file $dir $dbfile] 0
+ } else {
+ #
+ # Operation was committed, verify it still does
+ # not exist.
+ #
+ error_check_good after_recover3 [file exists $dbq] 0
+ }
+}
diff --git a/storage/bdb/test/recd015.tcl b/storage/bdb/test/recd015.tcl
new file mode 100644
index 00000000000..8c3ad612419
--- /dev/null
+++ b/storage/bdb/test/recd015.tcl
@@ -0,0 +1,160 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: recd015.tcl,v 1.13 2002/09/05 17:23:06 sandstro Exp $
+#
+# TEST recd015
+# TEST	This is a recovery test for testing lots of prepared txns.
+# TEST	It forces txn_recover to be called first with the DB_FIRST
+# TEST	flag and then with DB_NEXT.
+proc recd015 { method args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Recd015: $method ($args) prepared txns test"
+
+ # Create the database and environment.
+
+ set numtxns 1
+ set testfile NULL
+
+ set env_cmd "berkdb_env -create -txn -home $testdir"
+ set msg "\tRecd015.a"
+ puts "$msg Simple test to prepare $numtxns txn "
+ foreach op { abort commit discard } {
+ env_cleanup $testdir
+ recd015_body $env_cmd $testfile $numtxns $msg $op
+ }
+
+ #
+ # Now test large numbers of prepared txns to test DB_NEXT
+ # on txn_recover.
+ #
+ set numtxns 250
+ set testfile recd015.db
+ set txnmax [expr $numtxns + 5]
+ #
+ # For this test we create our database ahead of time so that we
+ # don't need to send methods and args to the script.
+ #
+ env_cleanup $testdir
+ set env_cmd "berkdb_env -create -txn_max $txnmax -txn -home $testdir"
+ set env [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $env] TRUE
+ set db [eval {berkdb_open -create} $omethod -env $env $args $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ error_check_good dbclose [$db close] 0
+ error_check_good envclose [$env close] 0
+
+ set msg "\tRecd015.b"
+ puts "$msg Large test to prepare $numtxns txn "
+ foreach op { abort commit discard } {
+ recd015_body $env_cmd $testfile $numtxns $msg $op
+ }
+
+ set stat [catch {exec $util_path/db_printlog -h $testdir \
+ > $testdir/LOG } ret]
+ error_check_good db_printlog $stat 0
+ fileremove $testdir/LOG
+}
+
+proc recd015_body { env_cmd testfile numtxns msg op } {
+ source ./include.tcl
+
+ sentinel_init
+ set gidf $testdir/gidfile
+ fileremove -f $gidf
+ set pidlist {}
+ puts "$msg.0: Executing child script to prepare txns"
+ berkdb debug_check
+ set p [exec $tclsh_path $test_path/wrap.tcl recd15scr.tcl \
+ $testdir/recdout $env_cmd $testfile $gidf $numtxns &]
+
+ lappend pidlist $p
+ watch_procs $pidlist 5
+ set f1 [open $testdir/recdout r]
+ set r [read $f1]
+ puts $r
+ close $f1
+ fileremove -f $testdir/recdout
+
+ berkdb debug_check
+ puts -nonewline "$msg.1: Running recovery ... "
+ flush stdout
+ berkdb debug_check
+ set env [eval $env_cmd -recover]
+ error_check_good dbenv-recover [is_valid_env $env] TRUE
+ puts "complete"
+
+ puts "$msg.2: getting txns from txn_recover"
+ set txnlist [$env txn_recover]
+ error_check_good txnlist_len [llength $txnlist] $numtxns
+
+ set gfd [open $gidf r]
+ set i 0
+ while { [gets $gfd gid] != -1 } {
+ set gids($i) $gid
+ incr i
+ }
+ close $gfd
+ #
+ # Make sure we have as many as we expect
+ error_check_good num_gids $i $numtxns
+
+ set i 0
+ puts "$msg.3: comparing GIDs and $op txns"
+ foreach tpair $txnlist {
+ set txn [lindex $tpair 0]
+ set gid [lindex $tpair 1]
+ error_check_good gidcompare $gid $gids($i)
+ error_check_good txn:$op [$txn $op] 0
+ incr i
+ }
+ if { $op != "discard" } {
+ error_check_good envclose [$env close] 0
+ return
+ }
+ #
+ # If we discarded, now do it again and randomly resolve some
+ # until all txns are resolved.
+ #
+ puts "$msg.4: resolving/discarding txns"
+ set txnlist [$env txn_recover]
+ set len [llength $txnlist]
+ set opval(1) "abort"
+ set opcnt(1) 0
+ set opval(2) "commit"
+ set opcnt(2) 0
+ set opval(3) "discard"
+ set opcnt(3) 0
+ while { $len != 0 } {
+ set opicnt(1) 0
+ set opicnt(2) 0
+ set opicnt(3) 0
+ #
+ # Abort/commit or discard them randomly until
+ # all are resolved.
+ #
+ for { set i 0 } { $i < $len } { incr i } {
+ set t [lindex $txnlist $i]
+ set txn [lindex $t 0]
+ set newop [berkdb random_int 1 3]
+ set ret [$txn $opval($newop)]
+ error_check_good txn_$opval($newop):$i $ret 0
+ incr opcnt($newop)
+ incr opicnt($newop)
+ }
+# puts "$opval(1): $opicnt(1) Total: $opcnt(1)"
+# puts "$opval(2): $opicnt(2) Total: $opcnt(2)"
+# puts "$opval(3): $opicnt(3) Total: $opcnt(3)"
+
+ set txnlist [$env txn_recover]
+ set len [llength $txnlist]
+ }
+
+ error_check_good envclose [$env close] 0
+}
diff --git a/storage/bdb/test/recd016.tcl b/storage/bdb/test/recd016.tcl
new file mode 100644
index 00000000000..504aca09617
--- /dev/null
+++ b/storage/bdb/test/recd016.tcl
@@ -0,0 +1,183 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: recd016.tcl,v 11.8 2002/09/05 17:23:07 sandstro Exp $
+#
+# TEST recd016
+# TEST	This is a recovery test for running recovery while another
+# TEST	recovery is already in progress.  While bad things may or may not
+# TEST	happen, if recovery is then run properly, things should be correct.
+proc recd016 { method args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Recd016: $method ($args) simultaneous recovery test"
+ puts "Recd016: Skipping; waiting on SR #6277"
+ return
+
+ # Create the database and environment.
+ set testfile recd016.db
+
+	#
+	# For this test we create and fill our database ahead of time;
+	# the simultaneous recovery processes then contend over the
+	# resulting log.
+	#
+ cleanup $testdir NULL
+
+ #
+ # Use a smaller log to make more files and slow down recovery.
+ #
+ set gflags ""
+ set pflags ""
+ set log_max [expr 256 * 1024]
+ set nentries 10000
+ set nrec 6
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ set t4 $testdir/t4
+ set t5 $testdir/t5
+ # Since we are using txns, we need at least 1 lock per
+ # record (for queue). So set lock_max accordingly.
+ set lkmax [expr $nentries * 2]
+
+ puts "\tRecd016.a: Create environment and database"
+ set env_cmd "berkdb_env -create -log_max $log_max \
+ -lock_max $lkmax -txn -home $testdir"
+ set env [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $env] TRUE
+ set db [eval {berkdb_open -create} \
+ $omethod -auto_commit -env $env $args $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set did [open $dict]
+ set abid [open $t4 w]
+
+ if { [is_record_based $method] == 1 } {
+ set checkfunc recd016_recno.check
+ append gflags " -recno"
+ } else {
+ set checkfunc recd016.check
+ }
+ puts "\tRecd016.b: put/get loop"
+ # Here is the loop where we put and get each key/data pair
+ set count 0
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ global kvals
+
+ set key [expr $count + 1]
+ if { 0xffffffff > 0 && $key > 0xffffffff } {
+ set key [expr $key - 0x100000000]
+ }
+ if { $key == 0 || $key - 0xffffffff == 1 } {
+ incr key
+ incr count
+ }
+ set kvals($key) [pad_data $method $str]
+ } else {
+ set key $str
+ set str [reverse $str]
+ }
+ #
+		# Start a transaction.  Alternately commit and abort them.
+		# This creates a bigger log for the recovery runs to
+		# collide over.
+ #
+ set txn [$env txn]
+ set ret [eval \
+ {$db put} -txn $txn $pflags {$key [chop_data $method $str]}]
+ error_check_good put $ret 0
+
+ if {[expr $count % 2] == 0} {
+ set ret [$txn commit]
+ error_check_good txn_commit $ret 0
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good commit_get \
+ $ret [list [list $key [pad_data $method $str]]]
+ } else {
+ set ret [$txn abort]
+ error_check_good txn_abort $ret 0
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good abort_get [llength $ret] 0
+ puts $abid $key
+ }
+ incr count
+ }
+ close $did
+ close $abid
+ error_check_good dbclose [$db close] 0
+ error_check_good envclose [$env close] 0
+
+ set pidlist {}
+ puts "\tRecd016.c: Start up $nrec recovery processes at once"
+ for {set i 0} {$i < $nrec} {incr i} {
+ set p [exec $util_path/db_recover -h $testdir -c &]
+ lappend pidlist $p
+ }
+ watch_procs $pidlist 5
+ #
+	# Now that they are all done, run recovery correctly.
+ puts "\tRecd016.d: Run recovery process"
+ set stat [catch {exec $util_path/db_recover -h $testdir -c} result]
+ if { $stat == 1 } {
+ error "FAIL: Recovery error: $result."
+ }
+
+ puts "\tRecd016.e: Open, dump and check database"
+ # Now compare the keys to see if they match the dictionary (or ints)
+ if { [is_record_based $method] == 1 } {
+ set oid [open $t2 w]
+ for {set i 1} {$i <= $nentries} {incr i} {
+ set j $i
+ if { 0xffffffff > 0 && $j > 0xffffffff } {
+ set j [expr $j - 0x100000000]
+ }
+ if { $j == 0 } {
+ incr i
+ incr j
+ }
+ puts $oid $j
+ }
+ close $oid
+ } else {
+ set q q
+ filehead $nentries $dict $t2
+ }
+ filesort $t2 $t3
+ file rename -force $t3 $t2
+ filesort $t4 $t3
+ file rename -force $t3 $t4
+ fileextract $t2 $t4 $t3
+ file rename -force $t3 $t5
+
+ set env [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $env] TRUE
+
+ open_and_dump_file $testfile $env $t1 $checkfunc \
+ dump_file_direction "-first" "-next"
+ filesort $t1 $t3
+ error_check_good envclose [$env close] 0
+
+ error_check_good Recd016:diff($t5,$t3) \
+ [filecmp $t5 $t3] 0
+
+ set stat [catch {exec $util_path/db_printlog -h $testdir \
+ > $testdir/LOG } ret]
+ error_check_good db_printlog $stat 0
+ fileremove $testdir/LOG
+}
+
+# Check function for recd016; keys and data are identical
+proc recd016.check { key data } {
+ error_check_good "key/data mismatch" $data [reverse $key]
+}
+
+proc recd016_recno.check { key data } {
+ global kvals
+
+ error_check_good key"$key"_exists [info exists kvals($key)] 1
+ error_check_good "key/data mismatch, key $key" $data $kvals($key)
+}
diff --git a/storage/bdb/test/recd017.tcl b/storage/bdb/test/recd017.tcl
new file mode 100644
index 00000000000..9f8208c1b3e
--- /dev/null
+++ b/storage/bdb/test/recd017.tcl
@@ -0,0 +1,151 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: recd017.tcl,v 11.4 2002/09/03 16:44:37 sue Exp $
+#
+# TEST recd017
+# TEST	Test recovery and security.  This is basically a watered-down
+# TEST	version of recd001, just to verify that encrypted environments
+# TEST	can be recovered.
+proc recd017 { method {select 0} args} {
+ global fixed_len
+ global encrypt
+ global passwd
+ source ./include.tcl
+
+ set orig_fixed_len $fixed_len
+ set opts [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Recd017: $method operation/transaction tests"
+
+ # Create the database and environment.
+ env_cleanup $testdir
+
+ # The recovery tests were originally written to
+ # do a command, abort, do it again, commit, and then
+ # repeat the sequence with another command. Each command
+ # tends to require that the previous command succeeded and
+ # left the database a certain way. To avoid cluttering up the
+ # op_recover interface as well as the test code, we create two
+ # databases; one does abort and then commit for each op, the
+ # other does prepare, prepare-abort, and prepare-commit for each
+	# op.  If all goes well, this allows each command to depend on
+	# exactly one successful iteration of the previous command.
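+	#
+	# Schematically, for each command CMD in the list below:
+	#	$testfile:  abort CMD, then commit CMD
+	#	$testfile2: prepare-abort, prepare-discard, prepare-commit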
+ set testfile recd017.db
+ set testfile2 recd017-2.db
+
+ set flags "-create -encryptaes $passwd -txn -home $testdir"
+
+ puts "\tRecd017.a.0: creating environment"
+ set env_cmd "berkdb_env $flags"
+ convert_encrypt $env_cmd
+ set dbenv [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $dbenv] TRUE
+
+ #
+ # We need to create a database to get the pagesize (either
+ # the default or whatever might have been specified).
+ # Then remove it so we can compute fixed_len and create the
+ # real database.
+ set oflags "-create $omethod -mode 0644 \
+ -env $dbenv -encrypt $opts $testfile"
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+ set stat [$db stat]
+ #
+ # Compute the fixed_len based on the pagesize being used.
+ # We want the fixed_len to be 1/4 the pagesize.
+ #
+ set pg [get_pagesize $stat]
+ error_check_bad get_pagesize $pg -1
+ set fixed_len [expr $pg / 4]
+ error_check_good db_close [$db close] 0
+ error_check_good dbremove [berkdb dbremove -env $dbenv $testfile] 0
+
+ # Convert the args again because fixed_len is now real.
+ # Create the databases and close the environment.
+ # cannot specify db truncate in txn protected env!!!
+ set opts [convert_args $method ""]
+ convert_encrypt $env_cmd
+ set omethod [convert_method $method]
+ set oflags "-create $omethod -mode 0644 \
+ -env $dbenv -encrypt $opts $testfile"
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+ error_check_good db_close [$db close] 0
+
+ set oflags "-create $omethod -mode 0644 \
+ -env $dbenv -encrypt $opts $testfile2"
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+ error_check_good db_close [$db close] 0
+
+ error_check_good env_close [$dbenv close] 0
+
+ puts "\tRecd017.a.1: Verify db_printlog can read logfile"
+ set tmpfile $testdir/printlog.out
+ set stat [catch {exec $util_path/db_printlog -h $testdir -P $passwd \
+ > $tmpfile} ret]
+ error_check_good db_printlog $stat 0
+ fileremove $tmpfile
+
+ # List of recovery tests: {CMD MSG} pairs.
+ set rlist {
+ { {DB put -txn TXNID $key $data} "Recd017.b: put"}
+ { {DB del -txn TXNID $key} "Recd017.c: delete"}
+ }
+
+ # These are all the data values that we're going to need to read
+ # through the operation table and run the recovery tests.
+
+ if { [is_record_based $method] == 1 } {
+ set key 1
+ } else {
+ set key recd017_key
+ }
+ set data recd017_data
+ foreach pair $rlist {
+ set cmd [subst [lindex $pair 0]]
+ set msg [lindex $pair 1]
+ if { $select != 0 } {
+ set tag [lindex $msg 0]
+ set tail [expr [string length $tag] - 2]
+ set tag [string range $tag $tail $tail]
+ if { [lsearch $select $tag] == -1 } {
+ continue
+ }
+ }
+
+ if { [is_queue $method] != 1 } {
+ if { [string first append $cmd] != -1 } {
+ continue
+ }
+ if { [string first consume $cmd] != -1 } {
+ continue
+ }
+ }
+
+# if { [is_fixed_length $method] == 1 } {
+# if { [string first partial $cmd] != -1 } {
+# continue
+# }
+# }
+ op_recover abort $testdir $env_cmd $testfile $cmd $msg
+ op_recover commit $testdir $env_cmd $testfile $cmd $msg
+ #
+ # Note that since prepare-discard ultimately aborts
+ # the txn, it must come before prepare-commit.
+ #
+ op_recover prepare-abort $testdir $env_cmd $testfile2 \
+ $cmd $msg
+ op_recover prepare-discard $testdir $env_cmd $testfile2 \
+ $cmd $msg
+ op_recover prepare-commit $testdir $env_cmd $testfile2 \
+ $cmd $msg
+ }
+ set fixed_len $orig_fixed_len
+ return
+}
diff --git a/storage/bdb/test/recd018.tcl b/storage/bdb/test/recd018.tcl
new file mode 100644
index 00000000000..fb5a589d851
--- /dev/null
+++ b/storage/bdb/test/recd018.tcl
@@ -0,0 +1,110 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: recd018.tcl,v 11.2 2002/03/13 21:04:20 sue Exp $
+#
+# TEST recd018
+# TEST	Test recovery of closely interspersed checkpoints and commits.
+# TEST
+# This test is derived from the error case in #4230.
+#
+proc recd018 { method {ndbs 10} args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+ set tnum 18
+
+ puts "Recd0$tnum ($args): $method recovery of checkpoints and commits."
+
+ set tname recd0$tnum.db
+ env_cleanup $testdir
+
+ set i 0
+ if { [is_record_based $method] == 1 } {
+ set key 1
+ set key2 2
+ } else {
+ set key KEY
+ set key2 KEY2
+ }
+
+ puts "\tRecd0$tnum.a: Create environment and database."
+ set flags "-create -txn -home $testdir"
+
+ set env_cmd "berkdb_env $flags"
+ set dbenv [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $dbenv] TRUE
+
+ set oflags "-auto_commit -env $dbenv -create -mode 0644 $args $omethod"
+ for { set i 0 } { $i < $ndbs } { incr i } {
+ set testfile $tname.$i
+ set db($i) [eval {berkdb_open} $oflags $testfile]
+ error_check_good dbopen [is_valid_db $db($i)] TRUE
+ set file $testdir/$testfile.init
+ catch { file copy -force $testdir/$testfile $file} res
+ copy_extent_file $testdir $testfile init
+ }
+
+ # Main loop: Write a record or two to each database.
+ # Do a commit immediately followed by a checkpoint after each one.
+ error_check_good "Initial Checkpoint" [$dbenv txn_checkpoint] 0
+
+	puts "\tRecd0$tnum.b: Put/Commit/Checkpoint to $ndbs databases"
+ for { set i 0 } { $i < $ndbs } { incr i } {
+ set testfile $tname.$i
+ set data $i
+
+ # Put, in a txn.
+ set txn [$dbenv txn]
+ error_check_good txn_begin [is_valid_txn $txn $dbenv] TRUE
+ error_check_good db_put \
+ [$db($i) put -txn $txn $key [chop_data $method $data]] 0
+ error_check_good txn_commit [$txn commit] 0
+ error_check_good txn_checkpt [$dbenv txn_checkpoint] 0
+ if { [expr $i % 2] == 0 } {
+ set txn [$dbenv txn]
+ error_check_good txn2 [is_valid_txn $txn $dbenv] TRUE
+ error_check_good db_put [$db($i) put \
+ -txn $txn $key2 [chop_data $method $data]] 0
+ error_check_good txn_commit [$txn commit] 0
+ error_check_good txn_checkpt [$dbenv txn_checkpoint] 0
+ }
+ error_check_good db_close [$db($i) close] 0
+ set file $testdir/$testfile.afterop
+ catch { file copy -force $testdir/$testfile $file} res
+ copy_extent_file $testdir $testfile afterop
+ }
+ error_check_good env_close [$dbenv close] 0
+
+	# Now run recovery three ways: as a no-op on the current files,
+	# against the initial copies, and against the after-op copies.
+ puts "\tRecd0$tnum.c: Run recovery (no-op)"
+ set ret [catch {exec $util_path/db_recover -h $testdir} r]
+ error_check_good db_recover $ret 0
+
+ puts "\tRecd0$tnum.d: Run recovery (initial file)"
+ for { set i 0 } {$i < $ndbs } { incr i } {
+ set testfile $tname.$i
+ set file $testdir/$testfile.init
+ catch { file copy -force $file $testdir/$testfile } res
+ move_file_extent $testdir $testfile init copy
+ }
+
+ set ret [catch {exec $util_path/db_recover -h $testdir} r]
+ error_check_good db_recover $ret 0
+
+ puts "\tRecd0$tnum.e: Run recovery (after file)"
+ for { set i 0 } {$i < $ndbs } { incr i } {
+ set testfile $tname.$i
+ set file $testdir/$testfile.afterop
+ catch { file copy -force $file $testdir/$testfile } res
+ move_file_extent $testdir $testfile afterop copy
+ }
+
+ set ret [catch {exec $util_path/db_recover -h $testdir} r]
+ error_check_good db_recover $ret 0
+
+}
diff --git a/storage/bdb/test/recd019.tcl b/storage/bdb/test/recd019.tcl
new file mode 100644
index 00000000000..dd67b7dcb2a
--- /dev/null
+++ b/storage/bdb/test/recd019.tcl
@@ -0,0 +1,121 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: recd019.tcl,v 11.3 2002/08/08 15:38:07 bostic Exp $
+#
+# TEST recd019
+# TEST Test txn id wrap-around and recovery.
+proc recd019 { method {numid 50} args} {
+ global fixed_len
+ global txn_curid
+ global log_log_record_types
+ source ./include.tcl
+
+ set orig_fixed_len $fixed_len
+ set opts [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Recd019: $method txn id wrap-around test"
+
+ # Create the database and environment.
+ env_cleanup $testdir
+
+ set testfile recd019.db
+
+ set flags "-create -txn -home $testdir"
+
+ puts "\tRecd019.a: creating environment"
+ set env_cmd "berkdb_env $flags"
+ set dbenv [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $dbenv] TRUE
+
+ # Test txn wrapping. Force a txn_recycle msg.
+ #
+ set new_curid $txn_curid
+ set new_maxid [expr $new_curid + $numid]
+ error_check_good txn_id_set [$dbenv txn_id_set $new_curid $new_maxid] 0
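+	# With ids confined to [$new_curid, $new_curid + $numid], the
+	# [expr $numid * 3 - 2] puts below force the id space to wrap
+	# more than twice, each wrap logging a txn_recycle record.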
+
+ #
+ # We need to create a database to get the pagesize (either
+ # the default or whatever might have been specified).
+ # Then remove it so we can compute fixed_len and create the
+ # real database.
+ set oflags "-create $omethod -mode 0644 \
+ -env $dbenv $opts $testfile"
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+ set stat [$db stat]
+ #
+ # Compute the fixed_len based on the pagesize being used.
+ # We want the fixed_len to be 1/4 the pagesize.
+ #
+ set pg [get_pagesize $stat]
+ error_check_bad get_pagesize $pg -1
+ set fixed_len [expr $pg / 4]
+ error_check_good db_close [$db close] 0
+ error_check_good dbremove [berkdb dbremove -env $dbenv $testfile] 0
+
+ # Convert the args again because fixed_len is now real.
+ # Create the databases and close the environment.
+ # cannot specify db truncate in txn protected env!!!
+ set opts [convert_args $method ""]
+ set omethod [convert_method $method]
+ set oflags "-create $omethod -mode 0644 \
+ -env $dbenv -auto_commit $opts $testfile"
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ #
+ # Force txn ids to wrap twice and then some.
+ #
+ set nument [expr $numid * 3 - 2]
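+	# (With the default numid of 50 this is 148 txns: enough to run
+	# through the 50-id window twice and then some.)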
+ puts "\tRecd019.b: Wrapping txn ids after $numid"
+ set file $testdir/$testfile.init
+ catch { file copy -force $testdir/$testfile $file} res
+ copy_extent_file $testdir $testfile init
+ for { set i 1 } { $i <= $nument } { incr i } {
+ # Use 'i' as key so method doesn't matter
+ set key $i
+ set data $i
+
+ # Put, in a txn.
+ set txn [$dbenv txn]
+ error_check_good txn_begin [is_valid_txn $txn $dbenv] TRUE
+ error_check_good db_put \
+ [$db put -txn $txn $key [chop_data $method $data]] 0
+ error_check_good txn_commit [$txn commit] 0
+ }
+ error_check_good db_close [$db close] 0
+ set file $testdir/$testfile.afterop
+ catch { file copy -force $testdir/$testfile $file} res
+ copy_extent_file $testdir $testfile afterop
+ error_check_good env_close [$dbenv close] 0
+
+ # Keep track of the log types we've seen
+ if { $log_log_record_types == 1} {
+ logtrack_read $testdir
+ }
+
+ # Now, loop through and recover.
+ puts "\tRecd019.c: Run recovery (no-op)"
+ set ret [catch {exec $util_path/db_recover -h $testdir} r]
+ error_check_good db_recover $ret 0
+
+ puts "\tRecd019.d: Run recovery (initial file)"
+ set file $testdir/$testfile.init
+ catch { file copy -force $file $testdir/$testfile } res
+ move_file_extent $testdir $testfile init copy
+
+ set ret [catch {exec $util_path/db_recover -h $testdir} r]
+ error_check_good db_recover $ret 0
+
+ puts "\tRecd019.e: Run recovery (after file)"
+ set file $testdir/$testfile.afterop
+ catch { file copy -force $file $testdir/$testfile } res
+ move_file_extent $testdir $testfile afterop copy
+
+ set ret [catch {exec $util_path/db_recover -h $testdir} r]
+ error_check_good db_recover $ret 0
+}
diff --git a/storage/bdb/test/recd020.tcl b/storage/bdb/test/recd020.tcl
new file mode 100644
index 00000000000..93a89f32578
--- /dev/null
+++ b/storage/bdb/test/recd020.tcl
@@ -0,0 +1,180 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: recd020.tcl,v 11.8 2002/08/08 15:38:08 bostic Exp $
+#
+# TEST recd020
+# TEST Test recovery after checksum error.
+proc recd020 { method args} {
+ global fixed_len
+ global log_log_record_types
+ global datastr
+ source ./include.tcl
+
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Recd020: skipping for specific pagesizes"
+ return
+ }
+ if { [is_queueext $method] == 1 } {
+ puts "Recd020: skipping for method $method"
+ return
+ }
+
+ puts "Recd020: $method recovery after checksum error"
+
+ # Create the database and environment.
+ env_cleanup $testdir
+
+ set testfile recd020.db
+ set flags "-create -txn -home $testdir"
+
+ puts "\tRecd020.a: creating environment"
+ set env_cmd "berkdb_env $flags"
+ set dbenv [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $dbenv] TRUE
+
+ set pgsize 512
+ set orig_fixed_len $fixed_len
+ set fixed_len [expr $pgsize / 4]
+ set opts [convert_args $method $args]
+ set omethod [convert_method $method]
+ set oflags "-create $omethod -mode 0644 \
+ -auto_commit -chksum -pagesize $pgsize $opts $testfile"
+ set db [eval {berkdb_open} -env $dbenv $oflags]
+
+ #
+ # Put some data.
+ #
+ set nument 50
+ puts "\tRecd020.b: Put some data"
+ for { set i 1 } { $i <= $nument } { incr i } {
+ # Use 'i' as key so method doesn't matter
+ set key $i
+ set data $i$datastr
+
+ # Put, in a txn.
+ set txn [$dbenv txn]
+ error_check_good txn_begin [is_valid_txn $txn $dbenv] TRUE
+ error_check_good db_put \
+ [$db put -txn $txn $key [chop_data $method $data]] 0
+ error_check_good txn_commit [$txn commit] 0
+ }
+ error_check_good db_close [$db close] 0
+ error_check_good env_close [$dbenv close] 0
+ #
+ # We need to remove the env so that we don't get cached
+ # pages.
+ #
+ error_check_good env_remove [berkdb envremove -home $testdir] 0
+
+ puts "\tRecd020.c: Overwrite part of database"
+ #
+ # First just touch some bits in the file. We want to go
+ # through the paging system, so touch some data pages,
+ # like the middle of page 2.
+ # We should get a checksum error for the checksummed file.
+ #
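+	# With the 512-byte pagesize used here, that is byte offset
+	# 512 * 2 + 200 = 1224: read the byte there, complement it, and
+	# write it back in place.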
+ set pg 2
+ set fid [open $testdir/$testfile r+]
+ fconfigure $fid -translation binary
+ set seeklen [expr $pgsize * $pg + 200]
+ seek $fid $seeklen start
+ set byte [read $fid 1]
+ binary scan $byte c val
+ set newval [expr ~$val]
+ set newbyte [binary format c $newval]
+ seek $fid $seeklen start
+ puts -nonewline $fid $newbyte
+ close $fid
+
+ #
+ # Verify we get the checksum error. When we get it, it should
+ # log the error as well, so when we run recovery we'll need to
+ # do catastrophic recovery. We do this in a sub-process so that
+ # the files are closed after the panic.
+ #
+ set f1 [open |$tclsh_path r+]
+ puts $f1 "source $test_path/test.tcl"
+
+ set env_cmd "berkdb_env_noerr $flags"
+ set dbenv [send_cmd $f1 $env_cmd]
+ error_check_good dbenv [is_valid_env $dbenv] TRUE
+
+ set db [send_cmd $f1 "{berkdb_open_noerr} -env $dbenv $oflags"]
+ error_check_good db [is_valid_db $db] TRUE
+
+ # We need to set non-blocking mode so that after each command
+ # we can read all the remaining output from that command and
+ # we can know what the output from one command is.
+ fconfigure $f1 -blocking 0
+ set ret [read $f1]
+ set got_err 0
+ for { set i 1 } { $i <= $nument } { incr i } {
+ set stat [send_cmd $f1 "catch {$db get $i} r"]
+ set getret [send_cmd $f1 "puts \$r"]
+ set ret [read $f1]
+ if { $stat == 1 } {
+ error_check_good dbget:fail [is_substr $getret \
+ "checksum error: catastrophic recovery required"] 1
+ set got_err 1
+ # Now verify that it was an error on the page we set.
+ error_check_good dbget:pg$pg [is_substr $ret \
+ "failed for page $pg"] 1
+ break
+ } else {
+ set key [lindex [lindex $getret 0] 0]
+ set data [lindex [lindex $getret 0] 1]
+ error_check_good keychk $key $i
+ error_check_good datachk $data \
+ [pad_data $method $i$datastr]
+ }
+ }
+ error_check_good got_chksum $got_err 1
+ set ret [send_cmd $f1 "$db close"]
+ set extra [read $f1]
+ error_check_good db:fail [is_substr $ret "run recovery"] 1
+
+ set ret [send_cmd $f1 "$dbenv close"]
+ error_check_good env_close:fail [is_substr $ret "run recovery"] 1
+ close $f1
+
+ # Keep track of the log types we've seen
+ if { $log_log_record_types == 1} {
+ logtrack_read $testdir
+ }
+
+ puts "\tRecd020.d: Run normal recovery"
+ set ret [catch {exec $util_path/db_recover -h $testdir} r]
+ error_check_good db_recover $ret 1
+ error_check_good dbrec:fail \
+ [is_substr $r "checksum error: catastrophic recovery required"] 1
+
+ catch {fileremove $testdir/$testfile} ret
+ puts "\tRecd020.e: Run catastrophic recovery"
+ set ret [catch {exec $util_path/db_recover -c -h $testdir} r]
+ error_check_good db_recover $ret 0
+
+ #
+ # Now verify the data was reconstructed correctly.
+ #
+ set env_cmd "berkdb_env_noerr $flags"
+ set dbenv [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $dbenv] TRUE
+
+ set db [eval {berkdb_open} -env $dbenv $oflags]
+ error_check_good db [is_valid_db $db] TRUE
+
+ for { set i 1 } { $i <= $nument } { incr i } {
+ set stat [catch {$db get $i} ret]
+ error_check_good stat $stat 0
+ set key [lindex [lindex $ret 0] 0]
+ set data [lindex [lindex $ret 0] 1]
+ error_check_good keychk $key $i
+ error_check_good datachk $data [pad_data $method $i$datastr]
+ }
+ error_check_good db_close [$db close] 0
+ error_check_good env_close [$dbenv close] 0
+}
diff --git a/storage/bdb/test/recd15scr.tcl b/storage/bdb/test/recd15scr.tcl
new file mode 100644
index 00000000000..e1238907a71
--- /dev/null
+++ b/storage/bdb/test/recd15scr.tcl
@@ -0,0 +1,74 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: recd15scr.tcl,v 1.5 2002/01/30 13:18:04 margo Exp $
+#
+# Recd15 - lots of txns - txn prepare script
+# Usage: recd15script envcmd dbfile gidfile numtxns
+# envcmd: command to open env
+# dbfile: name of database file
+# gidfile: name of global id file
+# numtxns: number of txns to start
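+#
+# Example invocation (a sketch; the env command and file names are
+# illustrative only):
+#	recd15scr.tcl {berkdb_env -create -txn -home TESTDIR} \
+#	    recd015.db TESTDIR/gidfile 100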
+
+source ./include.tcl
+source $test_path/test.tcl
+source $test_path/testutils.tcl
+
+set usage "recd15script envcmd dbfile gidfile numtxns"
+
+# Verify usage
+if { $argc != 4 } {
+ puts stderr "FAIL:[timestamp] Usage: $usage"
+ exit
+}
+
+# Initialize arguments
+set envcmd [ lindex $argv 0 ]
+set dbfile [ lindex $argv 1 ]
+set gidfile [ lindex $argv 2 ]
+set numtxns [ lindex $argv 3 ]
+
+set txnmax [expr $numtxns + 5]
+set dbenv [eval $envcmd]
+error_check_good envopen [is_valid_env $dbenv] TRUE
+
+set usedb 0
+if { $dbfile != "NULL" } {
+ set usedb 1
+ set db [berkdb_open -auto_commit -env $dbenv $dbfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+}
+
+puts "\tRecd015script.a: Begin $numtxns txns"
+for {set i 0} {$i < $numtxns} {incr i} {
+ set t [$dbenv txn]
+ error_check_good txnbegin($i) [is_valid_txn $t $dbenv] TRUE
+ set txns($i) $t
+ if { $usedb } {
+ set dbc [$db cursor -txn $t]
+ error_check_good cursor($i) [is_valid_cursor $dbc $db] TRUE
+ set curs($i) $dbc
+ }
+}
+
+puts "\tRecd015script.b: Prepare $numtxns txns"
+set gfd [open $gidfile w+]
+for {set i 0} {$i < $numtxns} {incr i} {
+ if { $usedb } {
+ set dbc $curs($i)
+ error_check_good dbc_close [$dbc close] 0
+ }
+ set t $txns($i)
+ set gid [make_gid recd015script:$t]
+ puts $gfd $gid
+ error_check_good txn_prepare:$t [$t prepare $gid] 0
+}
+close $gfd
+
+#
+# We do not close the db or env, but exit with the txns outstanding.
+#
+puts "\tRecd015script completed successfully"
+flush stdout
diff --git a/storage/bdb/test/recdscript.tcl b/storage/bdb/test/recdscript.tcl
new file mode 100644
index 00000000000..a2afde46e4d
--- /dev/null
+++ b/storage/bdb/test/recdscript.tcl
@@ -0,0 +1,37 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: recdscript.tcl,v 11.4 2002/01/11 15:53:32 bostic Exp $
+#
+# Recovery txn prepare script
+# Usage: recdscript op dir envcmd dbfile gidfile cmd
+# op: primary txn operation
+# dir: test directory
+# envcmd: command to open env
+# dbfile: name of database file
+# gidfile: name of global id file
+# cmd: db command to execute
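+#
+# Example invocation (a sketch; the op, env command, and file names are
+# illustrative only):
+#	recdscript.tcl abort TESTDIR \
+#	    {berkdb_env -create -txn -home TESTDIR} recd001.db \
+#	    TESTDIR/gidfile {db put key data}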
+
+source ./include.tcl
+source $test_path/test.tcl
+
+set usage "recdscript op dir envcmd dbfile gidfile cmd"
+
+# Verify usage
+if { $argc != 6 } {
+ puts stderr "FAIL:[timestamp] Usage: $usage"
+ exit
+}
+
+# Initialize arguments
+set op [ lindex $argv 0 ]
+set dir [ lindex $argv 1 ]
+set envcmd [ lindex $argv 2 ]
+set dbfile [ lindex $argv 3 ]
+set gidfile [ lindex $argv 4 ]
+set cmd [ lindex $argv 5 ]
+
+op_recover_prep $op $dir $envcmd $dbfile $gidfile $cmd
+flush stdout
diff --git a/storage/bdb/test/rep001.tcl b/storage/bdb/test/rep001.tcl
new file mode 100644
index 00000000000..97a640029f5
--- /dev/null
+++ b/storage/bdb/test/rep001.tcl
@@ -0,0 +1,249 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: rep001.tcl,v 1.16 2002/08/26 17:52:19 margo Exp $
+#
+# TEST rep001
+# TEST Replication rename and forced-upgrade test.
+# TEST
+# TEST Run a modified version of test001 in a replicated master environment;
+# TEST verify that the database on the client is correct.
+# TEST Next, remove the database, close the master, upgrade the
+# TEST client, reopen the master, and make sure the new master can correctly
+# TEST run test001 and propagate it in the other direction.
+
+proc rep001 { method { niter 1000 } { tnum "01" } args } {
+ global passwd
+
+ puts "Rep0$tnum: Replication sanity test."
+
+ set envargs ""
+ rep001_sub $method $niter $tnum $envargs $args
+
+ puts "Rep0$tnum: Replication and security sanity test."
+ append envargs " -encryptaes $passwd "
+ append args " -encrypt "
+ rep001_sub $method $niter $tnum $envargs $args
+}
+
+proc rep001_sub { method niter tnum envargs largs } {
+ source ./include.tcl
+ global testdir
+ global encrypt
+
+ env_cleanup $testdir
+
+ replsetup $testdir/MSGQUEUEDIR
+
+ set masterdir $testdir/MASTERDIR
+ set clientdir $testdir/CLIENTDIR
+
+ file mkdir $masterdir
+ file mkdir $clientdir
+
+ if { [is_record_based $method] == 1 } {
+ set checkfunc test001_recno.check
+ } else {
+ set checkfunc test001.check
+ }
+
+ # Open a master.
+ repladd 1
+ set masterenv \
+ [eval {berkdb_env -create -lock_max 2500 -log_max 1000000} \
+ $envargs {-home $masterdir -txn -rep_master -rep_transport \
+ [list 1 replsend]}]
+ error_check_good master_env [is_valid_env $masterenv] TRUE
+
+ # Open a client
+ repladd 2
+ set clientenv [eval {berkdb_env -create} $envargs -txn -lock_max 2500 \
+ {-home $clientdir -rep_client -rep_transport [list 2 replsend]}]
+ error_check_good client_env [is_valid_env $clientenv] TRUE
+
+ # Bring the client online by processing the startup messages.
+ set donenow 0
+ while { 1 } {
+ set nproced 0
+
+ incr nproced [replprocessqueue $masterenv 1]
+ incr nproced [replprocessqueue $clientenv 2]
+
+ if { $nproced == 0 } {
+ break
+ }
+ }
+
+ # Open a test database on the master (so we can test having handles
+ # open across an upgrade).
+ puts "\tRep0$tnum.a:\
+ Opening test database for post-upgrade client logging test."
+ set master_upg_db [berkdb_open \
+ -create -auto_commit -btree -env $masterenv rep0$tnum-upg.db]
+ set puttxn [$masterenv txn]
+ error_check_good master_upg_db_put \
+ [$master_upg_db put -txn $puttxn hello world] 0
+ error_check_good puttxn_commit [$puttxn commit] 0
+ error_check_good master_upg_db_close [$master_upg_db close] 0
+
+ # Run a modified test001 in the master (and update client).
+ puts "\tRep0$tnum.b: Running test001 in replicated env."
+ eval test001 $method $niter 0 $tnum 1 -env $masterenv $largs
+ set donenow 0
+ while { 1 } {
+ set nproced 0
+
+ incr nproced [replprocessqueue $masterenv 1]
+ incr nproced [replprocessqueue $clientenv 2]
+
+ if { $nproced == 0 } {
+ break
+ }
+ }
+
+ # Open the cross-upgrade database on the client and check its contents.
+ set client_upg_db [berkdb_open \
+ -create -auto_commit -btree -env $clientenv rep0$tnum-upg.db]
+ error_check_good client_upg_db_get [$client_upg_db get hello] \
+ [list [list hello world]]
+ # !!! We use this handle later. Don't close it here.
+
+ # Verify the database in the client dir.
+ puts "\tRep0$tnum.c: Verifying client database contents."
+ set testdir [get_home $masterenv]
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ open_and_dump_file test0$tnum.db $clientenv $t1 \
+ $checkfunc dump_file_direction "-first" "-next"
+
+ # Remove the file (and update client).
+ puts "\tRep0$tnum.d: Remove the file on the master and close master."
+ error_check_good remove \
+ [$masterenv dbremove -auto_commit test0$tnum.db] 0
+ error_check_good masterenv_close [$masterenv close] 0
+ set donenow 0
+ while { 1 } {
+ set nproced 0
+
+ incr nproced [replprocessqueue $masterenv 1]
+ incr nproced [replprocessqueue $clientenv 2]
+
+ if { $nproced == 0 } {
+ break
+ }
+ }
+
+	# Rename the env handle so the rest of the test reads clearly.
+ puts "\tRep0$tnum.e: Upgrade client."
+ set newmasterenv $clientenv
+ error_check_good upgrade_client [$newmasterenv rep_start -master] 0
+
+ # Run test001 in the new master
+ puts "\tRep0$tnum.f: Running test001 in new master."
+ eval test001 $method $niter 0 $tnum 1 -env $newmasterenv $largs
+ set donenow 0
+ while { 1 } {
+ set nproced 0
+
+ incr nproced [replprocessqueue $newmasterenv 2]
+
+ if { $nproced == 0 } {
+ break
+ }
+ }
+
+ puts "\tRep0$tnum.g: Reopen old master as client and catch up."
+ # Throttle master so it can't send everything at once
+ $newmasterenv rep_limit 0 [expr 64 * 1024]
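+	# (The rep_limit arguments are gbytes and bytes; the rep_stat
+	# check below verifies that the 64KB cap actually kicked in.)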
+ set newclientenv [eval {berkdb_env -create -recover} $envargs \
+ -txn -lock_max 2500 \
+ {-home $masterdir -rep_client -rep_transport [list 1 replsend]}]
+ error_check_good newclient_env [is_valid_env $newclientenv] TRUE
+ set donenow 0
+ while { 1 } {
+ set nproced 0
+
+ incr nproced [replprocessqueue $newclientenv 1]
+ incr nproced [replprocessqueue $newmasterenv 2]
+
+ if { $nproced == 0 } {
+ break
+ }
+ }
+ set stats [$newmasterenv rep_stat]
+ set nthrottles [getstats $stats {Transmission limited}]
+ error_check_bad nthrottles $nthrottles -1
+ error_check_bad nthrottles $nthrottles 0
+
+ # Run a modified test001 in the new master (and update client).
+ puts "\tRep0$tnum.h: Running test001 in new master."
+ eval test001 $method \
+ $niter $niter $tnum 1 -env $newmasterenv $largs
+ set donenow 0
+ while { 1 } {
+ set nproced 0
+
+ incr nproced [replprocessqueue $newclientenv 1]
+ incr nproced [replprocessqueue $newmasterenv 2]
+
+ if { $nproced == 0 } {
+ break
+ }
+ }
+
+ # Test put to the database handle we opened back when the new master
+ # was a client.
+ puts "\tRep0$tnum.i: Test put to handle opened before upgrade."
+ set puttxn [$newmasterenv txn]
+ error_check_good client_upg_db_put \
+ [$client_upg_db put -txn $puttxn hello there] 0
+ error_check_good puttxn_commit [$puttxn commit] 0
+ set donenow 0
+ while { 1 } {
+ set nproced 0
+
+ incr nproced [replprocessqueue $newclientenv 1]
+ incr nproced [replprocessqueue $newmasterenv 2]
+
+ if { $nproced == 0 } {
+ break
+ }
+ }
+
+ # Close the new master's handle for the upgrade-test database; we
+ # don't need it. Then check to make sure the client did in fact
+ # update the database.
+ error_check_good client_upg_db_close [$client_upg_db close] 0
+ set newclient_upg_db [berkdb_open -env $newclientenv rep0$tnum-upg.db]
+ error_check_good newclient_upg_db_get [$newclient_upg_db get hello] \
+ [list [list hello there]]
+ error_check_good newclient_upg_db_close [$newclient_upg_db close] 0
+
+ # Verify the database in the client dir.
+ puts "\tRep0$tnum.j: Verifying new client database contents."
+ set testdir [get_home $newmasterenv]
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ open_and_dump_file test0$tnum.db $newclientenv $t1 \
+ $checkfunc dump_file_direction "-first" "-next"
+
+ if { [string compare [convert_method $method] -recno] != 0 } {
+ filesort $t1 $t3
+ }
+ error_check_good diff_files($t2,$t3) [filecmp $t2 $t3] 0
+
+ error_check_good newmasterenv_close [$newmasterenv close] 0
+ error_check_good newclientenv_close [$newclientenv close] 0
+
+	if { [lsearch $envargs "-encrypta*"] != -1 } {
+ set encrypt 1
+ }
+ error_check_good verify \
+ [verify_dir $clientdir "\tRep0$tnum.k: " 0 0 1] 0
+ replclose $testdir/MSGQUEUEDIR
+}
diff --git a/storage/bdb/test/rep002.tcl b/storage/bdb/test/rep002.tcl
new file mode 100644
index 00000000000..68666b0d0f0
--- /dev/null
+++ b/storage/bdb/test/rep002.tcl
@@ -0,0 +1,278 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: rep002.tcl,v 11.11 2002/08/08 18:13:12 sue Exp $
+#
+# TEST rep002
+# TEST Basic replication election test.
+# TEST
+# TEST Run a modified version of test001 in a replicated master environment;
+# TEST hold an election among a group of clients to make sure they select
+# TEST a proper master from amongst themselves, in various scenarios.
+
+proc rep002 { method { niter 10 } { nclients 3 } { tnum "02" } args } {
+ source ./include.tcl
+ global elect_timeout
+
+ set elect_timeout 1000000
+
+ if { [is_record_based $method] == 1 } {
+ puts "Rep002: Skipping for method $method."
+ return
+ }
+
+ env_cleanup $testdir
+
+ set qdir $testdir/MSGQUEUEDIR
+ replsetup $qdir
+
+ set masterdir $testdir/MASTERDIR
+ file mkdir $masterdir
+
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set clientdir($i) $testdir/CLIENTDIR.$i
+ file mkdir $clientdir($i)
+ }
+
+ puts "Rep0$tnum: Replication election test with $nclients clients."
+
+ # Open a master.
+ repladd 1
+ set env_cmd(M) "berkdb_env -create -log_max 1000000 -home \
+ $masterdir -txn -rep_master -rep_transport \[list 1 replsend\]"
+ set masterenv [eval $env_cmd(M)]
+ error_check_good master_env [is_valid_env $masterenv] TRUE
+
+ # Open the clients.
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set envid [expr $i + 2]
+ repladd $envid
+ set env_cmd($i) "berkdb_env -create -home $clientdir($i) \
+ -txn -rep_client -rep_transport \[list $envid replsend\]"
+ set clientenv($i) [eval $env_cmd($i)]
+ error_check_good \
+ client_env($i) [is_valid_env $clientenv($i)] TRUE
+ }
+
+ # Run a modified test001 in the master.
+ puts "\tRep0$tnum.a: Running test001 in replicated env."
+ eval test001 $method $niter 0 $tnum 0 -env $masterenv $args
+
+ # Loop, processing first the master's messages, then the client's,
+ # until both queues are empty.
+ while { 1 } {
+ set nproced 0
+
+ incr nproced [replprocessqueue $masterenv 1]
+
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set envid [expr $i + 2]
+ incr nproced [replprocessqueue $clientenv($i) $envid]
+ }
+
+ if { $nproced == 0 } {
+ break
+ }
+ }
+
+ # Verify the database in the client dir.
+ for { set i 0 } { $i < $nclients } { incr i } {
+ puts "\tRep0$tnum.b: Verifying contents of client database $i."
+ set testdir [get_home $masterenv]
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ open_and_dump_file test0$tnum.db $clientenv($i) $testdir/t1 \
+ test001.check dump_file_direction "-first" "-next"
+
+ if { [string compare [convert_method $method] -recno] != 0 } {
+ filesort $t1 $t3
+ }
+ error_check_good diff_files($t2,$t3) [filecmp $t2 $t3] 0
+
+ verify_dir $clientdir($i) "\tRep0$tnum.c: " 0 0 1
+ }
+
+ # Start an election in the first client.
+ puts "\tRep0$tnum.d: Starting election without dead master."
+
+ set elect_pipe(0) [start_election \
+ $qdir $env_cmd(0) [expr $nclients + 1] 20 $elect_timeout]
+
+ tclsleep 1
+
+	# We want to verify that all the clients except the one that
+	# declared the election receive the election message.
+ # We also want to verify that the master declares the election
+ # over by fiat, even if everyone uses a lower priority than 20.
+ # Loop and process all messages, keeping track of which
+ # sites got a HOLDELECTION and checking that the returned newmaster,
+ # if any, is 1 (the master's replication ID).
+ set got_hold_elect(M) 0
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set got_hold_elect($i) 0
+ }
+ while { 1 } {
+ set nproced 0
+ set he 0
+ set nm 0
+
+ incr nproced [replprocessqueue $masterenv 1 0 he nm]
+
+ if { $he == 1 } {
+ set elect_pipe(M) [start_election $qdir \
+ $env_cmd(M) [expr $nclients + 1] 0 $elect_timeout]
+ set got_hold_elect(M) 1
+ }
+ if { $nm != 0 } {
+ error_check_good newmaster_is_master $nm 1
+ }
+
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set he 0
+ set envid [expr $i + 2]
+ incr nproced \
+ [replprocessqueue $clientenv($i) $envid 0 he nm]
+ if { $he == 1 } {
+ # error_check_bad client(0)_in_elect $i 0
+ set elect_pipe(M) [start_election $qdir \
+ $env_cmd($i) [expr $nclients + 1] 0 \
+ $elect_timeout]
+ set got_hold_elect($i) 1
+ }
+ if { $nm != 0 } {
+ error_check_good newmaster_is_master $nm 1
+ }
+ }
+
+ if { $nproced == 0 } {
+ break
+ }
+ }
+
+ error_check_good got_hold_elect(master) $got_hold_elect(M) 0
+ unset got_hold_elect(M)
+ # error_check_good got_hold_elect(0) $got_hold_elect(0) 0
+ unset got_hold_elect(0)
+ for { set i 1 } { $i < $nclients } { incr i } {
+ error_check_good got_hold_elect($i) $got_hold_elect($i) 1
+ unset got_hold_elect($i)
+ }
+
+ cleanup_elections
+
+ # We need multiple clients to proceed from here.
+ if { $nclients < 2 } {
+		puts "\tRep0$tnum: Skipping for fewer than two clients."
+ error_check_good masterenv_close [$masterenv close] 0
+ for { set i 0 } { $i < $nclients } { incr i } {
+ error_check_good clientenv_close($i) \
+ [$clientenv($i) close] 0
+ }
+ return
+ }
+
+ # Make sure all the clients are synced up and ready to be good
+ # voting citizens.
+ error_check_good master_flush [$masterenv rep_flush] 0
+ while { 1 } {
+ set nproced 0
+ incr nproced [replprocessqueue $masterenv 1 0]
+ for { set i 0 } { $i < $nclients } { incr i } {
+ incr nproced [replprocessqueue $clientenv($i) \
+ [expr $i + 2] 0]
+ }
+
+ if { $nproced == 0 } {
+ break
+ }
+ }
+
+ # Now hold another election in the first client, this time with
+ # a dead master.
+ puts "\tRep0$tnum.e: Starting election with dead master."
+ error_check_good masterenv_close [$masterenv close] 0
+
+ for { set i 0 } { $i < $nclients } { incr i } {
+ replclear [expr $i + 2]
+ }
+
+ set elect_pipe(0) [start_election \
+ $qdir $env_cmd(0) [expr $nclients + 1] 20 $elect_timeout]
+
+ tclsleep 1
+
+ # Process messages, and verify that the client with the highest
+ # priority--client #1--wins.
+ set got_newmaster 0
+ set tries 10
+ while { 1 } {
+ set nproced 0
+ set he 0
+ set nm 0
+
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set he 0
+ set envid [expr $i + 2]
+ incr nproced \
+ [replprocessqueue $clientenv($i) $envid 0 he nm]
+ if { $he == 1 } {
+ # Client #1 has priority 100; everyone else
+ # has priority 10.
+ if { $i == 1 } {
+ set pri 100
+ } else {
+ set pri 10
+ }
+ # error_check_bad client(0)_in_elect $i 0
+ set elect_pipe(M) [start_election $qdir \
+ $env_cmd($i) [expr $nclients + 1] $pri \
+ $elect_timeout]
+ set got_hold_elect($i) 1
+ }
+ if { $nm != 0 } {
+ error_check_good newmaster_is_master $nm \
+ [expr 1 + 2]
+ set got_newmaster $nm
+
+ # If this env is the new master, it needs to
+ # configure itself as such--this is a different
+ # env handle from the one that performed the
+ # election.
+ if { $nm == $envid } {
+ error_check_good make_master($i) \
+ [$clientenv($i) rep_start -master] \
+ 0
+ }
+ }
+ }
+
+ # We need to wait around to make doubly sure that the
+ # election has finished...
+ if { $nproced == 0 } {
+ incr tries -1
+ if { $tries == 0 } {
+ break
+ } else {
+ tclsleep 1
+ }
+ }
+ }
+
+ # Verify that client #1 is actually the winner.
+ error_check_good "client 1 wins" $got_newmaster [expr 1 + 2]
+
+ cleanup_elections
+
+ for { set i 0 } { $i < $nclients } { incr i } {
+ error_check_good clientenv_close($i) [$clientenv($i) close] 0
+ }
+
+ replclose $testdir/MSGQUEUEDIR
+}
+
+proc reptwo { args } { eval rep002 $args }
diff --git a/storage/bdb/test/rep003.tcl b/storage/bdb/test/rep003.tcl
new file mode 100644
index 00000000000..7bb7e00ddbf
--- /dev/null
+++ b/storage/bdb/test/rep003.tcl
@@ -0,0 +1,221 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: rep003.tcl,v 11.9 2002/08/09 02:23:50 margo Exp $
+#
+# TEST rep003
+# TEST Repeated shutdown/restart replication test
+# TEST
+# TEST Run a quick put test in a replicated master environment; start up,
+# TEST shut down, and restart client processes, with and without recovery.
+# TEST To ensure that environment state is transient, use DB_PRIVATE.
+
+proc rep003 { method { tnum "03" } args } {
+ source ./include.tcl
+ global testdir rep003_dbname rep003_omethod rep003_oargs
+
+ env_cleanup $testdir
+ set niter 10
+ set rep003_dbname rep003.db
+
+ if { [is_record_based $method] } {
+ puts "Rep0$tnum: Skipping for method $method"
+ return
+ }
+
+ set rep003_omethod [convert_method $method]
+ set rep003_oargs [convert_args $method $args]
+
+ replsetup $testdir/MSGQUEUEDIR
+
+ set masterdir $testdir/MASTERDIR
+ file mkdir $masterdir
+
+ set clientdir $testdir/CLIENTDIR
+ file mkdir $clientdir
+
+ puts "Rep0$tnum: Replication repeated-startup test"
+
+ # Open a master.
+ repladd 1
+ set masterenv [berkdb_env_noerr -create -log_max 1000000 \
+ -home $masterdir -txn -rep_master -rep_transport [list 1 replsend]]
+ error_check_good master_env [is_valid_env $masterenv] TRUE
+
+ puts "\tRep0$tnum.a: Simple client startup test."
+
+ # Put item one.
+ rep003_put $masterenv A1 a-one
+
+ # Open a client.
+ repladd 2
+ set clientenv [berkdb_env_noerr -create -private -home $clientdir -txn \
+ -rep_client -rep_transport [list 2 replsend]]
+ error_check_good client_env [is_valid_env $clientenv] TRUE
+
+ # Put another quick item.
+ rep003_put $masterenv A2 a-two
+
+ # Loop, processing first the master's messages, then the client's,
+ # until both queues are empty.
+ while { 1 } {
+ set nproced 0
+
+ incr nproced [replprocessqueue $masterenv 1]
+ incr nproced [replprocessqueue $clientenv 2]
+
+ if { $nproced == 0 } {
+ break
+ }
+ }
+
+ rep003_check $clientenv A1 a-one
+ rep003_check $clientenv A2 a-two
+
+ error_check_good clientenv_close [$clientenv close] 0
+ replclear 2
+
+ # Now reopen the client after doing another put.
+ puts "\tRep0$tnum.b: Client restart."
+ rep003_put $masterenv B1 b-one
+
+ unset clientenv
+ set clientenv [berkdb_env_noerr -create -private -home $clientdir -txn \
+ -rep_client -rep_transport [list 2 replsend]]
+ error_check_good client_env [is_valid_env $clientenv] TRUE
+
+ rep003_put $masterenv B2 b-two
+
+ # Loop, processing first the master's messages, then the client's,
+ # until both queues are empty.
+ while { 1 } {
+ set nproced 0
+
+ # The items from part A should be present at all times--
+ # if we roll them back, we've screwed up. [#5709]
+ rep003_check $clientenv A1 a-one
+ rep003_check $clientenv A2 a-two
+
+ incr nproced [replprocessqueue $masterenv 1]
+ incr nproced [replprocessqueue $clientenv 2]
+
+ if { $nproced == 0 } {
+ break
+ }
+ }
+
+ rep003_check $clientenv B1 b-one
+ rep003_check $clientenv B2 b-two
+
+ error_check_good clientenv_close [$clientenv close] 0
+
+ replclear 2
+
+ # Now reopen the client after a recovery.
+ puts "\tRep0$tnum.c: Client restart after recovery."
+ rep003_put $masterenv C1 c-one
+
+ unset clientenv
+ set clientenv [berkdb_env_noerr -create -private -home $clientdir -txn \
+ -recover -rep_client -rep_transport [list 2 replsend]]
+ error_check_good client_env [is_valid_env $clientenv] TRUE
+
+ rep003_put $masterenv C2 c-two
+
+ # Loop, processing first the master's messages, then the client's,
+ # until both queues are empty.
+ while { 1 } {
+ set nproced 0
+
+ # The items from part A should be present at all times--
+ # if we roll them back, we've screwed up. [#5709]
+ rep003_check $clientenv A1 a-one
+ rep003_check $clientenv A2 a-two
+ rep003_check $clientenv B1 b-one
+ rep003_check $clientenv B2 b-two
+
+ incr nproced [replprocessqueue $masterenv 1]
+ incr nproced [replprocessqueue $clientenv 2]
+
+ if { $nproced == 0 } {
+ break
+ }
+ }
+
+ rep003_check $clientenv C1 c-one
+ rep003_check $clientenv C2 c-two
+
+ error_check_good clientenv_close [$clientenv close] 0
+
+ replclear 2
+
+ # Now reopen the client after a catastrophic recovery.
+ puts "\tRep0$tnum.d: Client restart after catastrophic recovery."
+ rep003_put $masterenv D1 d-one
+
+ unset clientenv
+ set clientenv [berkdb_env_noerr -create -private -home $clientdir -txn \
+ -recover_fatal -rep_client -rep_transport [list 2 replsend]]
+ error_check_good client_env [is_valid_env $clientenv] TRUE
+
+ rep003_put $masterenv D2 d-two
+
+ # Loop, processing first the master's messages, then the client's,
+ # until both queues are empty.
+ while { 1 } {
+ set nproced 0
+
+ # The items from part A should be present at all times--
+ # if we roll them back, we've screwed up. [#5709]
+ rep003_check $clientenv A1 a-one
+ rep003_check $clientenv A2 a-two
+ rep003_check $clientenv B1 b-one
+ rep003_check $clientenv B2 b-two
+ rep003_check $clientenv C1 c-one
+ rep003_check $clientenv C2 c-two
+
+ incr nproced [replprocessqueue $masterenv 1]
+ incr nproced [replprocessqueue $clientenv 2]
+
+ if { $nproced == 0 } {
+ break
+ }
+ }
+
+ rep003_check $clientenv D1 d-one
+ rep003_check $clientenv D2 d-two
+
+ error_check_good clientenv_close [$clientenv close] 0
+
+ error_check_good masterenv_close [$masterenv close] 0
+ replclose $testdir/MSGQUEUEDIR
+}
+
+proc rep003_put { masterenv key data } {
+ global rep003_dbname rep003_omethod rep003_oargs
+
+ set db [eval {berkdb_open_noerr -create -env $masterenv -auto_commit} \
+ $rep003_omethod $rep003_oargs $rep003_dbname]
+ error_check_good rep3_put_open($key,$data) [is_valid_db $db] TRUE
+
+ set txn [$masterenv txn]
+ error_check_good rep3_put($key,$data) [$db put -txn $txn $key $data] 0
+ error_check_good rep3_put_txn_commit($key,$data) [$txn commit] 0
+
+ error_check_good rep3_put_close($key,$data) [$db close] 0
+}
+
+proc rep003_check { env key data } {
+ global rep003_dbname
+
+ set db [berkdb_open_noerr -rdonly -env $env $rep003_dbname]
+ error_check_good rep3_check_open($key,$data) [is_valid_db $db] TRUE
+
+ set dbt [$db get $key]
+ error_check_good rep3_check($key,$data) \
+ [lindex [lindex $dbt 0] 1] $data
+
+	error_check_good rep3_check_close($key,$data) [$db close] 0
+}
diff --git a/storage/bdb/test/rep004.tcl b/storage/bdb/test/rep004.tcl
new file mode 100644
index 00000000000..e1d4d3b65c7
--- /dev/null
+++ b/storage/bdb/test/rep004.tcl
@@ -0,0 +1,198 @@
+#
+# Copyright (c) 2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: rep004.tcl,v 1.5 2002/08/08 18:13:12 sue Exp $
+#
+# TEST rep004
+# TEST Test of DB_REP_LOGSONLY.
+# TEST
+# TEST Run a quick put test in a master environment that has one logs-only
+# TEST client. Shut down, then run catastrophic recovery in the logs-only
+# TEST client and check that the database is present and populated.
+
+proc rep004 { method { nitems 10 } { tnum "04" } args } {
+ source ./include.tcl
+ global testdir
+
+ env_cleanup $testdir
+ set dbname rep0$tnum.db
+
+ set omethod [convert_method $method]
+ set oargs [convert_args $method $args]
+
+ puts "Rep0$tnum: Test of logs-only replication clients"
+
+ replsetup $testdir/MSGQUEUEDIR
+ set masterdir $testdir/MASTERDIR
+ file mkdir $masterdir
+ set clientdir $testdir/CLIENTDIR
+ file mkdir $clientdir
+ set logsonlydir $testdir/LOGSONLYDIR
+ file mkdir $logsonlydir
+
+ # Open a master, a logsonly replica, and a normal client.
+ repladd 1
+ set masterenv [berkdb_env -create -home $masterdir -txn -rep_master \
+ -rep_transport [list 1 replsend]]
+ error_check_good master_env [is_valid_env $masterenv] TRUE
+
+ repladd 2
+ set loenv [berkdb_env -create -home $logsonlydir -txn -rep_logsonly \
+ -rep_transport [list 2 replsend]]
+ error_check_good logsonly_env [is_valid_env $loenv] TRUE
+
+ repladd 3
+ set clientenv [berkdb_env -create -home $clientdir -txn -rep_client \
+ -rep_transport [list 3 replsend]]
+ error_check_good client_env [is_valid_env $clientenv] TRUE
+
+ puts "\tRep0$tnum.a: Populate database."
+
+ set db [eval {berkdb open -create -mode 0644 -auto_commit} \
+ -env $masterenv $oargs $omethod $dbname]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set did [open $dict]
+ set count 0
+ while { [gets $did str] != -1 && $count < $nitems } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ set data $str
+ } else {
+ set key $str
+ set data [reverse $str]
+ }
+ set kvals($count) $key
+ set dvals($count) [pad_data $method $data]
+
+ set txn [$masterenv txn]
+ error_check_good txn($count) [is_valid_txn $txn $masterenv] TRUE
+
+ set ret [eval \
+ {$db put} -txn $txn {$key [chop_data $method $data]}]
+ error_check_good put($count) $ret 0
+
+ error_check_good commit($count) [$txn commit] 0
+
+ incr count
+ }
+
+ puts "\tRep0$tnum.b: Sync up clients."
+ set donenow 0
+ while { 1 } {
+ set nproced 0
+
+ incr nproced [replprocessqueue $masterenv 1]
+ incr nproced [replprocessqueue $loenv 2]
+ incr nproced [replprocessqueue $clientenv 3]
+
+ if { $nproced == 0 } {
+ break
+ }
+ }
+
+ puts "\tRep0$tnum.c: Get master and logs-only client ahead."
+ set newcount 0
+ while { [gets $did str] != -1 && $newcount < $nitems } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ set data $str
+ } else {
+ set key $str
+ set data [reverse $str]
+ }
+ set kvals($count) $key
+ set dvals($count) [pad_data $method $data]
+
+ set txn [$masterenv txn]
+ error_check_good txn($count) [is_valid_txn $txn $masterenv] TRUE
+
+ set ret [eval \
+ {$db put} -txn $txn {$key [chop_data $method $data]}]
+ error_check_good put($count) $ret 0
+
+ error_check_good commit($count) [$txn commit] 0
+
+ incr count
+ incr newcount
+ }
+
+ error_check_good db_close [$db close] 0
+
+ puts "\tRep0$tnum.d: Sync up logs-only client only, then fail over."
+ set donenow 0
+ while { 1 } {
+ set nproced 0
+
+ incr nproced [replprocessqueue $masterenv 1]
+ incr nproced [replprocessqueue $loenv 2]
+
+ if { $nproced == 0 } {
+ break
+ }
+ }
+
+ # "Crash" the master, and fail over to the upgradeable client.
+ error_check_good masterenv_close [$masterenv close] 0
+ replclear 3
+
+ error_check_good upgrade_client [$clientenv rep_start -master] 0
+ set donenow 0
+ while { 1 } {
+ set nproced 0
+
+ incr nproced [replprocessqueue $clientenv 3]
+ incr nproced [replprocessqueue $loenv 2]
+
+ if { $nproced == 0 } {
+ break
+ }
+ }
+
+ error_check_good loenv_close [$loenv close] 0
+
+ puts "\tRep0$tnum.e: Run catastrophic recovery on logs-only client."
+ set loenv [berkdb_env -create -home $logsonlydir -txn -recover_fatal]
+
+ puts "\tRep0$tnum.f: Verify logs-only client contents."
+ set lodb [eval {berkdb open} -env $loenv $oargs $omethod $dbname]
+ set loc [$lodb cursor]
+
+ set cdb [eval {berkdb open} -env $clientenv $oargs $omethod $dbname]
+ set cc [$cdb cursor]
+
+ # Make sure new master and recovered logs-only replica match.
+ for { set cdbt [$cc get -first] } \
+ { [llength $cdbt] > 0 } { set cdbt [$cc get -next] } {
+ set lodbt [$loc get -next]
+
+ error_check_good newmaster_replica_match $cdbt $lodbt
+ }
+
+ # Reset new master cursor.
+ error_check_good cc_close [$cc close] 0
+ set cc [$cdb cursor]
+
+ for { set lodbt [$loc get -first] } \
+ { [llength $lodbt] > 0 } { set lodbt [$loc get -next] } {
+ set cdbt [$cc get -next]
+
+ error_check_good replica_newmaster_match $lodbt $cdbt
+ }
+
+ error_check_good loc_close [$loc close] 0
+ error_check_good lodb_close [$lodb close] 0
+ error_check_good loenv_close [$loenv close] 0
+
+ error_check_good cc_close [$cc close] 0
+ error_check_good cdb_close [$cdb close] 0
+ error_check_good clientenv_close [$clientenv close] 0
+
+ close $did
+
+ replclose $testdir/MSGQUEUEDIR
+}
diff --git a/storage/bdb/test/rep005.tcl b/storage/bdb/test/rep005.tcl
new file mode 100644
index 00000000000..e0515f1cd62
--- /dev/null
+++ b/storage/bdb/test/rep005.tcl
@@ -0,0 +1,225 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: rep005.tcl,v 11.3 2002/08/08 18:13:13 sue Exp $
+#
+# TEST rep005
+# TEST Replication election test with error handling.
+# TEST
+# TEST Run a modified version of test001 in a replicated master environment;
+# TEST hold an election among a group of clients to make sure they select
+# TEST a proper master from amongst themselves, forcing errors at various
+# TEST locations in the election path.
+
+proc rep005 { method { niter 10 } { tnum "05" } args } {
+ source ./include.tcl
+
+ if { [is_record_based $method] == 1 } {
+ puts "Rep005: Skipping for method $method."
+ return
+ }
+
+ set nclients 3
+ env_cleanup $testdir
+
+ set qdir $testdir/MSGQUEUEDIR
+ replsetup $qdir
+
+ set masterdir $testdir/MASTERDIR
+ file mkdir $masterdir
+
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set clientdir($i) $testdir/CLIENTDIR.$i
+ file mkdir $clientdir($i)
+ }
+
+ puts "Rep0$tnum: Replication election test with $nclients clients."
+
+ # Open a master.
+ repladd 1
+ set env_cmd(M) "berkdb_env -create -log_max 1000000 -home \
+ $masterdir -txn -rep_master -rep_transport \[list 1 replsend\]"
+ set masterenv [eval $env_cmd(M)]
+ error_check_good master_env [is_valid_env $masterenv] TRUE
+
+ # Open the clients.
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set envid [expr $i + 2]
+ repladd $envid
+ set env_cmd($i) "berkdb_env -create -home $clientdir($i) \
+ -txn -rep_client -rep_transport \[list $envid replsend\]"
+ set clientenv($i) [eval $env_cmd($i)]
+ error_check_good \
+ client_env($i) [is_valid_env $clientenv($i)] TRUE
+ }
+
+ # Run a modified test001 in the master.
+ puts "\tRep0$tnum.a: Running test001 in replicated env."
+ eval test001 $method $niter 0 $tnum 0 -env $masterenv $args
+
+ # Loop, processing first the master's messages, then the client's,
+ # until both queues are empty.
+ while { 1 } {
+ set nproced 0
+
+ incr nproced [replprocessqueue $masterenv 1]
+
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set envid [expr $i + 2]
+ incr nproced [replprocessqueue $clientenv($i) $envid]
+ }
+
+ if { $nproced == 0 } {
+ break
+ }
+ }
+
+ # Verify the database in the client dir.
+ for { set i 0 } { $i < $nclients } { incr i } {
+ puts "\tRep0$tnum.b: Verifying contents of client database $i."
+ set testdir [get_home $masterenv]
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ open_and_dump_file test0$tnum.db $clientenv($i) $testdir/t1 \
+ test001.check dump_file_direction "-first" "-next"
+
+ if { [string compare [convert_method $method] -recno] != 0 } {
+ filesort $t1 $t3
+ }
+ error_check_good diff_files($t2,$t3) [filecmp $t2 $t3] 0
+
+ verify_dir $clientdir($i) "\tRep0$tnum.c: " 0 0 1
+ }
+
+ # Make sure all the clients are synced up and ready to be good
+ # voting citizens.
+ error_check_good master_flush [$masterenv rep_flush] 0
+ while { 1 } {
+ set nproced 0
+ incr nproced [replprocessqueue $masterenv 1 0]
+ for { set i 0 } { $i < $nclients } { incr i } {
+ incr nproced [replprocessqueue $clientenv($i) \
+ [expr $i + 2] 0]
+ }
+
+ if { $nproced == 0 } {
+ break
+ }
+ }
+
+ error_check_good masterenv_close [$masterenv close] 0
+
+ for { set i 0 } { $i < $nclients } { incr i } {
+ replclear [expr $i + 2]
+ }
+ #
+	# We set up the error list for each client. We know that the
+	# first client is the one calling the election, so we add the
+	# error location for sending the message (electsend) to that
+	# client's list only.
+ set m "Rep0$tnum"
+ set count 0
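+	# One election runs per error combination: 6 x 5 x 5 = 150 in all.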
+ foreach c0 { electinit electsend electvote1 electwait1 electvote2 \
+ electwait2 } {
+ foreach c1 { electinit electvote1 electwait1 electvote2 \
+ electwait2 } {
+ foreach c2 { electinit electvote1 electwait1 \
+ electvote2 electwait2 } {
+ set elist [list $c0 $c1 $c2]
+ rep005_elect env_cmd clientenv $qdir $m \
+ $count $elist
+ incr count
+ }
+ }
+ }
+
+ for { set i 0 } { $i < $nclients } { incr i } {
+ error_check_good clientenv_close($i) [$clientenv($i) close] 0
+ }
+
+ replclose $testdir/MSGQUEUEDIR
+}
+
+proc rep005_elect { ecmd cenv qdir msg count elist } {
+ global elect_timeout
+ upvar $ecmd env_cmd
+ upvar $cenv clientenv
+
+ set elect_timeout 1000000
+ set nclients [llength $elist]
+
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set err_cmd($i) [lindex $elist $i]
+ }
+ puts "\t$msg.d.$count: Starting election with errors $elist"
+ set elect_pipe(0) [start_election $qdir $env_cmd(0) \
+ [expr $nclients + 1] 20 $elect_timeout $err_cmd(0)]
+
+ tclsleep 1
+
+ # Process messages, and verify that the client with the highest
+ # priority--client #1--wins.
+ set got_newmaster 0
+ set tries 10
+ while { 1 } {
+ set nproced 0
+ set he 0
+ set nm 0
+
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set he 0
+ set envid [expr $i + 2]
+# puts "Processing queue for client $i"
+ incr nproced \
+ [replprocessqueue $clientenv($i) $envid 0 he nm]
+ if { $he == 1 } {
+				# Client #1 has priority 100; everyone else
+				# has priority 10.
+ if { $i == 1 } {
+ set pri 100
+ } else {
+ set pri 10
+ }
+ # error_check_bad client(0)_in_elect $i 0
+# puts "Starting election on client $i"
+ set elect_pipe($i) [start_election $qdir \
+ $env_cmd($i) [expr $nclients + 1] $pri \
+ $elect_timeout $err_cmd($i)]
+ set got_hold_elect($i) 1
+ }
+ if { $nm != 0 } {
+ error_check_good newmaster_is_master $nm \
+ [expr 1 + 2]
+ set got_newmaster $nm
+
+ # If this env is the new master, it needs to
+ # configure itself as such--this is a different
+ # env handle from the one that performed the
+ # election.
+ if { $nm == $envid } {
+ error_check_good make_master($i) \
+ [$clientenv($i) rep_start -master] \
+ 0
+ }
+ }
+ }
+
+ # We need to wait around to make doubly sure that the
+ # election has finished...
+ if { $nproced == 0 } {
+ incr tries -1
+ if { $tries == 0 } {
+ break
+ } else {
+ tclsleep 1
+ }
+ }
+ }
+
+ # Verify that client #1 is actually the winner.
+ error_check_good "client 1 wins" $got_newmaster [expr 1 + 2]
+
+ cleanup_elections
+}
diff --git a/storage/bdb/test/reputils.tcl b/storage/bdb/test/reputils.tcl
new file mode 100644
index 00000000000..340e359f26d
--- /dev/null
+++ b/storage/bdb/test/reputils.tcl
@@ -0,0 +1,659 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: reputils.tcl,v 11.34 2002/08/12 17:54:18 sandstro Exp $
+#
+# Replication testing utilities
+
+# Environment handle for the env containing the replication "communications
+# structure" (really a CDB environment).
+
+# The test environment consists of a queue and a directory (environment)
+# per replication site. The queue is used to hold messages destined for a
+# particular site and the directory will contain the environment for the
+# site. So the environment looks like:
+# $testdir
+# ___________|______________________________
+# / | \ \
+# MSGQUEUEDIR MASTERDIR CLIENTDIR.0 ... CLIENTDIR.N-1
+# | | ... |
+# 1 2 .. N+1
+#
+# The master is site 1 in the MSGQUEUEDIR and clients 1-N map to message
+# queues 2 - N+1.
+#
+# The globals repenv(master) and repenv(0..N-1) contain the environment
+# handles for the sites (repenv(master) is the master's environment and
+# repenv(i) is client i's).
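+#
+# For example, with two clients the queue databases created by repladd
+# (see below) map to sites as follows:
+#	MSGQUEUEDIR/repqueue1.db -> master (envid 1)
+#	MSGQUEUEDIR/repqueue2.db -> CLIENTDIR.0 (envid 2)
+#	MSGQUEUEDIR/repqueue3.db -> CLIENTDIR.1 (envid 3)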
+
+global queueenv
+
+# Array of DB handles, one per machine ID, for the databases that contain
+# messages.
+global queuedbs
+global machids
+
+global elect_timeout
+set elect_timeout 50000000
+set drop 0
+
+# Create the directory structure for replication testing.
+# Open the master and client environments; store these in the global repenv
+# Return the master's environment: "-env masterenv"
+#
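+# A typical caller looks like this (a sketch; the values are illustrative):
+#	set largs [repl_envsetup $envargs $largs $tnum 2]
+#	... run a test against the master using $largs, which now
+#	    carries "-env $masterenv" ...
+#	repl_envprocq $tnum 2
+#	repl_envver0 $tnum $method 2
+#	repl_envclose $tnum $envargs
+#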
+proc repl_envsetup { envargs largs tnum {nclients 1} {droppct 0} { oob 0 } } {
+ source ./include.tcl
+ global clientdir
+ global drop drop_msg
+ global masterdir
+ global repenv
+ global testdir
+
+ env_cleanup $testdir
+
+ replsetup $testdir/MSGQUEUEDIR
+
+ set masterdir $testdir/MASTERDIR
+ file mkdir $masterdir
+ if { $droppct != 0 } {
+ set drop 1
+ set drop_msg [expr 100 / $droppct]
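+		# e.g. a droppct of 10 gives drop_msg 10, so replsend
+		# drops every 10th message.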
+ } else {
+ set drop 0
+ }
+
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set clientdir($i) $testdir/CLIENTDIR.$i
+ file mkdir $clientdir($i)
+ }
+
+ # Open a master.
+ repladd 1
+ #
+ # Set log smaller than default to force changing files,
+ # but big enough so that the tests that use binary files
+ # as keys/data can run.
+ #
+ set lmax [expr 3 * 1024 * 1024]
+ set masterenv [eval {berkdb_env -create -log_max $lmax} $envargs \
+ {-home $masterdir -txn -rep_master -rep_transport \
+ [list 1 replsend]}]
+ error_check_good master_env [is_valid_env $masterenv] TRUE
+ set repenv(master) $masterenv
+
+ # Open clients
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set envid [expr $i + 2]
+ repladd $envid
+ set clientenv [eval {berkdb_env -create} $envargs -txn \
+ {-cachesize { 0 10000000 0 }} -lock_max 10000 \
+ {-home $clientdir($i) -rep_client -rep_transport \
+ [list $envid replsend]}]
+ error_check_good client_env [is_valid_env $clientenv] TRUE
+ set repenv($i) $clientenv
+ }
+ set repenv($i) NULL
+ append largs " -env $masterenv "
+
+ # Process startup messages
+ repl_envprocq $tnum $nclients $oob
+
+ return $largs
+}
+
+# Process all incoming messages. Iterate until there are no messages left
+# in anyone's queue so that we capture all message exchanges. We verify that
+# the requested number of clients matches the number of client environments
+# we have. The oob parameter indicates if we should process the queue
+# with out-of-order delivery. The replprocessqueue procedure actually does
+# the real work of processing the queue -- this routine simply iterates
+# over the various queues and does the initial setup.
+
+proc repl_envprocq { tnum { nclients 1 } { oob 0 }} {
+ global repenv
+ global drop
+
+ set masterenv $repenv(master)
+ for { set i 0 } { 1 } { incr i } {
+ if { $repenv($i) == "NULL"} {
+ break
+ }
+ }
+ error_check_good i_nclients $nclients $i
+
+ set name [format "Repl%03d" $tnum]
+ berkdb debug_check
+ puts -nonewline "\t$name: Processing master/$i client queues"
+ set rand_skip 0
+ if { $oob } {
+ puts " out-of-order"
+ } else {
+ puts " in order"
+ }
+ set do_check 1
+ set droprestore $drop
+ while { 1 } {
+ set nproced 0
+
+ if { $oob } {
+ set rand_skip [berkdb random_int 2 10]
+ }
+ incr nproced [replprocessqueue $masterenv 1 $rand_skip]
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set envid [expr $i + 2]
+ if { $oob } {
+ set rand_skip [berkdb random_int 2 10]
+ }
+ set n [replprocessqueue $repenv($i) \
+ $envid $rand_skip]
+ incr nproced $n
+ }
+
+ if { $nproced == 0 } {
+ # Now that we delay requesting records until
+ # we've had a few records go by, we should always
+ # see that the number of requests is lower than the
+ # number of messages that were enqueued.
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set clientenv $repenv($i)
+ set stats [$clientenv rep_stat]
+ set queued [getstats $stats \
+ {Total log records queued}]
+ error_check_bad queued_stats \
+ $queued -1
+ set requested [getstats $stats \
+ {Log records requested}]
+ error_check_bad requested_stats \
+ $requested -1
+ if { $queued != 0 && $do_check != 0 } {
+ error_check_good num_requested \
+ [expr $requested < $queued] 1
+ }
+
+ $clientenv rep_request 1 1
+ }
+
+ # If we were dropping messages, we might need
+ # to flush the log so that we get everything
+ # and end up in the right state.
+ if { $drop != 0 } {
+ set drop 0
+ set do_check 0
+ $masterenv rep_flush
+ berkdb debug_check
+ puts "\t$name: Flushing Master"
+ } else {
+ break
+ }
+ }
+ }
+
+ # Reset the clients back to the default state in case we
+ # have more processing to do.
+ for { set i 0 } { $i < $nclients } { incr i } {
+ set clientenv $repenv($i)
+ $clientenv rep_request 4 128
+ }
+ set drop $droprestore
+}
+
+# Verify that the databases in the master directory are exactly
+# replicated in each of the client environments.
+
+proc repl_envver0 { tnum method { nclients 1 } } {
+ global clientdir
+ global masterdir
+ global repenv
+
+ # Verify the database in the client dir.
+ # First dump the master.
+ set t1 $masterdir/t1
+ set t2 $masterdir/t2
+ set t3 $masterdir/t3
+ set omethod [convert_method $method]
+ set name [format "Repl%03d" $tnum]
+
+ #
+ # We are interested in the keys of whatever databases are present
+ # in the master environment, so we just call a no-op check function
+ # since we have no idea what the contents of this database really is.
+ # We just need to walk the master and the clients and make sure they
+ # have the same contents.
+ #
+ set cwd [pwd]
+ cd $masterdir
+ set stat [catch {glob test*.db} dbs]
+ cd $cwd
+ if { $stat == 1 } {
+ return
+ }
+ foreach testfile $dbs {
+ open_and_dump_file $testfile $repenv(master) $masterdir/t2 \
+ repl_noop dump_file_direction "-first" "-next"
+
+ if { [string compare [convert_method $method] -recno] != 0 } {
+ filesort $t2 $t3
+ file rename -force $t3 $t2
+ }
+ for { set i 0 } { $i < $nclients } { incr i } {
+ puts "\t$name: Verifying client $i database \
+ $testfile contents."
+ open_and_dump_file $testfile $repenv($i) \
+ $t1 repl_noop dump_file_direction "-first" "-next"
+
+ if { [string compare $omethod "-recno"] != 0 } {
+ filesort $t1 $t3
+ } else {
+ catch {file copy -force $t1 $t3} ret
+ }
+ error_check_good diff_files($t2,$t3) [filecmp $t2 $t3] 0
+ }
+ }
+}
+
+# Remove all the elements from the master and verify that these
+# deletions properly propagated to the clients.
+
+proc repl_verdel { tnum method { nclients 1 } } {
+ global clientdir
+ global masterdir
+ global repenv
+
+ # Delete all items in the master.
+ set name [format "Repl%03d" $tnum]
+ set cwd [pwd]
+ cd $masterdir
+ set stat [catch {glob test*.db} dbs]
+ cd $cwd
+ if { $stat == 1 } {
+ return
+ }
+ foreach testfile $dbs {
+ puts "\t$name: Deleting all items from the master."
+ set txn [$repenv(master) txn]
+ error_check_good txn_begin [is_valid_txn $txn \
+ $repenv(master)] TRUE
+ set db [berkdb_open -txn $txn -env $repenv(master) $testfile]
+ error_check_good reopen_master [is_valid_db $db] TRUE
+ set dbc [$db cursor -txn $txn]
+ error_check_good reopen_master_cursor \
+ [is_valid_cursor $dbc $db] TRUE
+ for { set dbt [$dbc get -first] } { [llength $dbt] > 0 } \
+ { set dbt [$dbc get -next] } {
+ error_check_good del_item [$dbc del] 0
+ }
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good txn_commit [$txn commit] 0
+ error_check_good db_close [$db close] 0
+
+ repl_envprocq $tnum $nclients
+
+ # Check clients.
+ for { set i 0 } { $i < $nclients } { incr i } {
+ puts "\t$name: Verifying emptiness of client database $i."
+
+ set db [berkdb_open -env $repenv($i) $testfile]
+ error_check_good reopen_client($i) \
+ [is_valid_db $db] TRUE
+ set dbc [$db cursor]
+ error_check_good reopen_client_cursor($i) \
+ [is_valid_cursor $dbc $db] TRUE
+
+ error_check_good client($i)_empty \
+ [llength [$dbc get -first]] 0
+
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good db_close [$db close] 0
+ }
+ }
+}
+
+# Replication "check" function for the dump procs that expect to
+# be able to verify the keys and data.
+proc repl_noop { k d } {
+ return
+}
+
+# Close all the master and client environments in a replication test directory.
+proc repl_envclose { tnum envargs } {
+ source ./include.tcl
+ global clientdir
+ global encrypt
+ global masterdir
+ global repenv
+ global testdir
+
+	if { [lsearch $envargs "-encrypta*"] != -1 } {
+ set encrypt 1
+ }
+
+ # In order to make sure that we have fully-synced and ready-to-verify
+ # databases on all the clients, do a checkpoint on the master and
+ # process messages in order to flush all the clients.
+ set drop 0
+ set do_check 0
+ set name [format "Repl%03d" $tnum]
+ berkdb debug_check
+ puts "\t$name: Checkpointing master."
+ error_check_good masterenv_ckp [$repenv(master) txn_checkpoint] 0
+
+ # Count clients.
+ for { set ncli 0 } { 1 } { incr ncli } {
+ if { $repenv($ncli) == "NULL" } {
+ break
+ }
+ }
+ repl_envprocq $tnum $ncli
+
+ error_check_good masterenv_close [$repenv(master) close] 0
+ verify_dir $masterdir "\t$name: " 0 0 1
+ for { set i 0 } { $i < $ncli } { incr i } {
+ error_check_good client($i)_close [$repenv($i) close] 0
+ verify_dir $clientdir($i) "\t$name: " 0 0 1
+ }
+ replclose $testdir/MSGQUEUEDIR
+}
+
+# Close up a replication group
+proc replclose { queuedir } {
+ global queueenv queuedbs machids
+
+ foreach m $machids {
+ set db $queuedbs($m)
+ error_check_good dbr_close [$db close] 0
+ }
+ error_check_good qenv_close [$queueenv close] 0
+ set machids {}
+}
+
+# Create a replication group for testing.
+proc replsetup { queuedir } {
+ global queueenv queuedbs machids
+
+ file mkdir $queuedir
+ set queueenv \
+ [berkdb_env -create -txn -lock_max 20000 -home $queuedir]
+ error_check_good queueenv [is_valid_env $queueenv] TRUE
+
+ if { [info exists queuedbs] } {
+ unset queuedbs
+ }
+ set machids {}
+
+ return $queueenv
+}
+
+# Send function for replication.
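+# Registered with each environment as "-rep_transport [list <envid> replsend]";
+# Berkeley DB then calls it with the message's control and record portions
+# plus the sending and receiving environment IDs.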
+proc replsend { control rec fromid toid } {
+ global queuedbs queueenv machids
+ global drop drop_msg
+
+ #
+ # If we are testing with dropped messages, then we drop every
+ # $drop_msg time. If we do that just return 0 and don't do
+ # anything.
+ #
+ if { $drop != 0 } {
+ incr drop
+ if { $drop == $drop_msg } {
+ set drop 1
+ return 0
+ }
+ }
+ # XXX
+ # -1 is DB_BROADCAST_MID
+ if { $toid == -1 } {
+ set machlist $machids
+ } else {
+ if { [info exists queuedbs($toid)] != 1 } {
+ error "replsend: machid $toid not found"
+ }
+ set machlist [list $toid]
+ }
+
+ foreach m $machlist {
+		# XXX Should a broadcast also be delivered to "self"?
+ if { $m == $fromid } {
+ continue
+ }
+
+ set db $queuedbs($m)
+ set txn [$queueenv txn]
+ $db put -txn $txn -append [list $control $rec $fromid]
+ error_check_good replsend_commit [$txn commit] 0
+ }
+
+ return 0
+}
+
+# Nuke all the pending messages for a particular site.
+proc replclear { machid } {
+ global queuedbs queueenv
+
+ if { [info exists queuedbs($machid)] != 1 } {
+ error "FAIL: replclear: machid $machid not found"
+ }
+
+ set db $queuedbs($machid)
+ set txn [$queueenv txn]
+ set dbc [$db cursor -txn $txn]
+ for { set dbt [$dbc get -rmw -first] } { [llength $dbt] > 0 } \
+ { set dbt [$dbc get -rmw -next] } {
+ error_check_good replclear($machid)_del [$dbc del] 0
+ }
+ error_check_good replclear($machid)_dbc_close [$dbc close] 0
+ error_check_good replclear($machid)_txn_commit [$txn commit] 0
+}
+
+# Add a machine to a replication environment.
+proc repladd { machid } {
+ global queueenv queuedbs machids
+
+ if { [info exists queuedbs($machid)] == 1 } {
+ error "FAIL: repladd: machid $machid already exists"
+ }
+
+ set queuedbs($machid) [berkdb open -auto_commit \
+ -env $queueenv -create -recno -renumber repqueue$machid.db]
+ error_check_good repqueue_create [is_valid_db $queuedbs($machid)] TRUE
+
+ lappend machids $machid
+}
+
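+# The sketch below is a hypothetical helper, not called by any test; it
+# shows the expected calling sequence for the queue procs above: create
+# the queue environment, register two machine IDs, then push one
+# broadcast message.  The machine IDs and payload strings are
+# illustrative assumptions.
+proc replqueue_example { qdir } {
+	global drop
+
+	set drop 0
+	replsetup $qdir
+	repladd 1
+	repladd 2
+	# Machine ID -1 is DB_BROADCAST_MID; in real use the control and
+	# rec arguments come from Berkeley DB's rep_transport callback.
+	replsend "control" "record" 1 -1
+}
+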
+# Process a queue of messages, skipping every "skip_interval" entry.
+# We traverse the entire queue, but since we skip some messages, we
+# may end up leaving things in the queue, which should get picked up
+# on a later run.
+
+proc replprocessqueue { dbenv machid { skip_interval 0 } \
+ { hold_electp NONE } { newmasterp NONE } } {
+ global queuedbs queueenv errorCode
+
+ # hold_electp is a call-by-reference variable which lets our caller
+ # know we need to hold an election.
+ if { [string compare $hold_electp NONE] != 0 } {
+ upvar $hold_electp hold_elect
+ }
+ set hold_elect 0
+
+ # newmasterp is the same idea, only returning the ID of a master
+ # given in a DB_REP_NEWMASTER return.
+ if { [string compare $newmasterp NONE] != 0 } {
+ upvar $newmasterp newmaster
+ }
+ set newmaster 0
+
+ set nproced 0
+
+ set txn [$queueenv txn]
+ set dbc [$queuedbs($machid) cursor -txn $txn]
+
+ error_check_good process_dbc($machid) \
+ [is_valid_cursor $dbc $queuedbs($machid)] TRUE
+
+ for { set dbt [$dbc get -first] } \
+ { [llength $dbt] != 0 } \
+ { set dbt [$dbc get -next] } {
+ set data [lindex [lindex $dbt 0] 1]
+
+ # If skip_interval is nonzero, we want to process messages
+ # out of order. We do this in a simple but slimy way--
+ # continue walking with the cursor without processing the
+ # message or deleting it from the queue, but do increment
+ # "nproced". The way this proc is normally used, the
+ # precise value of nproced doesn't matter--we just don't
+ # assume the queues are empty if it's nonzero. Thus,
+ # if we contrive to make sure it's nonzero, we'll always
+ # come back to records we've skipped on a later call
+ # to replprocessqueue. (If there really are no records,
+ # we'll never get here.)
+ #
+ # Skip every skip_interval'th record (and use a remainder other
+ # than zero so that we're guaranteed to really process at least
+ # one record on every call).
+ if { $skip_interval != 0 } {
+ if { $nproced % $skip_interval == 1 } {
+ incr nproced
+ continue
+ }
+ }
+
+ # We have to play an ugly cursor game here: we currently
+ # hold a lock on the page of messages, but rep_process_message
+ # might need to lock the page with a different cursor in
+ # order to send a response. So save our recno, close
+ # the cursor, and then reopen and reset the cursor.
+ set recno [lindex [lindex $dbt 0] 0]
+ error_check_good dbc_process_close [$dbc close] 0
+ error_check_good txn_commit [$txn commit] 0
+ set ret [catch {$dbenv rep_process_message \
+ [lindex $data 2] [lindex $data 0] [lindex $data 1]} res]
+ set txn [$queueenv txn]
+ set dbc [$queuedbs($machid) cursor -txn $txn]
+ set dbt [$dbc get -set $recno]
+
+ if { $ret != 0 } {
+ if { [is_substr $res DB_REP_HOLDELECTION] } {
+ set hold_elect 1
+ } else {
+ error "FAIL:[timestamp]\
+ rep_process_message returned $res"
+ }
+ }
+
+ incr nproced
+
+ $dbc del
+
+ if { $ret == 0 && $res != 0 } {
+ if { [is_substr $res DB_REP_NEWSITE] } {
+ # NEWSITE; do nothing.
+ } else {
+ set newmaster $res
+ # Break as soon as we get a NEWMASTER message;
+ # our caller needs to handle it.
+ break
+ }
+ }
+
+ if { $hold_elect == 1 } {
+ # Break also on a HOLDELECTION, for the same reason.
+ break
+ }
+
+ }
+
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good txn_commit [$txn commit] 0
+
+ # Return the number of messages processed.
+ return $nproced
+}
+
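+# A minimal sketch, not used by the tests, of how replprocessqueue is
+# normally driven: make passes over every site until a complete pass
+# moves no messages.  The proc name and the fixed machine IDs (1 for
+# the master, 2 for a single client) are illustrative assumptions.
+proc replpump_example { masterenv clientenv } {
+	while { 1 } {
+		set nproced 0
+		incr nproced [replprocessqueue $masterenv 1]
+		incr nproced [replprocessqueue $clientenv 2]
+		if { $nproced == 0 } {
+			break
+		}
+	}
+}
+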
+set run_repl_flag "-run_repl"
+
+proc extract_repl_args { args } {
+ global run_repl_flag
+
+ for { set arg [lindex $args [set i 0]] } \
+ { [string length $arg] > 0 } \
+ { set arg [lindex $args [incr i]] } {
+ if { [string compare $arg $run_repl_flag] == 0 } {
+ return [lindex $args [expr $i + 1]]
+ }
+ }
+ return ""
+}
+
+proc delete_repl_args { args } {
+ global run_repl_flag
+
+ set ret {}
+
+ for { set arg [lindex $args [set i 0]] } \
+ { [string length $arg] > 0 } \
+ { set arg [lindex $args [incr i]] } {
+ if { [string compare $arg $run_repl_flag] != 0 } {
+ lappend ret $arg
+ } else {
+ incr i
+ }
+ }
+ return $ret
+}
+
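+# For illustration only (the surrounding option names are made up):
+# given {-cachesize 1024 -run_repl mumble -flag}, extract_repl_args
+# returns "mumble" and delete_repl_args returns {-cachesize 1024 -flag}.
+proc repl_args_example { } {
+	set a {-cachesize 1024 -run_repl mumble -flag}
+	set repl [eval extract_repl_args $a]
+	set rest [eval delete_repl_args $a]
+	return [list $repl $rest]
+}
+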
+global elect_serial
+global elections_in_progress
+set elect_serial 0
+
+# Start an election in a sub-process.
+proc start_election { qdir envstring nsites pri timeout {err "none"}} {
+ source ./include.tcl
+ global elect_serial elect_timeout elections_in_progress machids
+
+ incr elect_serial
+
+ set t [open "|$tclsh_path >& $testdir/ELECTION_OUTPUT.$elect_serial" w]
+
+ puts $t "source $test_path/test.tcl"
+ puts $t "replsetup $qdir"
+ foreach i $machids { puts $t "repladd $i" }
+ puts $t "set env_cmd \{$envstring\}"
+ puts $t "set dbenv \[eval \$env_cmd -errfile \
+ $testdir/ELECTION_ERRFILE.$elect_serial -errpfx FAIL: \]"
+# puts "Start election err $err, env $envstring"
+ puts $t "\$dbenv test abort $err"
+ puts $t "set res \[catch \{\$dbenv rep_elect $nsites $pri \
+ $elect_timeout\} ret\]"
+ if { $err != "none" } {
+ puts $t "\$dbenv test abort none"
+ puts $t "set res \[catch \{\$dbenv rep_elect $nsites $pri \
+ $elect_timeout\} ret\]"
+ }
+ flush $t
+
+ set elections_in_progress($elect_serial) $t
+ return $elect_serial
+}
+
+proc close_election { i } {
+ global elections_in_progress
+ set t $elections_in_progress($i)
+ puts $t "\$dbenv close"
+ close $t
+ unset elections_in_progress($i)
+}
+
+proc cleanup_elections { } {
+ global elect_serial elections_in_progress
+
+ for { set i 0 } { $i <= $elect_serial } { incr i } {
+ if { [info exists elections_in_progress($i)] != 0 } {
+ close_election $i
+ }
+ }
+
+ set elect_serial 0
+}
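+
+# A hedged sketch of the intended election lifecycle (the env command
+# string, site count, and priority below are illustrative assumptions):
+# start an election in a child process, let the caller pump messages
+# with replprocessqueue until the election resolves, then reap the
+# child with close_election.
+proc election_example { qdir env_cmd } {
+	global elect_timeout
+
+	set elect_timeout 5000000
+	set eid [start_election $qdir $env_cmd 2 1 $elect_timeout]
+	# ... process messages here until a NEWMASTER or HOLDELECTION
+	# result comes back ...
+	close_election $eid
+}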
diff --git a/storage/bdb/test/rpc001.tcl b/storage/bdb/test/rpc001.tcl
new file mode 100644
index 00000000000..1b65639014f
--- /dev/null
+++ b/storage/bdb/test/rpc001.tcl
@@ -0,0 +1,449 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: rpc001.tcl,v 11.33 2002/07/25 22:57:32 mjc Exp $
+#
+# TEST rpc001
+# TEST Test RPC server timeouts for cursor, txn and env handles.
+# TEST Test RPC specifics, primarily that unsupported functions return
+# TEST errors and such.
+proc rpc001 { } {
+ global __debug_on
+ global __debug_print
+ global errorInfo
+ global rpc_svc
+ source ./include.tcl
+
+ #
+ # First test timeouts on server.
+ #
+ set ttime 5
+ set itime 10
+ puts "Rpc001: Server timeouts: resource $ttime sec, idle $itime sec"
+ if { [string compare $rpc_server "localhost"] == 0 } {
+ set dpid [exec $util_path/$rpc_svc \
+ -h $rpc_testdir -t $ttime -I $itime &]
+ } else {
+ set dpid [exec rsh $rpc_server $rpc_path/$rpc_svc \
+		    -h $rpc_testdir -t $ttime -I $itime &]
+ }
+ puts "\tRpc001.a: Started server, pid $dpid"
+
+ tclsleep 2
+ remote_cleanup $rpc_server $rpc_testdir $testdir
+ puts "\tRpc001.b: Creating environment"
+
+ set testfile "rpc001.db"
+ set home [file tail $rpc_testdir]
+
+ set env [eval {berkdb_env -create -mode 0644 -home $home \
+ -server $rpc_server -client_timeout 10000 -txn}]
+ error_check_good lock_env:open [is_valid_env $env] TRUE
+
+ puts "\tRpc001.c: Opening a database"
+ #
+ # NOTE: the type of database doesn't matter, just use btree.
+ set db [eval {berkdb_open -auto_commit -create -btree -mode 0644} \
+ -env $env $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set curs_list {}
+ set txn_list {}
+ puts "\tRpc001.d: Basic timeout test"
+ puts "\tRpc001.d1: Starting a transaction"
+ set txn [$env txn]
+ error_check_good txn_begin [is_valid_txn $txn $env] TRUE
+ lappend txn_list $txn
+
+ puts "\tRpc001.d2: Open a cursor in that transaction"
+ set dbc [$db cursor -txn $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+ lappend curs_list $dbc
+
+ puts "\tRpc001.d3: Duplicate that cursor"
+ set dbc [$dbc dup]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+ lappend curs_list $dbc
+
+ puts "\tRpc001.d4: Starting a nested transaction"
+ set txn [$env txn -parent $txn]
+ error_check_good txn_begin [is_valid_txn $txn $env] TRUE
+ set txn_list [linsert $txn_list 0 $txn]
+
+ puts "\tRpc001.d5: Create a cursor, no transaction"
+ set dbc [$db cursor]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+ lappend curs_list $dbc
+
+ puts "\tRpc001.d6: Timeout cursor and transactions"
+ set sleeptime [expr $ttime + 2]
+ tclsleep $sleeptime
+
+ #
+	# Perform a generic db operation to cause the timeout routine
+ # to trigger.
+ #
+ set stat [catch {$db stat} ret]
+ error_check_good dbstat $stat 0
+
+ #
+ # Check that every handle we opened above is timed out
+ #
+ foreach c $curs_list {
+ set stat [catch {$c close} ret]
+ error_check_good dbc_close:$c $stat 1
+ error_check_good dbc_timeout:$c \
+ [is_substr $errorInfo "DB_NOSERVER_ID"] 1
+ }
+ foreach t $txn_list {
+ set stat [catch {$t commit} ret]
+ error_check_good txn_commit:$t $stat 1
+ error_check_good txn_timeout:$t \
+ [is_substr $errorInfo "DB_NOSERVER_ID"] 1
+ }
+
+ set txn_list {}
+ set ntxns 8
+ puts "\tRpc001.e: Nested ($ntxns x $ntxns) transaction activity test"
+ puts "\tRpc001.e1: Starting parent transaction"
+ set txn [$env txn]
+ error_check_good txn_begin [is_valid_txn $txn $env] TRUE
+ set txn_list [linsert $txn_list 0 $txn]
+ set last_txn $txn
+ set parent_txn $txn
+
+ #
+ # First set a breadth of 'ntxns'
+ # We need 2 from this set for testing later on. Just set them
+ # up separately first.
+ #
+ puts "\tRpc001.e2: Creating $ntxns child transactions"
+ set child0 [$env txn -parent $parent_txn]
+ error_check_good txn_begin [is_valid_txn $child0 $env] TRUE
+ set child1 [$env txn -parent $parent_txn]
+ error_check_good txn_begin [is_valid_txn $child1 $env] TRUE
+
+ for {set i 2} {$i < $ntxns} {incr i} {
+ set txn [$env txn -parent $parent_txn]
+ error_check_good txn_begin [is_valid_txn $txn $env] TRUE
+ set txn_list [linsert $txn_list 0 $txn]
+ }
+
+ #
+ # Now make one 'ntxns' deeply nested.
+ # Add one more for testing later on separately.
+ #
+ puts "\tRpc001.e3: Creating $ntxns nested child transactions"
+ for {set i 0} {$i < $ntxns} {incr i} {
+ set txn [$env txn -parent $last_txn]
+ error_check_good txn_begin [is_valid_txn $txn $env] TRUE
+ set txn_list [linsert $txn_list 0 $txn]
+ set last_txn $txn
+ }
+ set last_parent $last_txn
+ set last_txn [$env txn -parent $last_parent]
+ error_check_good txn_begin [is_valid_txn $last_txn $env] TRUE
+
+ puts "\tRpc001.e4: Open a cursor in deepest transaction"
+ set dbc [$db cursor -txn $last_txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+
+ puts "\tRpc001.e5: Duplicate that cursor"
+ set dbcdup [$dbc dup]
+ error_check_good db_cursor [is_valid_cursor $dbcdup $db] TRUE
+ lappend curs_list $dbcdup
+
+ puts "\tRpc001.f: Timeout then activate duplicate cursor"
+ tclsleep $sleeptime
+ set stat [catch {$dbcdup close} ret]
+ error_check_good dup_close:$dbcdup $stat 0
+ error_check_good dup_close:$dbcdup $ret 0
+
+ #
+ # Make sure that our parent txn is not timed out. We will
+	# try to begin another child txn using the parent. We expect
+ # that to succeed. Immediately commit that txn.
+ #
+ set stat [catch {$env txn -parent $parent_txn} newchild]
+ error_check_good newchildtxn $stat 0
+ error_check_good newcommit [$newchild commit] 0
+
+ puts "\tRpc001.g: Timeout, then activate cursor"
+ tclsleep $sleeptime
+ set stat [catch {$dbc close} ret]
+ error_check_good dbc_close:$dbc $stat 0
+ error_check_good dbc_close:$dbc $ret 0
+
+ #
+ # Make sure that our parent txn is not timed out. We will
+	# try to begin another child txn using the parent. We expect
+ # that to succeed. Immediately commit that txn.
+ #
+ set stat [catch {$env txn -parent $parent_txn} newchild]
+ error_check_good newchildtxn $stat 0
+ error_check_good newcommit [$newchild commit] 0
+
+ puts "\tRpc001.h: Timeout, then activate child txn"
+ tclsleep $sleeptime
+ set stat [catch {$child0 commit} ret]
+ error_check_good child_commit $stat 0
+ error_check_good child_commit:$child0 $ret 0
+
+	#
+	# Make sure that our nested txn is not timed out. We will
+	# try to begin another child txn using the parent. We expect
+ # that to succeed. Immediately commit that txn.
+ #
+ set stat [catch {$env txn -parent $last_parent} newchild]
+ error_check_good newchildtxn $stat 0
+ error_check_good newcommit [$newchild commit] 0
+
+ puts "\tRpc001.i: Timeout, then activate nested txn"
+ tclsleep $sleeptime
+ set stat [catch {$last_txn commit} ret]
+ error_check_good lasttxn_commit $stat 0
+	error_check_good lasttxn_commit:$last_txn $ret 0
+
+ #
+ # Make sure that our child txn is not timed out. We should
+ # be able to commit it.
+ #
+ set stat [catch {$child1 commit} ret]
+ error_check_good child_commit:$child1 $stat 0
+ error_check_good child_commit:$child1 $ret 0
+
+ #
+ # Clean up. They were inserted in LIFO order, so we should
+ # just be able to commit them all.
+ foreach t $txn_list {
+ set stat [catch {$t commit} ret]
+ error_check_good txn_commit:$t $stat 0
+ error_check_good txn_commit:$t $ret 0
+ }
+
+ set stat [catch {$db close} ret]
+ error_check_good db_close $stat 0
+
+ rpc_timeoutjoin $env "Rpc001.j" $sleeptime 0
+ rpc_timeoutjoin $env "Rpc001.k" $sleeptime 1
+
+ #
+	# We need a 2nd env just to do an op to time out the env.
+ # Make the flags different so we don't end up sharing a handle.
+ #
+ set env1 [eval {berkdb_env -create -mode 0644 -home $home \
+ -server $rpc_server -client_timeout 10000}]
+ error_check_good lock_env:open [is_valid_env $env1] TRUE
+
+ puts "\tRpc001.l: Timeout idle env handle"
+ set sleeptime [expr $itime + 2]
+ tclsleep $sleeptime
+
+ set stat [catch {$env1 close} ret]
+ error_check_good env1_close $stat 0
+
+ set stat [catch {$env close} ret]
+ error_check_good env_close $stat 1
+ error_check_good env_timeout \
+ [is_substr $errorInfo "DB_NOSERVER_ID"] 1
+
+ tclkill $dpid
+}
+
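+# The timeout checks in rpc001 all follow one idiom; here it is
+# distilled into a hypothetical helper.  A timed-out RPC handle fails
+# its next operation and leaves DB_NOSERVER_ID in the Tcl error info.
+proc rpc_expect_timeout_example { handle } {
+	global errorInfo
+
+	set stat [catch {$handle close} ret]
+	error_check_good timeout_stat $stat 1
+	error_check_good timeout_err \
+	    [is_substr $errorInfo "DB_NOSERVER_ID"] 1
+}
+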
+proc rpc_timeoutjoin {env msg sleeptime use_txn} {
+ #
+ # Check join cursors now.
+ #
+ puts -nonewline "\t$msg: Test join cursors and timeouts"
+ if { $use_txn } {
+ puts " (using txns)"
+ set txnflag "-auto_commit"
+ } else {
+ puts " (without txns)"
+ set txnflag ""
+ }
+ #
+ # Set up a simple set of join databases
+ #
+ puts "\t${msg}0: Set up join databases"
+ set fruit {
+ {blue blueberry}
+ {red apple} {red cherry} {red raspberry}
+ {yellow lemon} {yellow pear}
+ }
+ set price {
+ {expen blueberry} {expen cherry} {expen raspberry}
+ {inexp apple} {inexp lemon} {inexp pear}
+ }
+ set dessert {
+ {blueberry cobbler} {cherry cobbler} {pear cobbler}
+ {apple pie} {raspberry pie} {lemon pie}
+ }
+ set fdb [eval {berkdb_open -create -btree -mode 0644} \
+ $txnflag -env $env -dup fruit.db]
+ error_check_good dbopen [is_valid_db $fdb] TRUE
+ set pdb [eval {berkdb_open -create -btree -mode 0644} \
+ $txnflag -env $env -dup price.db]
+ error_check_good dbopen [is_valid_db $pdb] TRUE
+ set ddb [eval {berkdb_open -create -btree -mode 0644} \
+ $txnflag -env $env -dup dessert.db]
+ error_check_good dbopen [is_valid_db $ddb] TRUE
+ foreach kd $fruit {
+ set k [lindex $kd 0]
+ set d [lindex $kd 1]
+ set ret [eval {$fdb put} $txnflag {$k $d}]
+ error_check_good fruit_put $ret 0
+ }
+ error_check_good sync [$fdb sync] 0
+ foreach kd $price {
+ set k [lindex $kd 0]
+ set d [lindex $kd 1]
+ set ret [eval {$pdb put} $txnflag {$k $d}]
+ error_check_good price_put $ret 0
+ }
+ error_check_good sync [$pdb sync] 0
+ foreach kd $dessert {
+ set k [lindex $kd 0]
+ set d [lindex $kd 1]
+ set ret [eval {$ddb put} $txnflag {$k $d}]
+ error_check_good dessert_put $ret 0
+ }
+ error_check_good sync [$ddb sync] 0
+
+ rpc_join $env $msg $sleeptime $fdb $pdb $ddb $use_txn 0
+ rpc_join $env $msg $sleeptime $fdb $pdb $ddb $use_txn 1
+
+ error_check_good ddb:close [$ddb close] 0
+ error_check_good pdb:close [$pdb close] 0
+ error_check_good fdb:close [$fdb close] 0
+}
+
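+# For reference, the join pattern rpc_join exercises, reduced to its
+# core.  This helper is illustrative and assumes the fruit, price and
+# dessert databases built above: position one cursor per criterion,
+# then ask the dessert database for the intersection.
+proc rpc_join_core_example { fdb pdb ddb } {
+	set fdbc [$fdb cursor]
+	set pdbc [$pdb cursor]
+	error_check_bad fset [llength [$fdbc get -set yellow]] 0
+	error_check_bad pset [llength [$pdbc get -set inexp]] 0
+	set jdbc [$ddb join $fdbc $pdbc]
+	# Each get returns the next dessert whose key matches both
+	# cursors; an empty list means the join is exhausted.
+	set ret [$jdbc get]
+	error_check_good jdbc_close [$jdbc close] 0
+	error_check_good pdbc_close [$pdbc close] 0
+	error_check_good fdbc_close [$fdbc close] 0
+	return $ret
+}
+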
+proc rpc_join {env msg sleep fdb pdb ddb use_txn op} {
+ global errorInfo
+
+ #
+ # Start a parent and child transaction. We'll do our join in
+ # the child transaction just to make sure everything gets timed
+ # out correctly.
+ #
+ set curs_list {}
+ set txn_list {}
+ set msgnum [expr $op * 2 + 1]
+ if { $use_txn } {
+ puts "\t$msg$msgnum: Set up txns and join cursor"
+ set txn [$env txn]
+ error_check_good txn_begin [is_valid_txn $txn $env] TRUE
+ set txn_list [linsert $txn_list 0 $txn]
+ set child0 [$env txn -parent $txn]
+ error_check_good txn_begin [is_valid_txn $child0 $env] TRUE
+ set txn_list [linsert $txn_list 0 $child0]
+ set child1 [$env txn -parent $txn]
+ error_check_good txn_begin [is_valid_txn $child1 $env] TRUE
+ set txn_list [linsert $txn_list 0 $child1]
+ set txncmd "-txn $child0"
+ } else {
+ puts "\t$msg$msgnum: Set up join cursor"
+ set txncmd ""
+ }
+
+ #
+	# Start a cursor (using txn child0 in the fruit and price dbs,
+	# if needed).  Just pick something simple to join on, then
+	# call join on the dessert db.
+ #
+ set fkey yellow
+ set pkey inexp
+ set fdbc [eval $fdb cursor $txncmd]
+ error_check_good fdb_cursor [is_valid_cursor $fdbc $fdb] TRUE
+ set ret [$fdbc get -set $fkey]
+ error_check_bad fget:set [llength $ret] 0
+ set k [lindex [lindex $ret 0] 0]
+ error_check_good fget:set:key $k $fkey
+ set curs_list [linsert $curs_list 0 $fdbc]
+
+ set pdbc [eval $pdb cursor $txncmd]
+ error_check_good pdb_cursor [is_valid_cursor $pdbc $pdb] TRUE
+ set ret [$pdbc get -set $pkey]
+ error_check_bad pget:set [llength $ret] 0
+ set k [lindex [lindex $ret 0] 0]
+ error_check_good pget:set:key $k $pkey
+ set curs_list [linsert $curs_list 0 $pdbc]
+
+ set jdbc [$ddb join $fdbc $pdbc]
+ error_check_good join_cursor [is_valid_cursor $jdbc $ddb] TRUE
+ set ret [$jdbc get]
+ error_check_bad jget [llength $ret] 0
+
+ set msgnum [expr $op * 2 + 2]
+ if { $op == 1 } {
+ puts -nonewline "\t$msg$msgnum: Timeout all cursors"
+ if { $use_txn } {
+ puts " and txns"
+ } else {
+ puts ""
+ }
+ } else {
+ puts "\t$msg$msgnum: Timeout, then activate join cursor"
+ }
+
+ tclsleep $sleep
+
+ if { $op == 1 } {
+ #
+		# Perform a generic db operation to cause the timeout routine
+ # to trigger.
+ #
+ set stat [catch {$fdb stat} ret]
+ error_check_good fdbstat $stat 0
+
+ #
+ # Check that join cursor is timed out.
+ #
+ set stat [catch {$jdbc close} ret]
+ error_check_good dbc_close:$jdbc $stat 1
+ error_check_good dbc_timeout:$jdbc \
+ [is_substr $errorInfo "DB_NOSERVER_ID"] 1
+
+ #
+		# Now the server may or may not time out the constituent
+		# cursors when it times out the join cursor. So, just
+		# sleep again, after which they should all be timed out.
+ #
+ tclsleep $sleep
+ set stat [catch {$fdb stat} ret]
+ error_check_good fdbstat $stat 0
+
+ foreach c $curs_list {
+ set stat [catch {$c close} ret]
+ error_check_good dbc_close:$c $stat 1
+ error_check_good dbc_timeout:$c \
+ [is_substr $errorInfo "DB_NOSERVER_ID"] 1
+ }
+
+ foreach t $txn_list {
+ set stat [catch {$t commit} ret]
+ error_check_good txn_commit:$t $stat 1
+ error_check_good txn_timeout:$t \
+ [is_substr $errorInfo "DB_NOSERVER_ID"] 1
+ }
+ } else {
+ set stat [catch {$jdbc get} ret]
+ error_check_good jget.stat $stat 0
+ error_check_bad jget [llength $ret] 0
+ set curs_list [linsert $curs_list 0 $jdbc]
+ foreach c $curs_list {
+ set stat [catch {$c close} ret]
+ error_check_good dbc_close:$c $stat 0
+ error_check_good dbc_close:$c $ret 0
+ }
+
+ foreach t $txn_list {
+ set stat [catch {$t commit} ret]
+ error_check_good txn_commit:$t $stat 0
+ error_check_good txn_commit:$t $ret 0
+ }
+ }
+}
diff --git a/storage/bdb/test/rpc002.tcl b/storage/bdb/test/rpc002.tcl
new file mode 100644
index 00000000000..4b69265bf3a
--- /dev/null
+++ b/storage/bdb/test/rpc002.tcl
@@ -0,0 +1,143 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: rpc002.tcl,v 1.17 2002/07/16 20:53:03 bostic Exp $
+#
+# TEST rpc002
+# TEST Test invalid RPC functions and make sure we error them correctly
+proc rpc002 { } {
+ global __debug_on
+ global __debug_print
+ global errorInfo
+ global rpc_svc
+ source ./include.tcl
+
+ set testfile "rpc002.db"
+ set home [file tail $rpc_testdir]
+ #
+ # First start the server.
+ #
+ puts "Rpc002: Unsupported interface test"
+ if { [string compare $rpc_server "localhost"] == 0 } {
+ set dpid [exec $util_path/$rpc_svc -h $rpc_testdir &]
+ } else {
+ set dpid [exec rsh $rpc_server $rpc_path/$rpc_svc \
+ -h $rpc_testdir &]
+ }
+ puts "\tRpc002.a: Started server, pid $dpid"
+ tclsleep 2
+ remote_cleanup $rpc_server $rpc_testdir $testdir
+
+ puts "\tRpc002.b: Unsupported env options"
+ #
+ # Test each "pre-open" option for env's. These need to be
+ # tested on the 'berkdb_env' line.
+ #
+ set rlist {
+ { "-data_dir $rpc_testdir" "Rpc002.b0"}
+ { "-log_buffer 512" "Rpc002.b1"}
+ { "-log_dir $rpc_testdir" "Rpc002.b2"}
+ { "-log_max 100" "Rpc002.b3"}
+ { "-lock_conflict {3 {0 0 0 0 0 1 0 1 1}}" "Rpc002.b4"}
+ { "-lock_detect default" "Rpc002.b5"}
+ { "-lock_max 100" "Rpc002.b6"}
+ { "-mmapsize 100" "Rpc002.b7"}
+ { "-shm_key 100" "Rpc002.b9"}
+ { "-tmp_dir $rpc_testdir" "Rpc002.b10"}
+ { "-txn_max 100" "Rpc002.b11"}
+ { "-txn_timestamp 100" "Rpc002.b12"}
+ { "-verbose {recovery on}" "Rpc002.b13"}
+ }
+
+ set e "berkdb_env_noerr -create -mode 0644 -home $home \
+ -server $rpc_server -client_timeout 10000 -txn"
+ foreach pair $rlist {
+ set cmd [lindex $pair 0]
+ set msg [lindex $pair 1]
+ puts "\t$msg: $cmd"
+
+ set stat [catch {eval $e $cmd} ret]
+ error_check_good $cmd $stat 1
+ error_check_good $cmd.err \
+ [is_substr $errorInfo "meaningless in an RPC env"] 1
+ }
+
+ #
+ # Open an env with all the subsystems (-txn implies all
+ # the rest)
+ #
+ puts "\tRpc002.c: Unsupported env related interfaces"
+ set env [eval {berkdb_env_noerr -create -mode 0644 -home $home \
+ -server $rpc_server -client_timeout 10000 -txn}]
+ error_check_good envopen [is_valid_env $env] TRUE
+ set dbcmd "berkdb_open_noerr -create -btree -mode 0644 -env $env \
+ $testfile"
+ set db [eval $dbcmd]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ #
+ # Test each "post-open" option relating to envs, txns, locks,
+ # logs and mpools.
+ #
+ set rlist {
+ { " lock_detect default" "Rpc002.c0"}
+ { " lock_get read 1 $env" "Rpc002.c1"}
+ { " lock_id" "Rpc002.c2"}
+ { " lock_stat" "Rpc002.c3"}
+ { " lock_vec 1 {get $env read}" "Rpc002.c4"}
+ { " log_archive" "Rpc002.c5"}
+ { " log_file {0 0}" "Rpc002.c6"}
+ { " log_flush" "Rpc002.c7"}
+ { " log_cursor" "Rpc002.c8"}
+ { " log_stat" "Rpc002.c9"}
+ { " mpool -create -pagesize 512" "Rpc002.c10"}
+ { " mpool_stat" "Rpc002.c11"}
+ { " mpool_sync {0 0}" "Rpc002.c12"}
+ { " mpool_trickle 50" "Rpc002.c13"}
+ { " txn_checkpoint -min 1" "Rpc002.c14"}
+ { " txn_stat" "Rpc002.c15"}
+ }
+
+ foreach pair $rlist {
+ set cmd [lindex $pair 0]
+ set msg [lindex $pair 1]
+ puts "\t$msg: $cmd"
+
+ set stat [catch {eval $env $cmd} ret]
+ error_check_good $cmd $stat 1
+ error_check_good $cmd.err \
+ [is_substr $errorInfo "meaningless in an RPC env"] 1
+ }
+ error_check_good dbclose [$db close] 0
+
+ #
+	# Few unsupported database operations can be tested here,
+	# because most of them are ones the Tcl API does not expose
+	# either, so we have no way to get at them. Test what we can.
+ #
+ puts "\tRpc002.d: Unsupported database related interfaces"
+ #
+ # NOTE: the type of database doesn't matter, just use btree.
+ #
+ puts "\tRpc002.d0: -cachesize"
+ set dbcmd "berkdb_open_noerr -create -btree -mode 0644 -env $env \
+ -cachesize {0 65536 0} $testfile"
+ set stat [catch {eval $dbcmd} ret]
+ error_check_good dbopen_cache $stat 1
+ error_check_good dbopen_cache_err \
+ [is_substr $errorInfo "meaningless in an RPC env"] 1
+
+ puts "\tRpc002.d1: Try to upgrade a database"
+ #
+ # NOTE: the type of database doesn't matter, just use btree.
+ set stat [catch {eval {berkdb upgrade -env} $env $testfile} ret]
+ error_check_good dbupgrade $stat 1
+ error_check_good dbupgrade_err \
+ [is_substr $errorInfo "meaningless in an RPC env"] 1
+
+ error_check_good envclose [$env close] 0
+
+ tclkill $dpid
+}
diff --git a/storage/bdb/test/rpc003.tcl b/storage/bdb/test/rpc003.tcl
new file mode 100644
index 00000000000..76f0dca6c07
--- /dev/null
+++ b/storage/bdb/test/rpc003.tcl
@@ -0,0 +1,166 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: rpc003.tcl,v 11.9 2002/07/16 20:53:03 bostic Exp $
+#
+# Test RPC and secondary indices.
+proc rpc003 { } {
+ source ./include.tcl
+ global dict nsecondaries
+ global rpc_svc
+
+ #
+	# First set up the files. Secondary indices only work read-only
+	# over RPC, so we need to create the databases without RPC first
+	# and then run the checks over RPC.
+ #
+ puts "Rpc003: Secondary indices over RPC"
+ if { [string compare $rpc_server "localhost"] != 0 } {
+ puts "Cannot run to non-local RPC server. Skipping."
+ return
+ }
+ cleanup $testdir NULL
+ puts "\tRpc003.a: Creating local secondary index databases"
+
+ # Primary method/args.
+ set pmethod btree
+ set pomethod [convert_method $pmethod]
+ set pargs ""
+ set methods {dbtree dbtree}
+ set argses [convert_argses $methods ""]
+ set omethods [convert_methods $methods]
+
+ set nentries 500
+
+ puts "\tRpc003.b: ($pmethod/$methods) $nentries equal key/data pairs"
+ set pname "primary003.db"
+ set snamebase "secondary003"
+
+ # Open an environment
+ # XXX if one is not supplied!
+ set env [berkdb_env -create -home $testdir]
+ error_check_good env_open [is_valid_env $env] TRUE
+
+ # Open the primary.
+ set pdb [eval {berkdb_open -create -env} $env $pomethod $pargs $pname]
+ error_check_good primary_open [is_valid_db $pdb] TRUE
+
+ # Open and associate the secondaries
+ set sdbs {}
+ for { set i 0 } { $i < [llength $omethods] } { incr i } {
+ set sdb [eval {berkdb_open -create -env} $env \
+ [lindex $omethods $i] [lindex $argses $i] $snamebase.$i.db]
+ error_check_good second_open($i) [is_valid_db $sdb] TRUE
+
+ error_check_good db_associate($i) \
+ [$pdb associate [callback_n $i] $sdb] 0
+ lappend sdbs $sdb
+ }
+
+ set did [open $dict]
+ for { set n 0 } { [gets $did str] != -1 && $n < $nentries } { incr n } {
+ if { [is_record_based $pmethod] == 1 } {
+ set key [expr $n + 1]
+ set datum $str
+ } else {
+ set key $str
+ gets $did datum
+ }
+ set keys($n) $key
+ set data($n) [pad_data $pmethod $datum]
+
+ set ret [eval {$pdb put} {$key [chop_data $pmethod $datum]}]
+ error_check_good put($n) $ret 0
+ }
+ close $did
+ foreach sdb $sdbs {
+ error_check_good secondary_close [$sdb close] 0
+ }
+ error_check_good primary_close [$pdb close] 0
+ error_check_good env_close [$env close] 0
+
+ #
+ # We have set up our databases, so now start the server and
+ # read them over RPC.
+ #
+ set dpid [exec $util_path/$rpc_svc -h $rpc_testdir &]
+ puts "\tRpc003.c: Started server, pid $dpid"
+ tclsleep 2
+
+ set home [file tail $rpc_testdir]
+ set env [eval {berkdb_env_noerr -create -mode 0644 -home $home \
+ -server $rpc_server}]
+ error_check_good lock_env:open [is_valid_env $env] TRUE
+
+ #
+ # Attempt to send in a NULL callback to associate. It will fail
+ # if the primary and secondary are not both read-only.
+ #
+ set msg "\tRpc003.d"
+ puts "$msg: Using r/w primary and r/w secondary"
+ set popen "berkdb_open_noerr -env $env $pomethod $pargs $pname"
+ set sopen "berkdb_open_noerr -create -env $env \
+ [lindex $omethods 0] [lindex $argses 0] $snamebase.0.db"
+ rpc003_assoc_err $popen $sopen $msg
+
+ set msg "\tRpc003.e"
+ puts "$msg: Using r/w primary and read-only secondary"
+ set popen "berkdb_open_noerr -env $env $pomethod $pargs $pname"
+ set sopen "berkdb_open_noerr -env $env -rdonly \
+ [lindex $omethods 0] [lindex $argses 0] $snamebase.0.db"
+ rpc003_assoc_err $popen $sopen $msg
+
+ set msg "\tRpc003.f"
+ puts "$msg: Using read-only primary and r/w secondary"
+ set popen "berkdb_open_noerr -env $env $pomethod -rdonly $pargs $pname"
+ set sopen "berkdb_open_noerr -create -env $env \
+ [lindex $omethods 0] [lindex $argses 0] $snamebase.0.db"
+ rpc003_assoc_err $popen $sopen $msg
+
+ # Open and associate the secondaries
+ puts "\tRpc003.g: Checking secondaries, both read-only"
+ set pdb [eval {berkdb_open_noerr -env} $env \
+ -rdonly $pomethod $pargs $pname]
+ error_check_good primary_open2 [is_valid_db $pdb] TRUE
+
+ set sdbs {}
+ for { set i 0 } { $i < [llength $omethods] } { incr i } {
+ set sdb [eval {berkdb_open -env} $env -rdonly \
+ [lindex $omethods $i] [lindex $argses $i] $snamebase.$i.db]
+ error_check_good second_open2($i) [is_valid_db $sdb] TRUE
+ error_check_good db_associate2($i) \
+ [eval {$pdb associate} "" $sdb] 0
+ lappend sdbs $sdb
+ }
+ check_secondaries $pdb $sdbs $nentries keys data "Rpc003.h"
+
+ foreach sdb $sdbs {
+ error_check_good secondary_close [$sdb close] 0
+ }
+ error_check_good primary_close [$pdb close] 0
+ error_check_good env_close [$env close] 0
+
+ tclkill $dpid
+}
+
+proc rpc003_assoc_err { popen sopen msg } {
+ set pdb [eval $popen]
+ error_check_good assoc_err_popen [is_valid_db $pdb] TRUE
+
+ puts "$msg.0: NULL callback"
+ set sdb [eval $sopen]
+ error_check_good assoc_err_sopen [is_valid_db $sdb] TRUE
+ set stat [catch {eval {$pdb associate} "" $sdb} ret]
+ error_check_good db_associate:rdonly $stat 1
+ error_check_good db_associate:inval [is_substr $ret invalid] 1
+
+ puts "$msg.1: non-NULL callback"
+ set stat [catch {eval $pdb associate [callback_n 0] $sdb} ret]
+ error_check_good db_associate:callback $stat 1
+ error_check_good db_associate:rpc \
+ [is_substr $ret "not supported in RPC"] 1
+ error_check_good assoc_sclose [$sdb close] 0
+ error_check_good assoc_pclose [$pdb close] 0
+}
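+
+# For contrast with the RPC restrictions above, a local (non-RPC)
+# association sketch; the helper name and database names are
+# illustrative.  Locally a real callback is allowed, whereas over RPC
+# only a NULL callback with read-only handles is accepted.
+proc rpc003_local_assoc_example { env } {
+	set pdb [berkdb_open -create -env $env -btree local_primary.db]
+	set sdb [berkdb_open -create -env $env -btree local_second.db]
+	error_check_good assoc [$pdb associate [callback_n 0] $sdb] 0
+	error_check_good sclose [$sdb close] 0
+	error_check_good pclose [$pdb close] 0
+}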
diff --git a/storage/bdb/test/rpc004.tcl b/storage/bdb/test/rpc004.tcl
new file mode 100644
index 00000000000..ca1462f3a89
--- /dev/null
+++ b/storage/bdb/test/rpc004.tcl
@@ -0,0 +1,76 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: rpc004.tcl,v 11.6 2002/07/16 20:53:03 bostic Exp $
+#
+# TEST rpc004
+# TEST Test RPC server and security
+proc rpc004 { } {
+ global __debug_on
+ global __debug_print
+ global errorInfo
+ global passwd
+ global rpc_svc
+ source ./include.tcl
+
+ puts "Rpc004: RPC server + security"
+ cleanup $testdir NULL
+ if { [string compare $rpc_server "localhost"] == 0 } {
+ set dpid [exec $util_path/$rpc_svc \
+ -h $rpc_testdir -P $passwd &]
+ } else {
+ set dpid [exec rsh $rpc_server $rpc_path/$rpc_svc \
+ -h $rpc_testdir -P $passwd &]
+ }
+ puts "\tRpc004.a: Started server, pid $dpid"
+
+ tclsleep 2
+ remote_cleanup $rpc_server $rpc_testdir $testdir
+ puts "\tRpc004.b: Creating environment"
+
+ set testfile "rpc004.db"
+ set testfile1 "rpc004a.db"
+ set home [file tail $rpc_testdir]
+
+ set env [eval {berkdb_env -create -mode 0644 -home $home \
+ -server $rpc_server -encryptaes $passwd -txn}]
+ error_check_good lock_env:open [is_valid_env $env] TRUE
+
+ puts "\tRpc004.c: Opening a non-encrypted database"
+ #
+ # NOTE: the type of database doesn't matter, just use btree.
+ set db [eval {berkdb_open -auto_commit -create -btree -mode 0644} \
+ -env $env $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ puts "\tRpc004.d: Opening an encrypted database"
+ set db1 [eval {berkdb_open -auto_commit -create -btree -mode 0644} \
+ -env $env -encrypt $testfile1]
+ error_check_good dbopen [is_valid_db $db1] TRUE
+
+ set txn [$env txn]
+ error_check_good txn [is_valid_txn $txn $env] TRUE
+ puts "\tRpc004.e: Put/get on both databases"
+ set key "key"
+ set data "data"
+
+ set ret [$db put -txn $txn $key $data]
+ error_check_good db_put $ret 0
+ set ret [$db get -txn $txn $key]
+ error_check_good db_get $ret [list [list $key $data]]
+ set ret [$db1 put -txn $txn $key $data]
+ error_check_good db1_put $ret 0
+ set ret [$db1 get -txn $txn $key]
+ error_check_good db1_get $ret [list [list $key $data]]
+
+ error_check_good txn_commit [$txn commit] 0
+ error_check_good db_close [$db close] 0
+ error_check_good db1_close [$db1 close] 0
+ error_check_good env_close [$env close] 0
+
+ # Cleanup our environment because it's encrypted
+ remote_cleanup $rpc_server $rpc_testdir $testdir
+ tclkill $dpid
+}
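+
+# The encryption contract rpc004 relies on, in brief (the home
+# directory, password, and file name are assumed arguments): the
+# environment is opened with -encryptaes, and each database opts in
+# per open with -encrypt.
+proc rpc004_encrypt_example { home passwd } {
+	set env [berkdb_env -create -home $home -encryptaes $passwd -txn]
+	# Without -encrypt this database would be stored in plaintext
+	# even inside the encrypted environment.
+	set db [berkdb_open -auto_commit -create -btree \
+	    -env $env -encrypt enc.db]
+	error_check_good db_close [$db close] 0
+	error_check_good env_close [$env close] 0
+}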
diff --git a/storage/bdb/test/rpc005.tcl b/storage/bdb/test/rpc005.tcl
new file mode 100644
index 00000000000..f46e7355e5a
--- /dev/null
+++ b/storage/bdb/test/rpc005.tcl
@@ -0,0 +1,137 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: rpc005.tcl,v 11.4 2002/07/16 20:53:03 bostic Exp $
+#
+# TEST rpc005
+# TEST Test RPC server handle ID sharing
+proc rpc005 { } {
+ global __debug_on
+ global __debug_print
+ global errorInfo
+ global rpc_svc
+ source ./include.tcl
+
+ puts "Rpc005: RPC server handle sharing"
+ if { [string compare $rpc_server "localhost"] == 0 } {
+ set dpid [exec $util_path/$rpc_svc \
+ -h $rpc_testdir &]
+ } else {
+ set dpid [exec rsh $rpc_server $rpc_path/$rpc_svc \
+ -h $rpc_testdir &]
+ }
+ puts "\tRpc005.a: Started server, pid $dpid"
+
+ tclsleep 2
+ remote_cleanup $rpc_server $rpc_testdir $testdir
+ puts "\tRpc005.b: Creating environment"
+
+ set testfile "rpc005.db"
+ set testfile1 "rpc005a.db"
+ set subdb1 "subdb1"
+ set subdb2 "subdb2"
+ set home [file tail $rpc_testdir]
+
+ set env [eval {berkdb_env -create -mode 0644 -home $home \
+ -server $rpc_server -txn}]
+ error_check_good lock_env:open [is_valid_env $env] TRUE
+
+ puts "\tRpc005.c: Compare identical and different configured envs"
+ set env_ident [eval {berkdb_env -home $home \
+ -server $rpc_server -txn}]
+ error_check_good lock_env:open [is_valid_env $env_ident] TRUE
+
+ set env_diff [eval {berkdb_env -home $home \
+ -server $rpc_server -txn nosync}]
+ error_check_good lock_env:open [is_valid_env $env_diff] TRUE
+
+ error_check_good ident:id [$env rpcid] [$env_ident rpcid]
+ error_check_bad diff:id [$env rpcid] [$env_diff rpcid]
+
+ error_check_good envclose [$env_diff close] 0
+ error_check_good envclose [$env_ident close] 0
+
+ puts "\tRpc005.d: Opening a database"
+ set db [eval {berkdb_open -auto_commit -create -btree -mode 0644} \
+ -env $env $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ puts "\tRpc005.e: Compare identical and different configured dbs"
+ set db_ident [eval {berkdb_open -btree} -env $env $testfile]
+ error_check_good dbopen [is_valid_db $db_ident] TRUE
+
+ set db_diff [eval {berkdb_open -btree} -env $env -rdonly $testfile]
+ error_check_good dbopen [is_valid_db $db_diff] TRUE
+
+ set db_diff2 [eval {berkdb_open -btree} -env $env -rdonly $testfile]
+ error_check_good dbopen [is_valid_db $db_diff2] TRUE
+
+ error_check_good ident:id [$db rpcid] [$db_ident rpcid]
+ error_check_bad diff:id [$db rpcid] [$db_diff rpcid]
+ error_check_good ident2:id [$db_diff rpcid] [$db_diff2 rpcid]
+
+ error_check_good db_close [$db_ident close] 0
+ error_check_good db_close [$db_diff close] 0
+ error_check_good db_close [$db_diff2 close] 0
+ error_check_good db_close [$db close] 0
+
+ puts "\tRpc005.f: Compare with a database and subdatabases"
+ set db [eval {berkdb_open -auto_commit -create -btree -mode 0644} \
+ -env $env $testfile1 $subdb1]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set dbid [$db rpcid]
+
+ set db2 [eval {berkdb_open -auto_commit -create -btree -mode 0644} \
+ -env $env $testfile1 $subdb2]
+ error_check_good dbopen [is_valid_db $db2] TRUE
+ set db2id [$db2 rpcid]
+ error_check_bad 2subdb:id $dbid $db2id
+
+ set db_ident [eval {berkdb_open -btree} -env $env $testfile1 $subdb1]
+ error_check_good dbopen [is_valid_db $db_ident] TRUE
+ set identid [$db_ident rpcid]
+
+ set db_ident2 [eval {berkdb_open -btree} -env $env $testfile1 $subdb2]
+ error_check_good dbopen [is_valid_db $db_ident2] TRUE
+ set ident2id [$db_ident2 rpcid]
+
+ set db_diff1 [eval {berkdb_open -btree} -env $env -rdonly \
+ $testfile1 $subdb1]
+ error_check_good dbopen [is_valid_db $db_diff1] TRUE
+ set diff1id [$db_diff1 rpcid]
+
+ set db_diff2 [eval {berkdb_open -btree} -env $env -rdonly \
+ $testfile1 $subdb2]
+ error_check_good dbopen [is_valid_db $db_diff2] TRUE
+ set diff2id [$db_diff2 rpcid]
+
+ set db_diff [eval {berkdb_open -unknown} -env $env -rdonly $testfile1]
+ error_check_good dbopen [is_valid_db $db_diff] TRUE
+ set diffid [$db_diff rpcid]
+
+ set db_diff2a [eval {berkdb_open -btree} -env $env -rdonly \
+ $testfile1 $subdb2]
+ error_check_good dbopen [is_valid_db $db_diff2a] TRUE
+ set diff2aid [$db_diff2a rpcid]
+
+ error_check_good ident:id $dbid $identid
+ error_check_good ident2:id $db2id $ident2id
+ error_check_bad diff:id $dbid $diffid
+ error_check_bad diff2:id $db2id $diffid
+ error_check_bad diff3:id $diff2id $diffid
+ error_check_bad diff4:id $diff1id $diffid
+ error_check_good diff2a:id $diff2id $diff2aid
+
+ error_check_good db_close [$db_ident close] 0
+ error_check_good db_close [$db_ident2 close] 0
+ error_check_good db_close [$db_diff close] 0
+ error_check_good db_close [$db_diff1 close] 0
+ error_check_good db_close [$db_diff2 close] 0
+ error_check_good db_close [$db_diff2a close] 0
+ error_check_good db_close [$db2 close] 0
+ error_check_good db_close [$db close] 0
+ error_check_good env_close [$env close] 0
+ tclkill $dpid
+}
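+
+# The sharing rule rpc005 checks, distilled (the file name is assumed):
+# two compatibly configured opens of the same database share one
+# server-side handle ID, while a differently configured open (read-only
+# here) gets its own.
+proc rpc005_rpcid_example { env } {
+	set db [berkdb_open -create -btree -env $env share.db]
+	set db_ident [berkdb_open -btree -env $env share.db]
+	set db_diff [berkdb_open -btree -env $env -rdonly share.db]
+	error_check_good shared [$db rpcid] [$db_ident rpcid]
+	error_check_bad unshared [$db rpcid] [$db_diff rpcid]
+	error_check_good c3 [$db_diff close] 0
+	error_check_good c2 [$db_ident close] 0
+	error_check_good c1 [$db close] 0
+}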
diff --git a/storage/bdb/test/rsrc001.tcl b/storage/bdb/test/rsrc001.tcl
new file mode 100644
index 00000000000..1d57769fda2
--- /dev/null
+++ b/storage/bdb/test/rsrc001.tcl
@@ -0,0 +1,221 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: rsrc001.tcl,v 11.23 2002/01/11 15:53:33 bostic Exp $
+#
+# TEST rsrc001
+# TEST Recno backing file test. Try different patterns of adding
+# TEST records and making sure that the corresponding file matches.
+proc rsrc001 { } {
+ source ./include.tcl
+
+ puts "Rsrc001: Basic recno backing file writeback tests"
+
+ # We run this test essentially twice, once with a db file
+ # and once without (an in-memory database).
+ set rec1 "This is record 1"
+ set rec2 "This is record 2 This is record 2"
+ set rec3 "This is record 3 This is record 3 This is record 3"
+ set rec4 [replicate "This is record 4 " 512]
+
+ foreach testfile { "$testdir/rsrc001.db" "" } {
+
+ cleanup $testdir NULL
+
+ if { $testfile == "" } {
+ puts "Rsrc001: Testing with in-memory database."
+ } else {
+ puts "Rsrc001: Testing with disk-backed database."
+ }
+
+ # Create backing file for the empty-file test.
+ set oid1 [open $testdir/rsrc.txt w]
+ close $oid1
+
+ puts "\tRsrc001.a: Put to empty file."
+ set db [eval {berkdb_open -create -mode 0644\
+ -recno -source $testdir/rsrc.txt} $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set txn ""
+
+ set ret [eval {$db put} $txn {1 $rec1}]
+ error_check_good put_to_empty $ret 0
+ error_check_good db_close [$db close] 0
+
+ # Now fill out the backing file and create the check file.
+ set oid1 [open $testdir/rsrc.txt a]
+ set oid2 [open $testdir/check.txt w]
+
+ # This one was already put into rsrc.txt.
+ puts $oid2 $rec1
+
+ # These weren't.
+ puts $oid1 $rec2
+ puts $oid2 $rec2
+ puts $oid1 $rec3
+ puts $oid2 $rec3
+ puts $oid1 $rec4
+ puts $oid2 $rec4
+ close $oid1
+ close $oid2
+
+ puts -nonewline "\tRsrc001.b: Read file, rewrite last record;"
+ puts " write it out and diff"
+ set db [eval {berkdb_open -create -mode 0644\
+ -recno -source $testdir/rsrc.txt} $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Read the last record; replace it (but we won't change it).
+ # Then close the file and diff the two files.
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+
+ set rec [$dbc get -last]
+ error_check_good get_last [llength [lindex $rec 0]] 2
+ set key [lindex [lindex $rec 0] 0]
+ set data [lindex [lindex $rec 0] 1]
+
+ # Get the last record from the text file
+ set oid [open $testdir/rsrc.txt]
+ set laststr ""
+ while { [gets $oid str] != -1 } {
+ set laststr $str
+ }
+ close $oid
+ set data [sanitize_record $data]
+ error_check_good getlast $data $laststr
+
+ set ret [eval {$db put} $txn {$key $data}]
+ error_check_good replace_last $ret 0
+
+ error_check_good curs_close [$dbc close] 0
+ error_check_good db_sync [$db sync] 0
+ error_check_good db_sync [$db sync] 0
+ error_check_good \
+ Rsrc001:diff($testdir/rsrc.txt,$testdir/check.txt) \
+ [filecmp $testdir/rsrc.txt $testdir/check.txt] 0
+
+ puts -nonewline "\tRsrc001.c: "
+ puts "Append some records in tree and verify in file."
+ set oid [open $testdir/check.txt a]
+ for {set i 1} {$i < 10} {incr i} {
+ set rec [replicate "New Record $i" $i]
+ puts $oid $rec
+ incr key
+ set ret [eval {$db put} $txn {-append $rec}]
+ error_check_good put_append $ret $key
+ }
+ error_check_good db_sync [$db sync] 0
+ error_check_good db_sync [$db sync] 0
+ close $oid
+ set ret [filecmp $testdir/rsrc.txt $testdir/check.txt]
+ error_check_good \
+ Rsrc001:diff($testdir/{rsrc.txt,check.txt}) $ret 0
+
+ puts "\tRsrc001.d: Append by record number"
+ set oid [open $testdir/check.txt a]
+ for {set i 1} {$i < 10} {incr i} {
+ set rec [replicate "New Record (set 2) $i" $i]
+ puts $oid $rec
+ incr key
+ set ret [eval {$db put} $txn {$key $rec}]
+ error_check_good put_byno $ret 0
+ }
+
+ error_check_good db_sync [$db sync] 0
+ error_check_good db_sync [$db sync] 0
+ close $oid
+ set ret [filecmp $testdir/rsrc.txt $testdir/check.txt]
+ error_check_good \
+ Rsrc001:diff($testdir/{rsrc.txt,check.txt}) $ret 0
+
+ puts "\tRsrc001.e: Put beyond end of file."
+ set oid [open $testdir/check.txt a]
+ for {set i 1} {$i < 10} {incr i} {
+ puts $oid ""
+ incr key
+ }
+ set rec "Last Record"
+ puts $oid $rec
+ incr key
+
+ set ret [eval {$db put} $txn {$key $rec}]
+ error_check_good put_byno $ret 0
+
+ puts "\tRsrc001.f: Put beyond end of file, after reopen."
+
+ error_check_good db_close [$db close] 0
+ set db [eval {berkdb_open -create -mode 0644\
+ -recno -source $testdir/rsrc.txt} $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set rec "Last record with reopen"
+ puts $oid $rec
+
+ incr key
+ set ret [eval {$db put} $txn {$key $rec}]
+ error_check_good put_byno_with_reopen $ret 0
+
+ puts "\tRsrc001.g:\
+ Put several beyond end of file, after reopen with snapshot."
+ error_check_good db_close [$db close] 0
+ set db [eval {berkdb_open -create -mode 0644\
+ -snapshot -recno -source $testdir/rsrc.txt} $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set rec "Really really last record with reopen"
+ puts $oid ""
+ puts $oid ""
+ puts $oid ""
+ puts $oid $rec
+
+ incr key
+ incr key
+ incr key
+ incr key
+
+ set ret [eval {$db put} $txn {$key $rec}]
+ error_check_good put_byno_with_reopen $ret 0
+
+ error_check_good db_sync [$db sync] 0
+ error_check_good db_sync [$db sync] 0
+
+ close $oid
+ set ret [filecmp $testdir/rsrc.txt $testdir/check.txt]
+ error_check_good \
+ Rsrc001:diff($testdir/{rsrc.txt,check.txt}) $ret 0
+
+ puts "\tRsrc001.h: Verify proper syncing of changes on close."
+ error_check_good Rsrc001:db_close [$db close] 0
+ set db [eval {berkdb_open -create -mode 0644 -recno \
+ -source $testdir/rsrc.txt} $testfile]
+ set oid [open $testdir/check.txt a]
+ for {set i 1} {$i < 10} {incr i} {
+ set rec [replicate "New Record $i" $i]
+ puts $oid $rec
+ set ret [eval {$db put} $txn {-append $rec}]
+ # Don't bother checking return; we don't know what
+ # the key number is, and we'll pick up a failure
+ # when we compare.
+ }
+ error_check_good Rsrc001:db_close [$db close] 0
+ close $oid
+ set ret [filecmp $testdir/rsrc.txt $testdir/check.txt]
+ error_check_good Rsrc001:diff($testdir/{rsrc,check}.txt) $ret 0
+ }
+}
+
+# Strip CRs from a record.
+# Needed on Windows when a file is created as text (with CR/LF)
+# but read as binary (where CR is read as a separate character)
+proc sanitize_record { rec } {
+ source ./include.tcl
+
+ if { $is_windows_test != 1 } {
+ return $rec
+ }
+ regsub -all \15 $rec "" data
+ return $data
+}
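+
+# The backing-file mechanism under test, as a standalone sketch (the
+# paths and record text are assumed): lines of the flat text file
+# appear as recno records, and syncing or closing the database writes
+# tree-side changes back to the text file.
+proc rsrc_backing_example { dir } {
+	set oid [open $dir/backing.txt w]
+	puts $oid "first record"
+	close $oid
+	set db [berkdb_open -create -mode 0644 \
+	    -recno -source $dir/backing.txt $dir/backing.db]
+	# Record 1 comes straight from the text file.
+	error_check_good rec [$db get 1] [list [list 1 "first record"]]
+	error_check_good sync [$db sync] 0
+	error_check_good close [$db close] 0
+}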
diff --git a/storage/bdb/test/rsrc002.tcl b/storage/bdb/test/rsrc002.tcl
new file mode 100644
index 00000000000..0cb3cf752e6
--- /dev/null
+++ b/storage/bdb/test/rsrc002.tcl
@@ -0,0 +1,66 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: rsrc002.tcl,v 11.14 2002/01/11 15:53:33 bostic Exp $
+#
+# TEST rsrc002
+# TEST Recno backing file test #2: test of set_re_delim. Specify a backing
+# TEST file with colon-delimited records, and make sure they are correctly
+# TEST interpreted.
+proc rsrc002 { } {
+ source ./include.tcl
+
+ puts "Rsrc002: Alternate variable-length record delimiters."
+
+ # We run this test essentially twice, once with a db file
+ # and once without (an in-memory database).
+ foreach testfile { "$testdir/rsrc002.db" "" } {
+
+ cleanup $testdir NULL
+
+ # Create the starting files
+ set oid1 [open $testdir/rsrc.txt w]
+ set oid2 [open $testdir/check.txt w]
+ puts -nonewline $oid1 "ostrich:emu:kiwi:moa:cassowary:rhea:"
+ puts -nonewline $oid2 "ostrich:emu:kiwi:penguin:cassowary:rhea:"
+ close $oid1
+ close $oid2
+
+ if { $testfile == "" } {
+ puts "Rsrc002: Testing with in-memory database."
+ } else {
+ puts "Rsrc002: Testing with disk-backed database."
+ }
+
+ puts "\tRsrc002.a: Read file, verify correctness."
+ set db [eval {berkdb_open -create -mode 0644 -delim 58 \
+ -recno -source $testdir/rsrc.txt} $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Read the last record; replace it (but we won't change it).
+ # Then close the file and diff the two files.
+ set txn ""
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+
+ set rec [$dbc get -first]
+ error_check_good get_first $rec [list [list 1 "ostrich"]]
+ set rec [$dbc get -next]
+ error_check_good get_next $rec [list [list 2 "emu"]]
+
+ puts "\tRsrc002.b: Write record, verify correctness."
+
+ eval {$dbc get -set 4}
+ set ret [$dbc put -current "penguin"]
+ error_check_good dbc_put $ret 0
+
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good db_close [$db close] 0
+
+ error_check_good \
+ Rsrc002:diff($testdir/rsrc.txt,$testdir/check.txt) \
+ [filecmp $testdir/rsrc.txt $testdir/check.txt] 0
+ }
+}
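+
+# Note that -delim takes the delimiter's ASCII code, so the 58 above
+# means ':'.  A tiny standalone sketch of the same idea (paths
+# assumed):
+proc rsrc_delim_example { dir } {
+	set oid [open $dir/colon.txt w]
+	puts -nonewline $oid "a:b:c:"
+	close $oid
+	set db [berkdb_open -create -mode 0644 -delim 58 \
+	    -recno -source $dir/colon.txt $dir/colon.db]
+	error_check_good rec2 [$db get 2] [list [list 2 "b"]]
+	error_check_good close [$db close] 0
+}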
diff --git a/storage/bdb/test/rsrc003.tcl b/storage/bdb/test/rsrc003.tcl
new file mode 100644
index 00000000000..f357a1e7f80
--- /dev/null
+++ b/storage/bdb/test/rsrc003.tcl
@@ -0,0 +1,173 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: rsrc003.tcl,v 11.5 2002/01/11 15:53:33 bostic Exp $
+#
+# TEST rsrc003
+# TEST Recno backing file test. Try different patterns of adding
+# TEST records and making sure that the corresponding file matches.
+proc rsrc003 { } {
+ source ./include.tcl
+ global fixed_len
+
+ puts "Rsrc003: Basic recno backing file writeback tests fixed length"
+
+	# We run this test essentially twice, once with a db file
+	# and once without (an in-memory database), and run each
+	# variant with both small and big fixed-length records.
+ set rec1 "This is record 1"
+ set rec2 "This is record 2"
+ set rec3 "This is record 3"
+ set bigrec1 [replicate "This is record 1 " 512]
+ set bigrec2 [replicate "This is record 2 " 512]
+ set bigrec3 [replicate "This is record 3 " 512]
+
+ set orig_fixed_len $fixed_len
+ set rlist {
+ {{$rec1 $rec2 $rec3} "small records" }
+ {{$bigrec1 $bigrec2 $bigrec3} "large records" }}
+
+ foreach testfile { "$testdir/rsrc003.db" "" } {
+
+ foreach rec $rlist {
+ cleanup $testdir NULL
+
+ set recs [lindex $rec 0]
+ set msg [lindex $rec 1]
+ # Create the starting files
+ # Note that for the rest of the test, we are going
+ # to append a LF when we 'put' via DB to maintain
+ # file structure and allow us to use 'gets'.
+ set oid1 [open $testdir/rsrc.txt w]
+ set oid2 [open $testdir/check.txt w]
+ foreach record $recs {
+ set r [subst $record]
+ set fixed_len [string length $r]
+ puts $oid1 $r
+ puts $oid2 $r
+ }
+ close $oid1
+ close $oid2
+
+ set reclen [expr $fixed_len + 1]
+ if { $reclen > [string length $rec1] } {
+ set repl 512
+ } else {
+ set repl 2
+ }
+ if { $testfile == "" } {
+ puts \
+"Rsrc003: Testing with in-memory database with $msg."
+ } else {
+ puts \
+"Rsrc003: Testing with disk-backed database with $msg."
+ }
+
+ puts -nonewline \
+ "\tRsrc003.a: Read file, rewrite last record;"
+ puts " write it out and diff"
+ set db [eval {berkdb_open -create -mode 0644 -recno \
+ -len $reclen -source $testdir/rsrc.txt} $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Read the last record; replace it (don't change it).
+ # Then close the file and diff the two files.
+ set txn ""
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor \
+ [is_valid_cursor $dbc $db] TRUE
+
+ set rec [$dbc get -last]
+ error_check_good get_last [llength [lindex $rec 0]] 2
+ set key [lindex [lindex $rec 0] 0]
+ set data [lindex [lindex $rec 0] 1]
+
+ # Get the last record from the text file
+ set oid [open $testdir/rsrc.txt]
+ set laststr ""
+ while { [gets $oid str] != -1 } {
+ append str \12
+ set laststr $str
+ }
+ close $oid
+ set data [sanitize_record $data]
+ error_check_good getlast $data $laststr
+
+ set ret [eval {$db put} $txn {$key $data}]
+ error_check_good replace_last $ret 0
+
+ error_check_good curs_close [$dbc close] 0
+ error_check_good db_sync [$db sync] 0
+ error_check_good db_sync [$db sync] 0
+ error_check_good \
+ diff1($testdir/rsrc.txt,$testdir/check.txt) \
+ [filecmp $testdir/rsrc.txt $testdir/check.txt] 0
+
+ puts -nonewline "\tRsrc003.b: "
+ puts "Append some records in tree and verify in file."
+ set oid [open $testdir/check.txt a]
+ for {set i 1} {$i < 10} {incr i} {
+ set rec [chop_data -frecno [replicate \
+ "This is New Record $i" $repl]]
+ puts $oid $rec
+ append rec \12
+ incr key
+ set ret [eval {$db put} $txn {-append $rec}]
+ error_check_good put_append $ret $key
+ }
+ error_check_good db_sync [$db sync] 0
+ error_check_good db_sync [$db sync] 0
+ close $oid
+ set ret [filecmp $testdir/rsrc.txt $testdir/check.txt]
+ error_check_good \
+ diff2($testdir/{rsrc.txt,check.txt}) $ret 0
+
+ puts "\tRsrc003.c: Append by record number"
+ set oid [open $testdir/check.txt a]
+ for {set i 1} {$i < 10} {incr i} {
+ set rec [chop_data -frecno [replicate \
+ "New Record (set 2) $i" $repl]]
+ puts $oid $rec
+ append rec \12
+ incr key
+ set ret [eval {$db put} $txn {$key $rec}]
+ error_check_good put_byno $ret 0
+ }
+
+ error_check_good db_sync [$db sync] 0
+ error_check_good db_sync [$db sync] 0
+ close $oid
+ set ret [filecmp $testdir/rsrc.txt $testdir/check.txt]
+ error_check_good \
+ diff3($testdir/{rsrc.txt,check.txt}) $ret 0
+
+ puts \
+"\tRsrc003.d: Verify proper syncing of changes on close."
+ error_check_good Rsrc003:db_close [$db close] 0
+ set db [eval {berkdb_open -create -mode 0644 -recno \
+ -len $reclen -source $testdir/rsrc.txt} $testfile]
+ set oid [open $testdir/check.txt a]
+ for {set i 1} {$i < 10} {incr i} {
+ set rec [chop_data -frecno [replicate \
+ "New Record (set 3) $i" $repl]]
+ puts $oid $rec
+ append rec \12
+ set ret [eval {$db put} $txn {-append $rec}]
+				# Don't bother checking the return; we
+				# don't know what the key number is, and
+				# we'll pick up a failure when we compare.
+ }
+ error_check_good Rsrc003:db_close [$db close] 0
+ close $oid
+ set ret [filecmp $testdir/rsrc.txt $testdir/check.txt]
+ error_check_good \
+ diff5($testdir/{rsrc,check}.txt) $ret 0
+ }
+ }
+ set fixed_len $orig_fixed_len
+ return
+}
diff --git a/storage/bdb/test/rsrc004.tcl b/storage/bdb/test/rsrc004.tcl
new file mode 100644
index 00000000000..f6c2f997eb8
--- /dev/null
+++ b/storage/bdb/test/rsrc004.tcl
@@ -0,0 +1,52 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: rsrc004.tcl,v 11.3 2002/01/11 15:53:33 bostic Exp $
+#
+# TEST rsrc004
+# TEST Recno backing file test for EOF-terminated records.
+proc rsrc004 { } {
+ source ./include.tcl
+
+ foreach isfixed { 0 1 } {
+ cleanup $testdir NULL
+
+ # Create the backing text file.
+ set oid1 [open $testdir/rsrc.txt w]
+ if { $isfixed == 1 } {
+ puts -nonewline $oid1 "record 1xxx"
+ puts -nonewline $oid1 "record 2xxx"
+ } else {
+ puts $oid1 "record 1xxx"
+ puts $oid1 "record 2xxx"
+ }
+ puts -nonewline $oid1 "record 3"
+ close $oid1
+
+ set args "-create -mode 0644 -recno -source $testdir/rsrc.txt"
+ if { $isfixed == 1 } {
+ append args " -len [string length "record 1xxx"]"
+ set match "record 3 "
+ puts "Rsrc004: EOF-terminated recs: fixed length"
+ } else {
+ puts "Rsrc004: EOF-terminated recs: variable length"
+ set match "record 3"
+ }
+
+ puts "\tRsrc004.a: Read file, verify correctness."
+ set db [eval berkdb_open $args "$testdir/rsrc004.db"]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Read the last record
+ set dbc [eval {$db cursor} ""]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+
+ set rec [$dbc get -last]
+ error_check_good get_last $rec [list [list 3 $match]]
+
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good db_close [$db close] 0
+ }
+}
diff --git a/storage/bdb/test/scr001/chk.code b/storage/bdb/test/scr001/chk.code
new file mode 100644
index 00000000000..eb01d8614b3
--- /dev/null
+++ b/storage/bdb/test/scr001/chk.code
@@ -0,0 +1,37 @@
+#!/bin/sh -
+#
+# $Id: chk.code,v 1.10 2002/02/04 16:03:26 bostic Exp $
+#
+# Check to make sure that the code samples in the documents build.
+
+d=../..
+
+[ -d $d/docs_src ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+[ -f ../libdb.a ] || (cd .. && make libdb.a) || {
+ echo 'FAIL: unable to find or build libdb.a'
+ exit 1
+}
+
+for i in `find $d/docs_src -name '*.cs'`; do
+ echo " compiling $i"
+ sed -e 's/m4_include(\(.*\))/#include <\1>/g' \
+ -e 's/m4_[a-z]*[(\[)]*//g' \
+ -e 's/(\[//g' \
+ -e '/argv/!s/])//g' \
+ -e 's/dnl//g' \
+ -e 's/__GT__/>/g' \
+ -e 's/__LB__/[/g' \
+ -e 's/__LT__/</g' \
+ -e 's/__RB__/]/g' < $i > t.c
+ if cc -Wall -Werror -I.. t.c ../libdb.a -o t; then
+ :
+ else
+ echo "FAIL: unable to compile $i"
+ exit 1
+ fi
+done
+
+exit 0
diff --git a/storage/bdb/test/scr002/chk.def b/storage/bdb/test/scr002/chk.def
new file mode 100644
index 00000000000..7d5e6670f63
--- /dev/null
+++ b/storage/bdb/test/scr002/chk.def
@@ -0,0 +1,64 @@
+#!/bin/sh -
+#
+# $Id: chk.def,v 1.9 2002/03/27 04:32:57 bostic Exp $
+#
+# Check to make sure we haven't forgotten to add any interfaces
+# to the Win32 libdb.def file.
+
+d=../..
+
+# Test must be run from the top-level directory, not from a test directory.
+[ -f $d/LICENSE ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+
+f=$d/build_win32/libdb.def
+t1=__1
+t2=__2
+
+exitv=0
+
+sed '/; /d' $f |
+ egrep @ |
+ awk '{print $1}' |
+ sed -e '/db_xa_switch/d' \
+ -e '/^__/d' -e '/^;/d' |
+ sort > $t1
+
+egrep __P $d/dbinc_auto/ext_prot.in |
+ sed '/^[a-z]/!d' |
+ awk '{print $2}' |
+ sed 's/^\*//' |
+ sed '/^__/d' | sort > $t2
+
+if cmp -s $t1 $t2 ; then
+ :
+else
+ echo "<<< libdb.def >>> DB include files"
+ diff $t1 $t2
+ echo "FAIL: missing items in libdb.def file."
+ exitv=1
+fi
+
+# Check to make sure we don't have any extras in the libdb.def file.
+sed '/; /d' $f |
+ egrep @ |
+ awk '{print $1}' |
+ sed -e '/__db_global_values/d' > $t1
+
+for i in `cat $t1`; do
+ if egrep $i $d/*/*.c > /dev/null; then
+ :
+ else
+ echo "$f: $i not found in DB sources"
+ fi
+done > $t2
+
+test -s $t2 && {
+ cat $t2
+ echo "FAIL: found unnecessary items in libdb.def file."
+ exitv=1
+}
+
+exit $exitv
diff --git a/storage/bdb/test/scr003/chk.define b/storage/bdb/test/scr003/chk.define
new file mode 100644
index 00000000000..f73355eddf6
--- /dev/null
+++ b/storage/bdb/test/scr003/chk.define
@@ -0,0 +1,77 @@
+#!/bin/sh -
+#
+# $Id: chk.define,v 1.21 2002/03/27 04:32:58 bostic Exp $
+#
+# Check to make sure that all #defines are actually used.
+
+d=../..
+
+[ -f $d/LICENSE ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+
+exitv=0
+t1=__1
+t2=__2
+t3=__3
+
+egrep '^#define' $d/dbinc/*.h $d/dbinc/*.in |
+ sed -e '/db_185.in/d' -e '/xa.h/d' |
+ awk '{print $2}' |
+ sed -e '/^B_DELETE/d' \
+ -e '/^B_MAX/d' \
+ -e '/^CIRCLEQ_/d' \
+ -e '/^DB_BTREEOLDVER/d' \
+ -e '/^DB_HASHOLDVER/d' \
+ -e '/^DB_LOCKVERSION/d' \
+ -e '/^DB_MAX_PAGES/d' \
+ -e '/^DB_QAMOLDVER/d' \
+ -e '/^DB_TXNVERSION/d' \
+ -e '/^DB_UNUSED/d' \
+ -e '/^DEFINE_DB_CLASS/d' \
+ -e '/^HASH_UNUSED/d' \
+ -e '/^LIST_/d' \
+ -e '/^LOG_OP/d' \
+ -e '/^MINFILL/d' \
+ -e '/^MUTEX_FIELDS/d' \
+ -e '/^NCACHED2X/d' \
+ -e '/^NCACHED30/d' \
+ -e '/^PAIR_MASK/d' \
+ -e '/^P_16_COPY/d' \
+ -e '/^P_32_COPY/d' \
+ -e '/^P_32_SWAP/d' \
+ -e '/^P_TO_UINT16/d' \
+ -e '/^QPAGE_CHKSUM/d' \
+ -e '/^QPAGE_NORMAL/d' \
+ -e '/^QPAGE_SEC/d' \
+ -e '/^SH_CIRCLEQ_/d' \
+ -e '/^SH_LIST_/d' \
+ -e '/^SH_TAILQ_/d' \
+ -e '/^SIZEOF_PAGE/d' \
+ -e '/^TAILQ_/d' \
+ -e '/^WRAPPED_CLASS/d' \
+ -e '/^__BIT_TYPES_DEFINED__/d' \
+ -e '/^__DBC_INTERNAL/d' \
+ -e '/^i_/d' \
+ -e '/_H_/d' \
+ -e 's/(.*//' | sort > $t1
+
+find $d -name '*.c' -o -name '*.cpp' > $t2
+for i in `cat $t1`; do
+ if egrep -w $i `cat $t2` > /dev/null; then
+ :;
+ else
+ f=`egrep -l "#define.*$i" $d/dbinc/*.h $d/dbinc/*.in |
+ sed 's;\.\.\/\.\.\/dbinc/;;' | tr -s "[:space:]" " "`
+ echo "FAIL: $i: $f"
+ fi
+done | sort -k 2 > $t3
+
+test -s $t3 && {
+ cat $t3
+ echo "FAIL: found unused #defines"
+ exit 1
+}
+
+exit $exitv
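
To rerun the core test for a single suspect name by hand, the same egrep the loop performs is enough; a sketch, with a hypothetical macro name:

    d=../..
    name=DB_CREATE    # hypothetical: any macro extracted into $t1
    if egrep -w $name `find $d -name '*.c' -o -name '*.cpp'` > /dev/null; then
            echo "$name: used"
    else
            echo "FAIL: $name appears unused"
    fi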
diff --git a/storage/bdb/test/scr004/chk.javafiles b/storage/bdb/test/scr004/chk.javafiles
new file mode 100644
index 00000000000..d30c5e3e779
--- /dev/null
+++ b/storage/bdb/test/scr004/chk.javafiles
@@ -0,0 +1,31 @@
+#!/bin/sh -
+#
+# $Id: chk.javafiles,v 1.5 2002/01/30 19:50:52 bostic Exp $
+#
+# Check to make sure we haven't forgotten to add any Java files to the list
+# of source files in the Makefile.
+
+d=../..
+
+[ -f $d/LICENSE ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+
+f=$d/dist/Makefile.in
+j=$d/java/src/com/sleepycat
+
+t1=__1
+t2=__2
+
+find $j/db/ $j/examples $d/rpc_server/java -name \*.java -print |
+ sed -e 's/^.*\///' | sort > $t1
+tr ' \t' '\n' < $f | sed -e '/\.java$/!d' -e 's/^.*\///' | sort > $t2
+
+cmp $t1 $t2 > /dev/null || {
+ echo "<<< java source files >>> Makefile"
+ diff $t1 $t2
+ exit 1
+}
+
+exit 0
diff --git a/storage/bdb/test/scr005/chk.nl b/storage/bdb/test/scr005/chk.nl
new file mode 100644
index 00000000000..47c7ff74d4b
--- /dev/null
+++ b/storage/bdb/test/scr005/chk.nl
@@ -0,0 +1,113 @@
+#!/bin/sh -
+#
+# $Id: chk.nl,v 1.6 2002/01/07 15:12:12 bostic Exp $
+#
+# Check to make sure there are no trailing periods or newlines in __db_err calls.
+
+d=../..
+
+[ -f $d/README ] || {
+ echo "FAIL: chk.nl can't find the source directory."
+ exit 1
+}
+
+cat << END_OF_CODE > t.c
+#include <sys/types.h>
+
+#include <errno.h>
+#include <stdio.h>
+#include <string.h>
+
+int chk(FILE *, char *);
+
+int
+main(argc, argv)
+ int argc;
+ char *argv[];
+{
+ FILE *fp;
+ int exitv;
+
+ for (exitv = 0; *++argv != NULL;) {
+ if ((fp = fopen(*argv, "r")) == NULL) {
+ fprintf(stderr, "%s: %s\n", *argv, strerror(errno));
+ return (1);
+ }
+ if (chk(fp, *argv))
+ exitv = 1;
+ (void)fclose(fp);
+ }
+ return (exitv);
+}
+
+int
+chk(fp, name)
+ FILE *fp;
+ char *name;
+{
+ int ch, exitv, line, q;
+
+ exitv = 0;
+ for (ch = 'a', line = 1;;) {
+ if ((ch = getc(fp)) == EOF)
+ return (exitv);
+ if (ch == '\n') {
+ ++line;
+ continue;
+ }
+ if (ch != '_') continue;
+ if ((ch = getc(fp)) != '_') continue;
+ if ((ch = getc(fp)) != 'd') continue;
+ if ((ch = getc(fp)) != 'b') continue;
+ if ((ch = getc(fp)) != '_') continue;
+ if ((ch = getc(fp)) != 'e') continue;
+ if ((ch = getc(fp)) != 'r') continue;
+ if ((ch = getc(fp)) != 'r') continue;
+ while ((ch = getc(fp)) != '"') {
+ if (ch == EOF)
+ return (exitv);
+ if (ch == '\n')
+ ++line;
+ }
+ while ((ch = getc(fp)) != '"')
+ switch (ch) {
+ case EOF:
+ return (exitv);
+ case '\\n':
+ ++line;
+ break;
+ case '.':
+ if ((ch = getc(fp)) != '"')
+ ungetc(ch, fp);
+ else {
+ fprintf(stderr,
+ "%s: <period> at line %d\n", name, line);
+ exitv = 1;
+ }
+ break;
+ case '\\\\':
+ if ((ch = getc(fp)) != 'n')
+ ungetc(ch, fp);
+ else if ((ch = getc(fp)) != '"')
+ ungetc(ch, fp);
+ else {
+ fprintf(stderr,
+ "%s: <newline> at line %d\n", name, line);
+ exitv = 1;
+ }
+ break;
+ }
+ }
+ return (exitv);
+}
+END_OF_CODE
+
+cc t.c -o t
+if ./t $d/*/*.[ch] $d/*/*.cpp $d/*/*.in ; then
+ :
+else
+ echo "FAIL: found __db_err calls ending with periods/newlines."
+ exit 1
+fi
+
+exit 0
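
chk.nl builds its checker on the fly: a heredoc emits a small C program, cc compiles it, and the binary is run over the tree. Note the unquoted END_OF_CODE delimiter means the shell strips one level of backslashes, which is why the scanner's source spells '\n' as '\\n'. The same pattern in miniature, with a quoted delimiter so the text passes through verbatim:

    cat << 'EOF' > __t.c
    #include <stdio.h>
    int main(void) { printf("generated checker ran\n"); return (0); }
    EOF
    cc __t.c -o __t && ./__t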
diff --git a/storage/bdb/test/scr006/chk.offt b/storage/bdb/test/scr006/chk.offt
new file mode 100644
index 00000000000..6800268d2a2
--- /dev/null
+++ b/storage/bdb/test/scr006/chk.offt
@@ -0,0 +1,36 @@
+#!/bin/sh -
+#
+# $Id: chk.offt,v 1.9 2001/10/26 13:40:15 bostic Exp $
+#
+# Make sure that no off_t's have snuck into the release.
+
+d=../..
+
+[ -f $d/LICENSE ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+
+t=__1
+
+egrep -w off_t $d/*/*.[ch] $d/*/*.in |
+sed -e "/#undef off_t/d" \
+ -e "/mp_fopen.c:.*can't use off_t's here/d" \
+ -e "/mp_fopen.c:.*size or type off_t's or/d" \
+ -e "/mp_fopen.c:.*where an off_t is 32-bits/d" \
+ -e "/mutex\/tm.c:/d" \
+ -e "/os_map.c:.*(off_t)0))/d" \
+ -e "/os_rw.c:.*(off_t)db_iop->pgno/d" \
+ -e "/os_seek.c:.*off_t offset;/d" \
+ -e "/os_seek.c:.*offset = /d" \
+ -e "/test_perf\/perf_misc.c:/d" \
+ -e "/test_server\/dbs.c:/d" \
+ -e "/test_vxworks\/vx_mutex.c:/d" > $t
+
+test -s $t && {
+ cat $t
+ echo "FAIL: found questionable off_t usage"
+ exit 1
+}
+
+exit 0
diff --git a/storage/bdb/test/scr007/chk.proto b/storage/bdb/test/scr007/chk.proto
new file mode 100644
index 00000000000..ae406fa23fe
--- /dev/null
+++ b/storage/bdb/test/scr007/chk.proto
@@ -0,0 +1,45 @@
+#!/bin/sh -
+#
+# $Id: chk.proto,v 1.8 2002/03/27 04:32:59 bostic Exp $
+#
+# Check to make sure that prototypes are actually needed.
+
+d=../..
+
+[ -f $d/LICENSE ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+
+t1=__1
+t2=__2
+t3=__3
+
+egrep '__P' $d/dbinc_auto/*.h |
+ sed -e 's/[ ][ ]*__P.*//' \
+ -e 's/^.*[ *]//' \
+ -e '/__db_cprint/d' \
+ -e '/__db_lprint/d' \
+ -e '/__db_noop_log/d' \
+ -e '/__db_prnpage/d' \
+ -e '/__db_txnlist_print/d' \
+ -e '/__db_util_arg/d' \
+ -e '/__ham_func2/d' \
+ -e '/__ham_func3/d' \
+ -e '/_getpgnos/d' \
+ -e '/_print$/d' \
+ -e '/_read$/d' > $t1
+
+find $d -name '*.in' -o -name '*.[ch]' -o -name '*.cpp' > $t2
+for i in `cat $t1`; do
+ c=$(egrep -low $i $(cat $t2) | wc -l)
+ echo "$i: $c"
+done | egrep ' 1$' > $t3
+
+test -s $t3 && {
+ cat $t3
+ echo "FAIL: found unnecessary prototypes."
+ exit 1
+}
+
+exit 0
diff --git a/storage/bdb/test/scr008/chk.pubdef b/storage/bdb/test/scr008/chk.pubdef
new file mode 100644
index 00000000000..4f59e831b25
--- /dev/null
+++ b/storage/bdb/test/scr008/chk.pubdef
@@ -0,0 +1,179 @@
+#!/bin/sh -
+#
+# Reconcile the list of public defines with the man pages and the Java files.
+
+d=../..
+
+[ -f $d/LICENSE ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+
+p=$d/dist/pubdef.in
+
+exitv=0
+
+# Check that pubdef.in has everything listed in m4.links.
+f=$d/docs_src/m4/m4.links
+sed -n \
+ -e 's/^\$1, \(DB_[^,]*\).*/\1/p' \
+ -e d < $f |
+while read name; do
+ if `egrep -w "$name" $p > /dev/null`; then
+ :
+ else
+ echo "$f: $name is missing from $p"
+ exitv=1
+ fi
+done
+
+# Check that m4.links has everything listed in pubdef.in.
+f=$d/docs_src/m4/m4.links
+sed '/^#/d' $p |
+while read name isdoc isinc isjava; do
+ if `egrep -w "^.1, $name" $f > /dev/null`; then
+ [ "X$isdoc" != "XD" ] && {
+ echo "$name should not appear in $f"
+ exitv=1
+ }
+ else
+ [ "X$isdoc" = "XD" ] && {
+ echo "$name does not appear in $f"
+ exitv=1;
+ }
+ fi
+done
+
+# Check that pubdef.in has everything listed in db.in.
+f=$d/dbinc/db.in
+sed -n \
+ -e 's/^#define[ ]*\(DB_[A-Z_0-9]*\).*/\1/p' \
+ -e 's/^[ ]*\(DB_[A-Z_]*\)=[0-9].*/\1/p' \
+ -e d < $f |
+while read name; do
+ if `egrep -w "$name" $p > /dev/null`; then
+ :
+ else
+ echo "$f: $name is missing from $p"
+ exitv=1
+ fi
+done
+
+# Check that db.in has everything listed in pubdef.in.
+f=$d/dbinc/db.in
+sed '/^#/d' $p |
+while read name isdoc isinc isjava; do
+ if `egrep -w "#define[ ]$name|[ ][ ]*$name=[0-9][0-9]*" \
+ $f > /dev/null`; then
+ [ "X$isinc" != "XI" ] && {
+ echo "$name should not appear in $f"
+ exitv=1
+ }
+ else
+ [ "X$isinc" = "XI" ] && {
+ echo "$name does not appear in $f"
+ exitv=1
+ }
+ fi
+done
+
+# Check that pubdef.in has everything listed in DbConstants.java.
+f=$d/java/src/com/sleepycat/db/DbConstants.java
+sed -n -e 's/.*static final int[ ]*\([^ ]*\).*/\1/p' < $f |
+while read name; do
+ if `egrep -w "$name" $p > /dev/null`; then
+ :
+ else
+ echo "$f: $name is missing from $p"
+ exitv=1
+ fi
+done
+
+# Check that DbConstants.java has everything listed in pubdef.in.
+f=$d/java/src/com/sleepycat/db/DbConstants.java
+sed '/^#/d' $p |
+while read name isdoc isinc isjava; do
+ if `egrep -w "static final int[ ]$name =" $f > /dev/null`; then
+ [ "X$isjava" != "XJ" ] && {
+ echo "$name should not appear in $f"
+ exitv=1
+ }
+ else
+ [ "X$isjava" = "XJ" ] && {
+ echo "$name does not appear in $f"
+ exitv=1
+ }
+ fi
+done
+
+# Check that pubdef.in has everything listed in Db.java.
+f=$d/java/src/com/sleepycat/db/Db.java
+sed -n -e 's/.*static final int[ ]*\([^ ;]*\).*/\1/p' < $f |
+while read name; do
+ if `egrep -w "$name" $p > /dev/null`; then
+ :
+ else
+ echo "$f: $name is missing from $p"
+ exitv=1;
+ fi
+done
+sed -n -e 's/^[ ]*\([^ ]*\) = DbConstants\..*/\1/p' < $f |
+while read name; do
+ if `egrep -w "$name" $p > /dev/null`; then
+ :
+ else
+ echo "$f: $name is missing from $p"
+ exitv=1
+ fi
+done
+
+# Check that Db.java has all of the Java case values listed in pubdef.in.
+# Any J entries should appear twice -- once as a static final int, with
+# no initialization value, and once assigned to the DbConstants value. Any
+# C entries should appear once as a static final int, with an initialization
+# value.
+f=$d/java/src/com/sleepycat/db/Db.java
+sed '/^#/d' $p |
+while read name isdoc isinc isjava; do
+ if `egrep -w "static final int[ ]$name;$" $f > /dev/null`; then
+ [ "X$isjava" != "XJ" ] && {
+ echo "$name should not appear in $f"
+ exitv=1
+ }
+ else
+ [ "X$isjava" = "XJ" ] && {
+ echo "$name does not appear in $f"
+ exitv=1
+ }
+ fi
+done
+sed '/^#/d' $p |
+while read name isdoc isinc isjava; do
+ if `egrep -w "= DbConstants.$name;" $f > /dev/null`; then
+ [ "X$isjava" != "XJ" ] && {
+ echo "$name should not appear in $f"
+ exitv=1
+ }
+ else
+ [ "X$isjava" = "XJ" ] && {
+ echo "$name does not appear in $f"
+ exitv=1
+ }
+ fi
+done
+sed '/^#/d' $p |
+while read name isdoc isinc isjava; do
+ if `egrep "static final int[ ]$name =.*;" $f > /dev/null`; then
+ [ "X$isjava" != "XC" ] && {
+ echo "$name should not appear in $f"
+ exitv=1
+ }
+ else
+ [ "X$isjava" = "XC" ] && {
+ echo "$name does not appear in $f"
+ exitv=1
+ }
+ fi
+done
+
+exit $exitv
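
Nothing in the script shows what a pubdef.in row looks like, but the repeated "read name isdoc isinc isjava" loops imply one name per line followed by flag columns: D if documented, I if it appears in db.in, and J or C for how the name surfaces in Java. A runnable sketch (the rows are illustrative, not copied from pubdef.in):

    while read name isdoc isinc isjava; do
            echo "$name: doc=$isdoc inc=$isinc java=$isjava"
    done << 'EOF'
    DB_CREATE D I J
    DB_RDONLY D I J
    EOF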
diff --git a/storage/bdb/test/scr009/chk.srcfiles b/storage/bdb/test/scr009/chk.srcfiles
new file mode 100644
index 00000000000..4f09a2890f6
--- /dev/null
+++ b/storage/bdb/test/scr009/chk.srcfiles
@@ -0,0 +1,39 @@
+#!/bin/sh -
+#
+# $Id: chk.srcfiles,v 1.10 2002/02/04 22:25:33 bostic Exp $
+#
+# Check to make sure we haven't forgotten to add any files to the list
+# of source files Win32 uses to build its dsp files.
+
+d=../..
+
+[ -f $d/LICENSE ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+
+f=$d/dist/srcfiles.in
+t1=__1
+t2=__2
+
+sed -e '/^[ #]/d' \
+ -e '/^$/d' < $f |
+ awk '{print $1}' > $t1
+find $d -type f |
+ sed -e 's/^\.\.\/\.\.\///' \
+ -e '/^build[^_]/d' \
+ -e '/^test\//d' \
+ -e '/^test_server/d' \
+ -e '/^test_thread/d' \
+ -e '/^test_vxworks/d' |
+ egrep '\.c$|\.cpp$|\.def$|\.rc$' |
+ sed -e '/perl.DB_File\/version.c/d' |
+ sort > $t2
+
+cmp $t1 $t2 > /dev/null || {
+ echo "<<< srcfiles.in >>> existing files"
+ diff $t1 $t2
+ exit 1
+}
+
+exit 0
diff --git a/storage/bdb/test/scr010/chk.str b/storage/bdb/test/scr010/chk.str
new file mode 100644
index 00000000000..2b5698c0ff2
--- /dev/null
+++ b/storage/bdb/test/scr010/chk.str
@@ -0,0 +1,31 @@
+#!/bin/sh -
+#
+# $Id: chk.str,v 1.5 2001/10/12 17:55:36 bostic Exp $
+#
+# Check spelling in quoted strings.
+
+d=../..
+
+[ -f $d/LICENSE ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+
+t1=__t1
+
+sed -e '/^#include/d' \
+ -e '/revid/d' \
+ -e '/"/!d' \
+ -e 's/^[^"]*//' \
+ -e 's/%s/ /g' \
+ -e 's/[^"]*$//' \
+ -e 's/\\[nt]/ /g' $d/*/*.c $d/*/*.cpp |
+spell | sort | comm -23 /dev/stdin spell.ok > $t1
+
+test -s $t1 && {
+ cat $t1
+ echo "FAIL: found questionable spelling in strings."
+ exit 1
+}
+
+exit 0
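
The allow-list works because comm -23 prints only the lines unique to its first sorted input, so any flagged word that also appears in spell.ok drops out. A tiny self-contained demo:

    printf 'dbenv\nmispeling\npgno\n' > __words  # flagged words, sorted
    printf 'dbenv\npgno\n' > __ok                # allow-list, sorted
    comm -23 __words __ok                        # prints only: mispeling
    rm -f __words __ok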
diff --git a/storage/bdb/test/scr010/spell.ok b/storage/bdb/test/scr010/spell.ok
new file mode 100644
index 00000000000..18af8d1306d
--- /dev/null
+++ b/storage/bdb/test/scr010/spell.ok
@@ -0,0 +1,825 @@
+AES
+AJVX
+ALLDB
+API
+APP
+AccessExample
+Acflmo
+Aclmop
+Ahlm
+Ahm
+BCFILprRsvVxX
+BCc
+BDBXXXXXX
+BH
+BI
+BII
+BINTERNAL
+BTREE
+Bc
+BerkeleyDB
+BtRecExample
+Btree
+CD
+CDB
+CDS
+CDdFILTVvX
+CFILpRsv
+CFLprsvVxX
+CFh
+CHKSUM
+CLpsvxX
+CONFIG
+CdFILTvX
+ClassNotFoundException
+Config
+DBC
+DBENV
+DBP
+DBS
+DBSDIR
+DBT
+DBTYPE
+DBcursor
+DONOTINDEX
+DS
+DUP
+DUPMASTER
+DUPSORT
+Db
+DbAppendRecno
+DbAttachImpl
+DbBtreeCompare
+DbBtreePrefix
+DbBtreeStat
+DbDeadlockException
+DbDupCompare
+DbEnv
+DbEnvFeedback
+DbErrcall
+DbException
+DbFeedback
+DbHash
+DbHashStat
+DbKeyRange
+DbLock
+DbLockNotGrantedException
+DbLockRequest
+DbLockStat
+DbLogStat
+DbLogc
+DbLsn
+DbMemoryException
+DbMpoolFStat
+DbMpoolFile
+DbMpoolStat
+DbPreplist
+DbQueueStat
+DbRecoveryInit
+DbRepStat
+DbRepTransport
+DbRunRecoveryException
+DbSecondaryKeyCreate
+DbTxn
+DbTxnRecover
+DbTxnStat
+DbUtil
+DbXAResource
+DbXid
+Dbc
+Dbt
+Dde
+Deref'ing
+EIO
+EIRT
+EIi
+ENV
+EnvExample
+EnvInfoDelete
+Exp
+FIXEDLEN
+Fd
+Ff
+Fh
+FileNotFoundException
+GetFileInformationByHandle
+GetJavaVM
+GetJoin
+HOFFSET
+HOLDELECTION
+Hashtable
+ILo
+ILprR
+INDX
+INIT
+IREAD
+ISSET
+IWR
+IWRITE
+Ik
+KEYEMPTY
+KEYEXIST
+KeyRange
+LBTREE
+LOCKDOWN
+LOGC
+LRECNO
+LRU
+LSN
+Lcom
+Ljava
+Ll
+LockExample
+LogRegister
+LpRsS
+LprRsS
+MEM
+MMDDhhmm
+MPOOL
+MPOOLFILE
+MapViewOfFile
+Maxid
+Mb
+Mbytes
+Metadata
+Metapage
+Mpool
+MpoolExample
+Mutex
+NEWMASTER
+NEWSITE
+NG
+NODUP
+NODUPDATA
+NOLOCKING
+NOMMAP
+NOMORE
+NOORDERCHK
+NOPANIC
+NOSERVER
+NOSYNC
+NOTFOUND
+NOTGRANTED
+NOTYPE
+NOWAIT
+NP
+NoP
+NoqV
+NqV
+NrV
+NsV
+OLDVERSION
+ORDERCHKONLY
+Offpage
+OpenFileMapping
+OutputStream
+PGNO
+PID
+PREV
+Pgno
+RECNO
+RECNOSYNC
+RECNUM
+RINTERNAL
+RMW
+RPC
+RT
+RUNRECOVERY
+Recno
+RepElectResult
+RepProcessMessage
+SERVERPROG
+SERVERVERS
+SETFD
+SHA
+SS
+Shm
+Sleepycat
+Subdatabase
+TDS
+TESTDIR
+TID
+TMP
+TMPDIR
+TODO
+TPS
+TXN
+TXNID
+TXNs
+Tcl
+TempFolder
+TestKeyRange
+TestLogc
+TpcbExample
+Tt
+Txn
+Txnid
+Txns
+UID
+UNAVAIL
+USERMEM
+Unencrypted
+UnmapViewOfFile
+VM
+VX
+Vv
+VvW
+VvXxZ
+Vvw
+Vx
+VxWorks
+Waitsfor
+XA
+XAException
+Xid
+XxZ
+YIELDCPU
+YY
+abc
+abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq
+abcdef
+abs
+addpage
+addr
+addrem
+adj
+afterop
+ahr
+alldb
+alloc
+alsVv
+amx
+anum
+appl
+appname
+archivedir
+arg
+args
+ata
+badkey
+berkdb
+berkeley
+bfname
+bfree
+bigpages
+bnum
+bostic
+bqual
+bsize
+bt
+btcompare
+btrec
+btree
+buf
+bylsn
+bypage
+byteswap
+byteswapped
+bytevalue
+cachesize
+cadjust
+callpgin
+cd
+cdb
+cdel
+ceVv
+ceh
+celmNrtVZ
+celmNtV
+celmNtVZ
+cget
+charkey
+charset
+chgpg
+chkpoint
+chkpt
+chksum
+ckp
+cksum
+clearerr
+clientrun
+cmdargs
+cnt
+compareproc
+compat
+conf
+config
+copypage
+cp
+crdel
+creat
+curadj
+curlsn
+datalen
+db
+dbc
+dbclient
+dbclose
+dbe
+dbenv
+dbkill
+dbm
+dbmclose
+dbminit
+dbobj
+dbopen
+dbp
+dbreg
+dbremove
+dbrename
+dbs
+dbt
+dbtruncate
+dbverify
+dd
+def
+del
+delext
+delim
+dev
+df
+dh
+dir
+dirfno
+dist
+dists
+dlen
+ds
+dsize
+dup
+dup'ed
+dupcompare
+dups
+dupset
+dupsort
+efh
+eid
+electinit
+electsend
+electvote
+electwait
+encryptaes
+encryptany
+endian
+env
+envid
+envremove
+eof
+errcall
+errfile
+errno
+errpfx
+excl
+extentsize
+faststat
+fclose
+fcntl
+fcreate
+fd
+ff
+ffactor
+fget
+fh
+fid
+fileid
+fileopen
+firstkey
+fiv
+flushcommit
+foo
+fopen
+formatID
+fput
+freelist
+fset
+fstat
+fsync
+ftype
+func
+fv
+gbytes
+gc'ed
+gen
+getBranchQualifier
+getFormatId
+getGlobalTransactionId
+gettime
+gettimeofday
+gettype
+getval
+gid
+groupalloc
+gtrid
+hashproc
+hcreate
+hdestroy
+hdr
+hostname
+hsearch
+icursor
+idletimeout
+ids
+idup
+iitem
+inc
+incfirst
+indx
+init
+inlen
+inp
+insdel
+int
+intValue
+io
+iread
+isdeleted
+itemorder
+iter
+iwr
+iwrite
+javax
+kb
+kbyte
+kbytes
+keyfirst
+keygroup
+keygroups
+keygrp
+keylast
+keyrange
+killinterval
+killiteration
+killtest
+klNpP
+klNprRV
+klNprRs
+krinsky
+lM
+lP
+lang
+lastid
+ld
+len
+lf
+lg
+libdb
+lk
+llsn
+localhost
+localtime
+lockid
+logc
+logclean
+logfile
+logflush
+logsonly
+lorder
+lpgno
+lsVv
+lsn
+lsynch
+lt
+lu
+luB
+luGB
+luKB
+luKb
+luM
+luMB
+luMb
+lx
+mNP
+mNs
+machid
+makedup
+malloc
+margo
+maxcommitperflush
+maxkey
+maxlockers
+maxlocks
+maxnactive
+maxnlockers
+maxnlocks
+maxnobjects
+maxobjects
+maxops
+maxtimeout
+maxtxns
+mbytes
+mem
+memp
+metadata
+metaflags
+metagroup
+metalsn
+metapage
+metasub
+methodID
+mincommitperflush
+minkey
+minlocks
+minwrite
+minwrites
+mis
+mjc
+mkdir
+mlock
+mmap
+mmapped
+mmapsize
+mmetalsn
+mmpgno
+mp
+mpf
+mpgno
+mpool
+msg
+munmap
+mutex
+mutexes
+mutexlocks
+mv
+mvptr
+mydrive
+mydrivexxx
+nO
+nP
+nTV
+nTt
+naborts
+nactive
+nbegins
+nbytes
+ncaches
+ncommits
+nconflicts
+ndata
+ndbm
+ndeadlocks
+ndx
+needswap
+nelem
+nevict
+newalloc
+newclient
+newfile
+newitem
+newmaster
+newname
+newpage
+newpgno
+newsite
+nextdup
+nextkey
+nextlsn
+nextnodup
+nextpgno
+ng
+nitems
+nkeys
+nlockers
+nlocks
+nlsn
+nmodes
+nnext
+nnextlsn
+nnowaits
+nobjects
+nodup
+nodupdata
+nogrant
+nolocking
+nommap
+noop
+nooverwrite
+nopanic
+nosort
+nosync
+notfound
+notgranted
+nowait
+nowaits
+npages
+npgno
+nrec
+nrecords
+nreleases
+nrequests
+nrestores
+nsites
+ntasks
+nthreads
+num
+numdup
+obj
+offpage
+ok
+olddata
+olditem
+oldname
+opd
+opflags
+opmods
+orig
+os
+osynch
+outlen
+ovfl
+ovflpoint
+ovflsize
+ovref
+pageimage
+pagelsn
+pageno
+pagesize
+pagesizes
+pagfno
+panic'ing
+paniccall
+panicstate
+parentid
+passwd
+perf
+perfdb
+pflag
+pg
+pgcookie
+pgdbt
+pget
+pgfree
+pgin
+pgno
+pgnum
+pgout
+pgsize
+pid
+pkey
+plist
+pn
+postdestroy
+postlog
+postlogmeta
+postopen
+postsync
+prR
+prec
+predestroy
+preopen
+prev
+prevlsn
+prevnodup
+prheader
+pri
+printlog
+proc
+procs
+pthread
+pthreads
+ptype
+pv
+qV
+qam
+qs
+qtest
+rRV
+rRs
+rV
+rand
+rcuradj
+rdonly
+readd
+readonly
+realloc
+rec
+reclength
+recno
+recnum
+recnums
+recs
+refcount
+regionmax
+regop
+regsize
+relink
+repl
+revsplitoff
+rf
+rkey
+rlsn
+rm
+rmid
+rmw
+ro
+rootent
+rootlsn
+rpc
+rpcid
+rs
+rsplit
+runlog
+rw
+rwrw
+rwrwrw
+sS
+sV
+sVv
+scount
+secon
+secs
+sendproc
+seq
+setto
+setval
+sh
+shalloc
+shm
+shmat
+shmctl
+shmdt
+shmem
+shmget
+shr
+sleepycat
+splitdata
+splitmeta
+srand
+stat
+str
+strcmp
+strdup
+strerror
+strlen
+subdatabase
+subdb
+sv
+svc
+tV
+tVZ
+tas
+tcl
+tcp
+thr
+threadID
+tid
+tiebreaker
+timestamp
+tlen
+tm
+tmp
+tmpdir
+tmutex
+tnum
+tp
+tpcb
+treeorder
+ttpcbddlk
+ttpcbi
+ttpcbr
+ttype
+tx
+txn
+txnarray
+txnid
+txns
+txt
+ubell
+ud
+uid
+ulen
+uncorrect
+undeleting
+unmap
+unpinned
+upd
+upi
+usec
+usecs
+usr
+util
+vVxXZ
+vZ
+val
+var
+vec
+ver
+vflag
+vrfy
+vw
+vx
+vxmutex
+vxtmp
+waitsfor
+walkdupint
+walkpages
+wb
+wc
+wcount
+wordlist
+writeable
+wrnosync
+wt
+xa
+xid
+xxx
+yieldcpu
diff --git a/storage/bdb/test/scr011/chk.tags b/storage/bdb/test/scr011/chk.tags
new file mode 100644
index 00000000000..14a3c4e011d
--- /dev/null
+++ b/storage/bdb/test/scr011/chk.tags
@@ -0,0 +1,41 @@
+#!/bin/sh -
+#
+# $Id: chk.tags,v 1.10 2001/10/12 17:55:36 bostic Exp $
+#
+# Check to make sure we don't need any more symbolic links to tags files.
+
+d=../..
+
+# Test must be run from the top-level directory, not from a test directory.
+[ -f $d/LICENSE ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+
+t1=__1
+t2=__2
+
+(cd $d && ls -F | egrep / | sort |
+ sed -e 's/\///' \
+ -e '/^CVS$/d' \
+ -e '/^build_vxworks$/d' \
+ -e '/^build_win32$/d' \
+ -e '/^docs$/d' \
+ -e '/^docs_book$/d' \
+ -e '/^docs_src$/d' \
+ -e '/^java$/d' \
+ -e '/^perl$/d' \
+ -e '/^test$/d' \
+ -e '/^test_cxx$/d' \
+ -e '/^test_purify$/d' \
+ -e '/^test_thread$/d' \
+ -e '/^test_vxworks$/d') > $t1
+
+(cd $d && ls */tags | sed 's/\/tags$//' | sort) > $t2
+if diff $t1 $t2 > /dev/null; then
+ exit 0
+else
+ echo "<<< source tree >>> tags files"
+ diff $t1 $t2
+ exit 1
+fi
diff --git a/storage/bdb/test/scr012/chk.vx_code b/storage/bdb/test/scr012/chk.vx_code
new file mode 100644
index 00000000000..8d7ca608f93
--- /dev/null
+++ b/storage/bdb/test/scr012/chk.vx_code
@@ -0,0 +1,68 @@
+#!/bin/sh -
+#
+# $Id: chk.vx_code,v 1.6 2002/03/27 20:20:25 bostic Exp $
+#
+# Check to make sure the auto-generated utility code in the VxWorks build
+# directory compiles.
+
+d=../..
+
+[ -f $d/LICENSE ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+[ -f ../libdb.a ] || (cd .. && make libdb.a) || {
+ echo 'FAIL: unable to find or build libdb.a'
+ exit 1
+}
+
+rm -f t.c t1.c t2.c
+
+header()
+{
+ echo "int"
+ echo "main(int argc, char *argv[])"
+ echo "{return ($1(argv[1]));}"
+}
+
+(echo "int"
+ echo "main(int argc, char *argv[])"
+ echo "{"
+ echo "int i;") > t1.c
+
+for i in db_archive db_checkpoint db_deadlock db_dump db_load \
+ db_printlog db_recover db_stat db_upgrade db_verify dbdemo; do
+ echo " compiling build_vxworks/$i"
+ (cat $d/build_vxworks/$i/$i.c; header $i) > t.c
+ if cc -Wall -I.. -I$d t.c \
+ $d/clib/getopt.c \
+ $d/common/util_arg.c \
+ $d/common/util_cache.c \
+ $d/common/util_log.c \
+ $d/common/util_sig.c ../libdb.a -o t; then
+ :
+ else
+ echo "FAIL: unable to compile $i"
+ exit 1
+ fi
+
+ cat $d/build_vxworks/$i/$i.c >> t2.c
+ echo "i = $i(argv[1]);" >> t1.c
+done
+
+(cat t2.c t1.c; echo "return (0); }") > t.c
+
+echo " compiling build_vxworks utility composite"
+if cc -Dlint -Wall -I.. -I$d t.c \
+ $d/clib/getopt.c \
+ $d/common/util_arg.c \
+ $d/common/util_cache.c \
+ $d/common/util_log.c \
+ $d/common/util_sig.c ../libdb.a -o t; then
+ :
+else
+ echo "FAIL: unable to compile utility composite"
+ exit 1
+fi
+
+exit 0
diff --git a/storage/bdb/test/scr013/chk.stats b/storage/bdb/test/scr013/chk.stats
new file mode 100644
index 00000000000..3a404699668
--- /dev/null
+++ b/storage/bdb/test/scr013/chk.stats
@@ -0,0 +1,114 @@
+#!/bin/sh -
+#
+# $Id: chk.stats,v 1.6 2002/08/19 18:35:18 bostic Exp $
+#
+# Check to make sure all of the stat structure members are included in
+# all of the possible formats.
+
+# Top-level directory.
+d=../..
+
+# Path names are from a top-level directory.
+[ -f $d/README ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+
+exitv=0
+t=__tmp
+
+# Extract the field names for a structure from the db.h file.
+inc_fields()
+{
+ sed -e "/struct $1 {/,/^};$/p" \
+ -e d < $d/dbinc/db.in |
+ sed -e 1d \
+ -e '$d' \
+ -e '/;/!d' \
+ -e 's/;.*//' \
+ -e 's/^[ ].*[ \*]//'
+}
+
+cat << END_OF_IGNORE > IGNORE
+bt_maxkey
+bt_metaflags
+hash_metaflags
+qs_metaflags
+qs_ndata
+END_OF_IGNORE
+
+# Check to make sure the elements of a structure from db.h appear in
+# the other files.
+inc()
+{
+ for i in `inc_fields $1`; do
+ if egrep -w $i IGNORE > /dev/null; then
+ echo " $1: ignoring $i"
+ continue
+ fi
+ for j in $2; do
+ if egrep -w $i $d/$j > /dev/null; then
+ :;
+ else
+ echo " $1: $i not found in $j."
+ exitv=1
+ fi
+ done
+ done
+}
+
+inc "__db_bt_stat" \
+ "tcl/tcl_db.c db_stat/db_stat.c docs_src/db/db_stat.so"
+inc "__db_h_stat" \
+ "tcl/tcl_db.c db_stat/db_stat.c docs_src/db/db_stat.so"
+inc "__db_qam_stat" \
+ "tcl/tcl_db.c db_stat/db_stat.c docs_src/db/db_stat.so"
+inc __db_lock_stat \
+ "tcl/tcl_lock.c db_stat/db_stat.c docs_src/lock/lock_stat.so"
+inc __db_log_stat \
+ "tcl/tcl_log.c db_stat/db_stat.c docs_src/log/log_stat.so"
+inc __db_mpool_stat \
+ "tcl/tcl_mp.c db_stat/db_stat.c docs_src/memp/memp_stat.so"
+inc __db_txn_stat \
+ "tcl/tcl_txn.c db_stat/db_stat.c docs_src/txn/txn_stat.so"
+
+# Check to make sure the elements from a man page appear in db.in.
+man()
+{
+ for i in `cat $t`; do
+ if egrep -w $i IGNORE > /dev/null; then
+ echo " $1: ignoring $i"
+ continue
+ fi
+ if egrep -w $i $d/dbinc/db.in > /dev/null; then
+ :;
+ else
+ echo " $1: $i not found in db.h."
+ exitv=1
+ fi
+ done
+}
+
+sed -e '/m4_stat(/!d' \
+ -e 's/.*m4_stat(\([^)]*\)).*/\1/' < $d/docs_src/db/db_stat.so > $t
+man "checking db_stat.so against db.h"
+
+sed -e '/m4_stat(/!d' \
+ -e 's/.*m4_stat(\([^)]*\)).*/\1/' \
+ -e 's/.* //' < $d/docs_src/lock/lock_stat.so > $t
+man "checking lock_stat.so against db.h"
+
+sed -e '/m4_stat[12](/!d' \
+ -e 's/.*m4_stat[12](\([^)]*\)).*/\1/' < $d/docs_src/log/log_stat.so > $t
+man "checking log_stat.so against db.h"
+
+sed -e '/m4_stat[123](/!d' \
+ -e 's/.*m4_stat[123](\([^)]*\)).*/\1/' < $d/docs_src/memp/memp_stat.so > $t
+man "checking memp_stat.so against db.h"
+
+sed -e '/m4_stat(/!d' \
+ -e 's/.*m4_stat(.*, \([^)]*\)).*/\1/' \
+ -e 's/__[LR]B__//g' < $d/docs_src/txn/txn_stat.so > $t
+man "checking txn_stat.so against db.h"
+
+exit $exitv
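
The subtle part of chk.stats is the inc_fields() extraction: the first sed isolates one struct body, and the second strips everything but the member names. A self-contained demo against a made-up struct shaped like those in dbinc/db.in (the demo indents with spaces, so the trailing character classes only need to cover spaces; the real script also handles tabs):

    cat > __sample.h << 'EOF'
    struct __db_bt_stat {
     u_int32_t bt_magic; /* Magic number. */
     u_int32_t bt_version; /* Version number. */
    };
    EOF
    sed -e "/struct __db_bt_stat {/,/^};$/p" -e d < __sample.h |
        sed -e 1d -e '$d' -e '/;/!d' -e 's/;.*//' -e 's/^[ ].*[ *]//'
    # prints bt_magic and bt_version, one per line
    rm -f __sample.h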
diff --git a/storage/bdb/test/scr014/chk.err b/storage/bdb/test/scr014/chk.err
new file mode 100644
index 00000000000..72b4a62719f
--- /dev/null
+++ b/storage/bdb/test/scr014/chk.err
@@ -0,0 +1,34 @@
+#!/bin/sh -
+#
+# $Id: chk.err,v 1.3 2002/03/27 04:33:05 bostic Exp $
+#
+# Check to make sure all of the error values have corresponding error
+# message strings in db_strerror().
+
+# Top-level directory.
+d=../..
+
+# Path names are from a top-level directory.
+[ -f $d/README ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+
+t1=__tmp1
+t2=__tmp2
+
+egrep -- "define.*DB_.*-309" $d/dbinc/db.in | awk '{print $2}' > $t1
+sed -e '/^db_strerror/,/^}/{' \
+ -e '/ case DB_/{' \
+ -e 's/:.*//' \
+ -e 's/.* //' \
+ -e p \
+ -e '}' \
+ -e '}' \
+ -e d \
+ < $d/common/db_err.c > $t2
+
+cmp $t1 $t2 > /dev/null ||
+	{ echo "<<< db.h >>> db_strerror"; diff $t1 $t2; exit 1; }
+
+exit 0
diff --git a/storage/bdb/test/scr015/README b/storage/bdb/test/scr015/README
new file mode 100644
index 00000000000..75a356eea06
--- /dev/null
+++ b/storage/bdb/test/scr015/README
@@ -0,0 +1,36 @@
+# $Id: README,v 1.1 2001/05/31 23:09:11 dda Exp $
+
+Use the scripts testall or testone to run all, or just one, of the
+C++ tests. You must be in this directory to run them. For example,
+
+ $ export LIBS="-L/usr/include/BerkeleyDB/lib"
+ $ export CXXFLAGS="-I/usr/include/BerkeleyDB/include"
+ $ export LD_LIBRARY_PATH="/usr/include/BerkeleyDB/lib"
+ $ ./testone TestAppendRecno
+ $ ./testall
+
+The scripts will use c++ in your path. Set the environment variable
+$CXX to override this. They will also honor any $CXXFLAGS and $LIBS
+variables that are set, except that any -c option is silently removed
+from $CXXFLAGS (since we do the compilation in one step).
+
+To run successfully, you will probably need to set $LD_LIBRARY_PATH
+to be the directory containing libdb_cxx-X.Y.so.
+
+As an alternative, use the --prefix=<DIR> option, a la configure,
+to set the top of the BerkeleyDB install directory. This forces
+the proper options to be added to $LIBS, $CXXFLAGS, and $LD_LIBRARY_PATH.
+For example,
+
+ $ ./testone --prefix=/usr/include/BerkeleyDB TestAppendRecno
+ $ ./testall --prefix=/usr/include/BerkeleyDB
+
+The test framework is pretty simple. Any <name>.cpp file in this
+directory that is not mentioned in the 'ignore' file represents a
+test. If the test does not compile successfully, the compiler output
+is left in <name>.compileout. Otherwise, the compiled program is run
+in a clean subdirectory using as input <name>.testin, or, if that
+doesn't exist, /dev/null. Output and error from the test run are put
+into <name>.out and <name>.err. If <name>.testout and <name>.testerr
+exist, they are used as reference files and any differences are
+reported; if either reference file does not exist, /dev/null is used.
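
One detail the README leaves implicit: to disable a test, add its base name to the 'ignore' file; testall then prints an "ignored" line for it instead of compiling and running it. For example,

    $ echo TestLogc >> ignore
    $ ./testall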
diff --git a/storage/bdb/test/scr015/TestConstruct01.cpp b/storage/bdb/test/scr015/TestConstruct01.cpp
new file mode 100644
index 00000000000..7ae328d458c
--- /dev/null
+++ b/storage/bdb/test/scr015/TestConstruct01.cpp
@@ -0,0 +1,330 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestConstruct01.cpp,v 1.5 2002/01/23 14:26:40 bostic Exp $
+ */
+
+/*
+ * Do some regression tests for constructors.
+ * Run normally (without arguments), it is a simple regression test.
+ * Run with a numeric argument, it repeats the regression that many
+ * times, to try to determine if there are memory leaks.
+ */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <iostream.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+#ifndef _MSC_VER
+#include <unistd.h>
+#endif
+#endif
+
+#include <iomanip.h>
+#include <db_cxx.h>
+
+#define ERR(a) \
+ do { \
+ cout << "FAIL: " << (a) << "\n"; sysexit(1); \
+ } while (0)
+
+#define ERR2(a1,a2) \
+ do { \
+ cout << "FAIL: " << (a1) << ": " << (a2) << "\n"; sysexit(1); \
+ } while (0)
+
+#define ERR3(a1,a2,a3) \
+ do { \
+ cout << "FAIL: " << (a1) << ": " << (a2) << ": " << (a3) << "\n"; sysexit(1); \
+ } while (0)
+
+#define CHK(a) \
+ do { \
+ int _ret; \
+ if ((_ret = (a)) != 0) { \
+ ERR3("DB function " #a " has bad return", _ret, DbEnv::strerror(_ret)); \
+ } \
+ } while (0)
+
+#ifdef VERBOSE
+#define DEBUGOUT(a) cout << a << "\n"
+#else
+#define DEBUGOUT(a)
+#endif
+
+#define CONSTRUCT01_DBNAME "construct01.db"
+#define CONSTRUCT01_DBDIR "."
+#define CONSTRUCT01_DBFULLPATH (CONSTRUCT01_DBDIR "/" CONSTRUCT01_DBNAME)
+
+int itemcount; // count the number of items in the database
+
+// A good place to put a breakpoint...
+//
+void sysexit(int status)
+{
+ exit(status);
+}
+
+void check_file_removed(const char *name, int fatal)
+{
+ unlink(name);
+#if 0
+ if (access(name, 0) == 0) {
+ if (fatal)
+ cout << "FAIL: ";
+ cout << "File \"" << name << "\" still exists after run\n";
+ if (fatal)
+ sysexit(1);
+ }
+#endif
+}
+
+// Check that key/data for 0 - count-1 are already present,
+// and write a key/data for count. The key and data are
+// both "0123...N" where N == count-1.
+//
+// For some reason on Windows, we need to open using the full pathname
+// of the file when there is no environment, thus the 'has_env'
+// variable.
+//
+void rundb(Db *db, int count, int has_env)
+{
+ const char *name;
+
+ if (has_env)
+ name = CONSTRUCT01_DBNAME;
+ else
+ name = CONSTRUCT01_DBFULLPATH;
+
+ db->set_error_stream(&cerr);
+
+ // We don't really care about the pagesize, but we do want
+ // to make sure adjusting Db specific variables works before
+ // opening the db.
+ //
+ CHK(db->set_pagesize(1024));
+ CHK(db->open(NULL, name, NULL, DB_BTREE, count ? 0 : DB_CREATE, 0664));
+
+ // The bit map of keys we've seen
+ long bitmap = 0;
+
+ // The bit map of keys we expect to see
+ long expected = (1 << (count+1)) - 1;
+
+ char outbuf[10];
+ int i;
+ for (i=0; i<count; i++) {
+ outbuf[i] = '0' + i;
+ }
+ outbuf[i++] = '\0';
+ Dbt key(outbuf, i);
+ Dbt data(outbuf, i);
+
+ DEBUGOUT("Put: " << outbuf);
+ CHK(db->put(0, &key, &data, DB_NOOVERWRITE));
+
+ // Acquire a cursor for the table.
+ Dbc *dbcp;
+ CHK(db->cursor(NULL, &dbcp, 0));
+
+ // Walk through the table, checking
+ Dbt readkey;
+ Dbt readdata;
+ while (dbcp->get(&readkey, &readdata, DB_NEXT) == 0) {
+ char *key_string = (char *)readkey.get_data();
+ char *data_string = (char *)readdata.get_data();
+ DEBUGOUT("Got: " << key_string << ": " << data_string);
+ int len = strlen(key_string);
+ long bit = (1 << len);
+ if (len > count) {
+ ERR("reread length is bad");
+ }
+ else if (strcmp(data_string, key_string) != 0) {
+ ERR("key/data don't match");
+ }
+ else if ((bitmap & bit) != 0) {
+ ERR("key already seen");
+ }
+ else if ((expected & bit) == 0) {
+ ERR("key was not expected");
+ }
+ else {
+ bitmap |= bit;
+ expected &= ~(bit);
+ for (i=0; i<len; i++) {
+ if (key_string[i] != ('0' + i)) {
+ cout << " got " << key_string
+ << " (" << (int)key_string[i] << ")"
+ << ", wanted " << i
+ << " (" << (int)('0' + i) << ")"
+ << " at position " << i << "\n";
+ ERR("key is corrupt");
+ }
+ }
+ }
+ }
+ if (expected != 0) {
+ cout << " expected more keys, bitmap is: " << expected << "\n";
+ ERR("missing keys in database");
+ }
+ CHK(dbcp->close());
+ CHK(db->close(0));
+}
+
+void t1(int except_flag)
+{
+ cout << " Running test 1:\n";
+ Db db(0, except_flag);
+ rundb(&db, itemcount++, 0);
+ cout << " finished.\n";
+}
+
+void t2(int except_flag)
+{
+ cout << " Running test 2:\n";
+ Db db(0, except_flag);
+ rundb(&db, itemcount++, 0);
+ cout << " finished.\n";
+}
+
+void t3(int except_flag)
+{
+ cout << " Running test 3:\n";
+ Db db(0, except_flag);
+ rundb(&db, itemcount++, 0);
+ cout << " finished.\n";
+}
+
+void t4(int except_flag)
+{
+ cout << " Running test 4:\n";
+ DbEnv env(except_flag);
+ CHK(env.open(CONSTRUCT01_DBDIR, DB_CREATE | DB_INIT_MPOOL, 0));
+ Db db(&env, 0);
+ CHK(db.close(0));
+ CHK(env.close(0));
+ cout << " finished.\n";
+}
+
+void t5(int except_flag)
+{
+ cout << " Running test 5:\n";
+ DbEnv env(except_flag);
+ CHK(env.open(CONSTRUCT01_DBDIR, DB_CREATE | DB_INIT_MPOOL, 0));
+ Db db(&env, 0);
+ rundb(&db, itemcount++, 1);
+ // Note we cannot reuse the old Db!
+ Db anotherdb(&env, 0);
+
+ anotherdb.set_errpfx("test5");
+ rundb(&anotherdb, itemcount++, 1);
+ CHK(env.close(0));
+ cout << " finished.\n";
+}
+
+void t6(int except_flag)
+{
+ cout << " Running test 6:\n";
+
+ /* From user [#2939] */
+ int err;
+
+ DbEnv* penv = new DbEnv(DB_CXX_NO_EXCEPTIONS);
+ penv->set_cachesize(0, 32 * 1024, 0);
+ penv->open(CONSTRUCT01_DBDIR, DB_CREATE | DB_PRIVATE | DB_INIT_MPOOL, 0);
+
+ //LEAK: remove this block and leak disappears
+ Db* pdb = new Db(penv,0);
+ if ((err = pdb->close(0)) != 0) {
+ fprintf(stderr, "Error closing Db: %s\n", db_strerror(err));
+ }
+ delete pdb;
+ //LEAK: remove this block and leak disappears
+
+ if ((err = penv->close(0)) != 0) {
+ fprintf(stderr, "Error closing DbEnv: %s\n", db_strerror(err));
+ }
+ delete penv;
+
+ // Make sure we get a message from C++ layer reminding us to close.
+ cerr << "expected error: ";
+ {
+ DbEnv foo(DB_CXX_NO_EXCEPTIONS);
+ foo.open(CONSTRUCT01_DBDIR, DB_CREATE, 0);
+ }
+ cerr << "should have received error.\n";
+ cout << " finished.\n";
+}
+
+// remove any existing environment or database
+void removeall()
+{
+ {
+ DbEnv tmpenv(DB_CXX_NO_EXCEPTIONS);
+ (void)tmpenv.remove(CONSTRUCT01_DBDIR, DB_FORCE);
+ }
+
+ check_file_removed(CONSTRUCT01_DBFULLPATH, 1);
+ for (int i=0; i<8; i++) {
+ char buf[20];
+ sprintf(buf, "__db.00%d", i);
+ check_file_removed(buf, 1);
+ }
+}
+
+int doall(int except_flag)
+{
+ itemcount = 0;
+ try {
+ // before and after the run, removing any
+ // old environment/database.
+ //
+ removeall();
+ t1(except_flag);
+ t2(except_flag);
+ t3(except_flag);
+ t4(except_flag);
+ t5(except_flag);
+ t6(except_flag);
+
+ removeall();
+ return 0;
+ }
+ catch (DbException &dbe) {
+ ERR2("EXCEPTION RECEIVED", dbe.what());
+ }
+ return 1;
+}
+
+int main(int argc, char *argv[])
+{
+ int iterations = 1;
+ if (argc > 1) {
+ iterations = atoi(argv[1]);
+ if (iterations < 0) {
+ ERR("Usage: construct01 count");
+ }
+ }
+ for (int i=0; i<iterations; i++) {
+ if (iterations != 0) {
+ cout << "(" << i << "/" << iterations << ") ";
+ }
+ cout << "construct01 running:\n";
+ if (doall(DB_CXX_NO_EXCEPTIONS) != 0) {
+ ERR("SOME TEST FAILED FOR NO-EXCEPTION TEST");
+ }
+ else if (doall(0) != 0) {
+ ERR("SOME TEST FAILED FOR EXCEPTION TEST");
+ }
+ else {
+ cout << "\nALL TESTS SUCCESSFUL\n";
+ }
+ }
+ return 0;
+}
diff --git a/storage/bdb/test/scr015/TestConstruct01.testerr b/storage/bdb/test/scr015/TestConstruct01.testerr
new file mode 100644
index 00000000000..1ba627d103b
--- /dev/null
+++ b/storage/bdb/test/scr015/TestConstruct01.testerr
@@ -0,0 +1,4 @@
+expected error: DbEnv::_destroy_check: open DbEnv object destroyed
+should have received error.
+expected error: DbEnv::_destroy_check: open DbEnv object destroyed
+should have received error.
diff --git a/storage/bdb/test/scr015/TestConstruct01.testout b/storage/bdb/test/scr015/TestConstruct01.testout
new file mode 100644
index 00000000000..9b840f9fcf4
--- /dev/null
+++ b/storage/bdb/test/scr015/TestConstruct01.testout
@@ -0,0 +1,27 @@
+(0/1) construct01 running:
+ Running test 1:
+ finished.
+ Running test 2:
+ finished.
+ Running test 3:
+ finished.
+ Running test 4:
+ finished.
+ Running test 5:
+ finished.
+ Running test 6:
+ finished.
+ Running test 1:
+ finished.
+ Running test 2:
+ finished.
+ Running test 3:
+ finished.
+ Running test 4:
+ finished.
+ Running test 5:
+ finished.
+ Running test 6:
+ finished.
+
+ALL TESTS SUCCESSFUL
diff --git a/storage/bdb/test/scr015/TestExceptInclude.cpp b/storage/bdb/test/scr015/TestExceptInclude.cpp
new file mode 100644
index 00000000000..28bc498222f
--- /dev/null
+++ b/storage/bdb/test/scr015/TestExceptInclude.cpp
@@ -0,0 +1,27 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestExceptInclude.cpp,v 1.4 2002/07/05 22:17:59 dda Exp $
+ */
+
+/* We should be able to include cxx_except.h without db_cxx.h,
+ * and use the DbException class. We do need db.h to get a few
+ * typedefs defined that the DbException classes use.
+ *
+ * This program does nothing; it's just here to make sure
+ * the compilation works.
+ */
+#include <db.h>
+#include <cxx_except.h>
+
+int main(int argc, char *argv[])
+{
+ DbException *dbe = new DbException("something");
+ DbMemoryException *dbme = new DbMemoryException("anything");
+
+ dbe = dbme;
+}
+
diff --git a/storage/bdb/test/scr015/TestGetSetMethods.cpp b/storage/bdb/test/scr015/TestGetSetMethods.cpp
new file mode 100644
index 00000000000..81ef914eac3
--- /dev/null
+++ b/storage/bdb/test/scr015/TestGetSetMethods.cpp
@@ -0,0 +1,91 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestGetSetMethods.cpp,v 1.4 2002/01/11 15:53:59 bostic Exp $
+ */
+
+/*
+ * Do some regression tests for simple get/set access methods
+ * on DbEnv, DbTxn, Db. We don't currently test that they have
+ * the desired effect, only that they operate and return correctly.
+ */
+
+#include <db_cxx.h>
+#include <iostream.h>
+
+int main(int argc, char *argv[])
+{
+ try {
+ DbEnv *dbenv = new DbEnv(0);
+ DbTxn *dbtxn;
+ u_int8_t conflicts[10];
+
+ dbenv->set_error_stream(&cerr);
+ dbenv->set_timeout(0x90000000,
+ DB_SET_LOCK_TIMEOUT);
+ dbenv->set_lg_bsize(0x1000);
+ dbenv->set_lg_dir(".");
+ dbenv->set_lg_max(0x10000000);
+ dbenv->set_lg_regionmax(0x100000);
+ dbenv->set_lk_conflicts(conflicts, sizeof(conflicts));
+ dbenv->set_lk_detect(DB_LOCK_DEFAULT);
+ // exists, but is deprecated:
+ // dbenv->set_lk_max(0);
+ dbenv->set_lk_max_lockers(100);
+ dbenv->set_lk_max_locks(10);
+ dbenv->set_lk_max_objects(1000);
+ dbenv->set_mp_mmapsize(0x10000);
+ dbenv->set_tas_spins(1000);
+
+ // Need to open the environment so we
+ // can get a transaction.
+ //
+ dbenv->open(".", DB_CREATE | DB_INIT_TXN |
+ DB_INIT_LOCK | DB_INIT_LOG |
+ DB_INIT_MPOOL,
+ 0644);
+
+ dbenv->txn_begin(NULL, &dbtxn, DB_TXN_NOWAIT);
+ dbtxn->set_timeout(0xA0000000, DB_SET_TXN_TIMEOUT);
+ dbtxn->abort();
+
+ dbenv->close(0);
+
+ // We get a db, one for each type.
+ // That's because once we call (for instance)
+ // set_bt_maxkey, DB 'knows' that this is a
+ // Btree Db, and it cannot be used to try Hash
+ // or Recno functions.
+ //
+ Db *db_bt = new Db(NULL, 0);
+ db_bt->set_bt_maxkey(10000);
+ db_bt->set_bt_minkey(100);
+ db_bt->set_cachesize(0, 0x100000, 0);
+ db_bt->close(0);
+
+ Db *db_h = new Db(NULL, 0);
+ db_h->set_h_ffactor(0x10);
+ db_h->set_h_nelem(100);
+ db_h->set_lorder(0);
+ db_h->set_pagesize(0x10000);
+ db_h->close(0);
+
+ Db *db_re = new Db(NULL, 0);
+ db_re->set_re_delim('@');
+ db_re->set_re_pad(10);
+ db_re->set_re_source("re.in");
+ db_re->close(0);
+
+ Db *db_q = new Db(NULL, 0);
+ db_q->set_q_extentsize(200);
+ db_q->close(0);
+
+ }
+ catch (DbException &dbe) {
+ cerr << "Db Exception: " << dbe.what() << "\n";
+ }
+ return 0;
+}
diff --git a/storage/bdb/test/scr015/TestKeyRange.cpp b/storage/bdb/test/scr015/TestKeyRange.cpp
new file mode 100644
index 00000000000..980d2f518e0
--- /dev/null
+++ b/storage/bdb/test/scr015/TestKeyRange.cpp
@@ -0,0 +1,171 @@
+/*NOTE: AccessExample changed to test Db.key_range.
+ * We made a global change of /AccessExample/TestKeyRange/,
+ * the only other changes are marked with comments that
+ * are notated as 'ADDED'.
+ */
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestKeyRange.cpp,v 1.4 2002/01/23 14:26:41 bostic Exp $
+ */
+
+#ifndef NO_SYSTEM_INCLUDES
+#include <sys/types.h>
+
+#include <iostream.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+#ifndef _MSC_VER
+#include <unistd.h>
+#endif
+#endif
+
+#include <iomanip.h>
+#include <db_cxx.h>
+
+class TestKeyRange
+{
+public:
+ TestKeyRange();
+ void run();
+
+private:
+ static const char FileName[];
+
+ // no need for copy and assignment
+ TestKeyRange(const TestKeyRange &);
+ void operator = (const TestKeyRange &);
+};
+
+static void usage(); // forward
+
+int main(int argc, char *argv[])
+{
+ if (argc > 1) {
+ usage();
+ }
+
+ // Use a try block just to report any errors.
+ // An alternate approach to using exceptions is to
+ // use error models (see DbEnv::set_error_model()) so
+ // that error codes are returned for all Berkeley DB methods.
+ //
+ try {
+ TestKeyRange app;
+ app.run();
+ return 0;
+ }
+ catch (DbException &dbe) {
+ cerr << "TestKeyRange: " << dbe.what() << "\n";
+ return 1;
+ }
+}
+
+static void usage()
+{
+ cerr << "usage: TestKeyRange\n";
+ exit(1);
+}
+
+const char TestKeyRange::FileName[] = "access.db";
+
+TestKeyRange::TestKeyRange()
+{
+}
+
+void TestKeyRange::run()
+{
+ // Remove the previous database.
+ (void)unlink(FileName);
+
+ // Create the database object.
+ // There is no environment for this simple example.
+ Db db(0, 0);
+
+ db.set_error_stream(&cerr);
+ db.set_errpfx("TestKeyRange");
+ db.set_pagesize(1024); /* Page size: 1K. */
+ db.set_cachesize(0, 32 * 1024, 0);
+ db.open(NULL, FileName, NULL, DB_BTREE, DB_CREATE, 0664);
+
+ //
+ // Insert records into the database, where the key is the user
+ // input and the data is the user input in reverse order.
+ //
+ char buf[1024];
+ char rbuf[1024];
+ char *t;
+ char *p;
+ int ret;
+ int len;
+ Dbt *firstkey = NULL;
+ char firstbuf[1024];
+
+ for (;;) {
+ cout << "input>";
+ cout.flush();
+
+ cin.getline(buf, sizeof(buf));
+ if (cin.eof())
+ break;
+
+ if ((len = strlen(buf)) <= 0)
+ continue;
+ for (t = rbuf, p = buf + (len - 1); p >= buf;)
+ *t++ = *p--;
+ *t++ = '\0';
+
+ Dbt key(buf, len + 1);
+ Dbt data(rbuf, len + 1);
+ if (firstkey == NULL) {
+ strcpy(firstbuf, buf);
+ firstkey = new Dbt(firstbuf, len + 1);
+ }
+
+ ret = db.put(0, &key, &data, DB_NOOVERWRITE);
+ if (ret == DB_KEYEXIST) {
+ cout << "Key " << buf << " already exists.\n";
+ }
+ cout << "\n";
+ }
+
+ // We put a try block around this section of code
+ // to ensure that our database is properly closed
+ // in the event of an error.
+ //
+ try {
+ // Acquire a cursor for the table.
+ Dbc *dbcp;
+ db.cursor(NULL, &dbcp, 0);
+
+ /*ADDED...*/
+ DB_KEY_RANGE range;
+ memset(&range, 0, sizeof(range));
+
+ db.key_range(NULL, firstkey, &range, 0);
+ printf("less: %f\n", range.less);
+ printf("equal: %f\n", range.equal);
+ printf("greater: %f\n", range.greater);
+ /*end ADDED*/
+
+ Dbt key;
+ Dbt data;
+
+ // Walk through the table, printing the key/data pairs.
+ while (dbcp->get(&key, &data, DB_NEXT) == 0) {
+ char *key_string = (char *)key.get_data();
+ char *data_string = (char *)data.get_data();
+ cout << key_string << " : " << data_string << "\n";
+ }
+ dbcp->close();
+ }
+ catch (DbException &dbe) {
+ cerr << "TestKeyRange: " << dbe.what() << "\n";
+ }
+
+ db.close(0);
+}
diff --git a/storage/bdb/test/scr015/TestKeyRange.testin b/storage/bdb/test/scr015/TestKeyRange.testin
new file mode 100644
index 00000000000..a2b6bd74e7b
--- /dev/null
+++ b/storage/bdb/test/scr015/TestKeyRange.testin
@@ -0,0 +1,8 @@
+first line is alphabetically somewhere in the middle.
+Blah blah
+let's have exactly eight lines of input.
+stuff
+more stuff
+and even more stuff
+lastly
+but not leastly.
diff --git a/storage/bdb/test/scr015/TestKeyRange.testout b/storage/bdb/test/scr015/TestKeyRange.testout
new file mode 100644
index 00000000000..25b2e1a835c
--- /dev/null
+++ b/storage/bdb/test/scr015/TestKeyRange.testout
@@ -0,0 +1,19 @@
+input>
+input>
+input>
+input>
+input>
+input>
+input>
+input>
+input>less: 0.375000
+equal: 0.125000
+greater: 0.500000
+Blah blah : halb halB
+and even more stuff : ffuts erom neve dna
+but not leastly. : .yltsael ton tub
+first line is alphabetically somewhere in the middle. : .elddim eht ni erehwemos yllacitebahpla si enil tsrif
+lastly : yltsal
+let's have exactly eight lines of input. : .tupni fo senil thgie yltcaxe evah s'tel
+more stuff : ffuts erom
+stuff : ffuts
diff --git a/storage/bdb/test/scr015/TestLogc.cpp b/storage/bdb/test/scr015/TestLogc.cpp
new file mode 100644
index 00000000000..94fcfa0b3ec
--- /dev/null
+++ b/storage/bdb/test/scr015/TestLogc.cpp
@@ -0,0 +1,101 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestLogc.cpp,v 1.6 2002/01/23 14:26:41 bostic Exp $
+ */
+
+/*
+ * A basic regression test for the Logc class.
+ */
+
+#include <db_cxx.h>
+#include <iostream.h>
+
+static void show_dbt(ostream &os, Dbt *dbt)
+{
+ int i;
+ int size = dbt->get_size();
+ unsigned char *data = (unsigned char *)dbt->get_data();
+
+ os << "size: " << size << " data: ";
+ for (i=0; i<size && i<10; i++) {
+ os << (int)data[i] << " ";
+ }
+ if (i<size)
+ os << "...";
+}
+
+int main(int argc, char *argv[])
+{
+ try {
+ DbEnv *env = new DbEnv(0);
+ env->open(".", DB_CREATE | DB_INIT_LOG | DB_INIT_MPOOL, 0);
+
+ // Do some database activity to get something into the log.
+ Db *db1 = new Db(env, 0);
+ db1->open(NULL, "first.db", NULL, DB_BTREE, DB_CREATE, 0);
+ Dbt *key = new Dbt((char *)"a", 1);
+ Dbt *data = new Dbt((char *)"b", 1);
+ db1->put(NULL, key, data, 0);
+ key->set_data((char *)"c");
+ data->set_data((char *)"d");
+ db1->put(NULL, key, data, 0);
+ db1->close(0);
+
+ Db *db2 = new Db(env, 0);
+ db2->open(NULL, "second.db", NULL, DB_BTREE, DB_CREATE, 0);
+ key->set_data((char *)"w");
+ data->set_data((char *)"x");
+ db2->put(NULL, key, data, 0);
+ key->set_data((char *)"y");
+ data->set_data((char *)"z");
+ db2->put(NULL, key, data, 0);
+ db2->close(0);
+
+ // Now get a log cursor and walk through.
+ DbLogc *logc;
+
+ env->log_cursor(&logc, 0);
+ int ret = 0;
+ DbLsn lsn;
+ Dbt *dbt = new Dbt();
+ u_int32_t flags = DB_FIRST;
+
+ int count = 0;
+ while ((ret = logc->get(&lsn, dbt, flags)) == 0) {
+
+			// We ignore the contents of the log record;
+			// it's not portable. Even the exact count
+			// may change when the underlying implementation
+			// changes, so we'll just make sure at the end
+			// that we saw 'enough'.
+ //
+ // cout << "logc.get: " << count;
+ // show_dbt(cout, dbt);
+ // cout << "\n";
+ //
+ count++;
+ flags = DB_NEXT;
+ }
+ if (ret != DB_NOTFOUND) {
+ cerr << "*** FAIL: logc.get returned: "
+ << DbEnv::strerror(ret) << "\n";
+ }
+ logc->close(0);
+
+		// There have to be at *least* four log records,
+		// since we did four separate database operations.
+ //
+ if (count < 4)
+ cerr << "*** FAIL: not enough log records\n";
+
+ cout << "TestLogc done.\n";
+ }
+ catch (DbException &dbe) {
+ cerr << "*** FAIL: " << dbe.what() <<"\n";
+ }
+ return 0;
+}
diff --git a/storage/bdb/test/scr015/TestLogc.testout b/storage/bdb/test/scr015/TestLogc.testout
new file mode 100644
index 00000000000..afac3af7eda
--- /dev/null
+++ b/storage/bdb/test/scr015/TestLogc.testout
@@ -0,0 +1 @@
+TestLogc done.
diff --git a/storage/bdb/test/scr015/TestSimpleAccess.cpp b/storage/bdb/test/scr015/TestSimpleAccess.cpp
new file mode 100644
index 00000000000..2450b9b3030
--- /dev/null
+++ b/storage/bdb/test/scr015/TestSimpleAccess.cpp
@@ -0,0 +1,67 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestSimpleAccess.cpp,v 1.5 2002/01/23 14:26:41 bostic Exp $
+ */
+
+/*
+ * Do some regression tests for simple access methods:
+ * open a database, store a key/data pair, then fetch it back with
+ * both the matching key and a key that should be missing, checking
+ * the results.
+ */
+
+#include <db_cxx.h>
+#include <iostream.h>
+
+int main(int argc, char *argv[])
+{
+ try {
+ Db *db = new Db(NULL, 0);
+ db->open(NULL, "my.db", NULL, DB_BTREE, DB_CREATE, 0644);
+
+ // populate our massive database.
+ // all our strings include null for convenience.
+ // Note we have to cast for idiomatic
+ // usage, since newer gcc requires it.
+ Dbt *keydbt = new Dbt((char *)"key", 4);
+ Dbt *datadbt = new Dbt((char *)"data", 5);
+ db->put(NULL, keydbt, datadbt, 0);
+
+ // Now, retrieve. We could use keydbt over again,
+ // but that wouldn't be typical in an application.
+ Dbt *goodkeydbt = new Dbt((char *)"key", 4);
+ Dbt *badkeydbt = new Dbt((char *)"badkey", 7);
+ Dbt *resultdbt = new Dbt();
+ resultdbt->set_flags(DB_DBT_MALLOC);
+
+ int ret;
+
+ if ((ret = db->get(NULL, goodkeydbt, resultdbt, 0)) != 0) {
+ cout << "get: " << DbEnv::strerror(ret) << "\n";
+ }
+ else {
+ char *result = (char *)resultdbt->get_data();
+ cout << "got data: " << result << "\n";
+ }
+
+ if ((ret = db->get(NULL, badkeydbt, resultdbt, 0)) != 0) {
+ // We expect this...
+ cout << "get using bad key: "
+ << DbEnv::strerror(ret) << "\n";
+ }
+ else {
+ char *result = (char *)resultdbt->get_data();
+ cout << "*** got data using bad key!!: "
+ << result << "\n";
+ }
+ cout << "finished test\n";
+ }
+ catch (DbException &dbe) {
+ cerr << "Db Exception: " << dbe.what();
+ }
+ return 0;
+}
diff --git a/storage/bdb/test/scr015/TestSimpleAccess.testout b/storage/bdb/test/scr015/TestSimpleAccess.testout
new file mode 100644
index 00000000000..dc88d4788e4
--- /dev/null
+++ b/storage/bdb/test/scr015/TestSimpleAccess.testout
@@ -0,0 +1,3 @@
+got data: data
+get using bad key: DB_NOTFOUND: No matching key/data pair found
+finished test
diff --git a/storage/bdb/test/scr015/TestTruncate.cpp b/storage/bdb/test/scr015/TestTruncate.cpp
new file mode 100644
index 00000000000..d5c0dc6de29
--- /dev/null
+++ b/storage/bdb/test/scr015/TestTruncate.cpp
@@ -0,0 +1,84 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestTruncate.cpp,v 1.5 2002/01/23 14:26:41 bostic Exp $
+ */
+
+/*
+ * Do some regression tests for Db::truncate:
+ * populate a database, verify a get, truncate the database, and
+ * then check that the discarded record count is reported and that
+ * the data is really gone.
+ */
+
+#include <db_cxx.h>
+#include <iostream.h>
+
+int main(int argc, char *argv[])
+{
+ try {
+ Db *db = new Db(NULL, 0);
+ db->open(NULL, "my.db", NULL, DB_BTREE, DB_CREATE, 0644);
+
+ // populate our massive database.
+ // all our strings include null for convenience.
+ // Note we have to cast for idiomatic
+ // usage, since newer gcc requires it.
+ Dbt *keydbt = new Dbt((char*)"key", 4);
+ Dbt *datadbt = new Dbt((char*)"data", 5);
+ db->put(NULL, keydbt, datadbt, 0);
+
+ // Now, retrieve. We could use keydbt over again,
+ // but that wouldn't be typical in an application.
+ Dbt *goodkeydbt = new Dbt((char*)"key", 4);
+ Dbt *badkeydbt = new Dbt((char*)"badkey", 7);
+ Dbt *resultdbt = new Dbt();
+ resultdbt->set_flags(DB_DBT_MALLOC);
+
+ int ret;
+
+ if ((ret = db->get(NULL, goodkeydbt, resultdbt, 0)) != 0) {
+ cout << "get: " << DbEnv::strerror(ret) << "\n";
+ }
+ else {
+ char *result = (char *)resultdbt->get_data();
+ cout << "got data: " << result << "\n";
+ }
+
+ if ((ret = db->get(NULL, badkeydbt, resultdbt, 0)) != 0) {
+ // We expect this...
+ cout << "get using bad key: "
+ << DbEnv::strerror(ret) << "\n";
+ }
+ else {
+ char *result = (char *)resultdbt->get_data();
+ cout << "*** got data using bad key!!: "
+ << result << "\n";
+ }
+
+ // Now, truncate and make sure that it's really gone.
+ cout << "truncating data...\n";
+ u_int32_t nrecords;
+ db->truncate(NULL, &nrecords, 0);
+ cout << "truncate returns " << nrecords << "\n";
+ if ((ret = db->get(NULL, goodkeydbt, resultdbt, 0)) != 0) {
+ // We expect this...
+ cout << "after truncate get: "
+ << DbEnv::strerror(ret) << "\n";
+ }
+ else {
+ char *result = (char *)resultdbt->get_data();
+ cout << "got data: " << result << "\n";
+ }
+
+ db->close(0);
+ cout << "finished test\n";
+ }
+ catch (DbException &dbe) {
+ cerr << "Db Exception: " << dbe.what();
+ }
+ return 0;
+}
diff --git a/storage/bdb/test/scr015/TestTruncate.testout b/storage/bdb/test/scr015/TestTruncate.testout
new file mode 100644
index 00000000000..0a4bc98165d
--- /dev/null
+++ b/storage/bdb/test/scr015/TestTruncate.testout
@@ -0,0 +1,6 @@
+got data: data
+get using bad key: DB_NOTFOUND: No matching key/data pair found
+truncating data...
+truncate returns 1
+after truncate get: DB_NOTFOUND: No matching key/data pair found
+finished test
diff --git a/storage/bdb/test/scr015/chk.cxxtests b/storage/bdb/test/scr015/chk.cxxtests
new file mode 100644
index 00000000000..5c21e27208c
--- /dev/null
+++ b/storage/bdb/test/scr015/chk.cxxtests
@@ -0,0 +1,71 @@
+#!/bin/sh -
+#
+# $Id: chk.cxxtests,v 1.5 2002/07/05 22:17:59 dda Exp $
+#
+# Check to make sure that regression tests for C++ run.
+
+TEST_CXX_SRCDIR=../test/scr015 # must be a relative directory
+
+# All paths must be relative to a subdirectory of the build directory
+LIBS="-L.. -ldb -ldb_cxx"
+CXXFLAGS="-I.. -I../../dbinc"
+
+# Test must be run from a local build directory, not from a test
+# directory.
+cd ..
+[ -f db_config.h ] || {
+ echo 'FAIL: chk.cxxtests must be run from a local build directory.'
+ exit 1
+}
+[ -d ../docs_src ] || {
+ echo 'FAIL: chk.cxxtests must be run from a local build directory.'
+ exit 1
+}
+[ -f libdb.a ] || make libdb.a || {
+ echo 'FAIL: unable to build libdb.a'
+ exit 1
+}
+[ -f libdb_cxx.a ] || make libdb_cxx.a || {
+ echo 'FAIL: unable to build libdb_cxx.a'
+ exit 1
+}
+CXX=`sed -e '/^CXX=/!d' -e 's/^CXX=//' -e 's/.*mode=compile *//' Makefile`
+echo " ====== cxx tests using $CXX"
+testnames=`cd $TEST_CXX_SRCDIR; ls *.cpp | sed -e 's/\.cpp$//'`
+
+for testname in $testnames; do
+ if grep -x $testname $TEST_CXX_SRCDIR/ignore > /dev/null; then
+ echo " **** cxx test $testname ignored"
+ continue
+ fi
+
+ echo " ==== cxx test $testname"
+ rm -rf TESTCXX; mkdir TESTCXX
+ cd ./TESTCXX
+ testprefix=../$TEST_CXX_SRCDIR/$testname
+
+ ${CXX} ${CXXFLAGS} -o $testname $testprefix.cpp ${LIBS} > ../$testname.compileout 2>&1 || {
+ echo "FAIL: compilation of $testname failed, see ../$testname.compileout"
+ exit 1
+ }
+ rm -f ../$testname.compileout
+ infile=$testprefix.testin
+ [ -f $infile ] || infile=/dev/null
+ goodoutfile=$testprefix.testout
+ [ -f $goodoutfile ] || goodoutfile=/dev/null
+ gooderrfile=$testprefix.testerr
+ [ -f $gooderrfile ] || gooderrfile=/dev/null
+ ./$testname <$infile >../$testname.out 2>../$testname.err
+ cmp ../$testname.out $goodoutfile > /dev/null || {
+ echo "FAIL: $testname output differs: see ../$testname.out, $goodoutfile"
+ exit 1
+ }
+ cmp ../$testname.err $gooderrfile > /dev/null || {
+ echo "FAIL: $testname error differs: see ../$testname.err, $gooderrfile"
+ exit 1
+ }
+ cd ..
+ rm -f $testname.err $testname.out
+done
+rm -rf TESTCXX
+exit 0
diff --git a/storage/bdb/test/scr015/ignore b/storage/bdb/test/scr015/ignore
new file mode 100644
index 00000000000..55ce82ae372
--- /dev/null
+++ b/storage/bdb/test/scr015/ignore
@@ -0,0 +1,4 @@
+#
+# $Id: ignore,v 1.3 2001/10/12 13:02:32 dda Exp $
+#
+# A list of tests to ignore
diff --git a/storage/bdb/test/scr015/testall b/storage/bdb/test/scr015/testall
new file mode 100644
index 00000000000..a2d493a8b22
--- /dev/null
+++ b/storage/bdb/test/scr015/testall
@@ -0,0 +1,32 @@
+#!/bin/sh -
+# $Id: testall,v 1.3 2001/09/13 14:49:36 dda Exp $
+#
+# Run all the C++ regression tests
+
+ecode=0
+prefixarg=""
+stdinarg=""
+while :
+do
+ case "$1" in
+ --prefix=* )
+ prefixarg="$1"; shift;;
+ --stdin )
+ stdinarg="$1"; shift;;
+ * )
+ break
+ esac
+done
+files="`find . -name \*.cpp -print`"
+for file in $files; do
+ name=`echo $file | sed -e 's:^\./::' -e 's/\.cpp$//'`
+ if grep $name ignore > /dev/null; then
+ echo " **** cxx test $name ignored"
+ else
+ echo " ==== cxx test $name"
+ if ! sh ./testone $prefixarg $stdinarg $name; then
+ ecode=1
+ fi
+ fi
+done
+exit $ecode
diff --git a/storage/bdb/test/scr015/testone b/storage/bdb/test/scr015/testone
new file mode 100644
index 00000000000..3bbba3f90f0
--- /dev/null
+++ b/storage/bdb/test/scr015/testone
@@ -0,0 +1,122 @@
+#!/bin/sh -
+# $Id: testone,v 1.5 2002/07/05 22:17:59 dda Exp $
+#
+# Run just one C++ regression test, the single argument
+# is the basename of the test, e.g. TestRpcServer
+
+error()
+{
+ echo '' >&2
+ echo "C++ regression error: $@" >&2
+ echo '' >&2
+ ecode=1
+}
+
+# compares the result against the good version,
+# reports differences, and removes the result file
+# if there are no differences.
+#
+compare_result()
+{
+ good="$1"
+ latest="$2"
+ if [ ! -e "$good" ]; then
+ echo "Note: $good does not exist"
+ return
+ fi
+ tmpout=/tmp/blddb$$.tmp
+ diff "$good" "$latest" > $tmpout
+ if [ -s $tmpout ]; then
+ nbad=`grep '^[0-9]' $tmpout | wc -l`
+ error "$good and $latest differ in $nbad places."
+ else
+ rm $latest
+ fi
+ rm -f $tmpout
+}
+
+ecode=0
+stdinflag=n
+gdbflag=n
+CXX=${CXX:-c++}
+LIBS=${LIBS:-}
+
+# remove any -c option in the CXXFLAGS
+CXXFLAGS="`echo " ${CXXFLAGS} " | sed -e 's/ -c //g'`"
+
+# determine the prefix of the install tree
+prefix=""
+while :
+do
+ case "$1" in
+ --prefix=* )
+ prefix="`echo $1 | sed -e 's/--prefix=//'`"; shift
+ LIBS="-L$prefix/lib -ldb_cxx $LIBS"
+ CXXFLAGS="-I$prefix/include $CXXFLAGS"
+ export LD_LIBRARY_PATH="$prefix/lib:$LD_LIBRARY_PATH"
+ ;;
+ --stdin )
+ stdinflag=y; shift
+ ;;
+ --gdb )
+ CXXFLAGS="-g $CXXFLAGS"
+ gdbflag=y; shift
+ ;;
+ * )
+ break
+ ;;
+ esac
+done
+
+if [ "$#" = 0 ]; then
+	echo 'Usage: testone [ --prefix=<dir> | --stdin | --gdb ] TestName'
+ exit 1
+fi
+name="$1"
+
+# compile
+rm -rf TESTDIR; mkdir TESTDIR
+cd ./TESTDIR
+
+${CXX} ${CXXFLAGS} -o $name ../$name.cpp ${LIBS} > ../$name.compileout 2>&1
+if [ $? != 0 -o -s ../$name.compileout ]; then
+ error "compilation of $name failed, see $name.compileout"
+ exit 1
+fi
+rm -f ../$name.compileout
+
+# find input file
+infile=../$name.testin
+if [ ! -f $infile ]; then
+ infile=/dev/null
+fi
+
+# run and diff results
+rm -rf TESTDIR
+if [ "$gdbflag" = y ]; then
+ if [ -s $infile ]; then
+ echo "Input file is $infile"
+ fi
+ gdb ./$name
+ exit 0
+elif [ "$stdinflag" = y ]; then
+ ./$name >../$name.out 2>../$name.err
+else
+ ./$name <$infile >../$name.out 2>../$name.err
+fi
+cd ..
+
+testerr=$name.testerr
+if [ ! -f $testerr ]; then
+ testerr=/dev/null
+fi
+
+testout=$name.testout
+if [ ! -f $testout ]; then
+ testout=/dev/null
+fi
+
+compare_result $testout $name.out
+compare_result $testerr $name.err
+rm -rf TESTDIR
+exit $ecode
diff --git a/storage/bdb/test/scr016/CallbackTest.java b/storage/bdb/test/scr016/CallbackTest.java
new file mode 100644
index 00000000000..eede964a027
--- /dev/null
+++ b/storage/bdb/test/scr016/CallbackTest.java
@@ -0,0 +1,83 @@
+package com.sleepycat.test;
+import com.sleepycat.db.*;
+
+public class CallbackTest
+{
+ public static void main(String args[])
+ {
+ try {
+ Db db = new Db(null, 0);
+ db.set_bt_compare(new BtreeCompare());
+ db.open(null, "test.db", "", Db.DB_BTREE, Db.DB_CREATE, 0666);
+ StringDbt[] keys = new StringDbt[10];
+ StringDbt[] datas = new StringDbt[10];
+ for (int i = 0; i<10; i++) {
+ int val = (i * 3) % 10;
+ keys[i] = new StringDbt("key" + val);
+ datas[i] = new StringDbt("data" + val);
+ System.out.println("put " + val);
+ db.put(null, keys[i], datas[i], 0);
+ }
+ }
+ catch (DbException dbe) {
+ System.err.println("FAIL: " + dbe);
+ }
+ catch (java.io.FileNotFoundException fnfe) {
+ System.err.println("FAIL: " + fnfe);
+ }
+
+ }
+
+
+}
+
+class BtreeCompare
+ implements DbBtreeCompare
+{
+ /* A weird comparator, for example.
+ * In fact, it may not be legal, since it does not define a consistent total order.
+ */
+ public int bt_compare(Db db, Dbt dbt1, Dbt dbt2)
+ {
+ System.out.println("compare function called");
+ byte b1[] = dbt1.get_data();
+ byte b2[] = dbt2.get_data();
+ System.out.println(" " + (new String(b1)) + ", " + (new String(b2)));
+ int len1 = b1.length;
+ int len2 = b2.length;
+ if (len1 != len2)
+ return (len1 < len2) ? 1 : -1;
+ int value = 1;
+ for (int i=0; i<len1; i++) {
+ if (b1[i] != b2[i])
+ return (b1[i] < b2[i]) ? value : -value;
+ value *= -1;
+ }
+ return 0;
+ }
+}
+
+class StringDbt extends Dbt
+{
+ StringDbt()
+ {
+ set_flags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval
+ }
+
+ StringDbt(String value)
+ {
+ setString(value);
+ set_flags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval
+ }
+
+ void setString(String value)
+ {
+ set_data(value.getBytes());
+ set_size(value.length());
+ }
+
+ String getString()
+ {
+ return new String(get_data(), 0, get_size());
+ }
+}
diff --git a/storage/bdb/test/scr016/CallbackTest.testout b/storage/bdb/test/scr016/CallbackTest.testout
new file mode 100644
index 00000000000..68797d4a2de
--- /dev/null
+++ b/storage/bdb/test/scr016/CallbackTest.testout
@@ -0,0 +1,60 @@
+put 0
+put 3
+compare function called
+ key3, key0
+put 6
+compare function called
+ key6, key3
+put 9
+compare function called
+ key9, key6
+put 2
+compare function called
+ key2, key9
+compare function called
+ key2, key0
+compare function called
+ key2, key6
+compare function called
+ key2, key3
+compare function called
+ key2, key0
+put 5
+compare function called
+ key5, key3
+compare function called
+ key5, key9
+compare function called
+ key5, key6
+put 8
+compare function called
+ key8, key5
+compare function called
+ key8, key9
+compare function called
+ key8, key6
+put 1
+compare function called
+ key1, key9
+compare function called
+ key1, key0
+compare function called
+ key1, key5
+compare function called
+ key1, key2
+compare function called
+ key1, key0
+put 4
+compare function called
+ key4, key5
+compare function called
+ key4, key2
+compare function called
+ key4, key3
+put 7
+compare function called
+ key7, key4
+compare function called
+ key7, key8
+compare function called
+ key7, key6
diff --git a/storage/bdb/test/scr016/README b/storage/bdb/test/scr016/README
new file mode 100644
index 00000000000..226a8aa3b77
--- /dev/null
+++ b/storage/bdb/test/scr016/README
@@ -0,0 +1,37 @@
+# $Id: README,v 1.2 2001/05/31 23:09:10 dda Exp $
+
+Use the scripts testall or testone to run all of the Java tests, or
+just one of them. You must be in this directory to run them. For example,
+
+ $ export LD_LIBRARY_PATH=/usr/local/Berkeley3.3/lib
+ $ ./testone TestAppendRecno
+ $ ./testall
+
+The scripts will use javac and java in your path. Set environment
+variables $JAVAC and $JAVA to override this. They will also honor
+any $CLASSPATH that is already set, prepending ../../../../classes to
+it, which is where the test .class files are put, and where the DB
+.class files can normally be found after a build on Unix and Windows.
+If none of these variables are set, everything will probably work
+with whatever java/javac is in your path.
+
+To run successfully, you will probably need to set $LD_LIBRARY_PATH
+to be the directory containing libdb_java-X.Y.so.
+
+As an alternative, use the --prefix=<DIR> option, as with configure,
+to set the top of the BerkeleyDB install directory. This forces
+the proper options to be added to $LD_LIBRARY_PATH.
+For example,
+
+ $ ./testone --prefix=/usr/include/BerkeleyDB TestAppendRecno
+ $ ./testall --prefix=/usr/include/BerkeleyDB
+
+The test framework is pretty simple. Any <name>.java file in this
+directory that is not mentioned in the 'ignore' file represents a
+test. If the test does not compile successfully, the compiler output
+is left in <name>.compileout. Otherwise, the java program is run in
+a clean subdirectory using <name>.testin as input, or /dev/null if
+that doesn't exist. Output and error from the test run are put into
+<name>.out and <name>.err. If <name>.testout and <name>.testerr exist,
+they are used as reference files and any differences are reported.
+If either of the reference files does not exist, /dev/null is used.
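+
+As a concrete illustration of that contract, a minimal test could look
+like the sketch below. The name TestHello and its output are invented
+for this README; per the description above, the only real requirements
+are the com.sleepycat.test package and a matching TestHello.testout
+reference file containing the single line "hello, db".
+
+  package com.sleepycat.test;
+
+  public class TestHello
+  {
+      public static void main(String[] args)
+      {
+          System.out.println("hello, db");
+      }
+  }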
diff --git a/storage/bdb/test/scr016/TestAppendRecno.java b/storage/bdb/test/scr016/TestAppendRecno.java
new file mode 100644
index 00000000000..f4ea70ca084
--- /dev/null
+++ b/storage/bdb/test/scr016/TestAppendRecno.java
@@ -0,0 +1,258 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestAppendRecno.java,v 1.4 2002/08/16 19:35:53 dda Exp $
+ */
+
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.InputStreamReader;
+import java.io.IOException;
+import java.io.PrintStream;
+
+public class TestAppendRecno
+ implements DbAppendRecno
+{
+ private static final String FileName = "access.db";
+ int callback_count = 0;
+ Db table = null;
+
+ public TestAppendRecno()
+ {
+ }
+
+ private static void usage()
+ {
+ System.err.println("usage: TestAppendRecno\n");
+ System.exit(1);
+ }
+
+ public static void main(String argv[])
+ {
+ try
+ {
+ TestAppendRecno app = new TestAppendRecno();
+ app.run();
+ }
+ catch (DbException dbe)
+ {
+ System.err.println("TestAppendRecno: " + dbe.toString());
+ System.exit(1);
+ }
+ catch (FileNotFoundException fnfe)
+ {
+ System.err.println("TestAppendRecno: " + fnfe.toString());
+ System.exit(1);
+ }
+ System.exit(0);
+ }
+
+ public void run()
+ throws DbException, FileNotFoundException
+ {
+ // Remove the previous database.
+ new File(FileName).delete();
+
+ // Create the database object.
+ // There is no environment for this simple example.
+ table = new Db(null, 0);
+ table.set_error_stream(System.err);
+ table.set_errpfx("TestAppendRecno");
+ table.set_append_recno(this);
+
+ table.open(null, FileName, null, Db.DB_RECNO, Db.DB_CREATE, 0644);
+ for (int i=0; i<10; i++) {
+ System.out.println("\n*** Iteration " + i );
+ try {
+ RecnoDbt key = new RecnoDbt(77+i);
+ StringDbt data = new StringDbt("data" + i + "_xyz");
+ table.put(null, key, data, Db.DB_APPEND);
+ }
+ catch (DbException dbe) {
+ System.out.println("dbe: " + dbe);
+ }
+ }
+
+ // Acquire an iterator for the table.
+ Dbc iterator;
+ iterator = table.cursor(null, 0);
+
+ // Walk through the table, printing the key/data pairs.
+ // See class StringDbt defined below.
+ //
+ RecnoDbt key = new RecnoDbt();
+ StringDbt data = new StringDbt();
+ while (iterator.get(key, data, Db.DB_NEXT) == 0)
+ {
+ System.out.println(key.getRecno() + " : " + data.getString());
+ }
+ iterator.close();
+ table.close(0);
+ System.out.println("Test finished.");
+ }
+
+ public void db_append_recno(Db db, Dbt dbt, int recno)
+ throws DbException
+ {
+ int count = callback_count++;
+
+ System.out.println("====\ncallback #" + count);
+ System.out.println("db is table: " + (db == table));
+ System.out.println("recno = " + recno);
+
+ // This gives variable output.
+ //System.out.println("dbt = " + dbt);
+ if (dbt instanceof RecnoDbt) {
+ System.out.println("dbt = " +
+ ((RecnoDbt)dbt).getRecno());
+ }
+ else if (dbt instanceof StringDbt) {
+ System.out.println("dbt = " +
+ ((StringDbt)dbt).getString());
+ }
+ else {
+ // Note: the dbts are created out of whole
+ // cloth by Berkeley DB, not us!
+ System.out.println("internally created dbt: " +
+ new StringDbt(dbt) + ", size " +
+ dbt.get_size());
+ }
+
+ switch (count) {
+ case 0:
+ // nothing
+ break;
+
+ case 1:
+ dbt.set_size(dbt.get_size() - 1);
+ break;
+
+ case 2:
+ System.out.println("throwing...");
+ throw new DbException("append_recno thrown");
+ //not reached
+
+ case 3:
+ // Should result in an error (size unchanged).
+ dbt.set_offset(1);
+ break;
+
+ case 4:
+ dbt.set_offset(1);
+ dbt.set_size(dbt.get_size() - 1);
+ break;
+
+ case 5:
+ dbt.set_offset(1);
+ dbt.set_size(dbt.get_size() - 2);
+ break;
+
+ case 6:
+ dbt.set_data(new String("abc").getBytes());
+ dbt.set_size(3);
+ break;
+
+ case 7:
+ // Should result in an error.
+ dbt.set_data(null);
+ break;
+
+ case 8:
+ // Should result in an error.
+ dbt.set_data(new String("abc").getBytes());
+ dbt.set_size(4);
+ break;
+
+ default:
+ break;
+ }
+ }
+
+
+    // Here's an example of how you can extend a Dbt to store recnos.
+ //
+ static /*inner*/
+ class RecnoDbt extends Dbt
+ {
+ RecnoDbt()
+ {
+ this(0); // let other constructor do most of the work
+ }
+
+ RecnoDbt(int value)
+ {
+ set_flags(Db.DB_DBT_USERMEM); // do not allocate on retrieval
+ arr = new byte[4];
+ set_data(arr); // use our local array for data
+ set_ulen(4); // size of return storage
+ setRecno(value);
+ }
+
+ public String toString() /*override*/
+ {
+ return String.valueOf(getRecno());
+ }
+
+ void setRecno(int value)
+ {
+ set_recno_key_data(value);
+ set_size(arr.length);
+ }
+
+ int getRecno()
+ {
+ return get_recno_key_data();
+ }
+
+ byte arr[];
+ }
+
+ // Here's an example of how you can extend a Dbt in a straightforward
+ // way to allow easy storage/retrieval of strings, or whatever
+ // kind of data you wish. We've declared it as a static inner
+ // class, but it need not be.
+ //
+ static /*inner*/
+ class StringDbt extends Dbt
+ {
+ StringDbt(Dbt dbt)
+ {
+ set_data(dbt.get_data());
+ set_size(dbt.get_size());
+ }
+
+ StringDbt()
+ {
+ set_flags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval
+ }
+
+ StringDbt(String value)
+ {
+ setString(value);
+ set_flags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval
+ }
+
+ void setString(String value)
+ {
+ set_data(value.getBytes());
+ set_size(value.length());
+ }
+
+ String getString()
+ {
+ return new String(get_data(), 0, get_size());
+ }
+
+ public String toString() /*override*/
+ {
+ return getString();
+ }
+ }
+}
+
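+// The callback cases above mutate the Dbt before the record is stored;
+// TestAppendRecno.testout shows the resulting records ("data1_xy",
+// "ata4_xyz", and so on). Below is a minimal sketch of the slicing
+// semantics this implies, assuming the stored value is the byte range
+// [offset, offset+size) of the Dbt's data array. The class and its
+// helper are hypothetical, for illustration only.
+class AppendRecnoSliceDemo
+{
+    static String stored(String data, int offset, int size)
+    {
+        // take bytes [offset, offset+size) of the data array
+        return new String(data.getBytes(), offset, size);
+    }
+
+    public static void main(String[] args)
+    {
+        // callback case 1: size reduced by one, prints "data1_xy"
+        System.out.println(stored("data1_xyz", 0, 8));
+        // callback case 4: offset 1, size reduced by one, prints "ata4_xyz"
+        System.out.println(stored("data4_xyz", 1, 8));
+        // callback case 5: offset 1, size reduced by two, prints "ata5_xy"
+        System.out.println(stored("data5_xyz", 1, 7));
+    }
+}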
diff --git a/storage/bdb/test/scr016/TestAppendRecno.testout b/storage/bdb/test/scr016/TestAppendRecno.testout
new file mode 100644
index 00000000000..970174e7a96
--- /dev/null
+++ b/storage/bdb/test/scr016/TestAppendRecno.testout
@@ -0,0 +1,82 @@
+
+*** Iteration 0
+====
+callback #0
+db is table: true
+recno = 1
+internally created dbt: data0_xyz, size 9
+
+*** Iteration 1
+====
+callback #1
+db is table: true
+recno = 2
+internally created dbt: data1_xyz, size 9
+
+*** Iteration 2
+====
+callback #2
+db is table: true
+recno = 3
+internally created dbt: data2_xyz, size 9
+throwing...
+dbe: com.sleepycat.db.DbException: append_recno thrown
+
+*** Iteration 3
+====
+callback #3
+db is table: true
+recno = 3
+internally created dbt: data3_xyz, size 9
+dbe: com.sleepycat.db.DbException: Dbt.size + Dbt.offset greater than array length
+
+*** Iteration 4
+====
+callback #4
+db is table: true
+recno = 3
+internally created dbt: data4_xyz, size 9
+
+*** Iteration 5
+====
+callback #5
+db is table: true
+recno = 4
+internally created dbt: data5_xyz, size 9
+
+*** Iteration 6
+====
+callback #6
+db is table: true
+recno = 5
+internally created dbt: data6_xyz, size 9
+
+*** Iteration 7
+====
+callback #7
+db is table: true
+recno = 6
+internally created dbt: data7_xyz, size 9
+dbe: com.sleepycat.db.DbException: Dbt.data is null
+
+*** Iteration 8
+====
+callback #8
+db is table: true
+recno = 6
+internally created dbt: data8_xyz, size 9
+dbe: com.sleepycat.db.DbException: Dbt.size + Dbt.offset greater than array length
+
+*** Iteration 9
+====
+callback #9
+db is table: true
+recno = 6
+internally created dbt: data9_xyz, size 9
+1 : data0_xyz
+2 : data1_xy
+3 : ata4_xyz
+4 : ata5_xy
+5 : abc
+6 : data9_xyz
+Test finished.
diff --git a/storage/bdb/test/scr016/TestAssociate.java b/storage/bdb/test/scr016/TestAssociate.java
new file mode 100644
index 00000000000..4105b9cb0a1
--- /dev/null
+++ b/storage/bdb/test/scr016/TestAssociate.java
@@ -0,0 +1,333 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestAssociate.java,v 1.4 2002/08/16 19:35:54 dda Exp $
+ */
+
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.Reader;
+import java.io.StringReader;
+import java.io.IOException;
+import java.io.PrintStream;
+import java.util.Hashtable;
+
+public class TestAssociate
+ implements DbDupCompare
+{
+ private static final String FileName = "access.db";
+ public static Db saveddb1 = null;
+ public static Db saveddb2 = null;
+
+ public TestAssociate()
+ {
+ }
+
+ private static void usage()
+ {
+ System.err.println("usage: TestAssociate\n");
+ System.exit(1);
+ }
+
+ public static void main(String argv[])
+ {
+ try
+ {
+ TestAssociate app = new TestAssociate();
+ app.run();
+ }
+ catch (DbException dbe)
+ {
+ System.err.println("TestAssociate: " + dbe.toString());
+ System.exit(1);
+ }
+ catch (FileNotFoundException fnfe)
+ {
+ System.err.println("TestAssociate: " + fnfe.toString());
+ System.exit(1);
+ }
+ System.exit(0);
+ }
+
+ public static int counter = 0;
+ public static String results[] = { "abc", "def", "ghi", "JKL", "MNO", null };
+
+    // Prompts for a line, and keeps prompting until a non-blank
+ // line is returned. Returns null on error.
+ //
+ static public String askForLine(Reader reader,
+ PrintStream out, String prompt)
+ {
+ /*
+ String result = "";
+ while (result != null && result.length() == 0) {
+ out.print(prompt);
+ out.flush();
+ result = getLine(reader);
+ }
+ return result;
+ */
+ return results[counter++];
+ }
+
+ // Not terribly efficient, but does the job.
+ // Works for reading a line from stdin or a file.
+ // Returns null on EOF. If EOF appears in the middle
+ // of a line, returns that line, then null on next call.
+ //
+ static public String getLine(Reader reader)
+ {
+ StringBuffer b = new StringBuffer();
+ int c;
+ try {
+ while ((c = reader.read()) != -1 && c != '\n') {
+ if (c != '\r')
+ b.append((char)c);
+ }
+ }
+ catch (IOException ioe) {
+ c = -1;
+ }
+
+ if (c == -1 && b.length() == 0)
+ return null;
+ else
+ return b.toString();
+ }
+
+ static public String shownull(Object o)
+ {
+ if (o == null)
+ return "null";
+ else
+ return "not null";
+ }
+
+ public void run()
+ throws DbException, FileNotFoundException
+ {
+ // Remove the previous database.
+ new File(FileName).delete();
+
+ // Create the database object.
+ // There is no environment for this simple example.
+ DbEnv dbenv = new DbEnv(0);
+ dbenv.open("./", Db.DB_CREATE|Db.DB_INIT_MPOOL, 0644);
+ (new java.io.File(FileName)).delete();
+ Db table = new Db(dbenv, 0);
+ Db table2 = new Db(dbenv, 0);
+ table2.set_dup_compare(this);
+ table2.set_flags(Db.DB_DUPSORT);
+ table.set_error_stream(System.err);
+ table2.set_error_stream(System.err);
+ table.set_errpfx("TestAssociate");
+ table2.set_errpfx("TestAssociate(table2)");
+ System.out.println("Primary database is " + shownull(table));
+ System.out.println("Secondary database is " + shownull(table2));
+ saveddb1 = table;
+ saveddb2 = table2;
+ table.open(null, FileName, null, Db.DB_BTREE, Db.DB_CREATE, 0644);
+ table2.open(null, FileName + "2", null,
+ Db.DB_BTREE, Db.DB_CREATE, 0644);
+ table.associate(null, table2, new Capitalize(), 0);
+
+ //
+ // Insert records into the database, where the key is the user
+ // input and the data is the user input in reverse order.
+ //
+        Reader reader = new StringReader("abc\ndef\nghi");
+
+ for (;;) {
+ String line = askForLine(reader, System.out, "input> ");
+ if (line == null)
+ break;
+
+ String reversed = (new StringBuffer(line)).reverse().toString();
+
+ // See definition of StringDbt below
+ //
+ StringDbt key = new StringDbt(line);
+ StringDbt data = new StringDbt(reversed);
+
+ try
+ {
+ int err;
+ if ((err = table.put(null,
+ key, data, Db.DB_NOOVERWRITE)) == Db.DB_KEYEXIST) {
+ System.out.println("Key " + line + " already exists.");
+ }
+ }
+ catch (DbException dbe)
+ {
+ System.out.println(dbe.toString());
+ }
+ System.out.println("");
+ }
+
+ // Acquire an iterator for the table.
+ Dbc iterator;
+ iterator = table2.cursor(null, 0);
+
+ // Walk through the table, printing the key/data pairs.
+ // See class StringDbt defined below.
+ //
+ StringDbt key = new StringDbt();
+ StringDbt data = new StringDbt();
+ StringDbt pkey = new StringDbt();
+
+ while (iterator.get(key, data, Db.DB_NEXT) == 0)
+ {
+ System.out.println(key.getString() + " : " + data.getString());
+ }
+
+ key.setString("BC");
+ System.out.println("get BC returns " + table2.get(null, key, data, 0));
+ System.out.println(" values: " + key.getString() + " : " + data.getString());
+ System.out.println("pget BC returns " + table2.pget(null, key, pkey, data, 0));
+ System.out.println(" values: " + key.getString() + " : " + pkey.getString() + " : " + data.getString());
+ key.setString("KL");
+ System.out.println("get KL returns " + table2.get(null, key, data, 0));
+ System.out.println(" values: " + key.getString() + " : " + data.getString());
+ System.out.println("pget KL returns " + table2.pget(null, key, pkey, data, 0));
+ System.out.println(" values: " + key.getString() + " : " + pkey.getString() + " : " + data.getString());
+
+ iterator.close();
+ table.close(0);
+ }
+
+ // Here's an example of how you can extend a Dbt in a straightforward
+ // way to allow easy storage/retrieval of strings, or whatever
+ // kind of data you wish. We've declared it as a static inner
+ // class, but it need not be.
+ //
+ static /*inner*/
+ class StringDbt extends Dbt
+ {
+ StringDbt()
+ {
+ set_flags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval
+ }
+
+ StringDbt(String value)
+ {
+ setString(value);
+ set_flags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval
+ }
+
+ void setString(String value)
+ {
+ set_data(value.getBytes());
+ set_size(value.length());
+ }
+
+ String getString()
+ {
+ return new String(get_data(), 0, get_size());
+ }
+
+ public String toString()
+ {
+ return "StringDbt=" + getString();
+ }
+ }
+
+ /* creates a stupid secondary index as follows:
+ For an N letter key, we use N-1 letters starting at
+ position 1. If the new letters are already capitalized,
+ we return the old array, but with offset set to 1.
+ If the letters are not capitalized, we create a new,
+ capitalized array. This is pretty stupid for
+ an application, but it tests all the paths in the runtime.
+ */
+ public static class Capitalize implements DbSecondaryKeyCreate
+ {
+ public int secondary_key_create(Db secondary, Dbt key, Dbt value,
+ Dbt result)
+ throws DbException
+ {
+ String which = "unknown db";
+ if (saveddb1.equals(secondary)) {
+ which = "primary";
+ }
+ else if (saveddb2.equals(secondary)) {
+ which = "secondary";
+ }
+ System.out.println("secondary_key_create, Db: " + shownull(secondary) + "(" + which + "), key: " + show_dbt(key) + ", data: " + show_dbt(value));
+ int len = key.get_size();
+ byte[] arr = key.get_data();
+ boolean capped = true;
+
+ if (len < 1)
+ throw new DbException("bad key");
+
+ if (len < 2)
+ return Db.DB_DONOTINDEX;
+
+ result.set_size(len - 1);
+ for (int i=1; capped && i<len; i++) {
+ if (!Character.isUpperCase((char)arr[i]))
+ capped = false;
+ }
+ if (capped) {
+ System.out.println(" creating key(1): " + new String(arr, 1, len-1));
+ result.set_data(arr);
+ result.set_offset(1);
+ }
+ else {
+ System.out.println(" creating key(2): " + (new String(arr)).substring(1).
+ toUpperCase());
+ result.set_data((new String(arr)).substring(1).
+ toUpperCase().getBytes());
+ }
+ return 0;
+ }
+ }
+
+ public int dup_compare(Db db, Dbt dbt1, Dbt dbt2)
+ {
+ System.out.println("compare");
+ int sz1 = dbt1.get_size();
+ int sz2 = dbt2.get_size();
+ if (sz1 < sz2)
+ return -1;
+ if (sz1 > sz2)
+ return 1;
+ byte[] data1 = dbt1.get_data();
+ byte[] data2 = dbt2.get_data();
+ for (int i=0; i<sz1; i++)
+ if (data1[i] != data2[i])
+ return (data1[i] < data2[i] ? -1 : 1);
+ return 0;
+ }
+
+ public static int nseen = 0;
+ public static Hashtable ht = new Hashtable();
+
+ public static String show_dbt(Dbt dbt)
+ {
+ String name;
+
+ if (dbt == null)
+ return "null dbt";
+
+ name = (String)ht.get(dbt);
+ if (name == null) {
+ name = "Dbt" + (nseen++);
+ ht.put(dbt, name);
+ }
+
+ byte[] value = dbt.get_data();
+ if (value == null)
+ return name + "(null)";
+ else
+ return name + "(\"" + new String(value) + "\")";
+ }
+}
+
+
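+// A minimal sketch of the pure-string effect of the Capitalize key
+// creator above: drop the first character and uppercase the rest, so
+// "abc" yields "BC" and "JKL" yields "KL", matching the
+// "creating key" lines in TestAssociate.testout. The class below is
+// hypothetical, for illustration only; it ignores the offset-reuse
+// shortcut taken for keys that are already capitalized.
+class CapitalizeDemo
+{
+    public static void main(String[] args)
+    {
+        String[] keys = { "abc", "def", "ghi", "JKL", "MNO" };
+        for (int i = 0; i < keys.length; i++) {
+            // same transformation as Capitalize.secondary_key_create
+            String secondary = keys[i].substring(1).toUpperCase();
+            System.out.println(keys[i] + " -> " + secondary);
+        }
+    }
+}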
diff --git a/storage/bdb/test/scr016/TestAssociate.testout b/storage/bdb/test/scr016/TestAssociate.testout
new file mode 100644
index 00000000000..34414b660d1
--- /dev/null
+++ b/storage/bdb/test/scr016/TestAssociate.testout
@@ -0,0 +1,30 @@
+Primary database is not null
+Secondary database is not null
+secondary_key_create, Db: not null(secondary), key: Dbt0("abc"), data: Dbt1("cba")
+ creating key(2): BC
+
+secondary_key_create, Db: not null(secondary), key: Dbt2("def"), data: Dbt3("fed")
+ creating key(2): EF
+
+secondary_key_create, Db: not null(secondary), key: Dbt4("ghi"), data: Dbt5("ihg")
+ creating key(2): HI
+
+secondary_key_create, Db: not null(secondary), key: Dbt6("JKL"), data: Dbt7("LKJ")
+ creating key(1): KL
+
+secondary_key_create, Db: not null(secondary), key: Dbt8("MNO"), data: Dbt9("ONM")
+ creating key(1): NO
+
+BC : cba
+EF : fed
+HI : ihg
+KL : LKJ
+NO : ONM
+get BC returns 0
+ values: BC : cba
+pget BC returns 0
+ values: BC : abc : cba
+get KL returns 0
+ values: KL : LKJ
+pget KL returns 0
+ values: KL : JKL : LKJ
diff --git a/storage/bdb/test/scr016/TestClosedDb.java b/storage/bdb/test/scr016/TestClosedDb.java
new file mode 100644
index 00000000000..3bd6e5380f8
--- /dev/null
+++ b/storage/bdb/test/scr016/TestClosedDb.java
@@ -0,0 +1,62 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestClosedDb.java,v 1.4 2002/01/23 14:29:51 bostic Exp $
+ */
+
+/*
+ * Close the Db, and make sure operations after that fail gracefully.
+ */
+
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import java.io.FileNotFoundException;
+
+public class TestClosedDb
+{
+ public static void main(String[] args)
+ {
+ try {
+ Db db = new Db(null, 0);
+ db.open(null, "my.db", null, Db.DB_BTREE, Db.DB_CREATE, 0644);
+
+ // populate our massive database.
+ Dbt keydbt = new Dbt("key".getBytes());
+ Dbt datadbt = new Dbt("data".getBytes());
+ db.put(null, keydbt, datadbt, 0);
+
+ // Now, retrieve. We could use keydbt over again,
+ // but that wouldn't be typical in an application.
+ Dbt goodkeydbt = new Dbt("key".getBytes());
+ Dbt badkeydbt = new Dbt("badkey".getBytes());
+ Dbt resultdbt = new Dbt();
+ resultdbt.set_flags(Db.DB_DBT_MALLOC);
+
+ int ret;
+
+ // Close the db - subsequent operations should fail
+ // by throwing an exception.
+ db.close(0);
+ try {
+ db.get(null, goodkeydbt, resultdbt, 0);
+ System.out.println("Error - did not expect to get this far.");
+ }
+ catch (DbException dbe) {
+ System.out.println("Got expected Db Exception: " + dbe);
+ }
+ System.out.println("finished test");
+ }
+ catch (DbException dbe) {
+ System.err.println("Db Exception: " + dbe);
+ }
+ catch (FileNotFoundException fnfe) {
+ System.err.println("FileNotFoundException: " + fnfe);
+ }
+
+ }
+
+}
diff --git a/storage/bdb/test/scr016/TestClosedDb.testout b/storage/bdb/test/scr016/TestClosedDb.testout
new file mode 100644
index 00000000000..ce13883f63a
--- /dev/null
+++ b/storage/bdb/test/scr016/TestClosedDb.testout
@@ -0,0 +1,2 @@
+Got expected Db Exception: com.sleepycat.db.DbException: null object: Invalid argument
+finished test
diff --git a/storage/bdb/test/scr016/TestConstruct01.java b/storage/bdb/test/scr016/TestConstruct01.java
new file mode 100644
index 00000000000..b60073ebc0d
--- /dev/null
+++ b/storage/bdb/test/scr016/TestConstruct01.java
@@ -0,0 +1,474 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestConstruct01.java,v 1.6 2002/01/23 14:29:51 bostic Exp $
+ */
+
+/*
+ * Do some regression tests for constructors.
+ * Run normally (without arguments) it is a simple regression test.
+ * Run with a numeric argument, it repeats the regression a number
+ * of times, to try to determine if there are memory leaks.
+ */
+
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import java.io.File;
+import java.io.IOException;
+import java.io.FileNotFoundException;
+
+public class TestConstruct01
+{
+ public static final String CONSTRUCT01_DBNAME = "construct01.db";
+ public static final String CONSTRUCT01_DBDIR = "/tmp";
+ public static final String CONSTRUCT01_DBFULLPATH =
+ CONSTRUCT01_DBDIR + "/" + CONSTRUCT01_DBNAME;
+
+ private int itemcount; // count the number of items in the database
+ public static boolean verbose_flag = false;
+
+ public static void ERR(String a)
+ {
+ System.out.println("FAIL: " + a);
+ System.err.println("FAIL: " + a);
+ sysexit(1);
+ }
+
+ public static void DEBUGOUT(String s)
+ {
+ System.out.println(s);
+ }
+
+ public static void VERBOSEOUT(String s)
+ {
+ if (verbose_flag)
+ System.out.println(s);
+ }
+
+ public static void sysexit(int code)
+ {
+ System.exit(code);
+ }
+
+ private static void check_file_removed(String name, boolean fatal,
+ boolean force_remove_first)
+ {
+ File f = new File(name);
+ if (force_remove_first) {
+ f.delete();
+ }
+ if (f.exists()) {
+ if (fatal)
+ System.out.print("FAIL: ");
+ System.out.print("File \"" + name + "\" still exists after run\n");
+ if (fatal)
+ sysexit(1);
+ }
+ }
+
+
+ // Check that key/data for 0 - count-1 are already present,
+ // and write a key/data for count. The key and data are
+ // both "0123...N" where N == count-1.
+ //
+ // For some reason on Windows, we need to open using the full pathname
+ // of the file when there is no environment, thus the 'has_env'
+ // variable.
+ //
+ void rundb(Db db, int count, boolean has_env, TestOptions options)
+ throws DbException, FileNotFoundException
+ {
+ String name;
+
+ if (has_env)
+ name = CONSTRUCT01_DBNAME;
+ else
+ name = CONSTRUCT01_DBFULLPATH;
+
+ db.set_error_stream(System.err);
+
+ // We don't really care about the pagesize, but we do want
+ // to make sure adjusting Db specific variables works before
+ // opening the db.
+ //
+ db.set_pagesize(1024);
+ db.open(null, name, null, Db.DB_BTREE,
+ (count != 0) ? 0 : Db.DB_CREATE, 0664);
+
+
+ // The bit map of keys we've seen
+ long bitmap = 0;
+
+ // The bit map of keys we expect to see
+        long expected = (1L << (count+1)) - 1;
+
+ byte outbuf[] = new byte[count+1];
+ int i;
+ for (i=0; i<count; i++) {
+ outbuf[i] = (byte)('0' + i);
+ //outbuf[i] = System.out.println((byte)('0' + i);
+ }
+ outbuf[i++] = (byte)'x';
+
+ /*
+ System.out.println("byte: " + ('0' + 0) + ", after: " +
+ (int)'0' + "=" + (int)('0' + 0) +
+ "," + (byte)outbuf[0]);
+ */
+
+ Dbt key = new Dbt(outbuf, 0, i);
+ Dbt data = new Dbt(outbuf, 0, i);
+
+ //DEBUGOUT("Put: " + (char)outbuf[0] + ": " + new String(outbuf));
+ db.put(null, key, data, Db.DB_NOOVERWRITE);
+
+ // Acquire a cursor for the table.
+ Dbc dbcp = db.cursor(null, 0);
+
+ // Walk through the table, checking
+ Dbt readkey = new Dbt();
+ Dbt readdata = new Dbt();
+ Dbt whoknows = new Dbt();
+
+ readkey.set_flags(options.dbt_alloc_flags);
+ readdata.set_flags(options.dbt_alloc_flags);
+
+ //DEBUGOUT("Dbc.get");
+ while (dbcp.get(readkey, readdata, Db.DB_NEXT) == 0) {
+ String key_string = new String(readkey.get_data());
+ String data_string = new String(readdata.get_data());
+ //DEBUGOUT("Got: " + key_string + ": " + data_string);
+ int len = key_string.length();
+ if (len <= 0 || key_string.charAt(len-1) != 'x') {
+ ERR("reread terminator is bad");
+ }
+ len--;
+            long bit = (1L << len);
+ if (len > count) {
+ ERR("reread length is bad: expect " + count + " got "+ len + " (" + key_string + ")" );
+ }
+ else if (!data_string.equals(key_string)) {
+ ERR("key/data don't match");
+ }
+ else if ((bitmap & bit) != 0) {
+ ERR("key already seen");
+ }
+ else if ((expected & bit) == 0) {
+ ERR("key was not expected");
+ }
+ else {
+ bitmap |= bit;
+ expected &= ~(bit);
+ for (i=0; i<len; i++) {
+ if (key_string.charAt(i) != ('0' + i)) {
+ System.out.print(" got " + key_string
+ + " (" + (int)key_string.charAt(i)
+ + "), wanted " + i
+ + " (" + (int)('0' + i)
+ + ") at position " + i + "\n");
+ ERR("key is corrupt");
+ }
+ }
+ }
+ }
+ if (expected != 0) {
+ System.out.print(" expected more keys, bitmap is: " + expected + "\n");
+ ERR("missing keys in database");
+ }
+ dbcp.close();
+ db.close(0);
+ }
+
+ void t1(TestOptions options)
+ throws DbException, FileNotFoundException
+ {
+ Db db = new Db(null, 0);
+ rundb(db, itemcount++, false, options);
+ }
+
+ void t2(TestOptions options)
+ throws DbException, FileNotFoundException
+ {
+ Db db = new Db(null, 0);
+ rundb(db, itemcount++, false, options);
+ // rundb(db, itemcount++, false, options);
+ // rundb(db, itemcount++, false, options);
+ }
+
+ void t3(TestOptions options)
+ throws DbException, FileNotFoundException
+ {
+ Db db = new Db(null, 0);
+ // rundb(db, itemcount++, false, options);
+ db.set_errpfx("test3");
+ for (int i=0; i<100; i++)
+ db.set_errpfx("str" + i);
+ rundb(db, itemcount++, false, options);
+ }
+
+ void t4(TestOptions options)
+ throws DbException, FileNotFoundException
+ {
+ DbEnv env = new DbEnv(0);
+ env.open(CONSTRUCT01_DBDIR, Db.DB_CREATE | Db.DB_INIT_MPOOL, 0);
+ Db db = new Db(env, 0);
+ /**/
+ //rundb(db, itemcount++, true, options);
+ db.set_errpfx("test4");
+ rundb(db, itemcount++, true, options);
+ /**/
+ env.close(0);
+ }
+
+ void t5(TestOptions options)
+ throws DbException, FileNotFoundException
+ {
+ DbEnv env = new DbEnv(0);
+ env.open(CONSTRUCT01_DBDIR, Db.DB_CREATE | Db.DB_INIT_MPOOL, 0);
+ Db db = new Db(env, 0);
+ // rundb(db, itemcount++, true, options);
+ db.set_errpfx("test5");
+ rundb(db, itemcount++, true, options);
+ /*
+ env.close(0);
+
+ // reopen the environment, don't recreate
+ env.open(CONSTRUCT01_DBDIR, Db.DB_INIT_MPOOL, 0);
+ // Note we cannot reuse the old Db!
+ */
+ Db anotherdb = new Db(env, 0);
+
+ // rundb(anotherdb, itemcount++, true, options);
+ anotherdb.set_errpfx("test5");
+ rundb(anotherdb, itemcount++, true, options);
+ env.close(0);
+ }
+
+ void t6(TestOptions options)
+ throws DbException, FileNotFoundException
+ {
+ Db db = new Db(null, 0);
+ DbEnv dbenv = new DbEnv(0);
+ db.close(0);
+ dbenv.close(0);
+
+ System.gc();
+ System.runFinalization();
+ }
+
+ // By design, t7 leaves a db and dbenv open; it should be detected.
+ void t7(TestOptions options)
+ throws DbException, FileNotFoundException
+ {
+ Db db = new Db(null, 0);
+ DbEnv dbenv = new DbEnv(0);
+
+ System.gc();
+ System.runFinalization();
+ }
+
+ // remove any existing environment or database
+ void removeall(boolean use_db)
+ {
+ {
+ if (use_db) {
+ try {
+ /**/
+ //memory leak for this:
+ Db tmpdb = new Db(null, 0);
+ tmpdb.remove(CONSTRUCT01_DBFULLPATH, null, 0);
+ /**/
+ DbEnv tmpenv = new DbEnv(0);
+ tmpenv.remove(CONSTRUCT01_DBDIR, Db.DB_FORCE);
+ }
+ catch (DbException dbe) {
+ System.err.println("error during remove: " + dbe);
+ }
+ catch (FileNotFoundException fnfe) {
+ //expected error:
+ // System.err.println("error during remove: " + fnfe);
+ }
+ }
+ }
+ check_file_removed(CONSTRUCT01_DBFULLPATH, true, !use_db);
+ for (int i=0; i<8; i++) {
+ String fname = "__db.00" + i;
+ check_file_removed(fname, true, !use_db);
+ }
+ }
+
+ boolean doall(TestOptions options)
+ {
+ itemcount = 0;
+ try {
+ removeall((options.testmask & 1) != 0);
+ for (int item=1; item<32; item++) {
+ if ((options.testmask & (1 << item)) != 0) {
+ VERBOSEOUT(" Running test " + item + ":");
+ switch (item) {
+ case 1:
+ t1(options);
+ break;
+ case 2:
+ t2(options);
+ break;
+ case 3:
+ t3(options);
+ break;
+ case 4:
+ t4(options);
+ break;
+ case 5:
+ t5(options);
+ break;
+ case 6:
+ t6(options);
+ break;
+ case 7:
+ t7(options);
+ break;
+ default:
+ ERR("unknown test case: " + item);
+ break;
+ }
+ VERBOSEOUT(" finished.\n");
+ }
+ }
+ removeall((options.testmask & 1) != 0);
+ options.successcounter++;
+ return true;
+ }
+ catch (DbException dbe) {
+ ERR("EXCEPTION RECEIVED: " + dbe);
+ }
+ catch (FileNotFoundException fnfe) {
+ ERR("EXCEPTION RECEIVED: " + fnfe);
+ }
+ return false;
+ }
+
+ public static void main(String args[])
+ {
+ int iterations = 200;
+ int mask = 0x7f;
+
+ // Make sure the database file is removed before we start.
+ check_file_removed(CONSTRUCT01_DBFULLPATH, true, true);
+
+ for (int argcnt=0; argcnt<args.length; argcnt++) {
+ String arg = args[argcnt];
+ if (arg.charAt(0) == '-') {
+ // keep on lower bit, which means to remove db between tests.
+ mask = 1;
+ for (int pos=1; pos<arg.length(); pos++) {
+ char ch = arg.charAt(pos);
+ if (ch >= '0' && ch <= '9') {
+ mask |= (1 << (ch - '0'));
+ }
+ else if (ch == 'v') {
+ verbose_flag = true;
+ }
+ else {
+ ERR("Usage: construct01 [-testdigits] count");
+ }
+ }
+ VERBOSEOUT("mask = " + mask);
+
+ }
+ else {
+ try {
+ iterations = Integer.parseInt(arg);
+ if (iterations < 0) {
+ ERR("Usage: construct01 [-testdigits] count");
+ }
+ }
+ catch (NumberFormatException nfe) {
+ ERR("EXCEPTION RECEIVED: " + nfe);
+ }
+ }
+ }
+
+ // Run GC before and after the test to give
+ // a baseline for any Java memory used.
+ //
+ System.gc();
+ System.runFinalization();
+ VERBOSEOUT("gc complete");
+ long starttotal = Runtime.getRuntime().totalMemory();
+ long startfree = Runtime.getRuntime().freeMemory();
+
+ TestConstruct01 con = new TestConstruct01();
+ int[] dbt_flags = { 0, Db.DB_DBT_MALLOC, Db.DB_DBT_REALLOC };
+ String[] dbt_flags_name = { "default", "malloc", "realloc" };
+
+ TestOptions options = new TestOptions();
+ options.testmask = mask;
+
+ for (int flagiter = 0; flagiter < dbt_flags.length; flagiter++) {
+ options.dbt_alloc_flags = dbt_flags[flagiter];
+
+ VERBOSEOUT("Running with DBT alloc flags: " +
+ dbt_flags_name[flagiter]);
+ for (int i=0; i<iterations; i++) {
+ if (iterations != 0) {
+ VERBOSEOUT("(" + i + "/" + iterations + ") ");
+ }
+ VERBOSEOUT("construct01 running:");
+ if (!con.doall(options)) {
+ ERR("SOME TEST FAILED");
+ }
+ else {
+ VERBOSEOUT("\nTESTS SUCCESSFUL");
+ }
+
+ // We continually run GC during the test to keep
+ // the Java memory usage low. That way we can
+ // monitor the total memory usage externally
+ // (e.g. via ps) and verify that we aren't leaking
+ // memory in the JNI or DB layer.
+ //
+ System.gc();
+ System.runFinalization();
+ VERBOSEOUT("gc complete");
+ }
+ }
+
+ if (options.successcounter == 600) {
+ System.out.println("ALL TESTS SUCCESSFUL");
+ }
+ else {
+ System.out.println("***FAIL: " + (600 - options.successcounter) +
+ " tests did not complete");
+ }
+ long endtotal = Runtime.getRuntime().totalMemory();
+ long endfree = Runtime.getRuntime().freeMemory();
+
+ System.out.println("delta for total mem: " + magnitude(endtotal - starttotal));
+ System.out.println("delta for free mem: " + magnitude(endfree - startfree));
+
+ return;
+ }
+
+ static String magnitude(long value)
+ {
+ final long max = 10000000;
+ for (long scale = 10; scale <= max; scale *= 10) {
+ if (value < scale && value > -scale)
+ return "<" + scale;
+ }
+ return ">" + max;
+ }
+
+}
+
+class TestOptions
+{
+ int testmask = 0; // which tests to run
+ int dbt_alloc_flags = 0; // DB_DBT_* flags to use
+    int successcounter = 0;
+}
+
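+// A sketch of the bitmap bookkeeping used by rundb() above, in
+// isolation: after writing key number count, every key length from 0
+// through count must be observed exactly once. The class below is
+// hypothetical, for illustration only; rundb() derives each length
+// from the records it reads back.
+class BitmapCheckDemo
+{
+    public static void main(String[] args)
+    {
+        int count = 3;
+        long expected = (1L << (count + 1)) - 1; // one bit per length 0..count
+        long bitmap = 0;
+        for (int len = 0; len <= count; len++) {
+            long bit = 1L << len;
+            if ((bitmap & bit) != 0)
+                throw new IllegalStateException("key already seen");
+            bitmap |= bit;
+            expected &= ~bit;
+        }
+        System.out.println(expected == 0 ? "all keys seen" : "missing keys");
+    }
+}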
diff --git a/storage/bdb/test/scr016/TestConstruct01.testerr b/storage/bdb/test/scr016/TestConstruct01.testerr
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/storage/bdb/test/scr016/TestConstruct01.testerr
diff --git a/storage/bdb/test/scr016/TestConstruct01.testout b/storage/bdb/test/scr016/TestConstruct01.testout
new file mode 100644
index 00000000000..5d2041cd197
--- /dev/null
+++ b/storage/bdb/test/scr016/TestConstruct01.testout
@@ -0,0 +1,3 @@
+ALL TESTS SUCCESSFUL
+delta for total mem: <10
+delta for free mem: <10000
diff --git a/storage/bdb/test/scr016/TestConstruct02.java b/storage/bdb/test/scr016/TestConstruct02.java
new file mode 100644
index 00000000000..5bbb55ccd56
--- /dev/null
+++ b/storage/bdb/test/scr016/TestConstruct02.java
@@ -0,0 +1,326 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestConstruct02.java,v 1.6 2002/08/16 19:35:54 dda Exp $
+ */
+
+/*
+ * Do some regression tests for constructors.
+ * Run normally (without arguments) it is a simple regression test.
+ * Run with a numeric argument, it repeats the regression a number
+ * of times, to try to determine if there are memory leaks.
+ */
+
+package com.sleepycat.test;
+import com.sleepycat.db.*;
+import java.io.File;
+import java.io.IOException;
+import java.io.FileNotFoundException;
+
+public class TestConstruct02
+{
+ public static final String CONSTRUCT02_DBNAME = "construct02.db";
+ public static final String CONSTRUCT02_DBDIR = "./";
+ public static final String CONSTRUCT02_DBFULLPATH =
+ CONSTRUCT02_DBDIR + "/" + CONSTRUCT02_DBNAME;
+
+ private int itemcount; // count the number of items in the database
+ public static boolean verbose_flag = false;
+
+ private DbEnv dbenv = new DbEnv(0);
+
+ public TestConstruct02()
+ throws DbException, FileNotFoundException
+ {
+ dbenv.open(CONSTRUCT02_DBDIR, Db.DB_CREATE | Db.DB_INIT_MPOOL, 0666);
+ }
+
+ public void close()
+ {
+ try {
+ dbenv.close(0);
+ removeall(true, true);
+ }
+ catch (DbException dbe) {
+ ERR("DbException: " + dbe);
+ }
+ }
+
+ public static void ERR(String a)
+ {
+ System.out.println("FAIL: " + a);
+ sysexit(1);
+ }
+
+ public static void DEBUGOUT(String s)
+ {
+ System.out.println(s);
+ }
+
+ public static void VERBOSEOUT(String s)
+ {
+ if (verbose_flag)
+ System.out.println(s);
+ }
+
+ public static void sysexit(int code)
+ {
+ System.exit(code);
+ }
+
+ private static void check_file_removed(String name, boolean fatal,
+ boolean force_remove_first)
+ {
+ File f = new File(name);
+ if (force_remove_first) {
+ f.delete();
+ }
+ if (f.exists()) {
+ if (fatal)
+ System.out.print("FAIL: ");
+ System.out.print("File \"" + name + "\" still exists after run\n");
+ if (fatal)
+ sysexit(1);
+ }
+ }
+
+
+ // Check that key/data for 0 - count-1 are already present,
+ // and write a key/data for count. The key and data are
+ // both "0123...N" where N == count-1.
+ //
+ void rundb(Db db, int count)
+ throws DbException, FileNotFoundException
+ {
+ if (count >= 64)
+ throw new IllegalArgumentException("rundb count arg >= 64");
+
+ // The bit map of keys we've seen
+ long bitmap = 0;
+
+ // The bit map of keys we expect to see
+        long expected = (1L << (count+1)) - 1;
+
+ byte outbuf[] = new byte[count+1];
+ int i;
+ for (i=0; i<count; i++) {
+ outbuf[i] = (byte)('0' + i);
+ }
+ outbuf[i++] = (byte)'x';
+
+ Dbt key = new Dbt(outbuf, 0, i);
+ Dbt data = new Dbt(outbuf, 0, i);
+
+ db.put(null, key, data, Db.DB_NOOVERWRITE);
+
+ // Acquire a cursor for the table.
+ Dbc dbcp = db.cursor(null, 0);
+
+ // Walk through the table, checking
+ Dbt readkey = new Dbt();
+ Dbt readdata = new Dbt();
+ Dbt whoknows = new Dbt();
+
+ readkey.set_flags(Db.DB_DBT_MALLOC);
+ readdata.set_flags(Db.DB_DBT_MALLOC);
+
+ while (dbcp.get(readkey, readdata, Db.DB_NEXT) == 0) {
+ byte[] key_bytes = readkey.get_data();
+ byte[] data_bytes = readdata.get_data();
+
+ int len = key_bytes.length;
+ if (len != data_bytes.length) {
+ ERR("key and data are different");
+ }
+ for (i=0; i<len-1; i++) {
+ byte want = (byte)('0' + i);
+ if (key_bytes[i] != want || data_bytes[i] != want) {
+ System.out.println(" got " + new String(key_bytes) +
+ "/" + new String(data_bytes));
+ ERR("key or data is corrupt");
+ }
+ }
+ if (len <= 0 ||
+ key_bytes[len-1] != (byte)'x' ||
+ data_bytes[len-1] != (byte)'x') {
+ ERR("reread terminator is bad");
+ }
+ len--;
+            long bit = (1L << len);
+ if (len > count) {
+ ERR("reread length is bad: expect " + count + " got "+ len);
+ }
+ else if ((bitmap & bit) != 0) {
+ ERR("key already seen");
+ }
+ else if ((expected & bit) == 0) {
+ ERR("key was not expected");
+ }
+ bitmap |= bit;
+ expected &= ~(bit);
+ }
+ if (expected != 0) {
+ System.out.print(" expected more keys, bitmap is: " +
+ expected + "\n");
+ ERR("missing keys in database");
+ }
+ dbcp.close();
+ }
+
+ void t1()
+ throws DbException, FileNotFoundException
+ {
+ Db db = new Db(dbenv, 0);
+ db.set_error_stream(System.err);
+ db.set_pagesize(1024);
+ db.open(null, CONSTRUCT02_DBNAME, null, Db.DB_BTREE,
+ Db.DB_CREATE, 0664);
+
+ rundb(db, itemcount++);
+ rundb(db, itemcount++);
+ rundb(db, itemcount++);
+ rundb(db, itemcount++);
+ rundb(db, itemcount++);
+ rundb(db, itemcount++);
+ db.close(0);
+
+ // Reopen no longer allowed, so we create a new db.
+ db = new Db(dbenv, 0);
+ db.set_error_stream(System.err);
+ db.set_pagesize(1024);
+ db.open(null, CONSTRUCT02_DBNAME, null, Db.DB_BTREE,
+ Db.DB_CREATE, 0664);
+ rundb(db, itemcount++);
+ rundb(db, itemcount++);
+ rundb(db, itemcount++);
+ rundb(db, itemcount++);
+ db.close(0);
+ }
+
+ // remove any existing environment or database
+ void removeall(boolean use_db, boolean remove_env)
+ {
+ {
+ try {
+ if (remove_env) {
+ DbEnv tmpenv = new DbEnv(0);
+ tmpenv.remove(CONSTRUCT02_DBDIR, Db.DB_FORCE);
+ }
+ else if (use_db) {
+ /**/
+ //memory leak for this:
+ Db tmpdb = new Db(null, 0);
+ tmpdb.remove(CONSTRUCT02_DBFULLPATH, null, 0);
+ /**/
+ }
+ }
+ catch (DbException dbe) {
+ System.err.println("error during remove: " + dbe);
+ }
+ catch (FileNotFoundException dbe) {
+ System.err.println("error during remove: " + dbe);
+ }
+ }
+ check_file_removed(CONSTRUCT02_DBFULLPATH, true, !use_db);
+ if (remove_env) {
+ for (int i=0; i<8; i++) {
+ String fname = "__db.00" + i;
+ check_file_removed(fname, true, !use_db);
+ }
+ }
+ }
+
+ boolean doall()
+ {
+ itemcount = 0;
+ try {
+ VERBOSEOUT(" Running test 1:\n");
+ t1();
+ VERBOSEOUT(" finished.\n");
+ removeall(true, false);
+ return true;
+ }
+ catch (DbException dbe) {
+ ERR("EXCEPTION RECEIVED: " + dbe);
+ }
+ catch (FileNotFoundException fnfe) {
+ ERR("EXCEPTION RECEIVED: " + fnfe);
+ }
+ return false;
+ }
+
+ public static void main(String args[])
+ {
+ int iterations = 200;
+
+ for (int argcnt=0; argcnt<args.length; argcnt++) {
+ String arg = args[argcnt];
+ try {
+ iterations = Integer.parseInt(arg);
+ if (iterations < 0) {
+ ERR("Usage: construct02 [-testdigits] count");
+ }
+ }
+ catch (NumberFormatException nfe) {
+ ERR("EXCEPTION RECEIVED: " + nfe);
+ }
+ }
+
+ System.gc();
+ System.runFinalization();
+ VERBOSEOUT("gc complete");
+ long starttotal = Runtime.getRuntime().totalMemory();
+ long startfree = Runtime.getRuntime().freeMemory();
+ TestConstruct02 con = null;
+
+ try {
+ con = new TestConstruct02();
+ }
+ catch (DbException dbe) {
+ System.err.println("Exception: " + dbe);
+ System.exit(1);
+ }
+ catch (java.io.FileNotFoundException fnfe) {
+ System.err.println("Exception: " + fnfe);
+ System.exit(1);
+ }
+
+ for (int i=0; i<iterations; i++) {
+ if (iterations != 0) {
+ VERBOSEOUT("(" + i + "/" + iterations + ") ");
+ }
+ VERBOSEOUT("construct02 running:\n");
+ if (!con.doall()) {
+ ERR("SOME TEST FAILED");
+ }
+ System.gc();
+ System.runFinalization();
+ VERBOSEOUT("gc complete");
+
+ }
+ con.close();
+
+ System.out.print("ALL TESTS SUCCESSFUL\n");
+
+ long endtotal = Runtime.getRuntime().totalMemory();
+ long endfree = Runtime.getRuntime().freeMemory();
+
+ System.out.println("delta for total mem: " + magnitude(endtotal - starttotal));
+ System.out.println("delta for free mem: " + magnitude(endfree - startfree));
+
+ return;
+ }
+
+ static String magnitude(long value)
+ {
+ final long max = 10000000;
+ for (long scale = 10; scale <= max; scale *= 10) {
+ if (value < scale && value > -scale)
+ return "<" + scale;
+ }
+ return ">" + max;
+ }
+}
diff --git a/storage/bdb/test/scr016/TestConstruct02.testout b/storage/bdb/test/scr016/TestConstruct02.testout
new file mode 100644
index 00000000000..5d2041cd197
--- /dev/null
+++ b/storage/bdb/test/scr016/TestConstruct02.testout
@@ -0,0 +1,3 @@
+ALL TESTS SUCCESSFUL
+delta for total mem: <10
+delta for free mem: <10000
diff --git a/storage/bdb/test/scr016/TestDbtFlags.java b/storage/bdb/test/scr016/TestDbtFlags.java
new file mode 100644
index 00000000000..98527e6b3e7
--- /dev/null
+++ b/storage/bdb/test/scr016/TestDbtFlags.java
@@ -0,0 +1,241 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestDbtFlags.java,v 1.4 2002/08/16 19:35:54 dda Exp $
+ */
+
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.InputStreamReader;
+import java.io.IOException;
+import java.io.PrintStream;
+
+public class TestDbtFlags
+{
+ private static final String FileName = "access.db";
+ private int flag_value;
+ private int buf_size;
+ private int cur_input_line = 0;
+
+    /* Test input: Puck's epilogue from A Midsummer Night's Dream. */
+ static final String[] input_lines = {
+ "If we shadows have offended",
+ "Think but this, and all is mended",
+ "That you have but slumber'd here",
+ "While these visions did appear",
+ "And this weak and idle theme",
+ "No more yielding but a dream",
+ "Gentles, do not reprehend",
+ "if you pardon, we will mend",
+ "And, as I am an honest Puck, if we have unearned luck",
+ "Now to 'scape the serpent's tongue, we will make amends ere long;",
+ "Else the Puck a liar call; so, good night unto you all.",
+ "Give me your hands, if we be friends, and Robin shall restore amends."
+ };
+
+ public TestDbtFlags(int flag_value, int buf_size)
+ {
+ this.flag_value = flag_value;
+ this.buf_size = buf_size;
+ }
+
+ public static void runWithFlags(int flag_value, int size)
+ {
+ String msg = "=-=-=-= Test with DBT flags " + flag_value +
+ " bufsize " + size;
+ System.out.println(msg);
+ System.err.println(msg);
+
+ try
+ {
+ TestDbtFlags app = new TestDbtFlags(flag_value, size);
+ app.run();
+ }
+ catch (DbException dbe)
+ {
+ System.err.println("TestDbtFlags: " + dbe.toString());
+ System.exit(1);
+ }
+ catch (FileNotFoundException fnfe)
+ {
+ System.err.println("TestDbtFlags: " + fnfe.toString());
+ System.exit(1);
+ }
+ }
+
+ public static void main(String argv[])
+ {
+ runWithFlags(Db.DB_DBT_MALLOC, -1);
+ runWithFlags(Db.DB_DBT_REALLOC, -1);
+ runWithFlags(Db.DB_DBT_USERMEM, 20);
+ runWithFlags(Db.DB_DBT_USERMEM, 50);
+ runWithFlags(Db.DB_DBT_USERMEM, 200);
+ runWithFlags(0, -1);
+
+ System.exit(0);
+ }
+
+ String get_input_line()
+ {
+ if (cur_input_line >= input_lines.length)
+ return null;
+ return input_lines[cur_input_line++];
+ }
+
+ public void run()
+ throws DbException, FileNotFoundException
+ {
+ // Remove the previous database.
+ new File(FileName).delete();
+
+ // Create the database object.
+ // There is no environment for this simple example.
+ Db table = new Db(null, 0);
+ table.set_error_stream(System.err);
+ table.set_errpfx("TestDbtFlags");
+ table.open(null, FileName, null, Db.DB_BTREE, Db.DB_CREATE, 0644);
+
+ //
+ // Insert records into the database, where the key is the user
+ // input and the data is the user input in reverse order.
+ //
+ for (;;) {
+ //System.err.println("input line " + cur_input_line);
+ String line = get_input_line();
+ if (line == null)
+ break;
+
+ String reversed = (new StringBuffer(line)).reverse().toString();
+
+ // See definition of StringDbt below
+ //
+ StringDbt key = new StringDbt(line, flag_value);
+ StringDbt data = new StringDbt(reversed, flag_value);
+
+ try
+ {
+ int err;
+ if ((err = table.put(null,
+ key, data, Db.DB_NOOVERWRITE)) == Db.DB_KEYEXIST) {
+ System.out.println("Key " + line + " already exists.");
+ }
+ key.check_flags();
+ data.check_flags();
+ }
+ catch (DbException dbe)
+ {
+ System.out.println(dbe.toString());
+ }
+ }
+
+ // Acquire an iterator for the table.
+ Dbc iterator;
+ iterator = table.cursor(null, 0);
+
+ // Walk through the table, printing the key/data pairs.
+ // See class StringDbt defined below.
+ //
+ StringDbt key = new StringDbt(flag_value, buf_size);
+ StringDbt data = new StringDbt(flag_value, buf_size);
+
+ int iteration_count = 0;
+ int dbreturn = 0;
+
+ while (dbreturn == 0) {
+ //System.err.println("iteration " + iteration_count);
+ try {
+ if ((dbreturn = iterator.get(key, data, Db.DB_NEXT)) == 0) {
+ System.out.println(key.get_string() + " : " + data.get_string());
+ }
+ }
+ catch (DbMemoryException dme) {
+ /* In a real application, we'd normally increase
+ * the size of the buffer. Since we've created
+ * this error condition for testing, we'll just report it.
+ * We still need to skip over this record, and we don't
+ * want to mess with our original Dbt's, since we want
+ * to see more errors. So create some temporary
+ * mallocing Dbts to get this record.
+ */
+ System.err.println("exception, iteration " + iteration_count +
+ ": " + dme);
+ System.err.println(" key size: " + key.get_size() +
+ " ulen: " + key.get_ulen());
+ System.err.println(" data size: " + key.get_size() +
+ " ulen: " + key.get_ulen());
+
+ dme.get_dbt().set_size(buf_size);
+ StringDbt tempkey = new StringDbt(Db.DB_DBT_MALLOC, -1);
+ StringDbt tempdata = new StringDbt(Db.DB_DBT_MALLOC, -1);
+ if ((dbreturn = iterator.get(tempkey, tempdata, Db.DB_NEXT)) != 0) {
+ System.err.println("cannot get expected next record");
+ return;
+ }
+ System.out.println(tempkey.get_string() + " : " +
+ tempdata.get_string());
+ }
+ iteration_count++;
+ }
+ key.check_flags();
+ data.check_flags();
+
+ iterator.close();
+ table.close(0);
+ }
+
+ // Here's an example of how you can extend a Dbt in a straightforward
+ // way to allow easy storage/retrieval of strings, or whatever
+ // kind of data you wish. We've declared it as a static inner
+ // class, but it need not be.
+ //
+ static /*inner*/
+ class StringDbt extends Dbt
+ {
+ int saved_flags;
+
+ StringDbt(int flags, int buf_size)
+ {
+ this.saved_flags = flags;
+ set_flags(saved_flags);
+ if (buf_size != -1) {
+ set_data(new byte[buf_size]);
+ set_ulen(buf_size);
+ }
+ }
+
+ StringDbt(String value, int flags)
+ {
+ this.saved_flags = flags;
+ set_flags(saved_flags);
+ set_string(value);
+ }
+
+ void set_string(String value)
+ {
+ set_data(value.getBytes());
+ set_size(value.length());
+ check_flags();
+ }
+
+ String get_string()
+ {
+ check_flags();
+ return new String(get_data(), 0, get_size());
+ }
+
+ void check_flags()
+ {
+ int actual_flags = get_flags();
+ if (actual_flags != saved_flags) {
+ System.err.println("flags botch: expected " + saved_flags +
+ ", got " + actual_flags);
+ }
+ }
+ }
+}
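+
+// A sketch of the DB_DBT_USERMEM failure mode exercised above: with a
+// user-supplied buffer of ulen bytes, any record whose size exceeds
+// ulen cannot be returned, which is the pattern TestDbtFlags.testerr
+// records for the bufsize-50 run (a 53-byte key overflows, a 28-byte
+// key fits). The class below is hypothetical, for illustration only.
+class UsermemOverflowDemo
+{
+    public static void main(String[] args)
+    {
+        String[] lines = {
+            "And this weak and idle theme",
+            "And, as I am an honest Puck, if we have unearned luck",
+        };
+        int ulen = 50; // size of the caller-supplied buffer
+        for (int i = 0; i < lines.length; i++) {
+            int size = lines[i].getBytes().length;
+            if (size > ulen)
+                System.out.println("overflow: size " + size +
+                    " ulen " + ulen);
+            else
+                System.out.println("fits: size " + size);
+        }
+    }
+}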
diff --git a/storage/bdb/test/scr016/TestDbtFlags.testerr b/storage/bdb/test/scr016/TestDbtFlags.testerr
new file mode 100644
index 00000000000..7666868ebd4
--- /dev/null
+++ b/storage/bdb/test/scr016/TestDbtFlags.testerr
@@ -0,0 +1,54 @@
+=-=-=-= Test with DBT flags 4 bufsize -1
+=-=-=-= Test with DBT flags 16 bufsize -1
+=-=-=-= Test with DBT flags 32 bufsize 20
+exception, iteration 0: Dbt not large enough for available data
+ key size: 28 ulen: 20
+ data size: 28 ulen: 20
+exception, iteration 1: Dbt not large enough for available data
+ key size: 53 ulen: 20
+ data size: 53 ulen: 20
+exception, iteration 2: Dbt not large enough for available data
+ key size: 55 ulen: 20
+ data size: 55 ulen: 20
+exception, iteration 3: Dbt not large enough for available data
+ key size: 25 ulen: 20
+ data size: 25 ulen: 20
+exception, iteration 4: Dbt not large enough for available data
+ key size: 69 ulen: 20
+ data size: 69 ulen: 20
+exception, iteration 5: Dbt not large enough for available data
+ key size: 27 ulen: 20
+ data size: 27 ulen: 20
+exception, iteration 6: Dbt not large enough for available data
+ key size: 28 ulen: 20
+ data size: 28 ulen: 20
+exception, iteration 7: Dbt not large enough for available data
+ key size: 65 ulen: 20
+ data size: 65 ulen: 20
+exception, iteration 8: Dbt not large enough for available data
+ key size: 32 ulen: 20
+ data size: 32 ulen: 20
+exception, iteration 9: Dbt not large enough for available data
+ key size: 33 ulen: 20
+ data size: 33 ulen: 20
+exception, iteration 10: Dbt not large enough for available data
+ key size: 30 ulen: 20
+ data size: 30 ulen: 20
+exception, iteration 11: Dbt not large enough for available data
+ key size: 27 ulen: 20
+ data size: 27 ulen: 20
+=-=-=-= Test with DBT flags 32 bufsize 50
+exception, iteration 1: Dbt not large enough for available data
+ key size: 53 ulen: 50
+ data size: 53 ulen: 50
+exception, iteration 2: Dbt not large enough for available data
+ key size: 55 ulen: 50
+ data size: 55 ulen: 50
+exception, iteration 4: Dbt not large enough for available data
+ key size: 69 ulen: 50
+ data size: 69 ulen: 50
+exception, iteration 7: Dbt not large enough for available data
+ key size: 65 ulen: 50
+ data size: 65 ulen: 50
+=-=-=-= Test with DBT flags 32 bufsize 200
+=-=-=-= Test with DBT flags 0 bufsize -1
diff --git a/storage/bdb/test/scr016/TestDbtFlags.testout b/storage/bdb/test/scr016/TestDbtFlags.testout
new file mode 100644
index 00000000000..b8deb1bcc16
--- /dev/null
+++ b/storage/bdb/test/scr016/TestDbtFlags.testout
@@ -0,0 +1,78 @@
+=-=-=-= Test with DBT flags 4 bufsize -1
+And this weak and idle theme : emeht eldi dna kaew siht dnA
+And, as I am an honest Puck, if we have unearned luck : kcul denraenu evah ew fi ,kcuP tsenoh na ma I sa ,dnA
+Else the Puck a liar call; so, good night unto you all. : .lla uoy otnu thgin doog ,os ;llac rail a kcuP eht eslE
+Gentles, do not reprehend : dneherper ton od ,seltneG
+Give me your hands, if we be friends, and Robin shall restore amends. : .sdnema erotser llahs niboR dna ,sdneirf eb ew fi ,sdnah ruoy em eviG
+If we shadows have offended : dedneffo evah swodahs ew fI
+No more yielding but a dream : maerd a tub gnidleiy erom oN
+Now to 'scape the serpent's tongue, we will make amends ere long; : ;gnol ere sdnema ekam lliw ew ,eugnot s'tnepres eht epacs' ot woN
+That you have but slumber'd here : ereh d'rebmuls tub evah uoy tahT
+Think but this, and all is mended : dednem si lla dna ,siht tub knihT
+While these visions did appear : raeppa did snoisiv eseht elihW
+if you pardon, we will mend : dnem lliw ew ,nodrap uoy fi
+=-=-=-= Test with DBT flags 16 bufsize -1
+And this weak and idle theme : emeht eldi dna kaew siht dnA
+And, as I am an honest Puck, if we have unearned luck : kcul denraenu evah ew fi ,kcuP tsenoh na ma I sa ,dnA
+Else the Puck a liar call; so, good night unto you all. : .lla uoy otnu thgin doog ,os ;llac rail a kcuP eht eslE
+Gentles, do not reprehend : dneherper ton od ,seltneG
+Give me your hands, if we be friends, and Robin shall restore amends. : .sdnema erotser llahs niboR dna ,sdneirf eb ew fi ,sdnah ruoy em eviG
+If we shadows have offended : dedneffo evah swodahs ew fI
+No more yielding but a dream : maerd a tub gnidleiy erom oN
+Now to 'scape the serpent's tongue, we will make amends ere long; : ;gnol ere sdnema ekam lliw ew ,eugnot s'tnepres eht epacs' ot woN
+That you have but slumber'd here : ereh d'rebmuls tub evah uoy tahT
+Think but this, and all is mended : dednem si lla dna ,siht tub knihT
+While these visions did appear : raeppa did snoisiv eseht elihW
+if you pardon, we will mend : dnem lliw ew ,nodrap uoy fi
+=-=-=-= Test with DBT flags 32 bufsize 20
+And this weak and idle theme : emeht eldi dna kaew siht dnA
+And, as I am an honest Puck, if we have unearned luck : kcul denraenu evah ew fi ,kcuP tsenoh na ma I sa ,dnA
+Else the Puck a liar call; so, good night unto you all. : .lla uoy otnu thgin doog ,os ;llac rail a kcuP eht eslE
+Gentles, do not reprehend : dneherper ton od ,seltneG
+Give me your hands, if we be friends, and Robin shall restore amends. : .sdnema erotser llahs niboR dna ,sdneirf eb ew fi ,sdnah ruoy em eviG
+If we shadows have offended : dedneffo evah swodahs ew fI
+No more yielding but a dream : maerd a tub gnidleiy erom oN
+Now to 'scape the serpent's tongue, we will make amends ere long; : ;gnol ere sdnema ekam lliw ew ,eugnot s'tnepres eht epacs' ot woN
+That you have but slumber'd here : ereh d'rebmuls tub evah uoy tahT
+Think but this, and all is mended : dednem si lla dna ,siht tub knihT
+While these visions did appear : raeppa did snoisiv eseht elihW
+if you pardon, we will mend : dnem lliw ew ,nodrap uoy fi
+=-=-=-= Test with DBT flags 32 bufsize 50
+And this weak and idle theme : emeht eldi dna kaew siht dnA
+And, as I am an honest Puck, if we have unearned luck : kcul denraenu evah ew fi ,kcuP tsenoh na ma I sa ,dnA
+Else the Puck a liar call; so, good night unto you all. : .lla uoy otnu thgin doog ,os ;llac rail a kcuP eht eslE
+Gentles, do not reprehend : dneherper ton od ,seltneG
+Give me your hands, if we be friends, and Robin shall restore amends. : .sdnema erotser llahs niboR dna ,sdneirf eb ew fi ,sdnah ruoy em eviG
+If we shadows have offended : dedneffo evah swodahs ew fI
+No more yielding but a dream : maerd a tub gnidleiy erom oN
+Now to 'scape the serpent's tongue, we will make amends ere long; : ;gnol ere sdnema ekam lliw ew ,eugnot s'tnepres eht epacs' ot woN
+That you have but slumber'd here : ereh d'rebmuls tub evah uoy tahT
+Think but this, and all is mended : dednem si lla dna ,siht tub knihT
+While these visions did appear : raeppa did snoisiv eseht elihW
+if you pardon, we will mend : dnem lliw ew ,nodrap uoy fi
+=-=-=-= Test with DBT flags 32 bufsize 200
+And this weak and idle theme : emeht eldi dna kaew siht dnA
+And, as I am an honest Puck, if we have unearned luck : kcul denraenu evah ew fi ,kcuP tsenoh na ma I sa ,dnA
+Else the Puck a liar call; so, good night unto you all. : .lla uoy otnu thgin doog ,os ;llac rail a kcuP eht eslE
+Gentles, do not reprehend : dneherper ton od ,seltneG
+Give me your hands, if we be friends, and Robin shall restore amends. : .sdnema erotser llahs niboR dna ,sdneirf eb ew fi ,sdnah ruoy em eviG
+If we shadows have offended : dedneffo evah swodahs ew fI
+No more yielding but a dream : maerd a tub gnidleiy erom oN
+Now to 'scape the serpent's tongue, we will make amends ere long; : ;gnol ere sdnema ekam lliw ew ,eugnot s'tnepres eht epacs' ot woN
+That you have but slumber'd here : ereh d'rebmuls tub evah uoy tahT
+Think but this, and all is mended : dednem si lla dna ,siht tub knihT
+While these visions did appear : raeppa did snoisiv eseht elihW
+if you pardon, we will mend : dnem lliw ew ,nodrap uoy fi
+=-=-=-= Test with DBT flags 0 bufsize -1
+And this weak and idle theme : emeht eldi dna kaew siht dnA
+And, as I am an honest Puck, if we have unearned luck : kcul denraenu evah ew fi ,kcuP tsenoh na ma I sa ,dnA
+Else the Puck a liar call; so, good night unto you all. : .lla uoy otnu thgin doog ,os ;llac rail a kcuP eht eslE
+Gentles, do not reprehend : dneherper ton od ,seltneG
+Give me your hands, if we be friends, and Robin shall restore amends. : .sdnema erotser llahs niboR dna ,sdneirf eb ew fi ,sdnah ruoy em eviG
+If we shadows have offended : dedneffo evah swodahs ew fI
+No more yielding but a dream : maerd a tub gnidleiy erom oN
+Now to 'scape the serpent's tongue, we will make amends ere long; : ;gnol ere sdnema ekam lliw ew ,eugnot s'tnepres eht epacs' ot woN
+That you have but slumber'd here : ereh d'rebmuls tub evah uoy tahT
+Think but this, and all is mended : dednem si lla dna ,siht tub knihT
+While these visions did appear : raeppa did snoisiv eseht elihW
+if you pardon, we will mend : dnem lliw ew ,nodrap uoy fi
diff --git a/storage/bdb/test/scr016/TestGetSetMethods.java b/storage/bdb/test/scr016/TestGetSetMethods.java
new file mode 100644
index 00000000000..a1b2722d8fd
--- /dev/null
+++ b/storage/bdb/test/scr016/TestGetSetMethods.java
@@ -0,0 +1,99 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 2000-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestGetSetMethods.java,v 1.3 2002/01/11 15:54:02 bostic Exp $
+ */
+
+/*
+ * Do some regression tests for simple get/set access methods
+ * on DbEnv, DbTxn, Db. We don't currently test that they have
+ * the desired effect, only that they operate and return correctly.
+ */
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import java.io.FileNotFoundException;
+
+public class TestGetSetMethods
+{
+ public void testMethods()
+ throws DbException, FileNotFoundException
+ {
+ DbEnv dbenv = new DbEnv(0);
+ DbTxn dbtxn;
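+ // A 10x10 lock-mode conflict matrix for set_lk_conflicts below;
+ // the contents are arbitrary, since this test only checks that
+ // the setters can be called, not that they take effect.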
+ byte[][] conflicts = new byte[10][10];
+
+ dbenv.set_timeout(0x90000000,
+ Db.DB_SET_LOCK_TIMEOUT);
+ dbenv.set_lg_bsize(0x1000);
+ dbenv.set_lg_dir(".");
+ dbenv.set_lg_max(0x10000000);
+ dbenv.set_lg_regionmax(0x100000);
+ dbenv.set_lk_conflicts(conflicts);
+ dbenv.set_lk_detect(Db.DB_LOCK_DEFAULT);
+ // exists, but is deprecated:
+ // dbenv.set_lk_max(0);
+ dbenv.set_lk_max_lockers(100);
+ dbenv.set_lk_max_locks(10);
+ dbenv.set_lk_max_objects(1000);
+ dbenv.set_mp_mmapsize(0x10000);
+ dbenv.set_tas_spins(1000);
+
+ // Need to open the environment so we
+ // can get a transaction.
+ //
+ dbenv.open(".", Db.DB_CREATE | Db.DB_INIT_TXN |
+ Db.DB_INIT_LOCK | Db.DB_INIT_LOG |
+ Db.DB_INIT_MPOOL,
+ 0644);
+
+ dbtxn = dbenv.txn_begin(null, Db.DB_TXN_NOWAIT);
+ dbtxn.set_timeout(0xA0000000, Db.DB_SET_TXN_TIMEOUT);
+ dbtxn.abort();
+
+ dbenv.close(0);
+
+ // We get a db, one for each type.
+ // That's because once we call (for instance)
+ // set_bt_maxkey, DB 'knows' that this is a
+ // Btree Db, and it cannot be used to try Hash
+ // or Recno functions.
+ //
+ Db db_bt = new Db(null, 0);
+ db_bt.set_bt_maxkey(10000);
+ db_bt.set_bt_minkey(100);
+ db_bt.set_cachesize(0, 0x100000, 0);
+ db_bt.close(0);
+
+ Db db_h = new Db(null, 0);
+ db_h.set_h_ffactor(0x10);
+ db_h.set_h_nelem(100);
+ db_h.set_lorder(0);
+ db_h.set_pagesize(0x10000);
+ db_h.close(0);
+
+ Db db_re = new Db(null, 0);
+ db_re.set_re_delim('@');
+ db_re.set_re_pad(10);
+ db_re.set_re_source("re.in");
+ db_re.close(0);
+
+ Db db_q = new Db(null, 0);
+ db_q.set_q_extentsize(200);
+ db_q.close(0);
+ }
+
+ public static void main(String[] args)
+ {
+ try {
+ TestGetSetMethods tester = new TestGetSetMethods();
+ tester.testMethods();
+ }
+ catch (Exception e) {
+ System.err.println("TestGetSetMethods: Exception: " + e);
+ }
+ }
+}
diff --git a/storage/bdb/test/scr016/TestKeyRange.java b/storage/bdb/test/scr016/TestKeyRange.java
new file mode 100644
index 00000000000..8eda2de426f
--- /dev/null
+++ b/storage/bdb/test/scr016/TestKeyRange.java
@@ -0,0 +1,203 @@
+/*NOTE: TestKeyRange is AccessExample changed to test Db.key_range.
+ * See comments with ADDED for specific areas of change.
+ */
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestKeyRange.java,v 1.4 2002/08/16 19:35:55 dda Exp $
+ */
+
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.StringReader;
+import java.io.Reader;
+import java.io.IOException;
+import java.io.PrintStream;
+
+public class TestKeyRange
+{
+ private static final String FileName = "access.db";
+
+ public TestKeyRange()
+ {
+ }
+
+ private static void usage()
+ {
+ System.err.println("usage: TestKeyRange\n");
+ System.exit(1);
+ }
+
+ public static void main(String argv[])
+ {
+ try
+ {
+ TestKeyRange app = new TestKeyRange();
+ app.run();
+ }
+ catch (DbException dbe)
+ {
+ System.err.println("TestKeyRange: " + dbe.toString());
+ System.exit(1);
+ }
+ catch (FileNotFoundException fnfe)
+ {
+ System.err.println("TestKeyRange: " + fnfe.toString());
+ System.exit(1);
+ }
+ System.exit(0);
+ }
+
+ // Prompts for a line, and keeps prompting until a non-blank
+ // line is returned. Returns null on EOF or error.
+ //
+ static public String askForLine(Reader reader,
+ PrintStream out, String prompt)
+ {
+ String result = "";
+ while (result != null && result.length() == 0) {
+ out.print(prompt);
+ out.flush();
+ result = getLine(reader);
+ }
+ return result;
+ }
+
+ // Not terribly efficient, but does the job.
+ // Works for reading a line from stdin or a file.
+ // Returns null on EOF. If EOF appears in the middle
+ // of a line, returns that line, then null on next call.
+ //
+ static public String getLine(Reader reader)
+ {
+ StringBuffer b = new StringBuffer();
+ int c;
+ try {
+ while ((c = reader.read()) != -1 && c != '\n') {
+ if (c != '\r')
+ b.append((char)c);
+ }
+ }
+ catch (IOException ioe) {
+ c = -1;
+ }
+
+ if (c == -1 && b.length() == 0)
+ return null;
+ else
+ return b.toString();
+ }
+
+ public void run()
+ throws DbException, FileNotFoundException
+ {
+ // Remove the previous database.
+ new File(FileName).delete();
+
+ // Create the database object.
+ // There is no environment for this simple example.
+ Db table = new Db(null, 0);
+ table.set_error_stream(System.err);
+ table.set_errpfx("TestKeyRange");
+ table.open(null, FileName, null, Db.DB_BTREE, Db.DB_CREATE, 0644);
+
+ //
+ // Insert records into the database, where the key is the user
+ // input and the data is the user input in reverse order.
+ //
+ Reader reader = new StringReader("abc\nmiddle\nzend\nmoremiddle\nZED\nMAMAMIA");
+
+ int count = 0; /*ADDED*/
+ for (;;) {
+ String line = askForLine(reader, System.out, "input>");
+ if (line == null)
+ break;
+
+ String reversed = (new StringBuffer(line)).reverse().toString();
+
+ // See definition of StringDbt below
+ //
+ StringDbt key = new StringDbt(line);
+ StringDbt data = new StringDbt(reversed);
+
+ try
+ {
+ int err;
+ if ((err = table.put(null, key, data, 0)) == Db.DB_KEYEXIST) {
+ System.out.println("Key " + line + " already exists.");
+ }
+ }
+ catch (DbException dbe)
+ {
+ System.out.println(dbe.toString());
+ }
+ System.out.println("");
+
+ /*START ADDED*/
+ {
+ if (count++ > 0) {
+ DbKeyRange range = new DbKeyRange();
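+ // Db.key_range fills 'range' with the estimated fractions
+ // of keys less than, equal to, and greater than 'key'.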
+ table.key_range(null, key, range, 0);
+ System.out.println("less: " + range.less);
+ System.out.println("equal: " + range.equal);
+ System.out.println("greater: " + range.greater);
+ }
+ }
+ /*END ADDED*/
+
+ }
+
+ // Acquire an iterator for the table.
+ Dbc iterator;
+ iterator = table.cursor(null, 0);
+
+ // Walk through the table, printing the key/data pairs.
+ // See class StringDbt defined below.
+ //
+ StringDbt key = new StringDbt();
+ StringDbt data = new StringDbt();
+ while (iterator.get(key, data, Db.DB_NEXT) == 0)
+ {
+ System.out.println(key.getString() + " : " + data.getString());
+ }
+ iterator.close();
+ table.close(0);
+ }
+
+ // Here's an example of how you can extend a Dbt in a straightforward
+ // way to allow easy storage/retrieval of strings, or whatever
+ // kind of data you wish. We've declared it as a static inner
+ // class, but it need not be.
+ //
+ static /*inner*/
+ class StringDbt extends Dbt
+ {
+ StringDbt()
+ {
+ set_flags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval
+ }
+
+ StringDbt(String value)
+ {
+ setString(value);
+ set_flags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval
+ }
+
+ void setString(String value)
+ {
+ set_data(value.getBytes());
+ set_size(value.length());
+ }
+
+ String getString()
+ {
+ return new String(get_data(), 0, get_size());
+ }
+ }
+}
diff --git a/storage/bdb/test/scr016/TestKeyRange.testout b/storage/bdb/test/scr016/TestKeyRange.testout
new file mode 100644
index 00000000000..c265f3289fb
--- /dev/null
+++ b/storage/bdb/test/scr016/TestKeyRange.testout
@@ -0,0 +1,27 @@
+input>
+input>
+less: 0.5
+equal: 0.5
+greater: 0.0
+input>
+less: 0.6666666666666666
+equal: 0.3333333333333333
+greater: 0.0
+input>
+less: 0.5
+equal: 0.25
+greater: 0.25
+input>
+less: 0.0
+equal: 0.2
+greater: 0.8
+input>
+less: 0.0
+equal: 0.16666666666666666
+greater: 0.8333333333333334
+input>MAMAMIA : AIMAMAM
+ZED : DEZ
+abc : cba
+middle : elddim
+moremiddle : elddimerom
+zend : dnez
diff --git a/storage/bdb/test/scr016/TestLockVec.java b/storage/bdb/test/scr016/TestLockVec.java
new file mode 100644
index 00000000000..ad48e9f2f9a
--- /dev/null
+++ b/storage/bdb/test/scr016/TestLockVec.java
@@ -0,0 +1,249 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestLockVec.java,v 1.4 2002/08/16 19:35:55 dda Exp $
+ */
+
+/*
+ * test of DbEnv.lock_vec()
+ */
+
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import java.io.FileNotFoundException;
+
+public class TestLockVec
+{
+ public static int locker1;
+ public static int locker2;
+
+ public static void gdb_pause()
+ {
+ try {
+ System.err.println("attach gdb and type return...");
+ System.in.read(new byte[10]);
+ }
+ catch (java.io.IOException ie) {
+ }
+ }
+
+ public static void main(String[] args)
+ {
+ try {
+ DbEnv dbenv1 = new DbEnv(0);
+ DbEnv dbenv2 = new DbEnv(0);
+ dbenv1.open(".",
+ Db.DB_CREATE | Db.DB_INIT_LOCK | Db.DB_INIT_MPOOL, 0);
+ dbenv2.open(".",
+ Db.DB_CREATE | Db.DB_INIT_LOCK | Db.DB_INIT_MPOOL, 0);
+ locker1 = dbenv1.lock_id();
+ locker2 = dbenv1.lock_id();
+ Db db1 = new Db(dbenv1, 0);
+ db1.open(null, "my.db", null, Db.DB_BTREE, Db.DB_CREATE, 0);
+ Db db2 = new Db(dbenv2, 0);
+ db2.open(null, "my.db", null, Db.DB_BTREE, 0, 0);
+
+ // populate our database, just two elements.
+ Dbt Akey = new Dbt("A".getBytes());
+ Dbt Adata = new Dbt("Adata".getBytes());
+ Dbt Bkey = new Dbt("B".getBytes());
+ Dbt Bdata = new Dbt("Bdata".getBytes());
+
+ // We don't allow Dbts to be reused within the
+ // same method call, so we need some duplicates.
+ Dbt Akeyagain = new Dbt("A".getBytes());
+ Dbt Bkeyagain = new Dbt("B".getBytes());
+
+ db1.put(null, Akey, Adata, 0);
+ db1.put(null, Bkey, Bdata, 0);
+
+ Dbt notInDatabase = new Dbt("C".getBytes());
+
+ /* make sure our check mechanisms work */
+ int expectedErrs = 0;
+
+ lock_check_free(dbenv2, Akey);
+ try {
+ lock_check_held(dbenv2, Bkey, Db.DB_LOCK_READ);
+ }
+ catch (DbException dbe1) {
+ expectedErrs += 1;
+ }
+ DbLock tmplock = dbenv1.lock_get(locker1, Db.DB_LOCK_NOWAIT,
+ Akey, Db.DB_LOCK_READ);
+ lock_check_held(dbenv2, Akey, Db.DB_LOCK_READ);
+ try {
+ lock_check_free(dbenv2, Akey);
+ }
+ catch (DbException dbe2) {
+ expectedErrs += 2;
+ }
+ if (expectedErrs != 1+2) {
+ System.err.println("lock check mechanism is broken");
+ System.exit(1);
+ }
+ dbenv1.lock_put(tmplock);
+
+ /* Now on with the test, a series of lock_vec requests,
+ * with checks between each call.
+ */
+
+ System.out.println("get a few");
+ /* Request: get A(W), B(R), B(R) */
+ DbLockRequest[] reqs = new DbLockRequest[3];
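+ // Each DbLockRequest bundles an operation, a lock mode, and
+ // the object to lock; lock_vec processes 'count' requests
+ // starting at the given offset into the array.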
+
+ reqs[0] = new DbLockRequest(Db.DB_LOCK_GET, Db.DB_LOCK_WRITE,
+ Akey, null);
+ reqs[1] = new DbLockRequest(Db.DB_LOCK_GET, Db.DB_LOCK_READ,
+ Bkey, null);
+ reqs[2] = new DbLockRequest(Db.DB_LOCK_GET, Db.DB_LOCK_READ,
+ Bkeyagain, null);
+
+ dbenv1.lock_vec(locker1, Db.DB_LOCK_NOWAIT, reqs, 0, 3);
+
+ /* Locks held: A(W), B(R), B(R) */
+ lock_check_held(dbenv2, Bkey, Db.DB_LOCK_READ);
+ lock_check_held(dbenv2, Akey, Db.DB_LOCK_WRITE);
+
+ System.out.println("put a couple");
+ /* Request: put A, B(first) */
+ reqs[0].set_op(Db.DB_LOCK_PUT);
+ reqs[1].set_op(Db.DB_LOCK_PUT);
+
+ dbenv1.lock_vec(locker1, Db.DB_LOCK_NOWAIT, reqs, 0, 2);
+
+ /* Locks held: B(R) */
+ lock_check_free(dbenv2, Akey);
+ lock_check_held(dbenv2, Bkey, Db.DB_LOCK_READ);
+
+ System.out.println("put one more, test index offset");
+ /* Request: put B(second) */
+ reqs[2].set_op(Db.DB_LOCK_PUT);
+
+ dbenv1.lock_vec(locker1, Db.DB_LOCK_NOWAIT, reqs, 2, 1);
+
+ /* Locks held: <none> */
+ lock_check_free(dbenv2, Akey);
+ lock_check_free(dbenv2, Bkey);
+
+ System.out.println("get a few");
+ /* Request: get A(R), A(R), B(R) */
+ reqs[0] = new DbLockRequest(Db.DB_LOCK_GET, Db.DB_LOCK_READ,
+ Akey, null);
+ reqs[1] = new DbLockRequest(Db.DB_LOCK_GET, Db.DB_LOCK_READ,
+ Akeyagain, null);
+ reqs[2] = new DbLockRequest(Db.DB_LOCK_GET, Db.DB_LOCK_READ,
+ Bkey, null);
+ dbenv1.lock_vec(locker1, Db.DB_LOCK_NOWAIT, reqs, 0, 3);
+
+ /* Locks held: A(R), B(R), B(R) */
+ lock_check_held(dbenv2, Akey, Db.DB_LOCK_READ);
+ lock_check_held(dbenv2, Bkey, Db.DB_LOCK_READ);
+
+ System.out.println("try putobj");
+ /* Request: get B(R), putobj A */
+ reqs[1] = new DbLockRequest(Db.DB_LOCK_GET, Db.DB_LOCK_READ,
+ Bkey, null);
+ reqs[2] = new DbLockRequest(Db.DB_LOCK_PUT_OBJ, 0,
+ Akey, null);
+ dbenv1.lock_vec(locker1, Db.DB_LOCK_NOWAIT, reqs, 1, 2);
+
+ /* Locks held: B(R), B(R) */
+ lock_check_free(dbenv2, Akey);
+ lock_check_held(dbenv2, Bkey, Db.DB_LOCK_READ);
+
+ System.out.println("get one more");
+ /* Request: get A(W) */
+ reqs[0] = new DbLockRequest(Db.DB_LOCK_GET, Db.DB_LOCK_WRITE,
+ Akey, null);
+ dbenv1.lock_vec(locker1, Db.DB_LOCK_NOWAIT, reqs, 0, 1);
+
+ /* Locks held: A(W), B(R), B(R) */
+ lock_check_held(dbenv2, Akey, Db.DB_LOCK_WRITE);
+ lock_check_held(dbenv2, Bkey, Db.DB_LOCK_READ);
+
+ System.out.println("putall");
+ /* Request: putall */
+ reqs[0] = new DbLockRequest(Db.DB_LOCK_PUT_ALL, 0,
+ null, null);
+ dbenv1.lock_vec(locker1, Db.DB_LOCK_NOWAIT, reqs, 0, 1);
+
+ lock_check_free(dbenv2, Akey);
+ lock_check_free(dbenv2, Bkey);
+ db1.close(0);
+ dbenv1.close(0);
+ db2.close(0);
+ dbenv2.close(0);
+ System.out.println("done");
+ }
+ catch (DbLockNotGrantedException nge) {
+ System.err.println("Db Exception: " + nge);
+ }
+ catch (DbException dbe) {
+ System.err.println("Db Exception: " + dbe);
+ }
+ catch (FileNotFoundException fnfe) {
+ System.err.println("FileNotFoundException: " + fnfe);
+ }
+
+ }
+
+ /* Verify that the lock is free, throw an exception if not.
+ * We do this by trying to grab a write lock (no wait).
+ */
+ static void lock_check_free(DbEnv dbenv, Dbt dbt)
+ throws DbException
+ {
+ DbLock tmplock = dbenv.lock_get(locker2, Db.DB_LOCK_NOWAIT,
+ dbt, Db.DB_LOCK_WRITE);
+ dbenv.lock_put(tmplock);
+ }
+
+ /* Verify that the lock is held with the mode, throw an exception if not.
+ * If we have a write lock, we should not be able to get the lock
+ * for reading. If we have a read lock, we should be able to get
+ * it for reading, but not writing.
+ */
+ static void lock_check_held(DbEnv dbenv, Dbt dbt, int mode)
+ throws DbException
+ {
+ DbLock never = null;
+
+ try {
+ if (mode == Db.DB_LOCK_WRITE) {
+ never = dbenv.lock_get(locker2, Db.DB_LOCK_NOWAIT,
+ dbt, Db.DB_LOCK_READ);
+ }
+ else if (mode == Db.DB_LOCK_READ) {
+ DbLock rlock = dbenv.lock_get(locker2, Db.DB_LOCK_NOWAIT,
+ dbt, Db.DB_LOCK_READ);
+ dbenv.lock_put(rlock);
+ never = dbenv.lock_get(locker2, Db.DB_LOCK_NOWAIT,
+ dbt, Db.DB_LOCK_WRITE);
+ }
+ else {
+ throw new DbException("lock_check_held bad mode");
+ }
+ }
+ catch (DbLockNotGrantedException nge) {
+ /* We expect this on our last lock_get call */
+ }
+
+ /* make sure we failed */
+ if (never != null) {
+ try {
+ dbenv.lock_put(never);
+ }
+ catch (DbException dbe2) {
+ System.err.println("Got some real troubles now");
+ System.exit(1);
+ }
+ throw new DbException("lock_check_held: lock was not held");
+ }
+ }
+
+}
diff --git a/storage/bdb/test/scr016/TestLockVec.testout b/storage/bdb/test/scr016/TestLockVec.testout
new file mode 100644
index 00000000000..1cf16c6ac4e
--- /dev/null
+++ b/storage/bdb/test/scr016/TestLockVec.testout
@@ -0,0 +1,8 @@
+get a few
+put a couple
+put one more, test index offset
+get a few
+try putobj
+get one more
+putall
+done
diff --git a/storage/bdb/test/scr016/TestLogc.java b/storage/bdb/test/scr016/TestLogc.java
new file mode 100644
index 00000000000..ec9c373a93b
--- /dev/null
+++ b/storage/bdb/test/scr016/TestLogc.java
@@ -0,0 +1,100 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestLogc.java,v 1.7 2002/08/16 19:35:55 dda Exp $
+ */
+
+/*
+ * A basic regression test for the Logc class.
+ */
+
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import java.io.FileNotFoundException;
+
+public class TestLogc
+{
+ public static void main(String[] args)
+ {
+ try {
+ DbEnv env = new DbEnv(0);
+ env.open(".", Db.DB_CREATE | Db.DB_INIT_LOG | Db.DB_INIT_MPOOL, 0);
+
+ // Do some database activity to get something into the log.
+ Db db1 = new Db(env, 0);
+ db1.open(null, "first.db", null, Db.DB_BTREE, Db.DB_CREATE, 0);
+ db1.put(null, new Dbt("a".getBytes()), new Dbt("b".getBytes()), 0);
+ db1.put(null, new Dbt("c".getBytes()), new Dbt("d".getBytes()), 0);
+ db1.close(0);
+
+ Db db2 = new Db(env, 0);
+ db2.open(null, "second.db", null, Db.DB_BTREE, Db.DB_CREATE, 0644);
+ db2.put(null, new Dbt("w".getBytes()), new Dbt("x".getBytes()), 0);
+ db2.put(null, new Dbt("y".getBytes()), new Dbt("z".getBytes()), 0);
+ db2.close(0);
+
+ // Now get a log cursor and walk through.
+ DbLogc logc = env.log_cursor(0);
+
+ int ret = 0;
+ DbLsn lsn = new DbLsn();
+ Dbt dbt = new Dbt();
+ int flags = Db.DB_FIRST;
+
+ int count = 0;
+ while ((ret = logc.get(lsn, dbt, flags)) == 0) {
+
+ // We ignore the contents of the log record;
+ // they are not portable. Even the exact count
+ // may change when the underlying implementation
+ // changes, so we'll just make sure at the end
+ // that we saw 'enough'.
+ //
+ // System.out.println("logc.get: " + count);
+ // System.out.println(showDbt(dbt));
+ //
+ count++;
+ flags = Db.DB_NEXT;
+ }
+ if (ret != Db.DB_NOTFOUND) {
+ System.err.println("*** FAIL: logc.get returned: " +
+ DbEnv.strerror(ret));
+ }
+ logc.close(0);
+
+ // There have to be at *least* four log records,
+ // since we did four separate database operations.
+ //
+ if (count < 4)
+ System.out.println("*** FAIL: not enough log records");
+
+ System.out.println("TestLogc done.");
+ }
+ catch (DbException dbe) {
+ System.err.println("*** FAIL: Db Exception: " + dbe);
+ }
+ catch (FileNotFoundException fnfe) {
+ System.err.println("*** FAIL: FileNotFoundException: " + fnfe);
+ }
+
+ }
+
+ public static String showDbt(Dbt dbt)
+ {
+ StringBuffer sb = new StringBuffer();
+ int size = dbt.get_size();
+ byte[] data = dbt.get_data();
+ int i;
+ for (i=0; i<size && i<10; i++) {
+ sb.append(Byte.toString(data[i]));
+ sb.append(' ');
+ }
+ if (i<size)
+ sb.append("...");
+ return "size: " + size + " data: " + sb.toString();
+ }
+}
diff --git a/storage/bdb/test/scr016/TestLogc.testout b/storage/bdb/test/scr016/TestLogc.testout
new file mode 100644
index 00000000000..afac3af7eda
--- /dev/null
+++ b/storage/bdb/test/scr016/TestLogc.testout
@@ -0,0 +1 @@
+TestLogc done.
diff --git a/storage/bdb/test/scr016/TestOpenEmpty.java b/storage/bdb/test/scr016/TestOpenEmpty.java
new file mode 100644
index 00000000000..ae92fd363d9
--- /dev/null
+++ b/storage/bdb/test/scr016/TestOpenEmpty.java
@@ -0,0 +1,189 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestOpenEmpty.java,v 1.4 2002/08/16 19:35:55 dda Exp $
+ */
+
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.InputStreamReader;
+import java.io.IOException;
+import java.io.PrintStream;
+
+public class TestOpenEmpty
+{
+ private static final String FileName = "access.db";
+
+ public TestOpenEmpty()
+ {
+ }
+
+ private static void usage()
+ {
+ System.err.println("usage: TestOpenEmpty\n");
+ System.exit(1);
+ }
+
+ public static void main(String argv[])
+ {
+ try
+ {
+ TestOpenEmpty app = new TestOpenEmpty();
+ app.run();
+ }
+ catch (DbException dbe)
+ {
+ System.err.println("TestOpenEmpty: " + dbe.toString());
+ System.exit(1);
+ }
+ catch (FileNotFoundException fnfe)
+ {
+ System.err.println("TestOpenEmpty: " + fnfe.toString());
+ System.exit(1);
+ }
+ System.exit(0);
+ }
+
+ // Prompts for a line, and keeps prompting until a non-blank
+ // line is returned. Returns null on EOF or error.
+ //
+ static public String askForLine(InputStreamReader reader,
+ PrintStream out, String prompt)
+ {
+ String result = "";
+ while (result != null && result.length() == 0) {
+ out.print(prompt);
+ out.flush();
+ result = getLine(reader);
+ }
+ return result;
+ }
+
+ // Not terribly efficient, but does the job.
+ // Works for reading a line from stdin or a file.
+ // Returns null on EOF. If EOF appears in the middle
+ // of a line, returns that line, then null on next call.
+ //
+ static public String getLine(InputStreamReader reader)
+ {
+ StringBuffer b = new StringBuffer();
+ int c;
+ try {
+ while ((c = reader.read()) != -1 && c != '\n') {
+ if (c != '\r')
+ b.append((char)c);
+ }
+ }
+ catch (IOException ioe) {
+ c = -1;
+ }
+
+ if (c == -1 && b.length() == 0)
+ return null;
+ else
+ return b.toString();
+ }
+
+ public void run()
+ throws DbException, FileNotFoundException
+ {
+ // Remove the previous database.
+ new File(FileName).delete();
+
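+ // Create a zero-length file; opening it as a Btree database
+ // below is expected to fail (see TestOpenEmpty.testerr).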
+ try { (new java.io.FileOutputStream(FileName)).close(); }
+ catch (IOException ioe) { }
+
+ // Create the database object.
+ // There is no environment for this simple example.
+ Db table = new Db(null, 0);
+ table.set_error_stream(System.err);
+ table.set_errpfx("TestOpenEmpty");
+ table.open(null, FileName, null, Db.DB_BTREE, Db.DB_CREATE, 0644);
+
+ //
+ // Insert records into the database, where the key is the user
+ // input and the data is the user input in reverse order.
+ //
+ InputStreamReader reader = new InputStreamReader(System.in);
+
+ for (;;) {
+ String line = askForLine(reader, System.out, "input> ");
+ if (line == null)
+ break;
+
+ String reversed = (new StringBuffer(line)).reverse().toString();
+
+ // See definition of StringDbt below
+ //
+ StringDbt key = new StringDbt(line);
+ StringDbt data = new StringDbt(reversed);
+
+ try
+ {
+ int err;
+ if ((err = table.put(null,
+ key, data, Db.DB_NOOVERWRITE)) == Db.DB_KEYEXIST) {
+ System.out.println("Key " + line + " already exists.");
+ }
+ }
+ catch (DbException dbe)
+ {
+ System.out.println(dbe.toString());
+ }
+ System.out.println("");
+ }
+
+ // Acquire an iterator for the table.
+ Dbc iterator;
+ iterator = table.cursor(null, 0);
+
+ // Walk through the table, printing the key/data pairs.
+ // See class StringDbt defined below.
+ //
+ StringDbt key = new StringDbt();
+ StringDbt data = new StringDbt();
+ while (iterator.get(key, data, Db.DB_NEXT) == 0)
+ {
+ System.out.println(key.getString() + " : " + data.getString());
+ }
+ iterator.close();
+ table.close(0);
+ }
+
+ // Here's an example of how you can extend a Dbt in a straightforward
+ // way to allow easy storage/retrieval of strings, or whatever
+ // kind of data you wish. We've declared it as a static inner
+ // class, but it need not be.
+ //
+ static /*inner*/
+ class StringDbt extends Dbt
+ {
+ StringDbt()
+ {
+ set_flags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval
+ }
+
+ StringDbt(String value)
+ {
+ setString(value);
+ set_flags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval
+ }
+
+ void setString(String value)
+ {
+ set_data(value.getBytes());
+ set_size(value.length());
+ }
+
+ String getString()
+ {
+ return new String(get_data(), 0, get_size());
+ }
+ }
+}
diff --git a/storage/bdb/test/scr016/TestOpenEmpty.testerr b/storage/bdb/test/scr016/TestOpenEmpty.testerr
new file mode 100644
index 00000000000..dd3e01c7ab7
--- /dev/null
+++ b/storage/bdb/test/scr016/TestOpenEmpty.testerr
@@ -0,0 +1,2 @@
+TestOpenEmpty: access.db: unexpected file type or format
+TestOpenEmpty: com.sleepycat.db.DbException: Invalid argument: Invalid argument
diff --git a/storage/bdb/test/scr016/TestReplication.java b/storage/bdb/test/scr016/TestReplication.java
new file mode 100644
index 00000000000..87cb683d60f
--- /dev/null
+++ b/storage/bdb/test/scr016/TestReplication.java
@@ -0,0 +1,289 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestReplication.java,v 1.3 2002/01/23 14:29:51 bostic Exp $
+ */
+
+/*
+ * Simple test of replication, merely to exercise the individual
+ * methods in the API. Rather than use TCP/IP, our transport
+ * mechanism is just a Vector of byte arrays.
+ * It's managed like a queue; access is synchronized on
+ * the Vector object itself, and the client polls for messages.
+ * It's not terribly extensible, but it's fine for a small test.
+ */
+
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.Vector;
+
+public class TestReplication extends Thread
+ implements DbRepTransport
+{
+ public static final String MASTER_ENVDIR = "./master";
+ public static final String CLIENT_ENVDIR = "./client";
+
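+ // Queue protocol: each message is an Integer tag, optionally
+ // followed by a byte[] payload. Tags: 1 = control data follows,
+ // 2 = empty control, 3 = rec data follows, 4 = empty rec,
+ // -1 = terminate the client.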
+ private Vector queue = new Vector();
+ private DbEnv master_env;
+ private DbEnv client_env;
+
+ private static void mkdir(String name)
+ throws IOException
+ {
+ (new File(name)).mkdir();
+ }
+
+
+ // The client thread runs this
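+ // (The numbered "cNN" lines printed to stderr are progress
+ // markers to help debug the master/client handshake.)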
+ public void run()
+ {
+ try {
+ System.err.println("c10");
+ client_env = new DbEnv(0);
+ System.err.println("c11");
+ client_env.set_rep_transport(1, this);
+ System.err.println("c12");
+ client_env.open(CLIENT_ENVDIR, Db.DB_CREATE | Db.DB_INIT_MPOOL, 0);
+ System.err.println("c13");
+ Dbt myid = new Dbt("client01".getBytes());
+ System.err.println("c14");
+ client_env.rep_start(myid, Db.DB_REP_CLIENT);
+ System.err.println("c15");
+ DbEnv.RepProcessMessage processMsg = new DbEnv.RepProcessMessage();
+ processMsg.envid = 2;
+ System.err.println("c20");
+ boolean running = true;
+
+ Dbt control = new Dbt();
+ Dbt rec = new Dbt();
+
+ while (running) {
+ int msgtype = 0;
+
+ System.err.println("c30");
+ synchronized (queue) {
+ if (queue.size() == 0) {
+ System.err.println("c40");
+ sleepShort();
+ }
+ else {
+ msgtype = ((Integer)queue.firstElement()).intValue();
+ queue.removeElementAt(0);
+ byte[] data;
+
+ System.err.println("c50 " + msgtype);
+
+ switch (msgtype) {
+ case -1:
+ running = false;
+ break;
+ case 1:
+ data = (byte[])queue.firstElement();
+ queue.removeElementAt(0);
+ control.set_data(data);
+ control.set_size(data.length);
+ break;
+ case 2:
+ control.set_data(null);
+ control.set_size(0);
+ break;
+ case 3:
+ data = (byte[])queue.firstElement();
+ queue.removeElementAt(0);
+ rec.set_data(data);
+ rec.set_size(data.length);
+ break;
+ case 4:
+ rec.set_data(null);
+ rec.set_size(0);
+ break;
+ }
+
+ }
+ }
+ System.err.println("c60");
+ if (msgtype == 3 || msgtype == 4) {
+ System.out.println("cLIENT: Got message");
+ client_env.rep_process_message(control, rec,
+ processMsg);
+ }
+ }
+ System.err.println("c70");
+ Db db = new Db(client_env, 0);
+ db.open(null, "x.db", null, Db.DB_BTREE, 0, 0);
+ Dbt data = new Dbt();
+ System.err.println("c80");
+ db.get(null, new Dbt("Hello".getBytes()), data, 0);
+ System.err.println("c90");
+ System.out.println("Hello " + new String(data.get_data(), data.get_offset(), data.get_size()));
+ System.err.println("c95");
+ client_env.close(0);
+ }
+ catch (Exception e) {
+ System.err.println("client exception: " + e);
+ }
+ }
+
+ // Implements DbRepTransport
+ public int send(DbEnv env, Dbt control, Dbt rec, int flags, int envid)
+ throws DbException
+ {
+ System.out.println("Send to " + envid);
+ if (envid == 1) {
+ System.err.println("Unexpected envid = " + envid);
+ return 0;
+ }
+
+ int nbytes = 0;
+
+ synchronized (queue) {
+ System.out.println("Sending message");
+ byte[] data = control.get_data();
+ if (data != null && data.length > 0) {
+ queue.addElement(new Integer(1));
+ nbytes += data.length;
+ byte[] newdata = new byte[data.length];
+ System.arraycopy(data, 0, newdata, 0, data.length);
+ queue.addElement(newdata);
+ }
+ else
+ {
+ queue.addElement(new Integer(2));
+ }
+
+ data = rec.get_data();
+ if (data != null && data.length > 0) {
+ queue.addElement(new Integer(3));
+ nbytes += data.length;
+ byte[] newdata = new byte[data.length];
+ System.arraycopy(data, 0, newdata, 0, data.length);
+ queue.addElement(newdata);
+ }
+ else
+ {
+ queue.addElement(new Integer(4));
+ }
+ System.out.println("MASTER: sent message");
+ }
+ return 0;
+ }
+
+ public void sleepShort()
+ {
+ try {
+ sleep(100);
+ }
+ catch (InterruptedException ie)
+ {
+ }
+ }
+
+ public void send_terminator()
+ {
+ synchronized (queue) {
+ queue.addElement(new Integer(-1));
+ }
+ }
+
+ public void master()
+ {
+ try {
+ master_env = new DbEnv(0);
+ master_env.set_rep_transport(2, this);
+ master_env.open(MASTER_ENVDIR, Db.DB_CREATE | Db.DB_INIT_MPOOL, 0644);
+ System.err.println("10");
+ Dbt myid = new Dbt("master01".getBytes());
+ master_env.rep_start(myid, Db.DB_REP_MASTER);
+ System.err.println("10");
+ Db db = new Db(master_env, 0);
+ System.err.println("20");
+ db.open(null, "x.db", null, Db.DB_BTREE, Db.DB_CREATE, 0644);
+ System.err.println("30");
+ db.put(null, new Dbt("Hello".getBytes()),
+ new Dbt("world".getBytes()), 0);
+ System.err.println("40");
+ //DbEnv.RepElectResult electionResult = new DbEnv.RepElectResult();
+ //master_env.rep_elect(2, 2, 3, 4, electionResult);
+ db.close(0);
+ System.err.println("50");
+ master_env.close(0);
+ send_terminator();
+ }
+ catch (Exception e) {
+ System.err.println("client exception: " + e);
+ }
+ }
+
+ public static void main(String[] args)
+ {
+ // The test should take only a few milliseconds;
+ // give it 10 seconds before bailing out.
+ TimelimitThread t = new TimelimitThread(1000*10);
+ t.start();
+
+ try {
+ mkdir(CLIENT_ENVDIR);
+ mkdir(MASTER_ENVDIR);
+
+ TestReplication rep = new TestReplication();
+
+ // Run the client as a separate thread.
+ rep.start();
+
+ // Run the master.
+ rep.master();
+
+ // Wait for the client thread to finish.
+ rep.join();
+ }
+ catch (Exception e)
+ {
+ System.err.println("Exception: " + e);
+ }
+ t.finished();
+ }
+
+}
+
+class TimelimitThread extends Thread
+{
+ long nmillis;
+ boolean finished = false;
+
+ TimelimitThread(long nmillis)
+ {
+ this.nmillis = nmillis;
+ }
+
+ public void finished()
+ {
+ finished = true;
+ }
+
+ public void run()
+ {
+ long targetTime = System.currentTimeMillis() + nmillis;
+ long curTime;
+
+ while (!finished &&
+ ((curTime = System.currentTimeMillis()) < targetTime)) {
+ long diff = targetTime - curTime;
+ if (diff > 100)
+ diff = 100;
+ try {
+ sleep(diff);
+ }
+ catch (InterruptedException ie) {
+ }
+ }
+ System.err.println("");
+ System.exit(1);
+ }
+}
diff --git a/storage/bdb/test/scr016/TestRpcServer.java b/storage/bdb/test/scr016/TestRpcServer.java
new file mode 100644
index 00000000000..ef325cef075
--- /dev/null
+++ b/storage/bdb/test/scr016/TestRpcServer.java
@@ -0,0 +1,193 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestRpcServer.java,v 1.3 2002/01/11 15:54:03 bostic Exp $
+ */
+
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.Reader;
+import java.io.StringReader;
+import java.io.IOException;
+import java.io.PrintStream;
+
+public class TestRpcServer
+{
+ private static final String FileName = "access.db";
+
+ public TestRpcServer()
+ {
+ }
+
+ private static void usage()
+ {
+ System.err.println("usage: TestRpcServer\n");
+ System.exit(1);
+ }
+
+ public static void main(String argv[])
+ {
+ try
+ {
+ TestRpcServer app = new TestRpcServer();
+ app.run();
+ }
+ catch (DbException dbe)
+ {
+ System.err.println("TestRpcServer: " + dbe.toString());
+ System.exit(1);
+ }
+ catch (FileNotFoundException fnfe)
+ {
+ System.err.println("TestRpcServer: " + fnfe.toString());
+ System.exit(1);
+ }
+ System.exit(0);
+ }
+
+ // Prompts for a line, and keeps prompting until a non-blank
+ // line is returned. Returns null on EOF or error.
+ //
+ static public String askForLine(Reader reader,
+ PrintStream out, String prompt)
+ {
+ String result = "";
+ while (result != null && result.length() == 0) {
+ out.print(prompt);
+ out.flush();
+ result = getLine(reader);
+ }
+ return result;
+ }
+
+ // Not terribly efficient, but does the job.
+ // Works for reading a line from stdin or a file.
+ // Returns null on EOF. If EOF appears in the middle
+ // of a line, returns that line, then null on next call.
+ //
+ static public String getLine(Reader reader)
+ {
+ StringBuffer b = new StringBuffer();
+ int c;
+ try {
+ while ((c = reader.read()) != -1 && c != '\n') {
+ if (c != '\r')
+ b.append((char)c);
+ }
+ }
+ catch (IOException ioe) {
+ c = -1;
+ }
+
+ if (c == -1 && b.length() == 0)
+ return null;
+ else
+ return b.toString();
+ }
+
+ public void run()
+ throws DbException, FileNotFoundException
+ {
+ // Remove the previous database.
+ new File(FileName).delete();
+
+ DbEnv dbenv = new DbEnv(Db.DB_CLIENT);
+ dbenv.set_rpc_server(null, "localhost", 0, 0, 0);
+ dbenv.open(".", Db.DB_CREATE, 0644);
+ System.out.println("server connection set");
+
+ // Create the database object.
+ // The database lives in the RPC client environment created above.
+ Db table = new Db(dbenv, 0);
+ table.set_error_stream(System.err);
+ table.set_errpfx("TestRpcServer");
+ table.open(null, FileName, null, Db.DB_BTREE, Db.DB_CREATE, 0644);
+
+ //
+ // Insert records into the database, where the key is the user
+ // input and the data is the user input in reverse order.
+ //
+ Reader reader =
+ new StringReader("abc\nStuff\nmore Stuff\nlast line\n");
+
+ for (;;) {
+ String line = askForLine(reader, System.out, "input> ");
+ if (line == null)
+ break;
+
+ String reversed = (new StringBuffer(line)).reverse().toString();
+
+ // See definition of StringDbt below
+ //
+ StringDbt key = new StringDbt(line);
+ StringDbt data = new StringDbt(reversed);
+
+ try
+ {
+ int err;
+ if ((err = table.put(null,
+ key, data, Db.DB_NOOVERWRITE)) == Db.DB_KEYEXIST) {
+ System.out.println("Key " + line + " already exists.");
+ }
+ }
+ catch (DbException dbe)
+ {
+ System.out.println(dbe.toString());
+ }
+ System.out.println("");
+ }
+
+ // Acquire an iterator for the table.
+ Dbc iterator;
+ iterator = table.cursor(null, 0);
+
+ // Walk through the table, printing the key/data pairs.
+ // See class StringDbt defined below.
+ //
+ StringDbt key = new StringDbt();
+ StringDbt data = new StringDbt();
+ while (iterator.get(key, data, Db.DB_NEXT) == 0)
+ {
+ System.out.println(key.getString() + " : " + data.getString());
+ }
+ iterator.close();
+ table.close(0);
+ }
+
+ // Here's an example of how you can extend a Dbt in a straightforward
+ // way to allow easy storage/retrieval of strings, or whatever
+ // kind of data you wish. We've declared it as a static inner
+ // class, but it need not be.
+ //
+ static /*inner*/
+ class StringDbt extends Dbt
+ {
+ StringDbt()
+ {
+ set_flags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval
+ }
+
+ StringDbt(String value)
+ {
+ setString(value);
+ set_flags(Db.DB_DBT_MALLOC); // tell Db to allocate on retrieval
+ }
+
+ void setString(String value)
+ {
+ set_data(value.getBytes());
+ set_size(value.length());
+ }
+
+ String getString()
+ {
+ return new String(get_data(), 0, get_size());
+ }
+ }
+}
diff --git a/storage/bdb/test/scr016/TestSameDbt.java b/storage/bdb/test/scr016/TestSameDbt.java
new file mode 100644
index 00000000000..9866ed49307
--- /dev/null
+++ b/storage/bdb/test/scr016/TestSameDbt.java
@@ -0,0 +1,56 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestSameDbt.java,v 1.4 2002/01/23 14:29:51 bostic Exp $
+ */
+
+/*
+ * Simple test for get/put of specific values.
+ */
+
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import java.io.FileNotFoundException;
+
+public class TestSameDbt
+{
+ public static void main(String[] args)
+ {
+ try {
+ Db db = new Db(null, 0);
+ db.open(null, "my.db", null, Db.DB_BTREE, Db.DB_CREATE, 0644);
+
+ // try reusing the dbt
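+ // (passing the same Dbt object as both key and data should
+ // be rejected with "Dbt is already in use"; see the testout)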
+ Dbt keydatadbt = new Dbt("stuff".getBytes());
+ int gotexcept = 0;
+
+ try {
+ db.put(null, keydatadbt, keydatadbt, 0);
+ }
+ catch (DbException dbe) {
+ System.out.println("got expected Db Exception: " + dbe);
+ gotexcept++;
+ }
+
+ if (gotexcept != 1) {
+ System.err.println("Missed exception");
+ System.out.println("** FAIL **");
+ }
+ else {
+ System.out.println("Test succeeded.");
+ }
+ }
+ catch (DbException dbe) {
+ System.err.println("Db Exception: " + dbe);
+ }
+ catch (FileNotFoundException fnfe) {
+ System.err.println("FileNotFoundException: " + fnfe);
+ }
+
+ }
+
+}
diff --git a/storage/bdb/test/scr016/TestSameDbt.testout b/storage/bdb/test/scr016/TestSameDbt.testout
new file mode 100644
index 00000000000..be4bbbe59e9
--- /dev/null
+++ b/storage/bdb/test/scr016/TestSameDbt.testout
@@ -0,0 +1,2 @@
+got expected Db Exception: com.sleepycat.db.DbException: Dbt is already in use
+Test succeeded.
diff --git a/storage/bdb/test/scr016/TestSimpleAccess.java b/storage/bdb/test/scr016/TestSimpleAccess.java
new file mode 100644
index 00000000000..ba7390cada1
--- /dev/null
+++ b/storage/bdb/test/scr016/TestSimpleAccess.java
@@ -0,0 +1,37 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestSimpleAccess.java,v 1.5 2002/08/16 19:35:55 dda Exp $
+ */
+
+/*
+ * Simple test for get/put of specific values.
+ */
+
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import java.io.FileNotFoundException;
+
+public class TestSimpleAccess
+{
+ public static void main(String[] args)
+ {
+ try {
+ Db db = new Db(null, 0);
+ db.open(null, "my.db", null, Db.DB_BTREE, Db.DB_CREATE, 0644);
+
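+ // TestUtil.populate (in this directory) puts a single record,
+ // then does one good and one bad get, producing the first two
+ // lines of the expected output.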
+ TestUtil.populate(db);
+ System.out.println("finished test");
+ }
+ catch (DbException dbe) {
+ System.err.println("Db Exception: " + dbe);
+ }
+ catch (FileNotFoundException fnfe) {
+ System.err.println("FileNotFoundException: " + fnfe);
+ }
+ }
+}
diff --git a/storage/bdb/test/scr016/TestSimpleAccess.testout b/storage/bdb/test/scr016/TestSimpleAccess.testout
new file mode 100644
index 00000000000..dc88d4788e4
--- /dev/null
+++ b/storage/bdb/test/scr016/TestSimpleAccess.testout
@@ -0,0 +1,3 @@
+got data: data
+get using bad key: DB_NOTFOUND: No matching key/data pair found
+finished test
diff --git a/storage/bdb/test/scr016/TestStat.java b/storage/bdb/test/scr016/TestStat.java
new file mode 100644
index 00000000000..55ba9823115
--- /dev/null
+++ b/storage/bdb/test/scr016/TestStat.java
@@ -0,0 +1,57 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestStat.java,v 1.1 2002/08/16 19:35:56 dda Exp $
+ */
+
+/*
+ * Simple test for get/put of specific values.
+ */
+
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import java.io.FileNotFoundException;
+
+public class TestStat
+{
+ public static void main(String[] args)
+ {
+ int envflags =
+ Db.DB_INIT_MPOOL | Db.DB_INIT_LOCK |
+ Db.DB_INIT_LOG | Db.DB_INIT_TXN | Db.DB_CREATE;
+ try {
+ DbEnv dbenv = new DbEnv(0);
+ dbenv.open(".", envflags, 0);
+ Db db = new Db(dbenv, 0);
+ db.open(null, "my.db", null, Db.DB_BTREE, Db.DB_CREATE, 0);
+
+ TestUtil.populate(db);
+ System.out.println("BtreeStat:");
+ DbBtreeStat stat = (DbBtreeStat)db.stat(0);
+ System.out.println(" bt_magic: " + stat.bt_magic);
+
+ System.out.println("LogStat:");
+ DbLogStat logstat = dbenv.log_stat(0);
+ System.out.println(" st_magic: " + logstat.st_magic);
+ System.out.println(" st_cur_file: " + logstat.st_cur_file);
+
+ System.out.println("RepStat:");
+ DbRepStat repstat = dbenv.rep_stat(0);
+ System.out.println(" st_status: " + repstat.st_status);
+ System.out.println(" st_log_duplication: " +
+ repstat.st_log_duplicated);
+
+ System.out.println("finished test");
+ }
+ catch (DbException dbe) {
+ System.err.println("Db Exception: " + dbe);
+ }
+ catch (FileNotFoundException fnfe) {
+ System.err.println("FileNotFoundException: " + fnfe);
+ }
+ }
+}
diff --git a/storage/bdb/test/scr016/TestStat.testout b/storage/bdb/test/scr016/TestStat.testout
new file mode 100644
index 00000000000..caf9db1fb13
--- /dev/null
+++ b/storage/bdb/test/scr016/TestStat.testout
@@ -0,0 +1,11 @@
+got data: data
+get using bad key: DB_NOTFOUND: No matching key/data pair found
+BtreeStat:
+ bt_magic: 340322
+LogStat:
+ st_magic: 264584
+ st_cur_file: 1
+RepStat:
+ st_status: 0
+ st_log_duplicated: 0
+finished test
diff --git a/storage/bdb/test/scr016/TestTruncate.java b/storage/bdb/test/scr016/TestTruncate.java
new file mode 100644
index 00000000000..71377236246
--- /dev/null
+++ b/storage/bdb/test/scr016/TestTruncate.java
@@ -0,0 +1,87 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestTruncate.java,v 1.4 2002/01/23 14:29:52 bostic Exp $
+ */
+
+/*
+ * Simple test for get/put of specific values.
+ */
+
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import java.io.FileNotFoundException;
+
+public class TestTruncate
+{
+ public static void main(String[] args)
+ {
+ try {
+ Db db = new Db(null, 0);
+ db.open(null, "my.db", null, Db.DB_BTREE, Db.DB_CREATE, 0644);
+
+ // populate our massive database.
+ Dbt keydbt = new Dbt("key".getBytes());
+ Dbt datadbt = new Dbt("data".getBytes());
+ db.put(null, keydbt, datadbt, 0);
+
+ // Now, retrieve. We could use keydbt over again,
+ // but that wouldn't be typical in an application.
+ Dbt goodkeydbt = new Dbt("key".getBytes());
+ Dbt badkeydbt = new Dbt("badkey".getBytes());
+ Dbt resultdbt = new Dbt();
+ resultdbt.set_flags(Db.DB_DBT_MALLOC);
+
+ int ret;
+
+ if ((ret = db.get(null, goodkeydbt, resultdbt, 0)) != 0) {
+ System.out.println("get: " + DbEnv.strerror(ret));
+ }
+ else {
+ String result =
+ new String(resultdbt.get_data(), 0, resultdbt.get_size());
+ System.out.println("got data: " + result);
+ }
+
+ if ((ret = db.get(null, badkeydbt, resultdbt, 0)) != 0) {
+ // We expect this...
+ System.out.println("get using bad key: " + DbEnv.strerror(ret));
+ }
+ else {
+ String result =
+ new String(resultdbt.get_data(), 0, resultdbt.get_size());
+ System.out.println("*** got data using bad key!!: " + result);
+ }
+
+ // Now, truncate and make sure that it's really gone.
+ System.out.println("truncating data...");
+ int nrecords = db.truncate(null, 0);
+ System.out.println("truncate returns " + nrecords);
+ if ((ret = db.get(null, goodkeydbt, resultdbt, 0)) != 0) {
+ // We expect this...
+ System.out.println("after trunctate get: " +
+ DbEnv.strerror(ret));
+ }
+ else {
+ String result =
+ new String(resultdbt.get_data(), 0, resultdbt.get_size());
+ System.out.println("got data: " + result);
+ }
+
+ db.close(0);
+ System.out.println("finished test");
+ }
+ catch (DbException dbe) {
+ System.err.println("Db Exception: " + dbe);
+ }
+ catch (FileNotFoundException fnfe) {
+ System.err.println("FileNotFoundException: " + fnfe);
+ }
+
+ }
+
+}
diff --git a/storage/bdb/test/scr016/TestTruncate.testout b/storage/bdb/test/scr016/TestTruncate.testout
new file mode 100644
index 00000000000..23f291df754
--- /dev/null
+++ b/storage/bdb/test/scr016/TestTruncate.testout
@@ -0,0 +1,6 @@
+got data: data
+get using bad key: DB_NOTFOUND: No matching key/data pair found
+truncating data...
+truncate returns 1
+after truncate get: DB_NOTFOUND: No matching key/data pair found
+finished test
diff --git a/storage/bdb/test/scr016/TestUtil.java b/storage/bdb/test/scr016/TestUtil.java
new file mode 100644
index 00000000000..1bddfb0b014
--- /dev/null
+++ b/storage/bdb/test/scr016/TestUtil.java
@@ -0,0 +1,57 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997-2002
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestUtil.java,v 1.1 2002/08/16 19:35:56 dda Exp $
+ */
+
+/*
+ * Utilities used by many tests.
+ */
+
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import java.io.FileNotFoundException;
+
+public class TestUtil
+{
+ public static void populate(Db db)
+ throws DbException
+ {
+ // populate our massive database.
+ Dbt keydbt = new Dbt("key".getBytes());
+ Dbt datadbt = new Dbt("data".getBytes());
+ db.put(null, keydbt, datadbt, 0);
+
+ // Now, retrieve. We could use keydbt over again,
+ // but that wouldn't be typical in an application.
+ Dbt goodkeydbt = new Dbt("key".getBytes());
+ Dbt badkeydbt = new Dbt("badkey".getBytes());
+ Dbt resultdbt = new Dbt();
+ resultdbt.set_flags(Db.DB_DBT_MALLOC);
+
+ int ret;
+
+ if ((ret = db.get(null, goodkeydbt, resultdbt, 0)) != 0) {
+ System.out.println("get: " + DbEnv.strerror(ret));
+ }
+ else {
+ String result =
+ new String(resultdbt.get_data(), 0, resultdbt.get_size());
+ System.out.println("got data: " + result);
+ }
+
+ if ((ret = db.get(null, badkeydbt, resultdbt, 0)) != 0) {
+ // We expect this...
+ System.out.println("get using bad key: " + DbEnv.strerror(ret));
+ }
+ else {
+ String result =
+ new String(resultdbt.get_data(), 0, resultdbt.get_size());
+ System.out.println("*** got data using bad key!!: " + result);
+ }
+ }
+}
diff --git a/storage/bdb/test/scr016/TestXAServlet.java b/storage/bdb/test/scr016/TestXAServlet.java
new file mode 100644
index 00000000000..8b9fe57e261
--- /dev/null
+++ b/storage/bdb/test/scr016/TestXAServlet.java
@@ -0,0 +1,313 @@
+/*-
+ * See the file LICENSE for redistribution information.
+ *
+ * Copyright (c) 1997, 1998, 1999, 2000
+ * Sleepycat Software. All rights reserved.
+ *
+ * $Id: TestXAServlet.java,v 1.1 2002/04/24 03:26:33 dda Exp $
+ */
+
+/*
+ * Simple test of XA, using WebLogic.
+ */
+
+package com.sleepycat.test;
+
+import com.sleepycat.db.*;
+import com.sleepycat.db.xa.*;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.PrintWriter;
+import java.util.Hashtable;
+import javax.servlet.*;
+import javax.servlet.http.*;
+import javax.transaction.*;
+import javax.transaction.xa.*;
+import javax.naming.Context;
+import javax.naming.InitialContext;
+import javax.naming.NamingException;
+import weblogic.transaction.TxHelper;
+import weblogic.transaction.TransactionManager;
+
+public class TestXAServlet extends HttpServlet
+{
+ public static final String ENV_HOME = "TESTXADIR";
+ public static final String DEFAULT_URL = "t3://localhost:7001";
+ public static String filesep = System.getProperty("file.separator");
+
+ private static TransactionManager tm;
+ private static DbXAResource xaresource;
+ private static boolean initialized = false;
+
+ /**
+ * Utility to remove files recursively.
+ */
+ public static void removeRecursive(File f)
+ {
+ if (f.isDirectory()) {
+ String[] sub = f.list();
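+ // Recurse using the full path, not just the last name
+ // component, so nested entries resolve correctly.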
+ for (int i=0; i<sub.length; i++)
+ removeRecursive(new File(f.getPath() + filesep + sub[i]));
+ }
+ f.delete();
+ }
+
+ /**
+ * Typically done only once, unless shutdown is invoked. This
+ * sets up directories, and removes any work files from previous
+ * runs. Also establishes a transaction manager that we'll use
+ * for various transactions. Each call opens/creates a new DB
+ * environment in our work directory.
+ */
+ public static synchronized void startup()
+ {
+ if (initialized)
+ return;
+
+ try {
+ File dir = new File(ENV_HOME);
+ removeRecursive(dir);
+ dir.mkdirs();
+
+ System.out.println("Getting context");
+ InitialContext ic = getInitialContext(DEFAULT_URL);
+ System.out.println("Creating XAResource");
+ xaresource = new DbXAResource(ENV_HOME, 77, 0);
+ System.out.println("Registering with transaction manager");
+ tm = TxHelper.getTransactionManager();
+ tm.registerStaticResource("DbXA", xaresource);
+ initialized = true;
+ }
+ catch (Exception e) {
+ System.err.println("Exception: " + e);
+ e.printStackTrace();
+ }
+ initialized = true;
+ }
+
+ /**
+ * Closes the XA resource manager.
+ */
+ public static synchronized void shutdown(PrintWriter out)
+ throws XAException
+ {
+ if (!initialized)
+ return;
+
+ out.println("Closing the resource.");
+ xaresource.close(0);
+ out.println("Shutdown complete.");
+ initialized = false;
+ }
+
+
+ /**
+ * Should be called once per chunk of major activity.
+ */
+ public void initialize()
+ {
+ startup();
+ }
+
+ private static int count = 1;
+ private static boolean debugInited = false;
+ private Xid bogusXid;
+
+ public static synchronized int incrCount()
+ {
+ return count++;
+ }
+
+ public void debugSetup(PrintWriter out)
+ throws ServletException, IOException
+ {
+ try {
+ Db.load_db();
+ }
+ catch (Exception e) {
+ out.println("got exception during load: " + e);
+ System.out.println("got exception during load: " + e);
+ }
+ out.println("The servlet has been restarted, and Berkeley DB is loaded");
+ out.println("<p>If you're debugging, you should now start the debugger and set breakpoints.");
+ }
+
+ public void doXATransaction(PrintWriter out, String key, String value,
+ String operation)
+ throws ServletException, IOException
+ {
+ try {
+ int counter = incrCount();
+ if (key == null || key.equals(""))
+ key = "key" + counter;
+ if (value == null || value.equals(""))
+ value = "value" + counter;
+
+ out.println("Adding (\"" + key + "\", \"" + value + "\")");
+
+ System.out.println("XA transaction begin");
+ tm.begin();
+ System.out.println("getting XA transaction");
+ DbXAResource.DbAttach attach = DbXAResource.xa_attach(null, null);
+ DbTxn txn = attach.get_txn();
+ DbEnv env = attach.get_env();
+ Db db = new Db(env, 0);
+ db.open(txn, "my.db", null, Db.DB_BTREE, Db.DB_CREATE, 0644);
+ System.out.println("DB put " + key);
+ db.put(txn,
+ new Dbt(key.getBytes()),
+ new Dbt(value.getBytes()),
+ 0);
+
+ if (operation.equals("rollback")) {
+ out.println("<p>ROLLBACK");
+ System.out.println("XA transaction rollback");
+ tm.rollback();
+ System.out.println("XA rollback returned");
+
+ // The old db is no good after the rollback
+ // since the open was part of the transaction.
+ // Get another db for the cursor dump
+ //
+ db = new Db(env, 0);
+ db.open(null, "my.db", null, Db.DB_BTREE, Db.DB_CREATE, 0644);
+ }
+ else {
+ out.println("<p>COMMITTED");
+ System.out.println("XA transaction commit");
+ tm.commit();
+ }
+
+ // Show the current state of the database.
+ Dbc dbc = db.cursor(null, 0);
+ Dbt gotkey = new Dbt();
+ Dbt gotdata = new Dbt();
+
+ out.println("<p>Current database values:");
+ while (dbc.get(gotkey, gotdata, Db.DB_NEXT) == 0) {
+ out.println("<br> " + getDbtString(gotkey) + " : "
+ + getDbtString(gotdata));
+ }
+ dbc.close();
+ db.close(0);
+ }
+ catch (DbException dbe) {
+ System.err.println("Db Exception: " + dbe);
+ out.println(" *** Exception received: " + dbe);
+ dbe.printStackTrace();
+ }
+ catch (FileNotFoundException fnfe) {
+ System.err.println("FileNotFoundException: " + fnfe);
+ out.println(" *** Exception received: " + fnfe);
+ fnfe.printStackTrace();
+ }
+ // Includes SystemException, NotSupportedException, RollbackException
+ catch (Exception e) {
+ System.err.println("Exception: " + e);
+ out.println(" *** Exception received: " + e);
+ e.printStackTrace();
+ }
+ }
+
+ private static Xid getBogusXid()
+ throws XAException
+ {
+ return new DbXid(1, "BOGUS_gtrid".getBytes(),
+ "BOGUS_bqual".getBytes());
+ }
+
+ private static String getDbtString(Dbt dbt)
+ {
+ return new String(dbt.get_data(), 0, dbt.get_size());
+ }
+
+ /**
+ * doGet is called as a result of invoking the servlet.
+ */
+ public void doGet(HttpServletRequest req, HttpServletResponse resp)
+ throws ServletException, IOException
+ {
+ try {
+ resp.setContentType("text/html");
+ PrintWriter out = resp.getWriter();
+
+ String key = req.getParameter("key");
+ String value = req.getParameter("value");
+ String operation = req.getParameter("operation");
+
+ out.println("<HTML>");
+ out.println("<HEAD>");
+ out.println("<TITLE>Berkeley DB with XA</TITLE>");
+ out.println("</HEAD><BODY>");
+ out.println("<a href=\"TestXAServlet" +
+ "\">Database put and commit</a><br>");
+ out.println("<a href=\"TestXAServlet?operation=rollback" +
+ "\">Database put and rollback</a><br>");
+ out.println("<a href=\"TestXAServlet?operation=close" +
+ "\">Close the XA resource manager</a><br>");
+ out.println("<a href=\"TestXAServlet?operation=forget" +
+ "\">Forget an operation (bypasses TM)</a><br>");
+ out.println("<a href=\"TestXAServlet?operation=prepare" +
+ "\">Prepare an operation (bypasses TM)</a><br>");
+ out.println("<br>");
+
+ if (!debugInited) {
+ // Don't initialize XA yet, give the user
+ // a chance to attach a debugger if necessary.
+ debugSetup(out);
+ debugInited = true;
+ }
+ else {
+ initialize();
+ if (operation == null)
+ operation = "commit";
+
+ if (operation.equals("close")) {
+ shutdown(out);
+ }
+ else if (operation.equals("forget")) {
+ // A bogus test; we just make sure the API is callable.
+ out.println("<p>FORGET");
+ System.out.println("XA forget bogus XID (bypass TM)");
+ xaresource.forget(getBogusXid());
+ }
+ else if (operation.equals("prepare")) {
+ // A bogus test; we just make sure the API is callable.
+ out.println("<p>PREPARE");
+ System.out.println("XA prepare bogus XID (bypass TM)");
+ xaresource.prepare(getBogusXid());
+ }
+ else {
+ // commit or rollback (prepare and forget were handled above)
+ doXATransaction(out, key, value, operation);
+ }
+ }
+ out.println("</BODY></HTML>");
+
+ System.out.println("Finished.");
+ }
+ // Includes SystemException, NotSupportedException, RollbackException
+ catch (Exception e) {
+ System.err.println("Exception: " + e);
+ e.printStackTrace();
+ }
+
+ }
+
+
+ /**
+ * From weblogic's sample code:
+ * samples/examples/jta/jmsjdbc/Client.java
+ */
+ private static InitialContext getInitialContext(String url)
+ throws NamingException
+ {
+ Hashtable env = new Hashtable();
+ env.put(Context.INITIAL_CONTEXT_FACTORY,
+ "weblogic.jndi.WLInitialContextFactory");
+ env.put(Context.PROVIDER_URL, url);
+ return new InitialContext(env);
+ }
+
+}
diff --git a/storage/bdb/test/scr016/chk.javatests b/storage/bdb/test/scr016/chk.javatests
new file mode 100644
index 00000000000..34d7dfe78d7
--- /dev/null
+++ b/storage/bdb/test/scr016/chk.javatests
@@ -0,0 +1,79 @@
+#!/bin/sh -
+#
+# $Id: chk.javatests,v 1.5 2002/08/16 19:35:56 dda Exp $
+#
+# Check to make sure that regression tests for Java run.
+
+TEST_JAVA_SRCDIR=../test/scr016 # must be a relative directory
+JAVA=${JAVA:-java}
+JAVAC=${JAVAC:-javac}
+
+# CLASSPATH is used by javac and java.
+# We use CLASSPATH rather than the -classpath command line option
+# because the latter behaves differently between JDK1.1 and JDK1.2.
+export CLASSPATH="./classes:../db.jar"
+export LD_LIBRARY_PATH="../.libs"
+
+
+# All paths must be relative to a subdirectory of the build directory
+LIBS="-L.. -ldb -ldb_cxx"
+CXXFLAGS="-I.. -I../../dbinc"
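+# (Note: LIBS and CXXFLAGS are not referenced below; they appear to
+# be carried over from the C/C++ test harnesses.)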
+
+# Test must be run from a local build directory, not from a test
+# directory.
+cd ..
+[ -f db_config.h ] || {
+ echo 'FAIL: chk.javatests must be run from a local build directory.'
+ exit 1
+}
+[ -d ../docs_src ] || {
+ echo 'FAIL: chk.javatests must be run from a local build directory.'
+ exit 1
+}
+version=`sed -e 's/.* \([0-9]*\.[0-9]*\)\..*/\1/' -e q ../README `
+[ -f libdb_java-$version.la ] || make libdb_java-$version.la || {
+ echo "FAIL: unable to build libdb_java-$version.la"
+ exit 1
+}
+[ -f db.jar ] || make db.jar || {
+ echo 'FAIL: unable to build db.jar'
+ exit 1
+}
+testnames=`cd $TEST_JAVA_SRCDIR; ls *.java | sed -e 's/\.java$//'`
+
+for testname in $testnames; do
+ if grep -x $testname $TEST_JAVA_SRCDIR/ignore > /dev/null; then
+ echo " **** java test $testname ignored"
+ continue
+ fi
+
+ echo " ==== java test $testname"
+ rm -rf TESTJAVA; mkdir -p TESTJAVA/classes
+ cd ./TESTJAVA
+ testprefix=../$TEST_JAVA_SRCDIR/$testname
+ ${JAVAC} -d ./classes $testprefix.java ../$TEST_JAVA_SRCDIR/TestUtil.java > ../$testname.compileout 2>&1 || {
+ echo "FAIL: compilation of $testname failed, see ../$testname.compileout"
+ exit 1
+ }
+ rm -f ../$testname.compileout
+ infile=$testprefix.testin
+ [ -f $infile ] || infile=/dev/null
+ goodoutfile=$testprefix.testout
+ [ -f $goodoutfile ] || goodoutfile=/dev/null
+ gooderrfile=$testprefix.testerr
+ [ -f $gooderrfile ] || gooderrfile=/dev/null
+ ${JAVA} com.sleepycat.test.$testname <$infile >../$testname.out 2>../$testname.err
+ cmp ../$testname.out $goodoutfile > /dev/null || {
+ echo "FAIL: $testname output differs: see ../$testname.out, $goodoutfile"
+ exit 1
+ }
+ cmp ../$testname.err $gooderrfile > /dev/null || {
+ echo "FAIL: $testname error differs: see ../$testname.err, $gooderrfile"
+ exit 1
+ }
+ cd ..
+ rm -f $testname.err $testname.out
+done
+rm -rf TESTJAVA
+exit 0
diff --git a/storage/bdb/test/scr016/ignore b/storage/bdb/test/scr016/ignore
new file mode 100644
index 00000000000..1dfaf6adea4
--- /dev/null
+++ b/storage/bdb/test/scr016/ignore
@@ -0,0 +1,22 @@
+#
+# $Id: ignore,v 1.4 2002/08/16 19:35:56 dda Exp $
+#
+# A list of tests to ignore
+
+# TestRpcServer is not debugged
+TestRpcServer
+
+# TestReplication is not debugged
+TestReplication
+
+# These are currently not working
+TestAppendRecno
+TestAssociate
+TestLogc
+TestConstruct02
+
+# TestUtil is used by the other tests, it does not stand on its own
+TestUtil
+
+# XA needs a special installation, it is not part of testall
+TestXAServlet
diff --git a/storage/bdb/test/scr016/testall b/storage/bdb/test/scr016/testall
new file mode 100644
index 00000000000..a4e1b5a8c70
--- /dev/null
+++ b/storage/bdb/test/scr016/testall
@@ -0,0 +1,32 @@
+#!/bin/sh -
+# $Id: testall,v 1.4 2001/09/13 14:49:37 dda Exp $
+#
+# Run all the Java regression tests
+
+ecode=0
+prefixarg=""
+stdinarg=""
+while :
+do
+ case "$1" in
+ --prefix=* )
+ prefixarg="$1"; shift;;
+ --stdin )
+ stdinarg="$1"; shift;;
+ * )
+ break
+ esac
+done
+files="`find . -name \*.java -print`"
+for file in $files; do
+ name=`echo $file | sed -e 's:^\./::' -e 's/\.java$//'`
+ if grep -x $name ignore > /dev/null; then
+ echo " **** java test $name ignored"
+ else
+ echo " ==== java test $name"
+ if ! sh ./testone $prefixarg $stdinarg $name; then
+ ecode=1
+ fi
+ fi
+done
+exit $ecode
diff --git a/storage/bdb/test/scr016/testone b/storage/bdb/test/scr016/testone
new file mode 100644
index 00000000000..5f5d2e0017d
--- /dev/null
+++ b/storage/bdb/test/scr016/testone
@@ -0,0 +1,122 @@
+#!/bin/sh -
+# $Id: testone,v 1.5 2002/08/16 19:35:56 dda Exp $
+#
+# Run just one Java regression test; the single argument
+# is the classname within this package.
+
+error()
+{
+ echo '' >&2
+ echo "Java regression error: $@" >&2
+ echo '' >&2
+ ecode=1
+}
+
+# Compares the result against the good version; reports any
+# differences, and removes the result file if there are none.
+#
+compare_result()
+{
+ good="$1"
+ latest="$2"
+ if [ ! -e "$good" ]; then
+ echo "Note: $good does not exist"
+ return
+ fi
+ tmpout=/tmp/blddb$$.tmp
+ diff "$good" "$latest" > $tmpout
+ if [ -s $tmpout ]; then
+ nbad=`grep '^[0-9]' $tmpout | wc -l`
+ error "$good and $latest differ in $nbad places."
+ else
+ rm $latest
+ fi
+ rm -f $tmpout
+}
+
+ecode=0
+stdinflag=n
+JAVA=${JAVA:-java}
+JAVAC=${JAVAC:-javac}
+
+# classdir is relative to TESTDIR subdirectory
+classdir=./classes
+
+# CLASSPATH is used by javac and java.
+# We use CLASSPATH rather than the -classpath command line option
+# because the latter behaves differently between JDK1.1 and JDK1.2.
+export CLASSPATH="$classdir:$CLASSPATH"
+
+# determine the prefix of the install tree
+prefix=""
+while :
+do
+ case "$1" in
+ --prefix=* )
+ prefix="`echo $1 | sed -e 's/--prefix=//'`"; shift
+ export LD_LIBRARY_PATH="$prefix/lib:$LD_LIBRARY_PATH"
+ export CLASSPATH="$prefix/lib/db.jar:$CLASSPATH"
+ ;;
+ --stdin )
+ stdinflag=y; shift
+ ;;
+ * )
+ break
+ ;;
+ esac
+done
+
+if [ "$#" = 0 ]; then
+ echo 'Usage: testone [ --prefix=<dir> | --stdin ] TestName'
+ exit 1
+fi
+name="$1"
+
+# class must be public
+if ! grep "public.*class.*$name" $name.java > /dev/null; then
+ error "public class $name is not declared in file $name.java"
+ exit 1
+fi
+
+# compile
+rm -rf TESTDIR; mkdir TESTDIR
+cd ./TESTDIR
+mkdir -p $classdir
+${JAVAC} -d $classdir ../$name.java ../TestUtil.java > ../$name.compileout 2>&1
+if [ $? != 0 -o -s ../$name.compileout ]; then
+ error "compilation of $name failed, see $name.compileout"
+ exit 1
+fi
+rm -f ../$name.compileout
+
+# find input and error file
+infile=../$name.testin
+if [ ! -f $infile ]; then
+ infile=/dev/null
+fi
+
+# run and diff results
+if [ "$stdinflag" = y ]
+then
+ ${JAVA} com.sleepycat.test.$name $TEST_ARGS >../$name.out 2>../$name.err
+else
+ ${JAVA} com.sleepycat.test.$name $TEST_ARGS <$infile >../$name.out 2>../$name.err
+fi
+cd ..
+
+testerr=$name.testerr
+if [ ! -f $testerr ]; then
+ testerr=/dev/null
+fi
+
+testout=$name.testout
+if [ ! -f $testout ]; then
+ testout=/dev/null
+fi
+
+compare_result $testout $name.out
+compare_result $testerr $name.err
+rm -rf TESTDIR
+exit $ecode
diff --git a/storage/bdb/test/scr017/O.BH b/storage/bdb/test/scr017/O.BH
new file mode 100644
index 00000000000..cd499d38779
--- /dev/null
+++ b/storage/bdb/test/scr017/O.BH
@@ -0,0 +1,196 @@
+abc_10_efg
+abc_10_efg
+abc_11_efg
+abc_11_efg
+abc_12_efg
+abc_12_efg
+abc_13_efg
+abc_13_efg
+abc_14_efg
+abc_14_efg
+abc_15_efg
+abc_15_efg
+abc_16_efg
+abc_16_efg
+abc_17_efg
+abc_17_efg
+abc_18_efg
+abc_18_efg
+abc_19_efg
+abc_19_efg
+abc_1_efg
+abc_1_efg
+abc_20_efg
+abc_20_efg
+abc_21_efg
+abc_21_efg
+abc_22_efg
+abc_22_efg
+abc_23_efg
+abc_23_efg
+abc_24_efg
+abc_24_efg
+abc_25_efg
+abc_25_efg
+abc_26_efg
+abc_26_efg
+abc_27_efg
+abc_27_efg
+abc_28_efg
+abc_28_efg
+abc_29_efg
+abc_29_efg
+abc_2_efg
+abc_2_efg
+abc_30_efg
+abc_30_efg
+abc_31_efg
+abc_31_efg
+abc_32_efg
+abc_32_efg
+abc_33_efg
+abc_33_efg
+abc_34_efg
+abc_34_efg
+abc_36_efg
+abc_36_efg
+abc_37_efg
+abc_37_efg
+abc_38_efg
+abc_38_efg
+abc_39_efg
+abc_39_efg
+abc_3_efg
+abc_3_efg
+abc_40_efg
+abc_40_efg
+abc_41_efg
+abc_41_efg
+abc_42_efg
+abc_42_efg
+abc_43_efg
+abc_43_efg
+abc_44_efg
+abc_44_efg
+abc_45_efg
+abc_45_efg
+abc_46_efg
+abc_46_efg
+abc_47_efg
+abc_47_efg
+abc_48_efg
+abc_48_efg
+abc_49_efg
+abc_49_efg
+abc_4_efg
+abc_4_efg
+abc_50_efg
+abc_50_efg
+abc_51_efg
+abc_51_efg
+abc_52_efg
+abc_52_efg
+abc_53_efg
+abc_53_efg
+abc_54_efg
+abc_54_efg
+abc_55_efg
+abc_55_efg
+abc_56_efg
+abc_56_efg
+abc_57_efg
+abc_57_efg
+abc_58_efg
+abc_58_efg
+abc_59_efg
+abc_59_efg
+abc_5_efg
+abc_5_efg
+abc_60_efg
+abc_60_efg
+abc_61_efg
+abc_61_efg
+abc_62_efg
+abc_62_efg
+abc_63_efg
+abc_63_efg
+abc_64_efg
+abc_64_efg
+abc_65_efg
+abc_65_efg
+abc_66_efg
+abc_66_efg
+abc_67_efg
+abc_67_efg
+abc_68_efg
+abc_68_efg
+abc_69_efg
+abc_69_efg
+abc_6_efg
+abc_6_efg
+abc_70_efg
+abc_70_efg
+abc_71_efg
+abc_71_efg
+abc_72_efg
+abc_72_efg
+abc_73_efg
+abc_73_efg
+abc_74_efg
+abc_74_efg
+abc_75_efg
+abc_75_efg
+abc_76_efg
+abc_76_efg
+abc_77_efg
+abc_77_efg
+abc_78_efg
+abc_78_efg
+abc_79_efg
+abc_79_efg
+abc_7_efg
+abc_7_efg
+abc_80_efg
+abc_80_efg
+abc_81_efg
+abc_81_efg
+abc_82_efg
+abc_82_efg
+abc_83_efg
+abc_83_efg
+abc_84_efg
+abc_84_efg
+abc_85_efg
+abc_85_efg
+abc_86_efg
+abc_86_efg
+abc_87_efg
+abc_87_efg
+abc_88_efg
+abc_88_efg
+abc_89_efg
+abc_89_efg
+abc_8_efg
+abc_8_efg
+abc_90_efg
+abc_90_efg
+abc_91_efg
+abc_91_efg
+abc_92_efg
+abc_92_efg
+abc_93_efg
+abc_93_efg
+abc_94_efg
+abc_94_efg
+abc_95_efg
+abc_95_efg
+abc_96_efg
+abc_96_efg
+abc_97_efg
+abc_97_efg
+abc_98_efg
+abc_98_efg
+abc_99_efg
+abc_99_efg
+abc_9_efg
+abc_9_efg
diff --git a/storage/bdb/test/scr017/O.R b/storage/bdb/test/scr017/O.R
new file mode 100644
index 00000000000..d78a04727d8
--- /dev/null
+++ b/storage/bdb/test/scr017/O.R
@@ -0,0 +1,196 @@
+1
+abc_1_efg
+2
+abc_2_efg
+3
+abc_3_efg
+4
+abc_4_efg
+5
+abc_5_efg
+6
+abc_6_efg
+7
+abc_7_efg
+8
+abc_8_efg
+9
+abc_9_efg
+10
+abc_10_efg
+11
+abc_11_efg
+12
+abc_12_efg
+13
+abc_13_efg
+14
+abc_14_efg
+15
+abc_15_efg
+16
+abc_16_efg
+17
+abc_17_efg
+18
+abc_18_efg
+19
+abc_19_efg
+20
+abc_20_efg
+21
+abc_21_efg
+22
+abc_22_efg
+23
+abc_23_efg
+24
+abc_24_efg
+25
+abc_25_efg
+26
+abc_26_efg
+27
+abc_27_efg
+28
+abc_28_efg
+29
+abc_29_efg
+30
+abc_30_efg
+31
+abc_31_efg
+32
+abc_32_efg
+33
+abc_33_efg
+34
+abc_34_efg
+35
+abc_36_efg
+36
+abc_37_efg
+37
+abc_38_efg
+38
+abc_39_efg
+39
+abc_40_efg
+40
+abc_41_efg
+41
+abc_42_efg
+42
+abc_43_efg
+43
+abc_44_efg
+44
+abc_45_efg
+45
+abc_46_efg
+46
+abc_47_efg
+47
+abc_48_efg
+48
+abc_49_efg
+49
+abc_50_efg
+50
+abc_51_efg
+51
+abc_52_efg
+52
+abc_53_efg
+53
+abc_54_efg
+54
+abc_55_efg
+55
+abc_56_efg
+56
+abc_57_efg
+57
+abc_58_efg
+58
+abc_59_efg
+59
+abc_60_efg
+60
+abc_61_efg
+61
+abc_62_efg
+62
+abc_63_efg
+63
+abc_64_efg
+64
+abc_65_efg
+65
+abc_66_efg
+66
+abc_67_efg
+67
+abc_68_efg
+68
+abc_69_efg
+69
+abc_70_efg
+70
+abc_71_efg
+71
+abc_72_efg
+72
+abc_73_efg
+73
+abc_74_efg
+74
+abc_75_efg
+75
+abc_76_efg
+76
+abc_77_efg
+77
+abc_78_efg
+78
+abc_79_efg
+79
+abc_80_efg
+80
+abc_81_efg
+81
+abc_82_efg
+82
+abc_83_efg
+83
+abc_84_efg
+84
+abc_85_efg
+85
+abc_86_efg
+86
+abc_87_efg
+87
+abc_88_efg
+88
+abc_89_efg
+89
+abc_90_efg
+90
+abc_91_efg
+91
+abc_92_efg
+92
+abc_93_efg
+93
+abc_94_efg
+94
+abc_95_efg
+95
+abc_96_efg
+96
+abc_97_efg
+97
+abc_98_efg
+98
+abc_99_efg
diff --git a/storage/bdb/test/scr017/chk.db185 b/storage/bdb/test/scr017/chk.db185
new file mode 100644
index 00000000000..c2a07c51d26
--- /dev/null
+++ b/storage/bdb/test/scr017/chk.db185
@@ -0,0 +1,26 @@
+#!/bin/sh -
+#
+# $Id: chk.db185,v 1.2 2001/10/12 17:55:38 bostic Exp $
+#
+# Check to make sure we can run DB 1.85 code.
+
+[ -f ../libdb.a ] || (cd .. && make libdb.a) || {
+ echo 'FAIL: unable to find or build libdb.a'
+ exit 1
+}
+
+if cc -g -Wall -I.. t.c ../libdb.a -o t; then
+ :
+else
+ echo "FAIL: unable to compile test program t.c"
+ exit 1
+fi
+
+if ./t; then
+ :
+else
+ echo "FAIL: test program failed"
+ exit 1
+fi
+
+exit 0
diff --git a/storage/bdb/test/scr017/t.c b/storage/bdb/test/scr017/t.c
new file mode 100644
index 00000000000..f03b33880d6
--- /dev/null
+++ b/storage/bdb/test/scr017/t.c
@@ -0,0 +1,188 @@
+#include <sys/types.h>
+
+#include <errno.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "db_185.h"
+
+void err(char *);
+int mycmp(const DBT *, const DBT *);
+void ops(DB *, int);
+
+int
+main()
+{
+ DB *dbp;
+ HASHINFO h_info;
+ BTREEINFO b_info;
+ RECNOINFO r_info;
+
+ printf("\tBtree...\n");
+ memset(&b_info, 0, sizeof(b_info));
+ b_info.flags = R_DUP;
+ b_info.cachesize = 100 * 1024;
+ b_info.psize = 512;
+ b_info.lorder = 4321;
+ b_info.compare = mycmp;
+ (void)remove("a.db");
+ if ((dbp =
+ dbopen("a.db", O_CREAT | O_RDWR, 0664, DB_BTREE, &b_info)) == NULL)
+ err("dbopen: btree");
+ ops(dbp, DB_BTREE);
+
+ printf("\tHash...\n");
+ memset(&h_info, 0, sizeof(h_info));
+ h_info.bsize = 512;
+ h_info.ffactor = 6;
+ h_info.nelem = 1000;
+ h_info.cachesize = 100 * 1024;
+ h_info.lorder = 1234;
+ (void)remove("a.db");
+ if ((dbp =
+ dbopen("a.db", O_CREAT | O_RDWR, 0664, DB_HASH, &h_info)) == NULL)
+ err("dbopen: hash");
+ ops(dbp, DB_HASH);
+
+ printf("\tRecno...\n");
+ memset(&r_info, 0, sizeof(r_info));
+ r_info.flags = R_FIXEDLEN;
+ r_info.cachesize = 100 * 1024;
+ r_info.psize = 1024;
+ r_info.reclen = 37;
+ (void)remove("a.db");
+ if ((dbp =
+ dbopen("a.db", O_CREAT | O_RDWR, 0664, DB_RECNO, &r_info)) == NULL)
+ err("dbopen: recno");
+ ops(dbp, DB_RECNO);
+
+ return (0);
+}
+
+int
+mycmp(a, b)
+ const DBT *a, *b;
+{
+ size_t len;
+ u_int8_t *p1, *p2;
+
+ len = a->size > b->size ? b->size : a->size;
+ for (p1 = a->data, p2 = b->data; len--; ++p1, ++p2)
+ if (*p1 != *p2)
+ return ((long)*p1 - (long)*p2);
+ return ((long)a->size - (long)b->size);
+}
+
+void
+ops(dbp, type)
+ DB *dbp;
+ int type;
+{
+ FILE *outfp;
+ DBT key, data;
+ recno_t recno;
+ int i, ret;
+ char buf[64];
+
+ memset(&key, 0, sizeof(key));
+ memset(&data, 0, sizeof(data));
+
+ for (i = 1; i < 100; ++i) { /* Test DB->put. */
+ sprintf(buf, "abc_%d_efg", i);
+ if (type == DB_RECNO) {
+ recno = i;
+ key.data = &recno;
+ key.size = sizeof(recno);
+ } else {
+ key.data = buf;
+ key.size = strlen(buf);
+ }
+
+ data.data = buf;
+ data.size = strlen(buf);
+ if (dbp->put(dbp, &key, &data, 0))
+ err("DB->put");
+ }
+
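+ /*
+ * Note: in the non-recno case key.data still points at buf, and
+ * key.size was taken from the last loop key before the sprintf
+ * below; this works only because "abc_97_efg" and "abc_99_efg"
+ * have the same length.
+ */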
+ if (type == DB_RECNO) { /* Test DB->get. */
+ recno = 97;
+ key.data = &recno;
+ key.size = sizeof(recno);
+ } else {
+ key.data = buf;
+ key.size = strlen(buf);
+ }
+ sprintf(buf, "abc_%d_efg", 97);
+ if (dbp->get(dbp, &key, &data, 0) != 0)
+ err("DB->get");
+ if (memcmp(data.data, buf, strlen(buf)))
+ err("DB->get: wrong data returned");
+
+ if (type == DB_RECNO) { /* Test DB->put no-overwrite. */
+ recno = 42;
+ key.data = &recno;
+ key.size = sizeof(recno);
+ } else {
+ key.data = buf;
+ key.size = strlen(buf);
+ }
+ sprintf(buf, "abc_%d_efg", 42);
+ if (dbp->put(dbp, &key, &data, R_NOOVERWRITE) == 0)
+ err("DB->put: no-overwrite succeeded");
+
+ if (type == DB_RECNO) { /* Test DB->del. */
+ recno = 35;
+ key.data = &recno;
+ key.size = sizeof(recno);
+ } else {
+ sprintf(buf, "abc_%d_efg", 35);
+ key.data = buf;
+ key.size = strlen(buf);
+ }
+ if (dbp->del(dbp, &key, 0))
+ err("DB->del");
+
+ /* Test DB->seq. */
+ if ((outfp = fopen("output", "w")) == NULL)
+ err("fopen: output");
+ while ((ret = dbp->seq(dbp, &key, &data, R_NEXT)) == 0) {
+ if (type == DB_RECNO)
+ fprintf(outfp, "%d\n", *(int *)key.data);
+ else
+ fprintf(outfp,
+ "%.*s\n", (int)key.size, (char *)key.data);
+ fprintf(outfp, "%.*s\n", (int)data.size, (char *)data.data);
+ }
+ if (ret != 1)
+ err("DB->seq");
+ fclose(outfp);
+ switch (type) {
+ case DB_BTREE:
+ ret = system("cmp output O.BH");
+ break;
+ case DB_HASH:
+ ret = system("sort output | cmp - O.BH");
+ break;
+ case DB_RECNO:
+ ret = system("cmp output O.R");
+ break;
+ }
+ if (ret != 0)
+ err("output comparison failed");
+
+ if (dbp->sync(dbp, 0)) /* Test DB->sync. */
+ err("DB->sync");
+
+ if (dbp->close(dbp)) /* Test DB->close. */
+ err("DB->close");
+}
+
+void
+err(s)
+ char *s;
+{
+ fprintf(stderr, "\t%s: %s\n", s, strerror(errno));
+ exit (1);
+}
diff --git a/storage/bdb/test/scr018/chk.comma b/storage/bdb/test/scr018/chk.comma
new file mode 100644
index 00000000000..42df48d1881
--- /dev/null
+++ b/storage/bdb/test/scr018/chk.comma
@@ -0,0 +1,30 @@
+#!/bin/sh -
+#
+# $Id: chk.comma,v 1.1 2001/11/03 18:43:49 bostic Exp $
+#
+# Look for trailing commas in declarations. Some compilers can't handle:
+# enum {
+# foo,
+# bar,
+# };
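+#
+# The companion program t.c scans each file for a comma followed
+# only by blanks, a newline, more optional blanks, and a closing
+# brace, and prints the file and line number of each occurrence.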
+
+[ -f ../libdb.a ] || (cd .. && make libdb.a) || {
+ echo 'FAIL: unable to find or build libdb.a'
+ exit 1
+}
+
+if cc -g -Wall -I.. t.c ../libdb.a -o t; then
+ :
+else
+ echo "FAIL: unable to compile test program t.c"
+ exit 1
+fi
+
+if ./t ../../*/*.[ch] ../../*/*.in; then
+ :
+else
+ echo "FAIL: test program failed"
+ exit 1
+fi
+
+exit 0
diff --git a/storage/bdb/test/scr018/t.c b/storage/bdb/test/scr018/t.c
new file mode 100644
index 00000000000..4056a605928
--- /dev/null
+++ b/storage/bdb/test/scr018/t.c
@@ -0,0 +1,46 @@
+#include <sys/types.h>
+
+#include <ctype.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+int
+chk(f)
+ char *f;
+{
+ int ch, l, r;
+
+ if (freopen(f, "r", stdin) == NULL) {
+ fprintf(stderr, "%s: %s\n", f, strerror(errno));
+ exit (1);
+ }
+ for (l = 1, r = 0; (ch = getchar()) != EOF;) {
+ if (ch != ',')
+ goto next;
+ do { ch = getchar(); } while (isblank(ch));
+ if (ch != '\n')
+ goto next;
+ ++l;
+ do { ch = getchar(); } while (isblank(ch));
+ if (ch != '}')
+ goto next;
+ r = 1;
+ printf("%s: line %d\n", f, l);
+
+next: if (ch == '\n')
+ ++l;
+ }
+ return (r);
+}
+
+int
+main(int argc, char *argv[])
+{
+ int r;
+
+ for (r = 0; *++argv != NULL;)
+ if (chk(*argv))
+ r = 1;
+ return (r);
+}
diff --git a/storage/bdb/test/scr019/chk.include b/storage/bdb/test/scr019/chk.include
new file mode 100644
index 00000000000..444217bedb4
--- /dev/null
+++ b/storage/bdb/test/scr019/chk.include
@@ -0,0 +1,40 @@
+#!/bin/sh -
+#
+# $Id: chk.include,v 1.3 2002/03/27 04:33:09 bostic Exp $
+#
+# Check for inclusion of files already included in db_int.h.
+
+d=../..
+
+# Test must be run from the top-level directory, not from a test directory.
+[ -f $d/LICENSE ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+
+t1=__1
+t2=__2
+
+egrep -- '#include[ ]' $d/dbinc/db_int.in |
+sed -e '/[ ]db\.h/d' \
+ -e 's/^#include.//' \
+ -e 's/[<>"]//g' \
+ -e 's/[ ].*//' > $t1
+
+for i in `cat $t1`; do
+ (cd $d && egrep "^#include[ ].*[<\"]$i[>\"]" */*.[ch])
+done |
+sed -e '/^build/d' \
+ -e '/^db_dump185/d' \
+ -e '/^examples_c/d' \
+ -e '/^libdb_java.*errno.h/d' \
+ -e '/^libdb_java.*java_util.h/d' \
+ -e '/^test_/d' \
+ -e '/^mutex\/tm.c/d' > $t2
+
+[ -s $t2 ] && {
+ echo 'FAIL: found extraneous includes in the source'
+ cat $t2
+ exit 1
+}
+exit 0
diff --git a/storage/bdb/test/scr020/chk.inc b/storage/bdb/test/scr020/chk.inc
new file mode 100644
index 00000000000..189126b10c3
--- /dev/null
+++ b/storage/bdb/test/scr020/chk.inc
@@ -0,0 +1,43 @@
+#!/bin/sh -
+#
+# $Id: chk.inc,v 1.1 2002/02/10 17:14:33 bostic Exp $
+#
+# Check for inclusion of db_config.h after "const" or other includes.
+
+d=../..
+
+# Test must be run from the top-level directory, not from a test directory.
+[ -f $d/LICENSE ] || {
+ echo 'FAIL: cannot find source distribution directory.'
+ exit 1
+}
+
+t1=__1
+t2=__2
+
+(cd $d && find . -name '*.[chys]' -o -name '*.cpp' |
+ xargs egrep -l '#include.*db_config.h') > $t1
+
+:> $t2
+for i in `cat $t1`; do
+ egrep -w 'db_config.h|const' /dev/null $d/$i | head -1 >> $t2
+done
+
+if egrep const $t2 > /dev/null; then
+ echo 'FAIL: found const before include of db_config.h'
+ egrep const $t2
+ exit 1
+fi
+
+:> $t2
+for i in `cat $t1`; do
+ egrep -w '#include' /dev/null $d/$i | head -1 >> $t2
+done
+
+if egrep -v db_config.h $t2 > /dev/null; then
+ echo 'FAIL: found includes before include of db_config.h'
+ egrep -v db_config.h $t2
+ exit 1
+fi
+
+exit 0
diff --git a/storage/bdb/test/scr021/chk.flags b/storage/bdb/test/scr021/chk.flags
new file mode 100644
index 00000000000..1b2bb62cca7
--- /dev/null
+++ b/storage/bdb/test/scr021/chk.flags
@@ -0,0 +1,97 @@
+#!/bin/sh -
+#
+# $Id: chk.flags,v 1.8 2002/08/14 02:19:55 bostic Exp $
+#
+# Check flag name-spaces.
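+#
+# Each handle's flag bits live in their own name-space: DB_ENV_*
+# flags may only be used with F_ISSET/F_SET/F_CLR on a dbenv,
+# DB_AM_* on a db, and DBC_* on a cursor.  The greps below flag
+# any crossover.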
+
+d=../..
+
+t1=__1
+
+# Check for DB_ENV flags.
+(grep 'F_ISSET([^ ]*dbenv,' $d/*/*.[chys];
+ grep 'F_SET([^ ]*dbenv,' $d/*/*.[chys];
+ grep 'F_CLR([^ ]*dbenv,' $d/*/*.[chys]) |
+ sed -e '/DB_ENV_/d' -e '/F_SET([^ ]*dbenv, db_env_reset)/d' > $t1
+[ -s $t1 ] && {
+ cat $t1
+ exit 1
+}
+
+grep 'DB_ENV_' $d/*/*.c |
+sed -e '/F_.*dbenv,/d' \
+ -e '/DB_ENV_TEST_RECOVERY(.*DB_TEST_/d' \
+ -e '/\/libdb_java\//d' > $t1
+[ -s $t1 ] && {
+ cat $t1
+ exit 1
+}
+
+# Check for DB flags.
+(grep 'F_ISSET([^ ]*dbp,' $d/*/*.[chys];
+ grep 'F_SET([^ ]*dbp,' $d/*/*.[chys];
+ grep 'F_CLR([^ ]*dbp,' $d/*/*.[chys]) |
+ sed -e '/DB_AM_/d' \
+ -e '/db.c:.*F_SET.*F_ISSET(subdbp,/d' > $t1
+[ -s $t1 ] && {
+ cat $t1
+ exit 1
+}
+
+grep 'DB_AM_' $d/*/*.c |
+sed -e '/F_.*dbp/d' \
+ -e '/"DB->open", dbp->flags, DB_AM_DUP,/d' \
+ -e '/"DB_NODUPDATA" behavior for databases with/d' \
+ -e '/If DB_AM_OPEN_CALLED is not set, then we/d' \
+ -e '/This was checked in set_flags when DB_AM_ENCRYPT/d' \
+ -e '/XA_ABORT, we can safely set DB_AM_RECOVER/d' \
+ -e '/ DB_AM_RECNUM\./d' \
+ -e '/ DB_AM_RECOVER set\./d' \
+ -e '/isdup = dbp->flags & DB_AM_DUP/d' \
+ -e '/otherwise we simply do/d' \
+ -e '/pginfo/d' \
+ -e '/setting DB_AM_RECOVER, we guarantee that we don/d' \
+ -e '/:[ {]*DB_AM_/d' > $t1
+[ -s $t1 ] && {
+ cat $t1
+ exit 1
+}
+
+# Check for DBC flags.
+(grep 'F_ISSET([^ ]*dbc,' $d/*/*.[chys];
+ grep 'F_SET([^ ]*dbc,' $d/*/*.[chys];
+ grep 'F_CLR([^ ]*dbc,' $d/*/*.[chys]) |
+ sed -e '/DBC_/d' > $t1
+[ -s $t1 ] && {
+ cat $t1
+ exit 1
+}
+
+grep 'DBC_' $d/*/*.c |
+sed -e '/F_.*dbc/d' \
+ -e '/DBC_INTERNAL/d' \
+ -e '/DBC_LOGGING/d' \
+ -e '/Do the actual get. Set DBC_TRANSIENT/d' \
+ -e '/If DBC_WRITEDUP is set, the cursor is an in/d' \
+ -e '/The DBC_TRANSIENT flag indicates that we/d' \
+ -e '/This function replaces the DBC_CONTINUE and DBC_KEYSET/d' \
+ -e '/db_cam.c:.*F_CLR(opd, DBC_ACTIVE);/d' \
+ -e '/{ DBC_/d' > $t1
+[ -s $t1 ] && {
+ cat $t1
+ exit 1
+}
+
+# Check for bad use of macros.
+egrep 'case .*F_SET\(|case .*F_CLR\(' $d/*/*.c > $t1
+egrep 'for .*F_SET\(|for .*F_CLR\(' $d/*/*.c >> $t1
+egrep 'if .*F_SET\(|if .*F_CLR\(' $d/*/*.c >> $t1
+egrep 'switch .*F_SET\(|switch .*F_CLR\(' $d/*/*.c >> $t1
+egrep 'while .*F_SET\(|while .*F_CLR\(' $d/*/*.c >> $t1
+[ -s $t1 ] && {
+ echo 'flag-modifying macro used in a conditional expression'
+ cat $t1
+ exit 1
+}
+
+exit 0
diff --git a/storage/bdb/test/scr022/chk.rr b/storage/bdb/test/scr022/chk.rr
new file mode 100644
index 00000000000..df230315299
--- /dev/null
+++ b/storage/bdb/test/scr022/chk.rr
@@ -0,0 +1,22 @@
+#!/bin/sh -
+#
+# $Id: chk.rr,v 1.1 2002/04/19 15:13:05 bostic Exp $
+
+d=../..
+
+t1=__1
+
+# Check for DB_RUNRECOVERY being specified instead of a call to db_panic.
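+#
+# (__db_panic marks the environment as panicked and itself returns
+# DB_RUNRECOVERY, so callers should go through it rather than
+# return the error code directly.)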
+egrep DB_RUNRECOVERY $d/*/*.c |
+ sed -e '/common\/db_err.c:/d' \
+ -e '/libdb_java\/java_util.c:/d' \
+ -e '/db_dispatch.c:.*if (ret == DB_RUNRECOVERY/d' \
+ -e '/txn.c:.* \* DB_RUNRECOVERY and we need to/d' \
+ -e '/__db_panic(.*, DB_RUNRECOVERY)/d' > $t1
+[ -s $t1 ] && {
+ echo "DB_RUNRECOVERY used; should be a call to db_panic."
+ cat $t1
+ exit 1
+}
+
+exit 0
diff --git a/storage/bdb/test/sdb001.tcl b/storage/bdb/test/sdb001.tcl
new file mode 100644
index 00000000000..a03160e0ab7
--- /dev/null
+++ b/storage/bdb/test/sdb001.tcl
@@ -0,0 +1,156 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: sdb001.tcl,v 11.18 2002/06/10 15:39:36 sue Exp $
+#
+# TEST subdb001 Tests mixing db and subdb operations
+# TEST Create a db, add data, try to create a subdb.
+# TEST Test naming db and subdb with a leading - for correct parsing
+# TEST Existence check -- test use of -excl with subdbs
+# TEST
+# TEST Test non-subdb and subdb operations
+# TEST Test naming (filenames begin with -)
+# TEST Test existence (cannot create subdb of same name with -excl)
+proc subdb001 { method args } {
+ source ./include.tcl
+ global errorInfo
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_queue $method] == 1 } {
+ puts "Subdb001: skipping for method $method"
+ return
+ }
+ puts "Subdb001: $method ($args) subdb and non-subdb tests"
+
+ set testfile $testdir/subdb001.db
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ incr eindex
+ set env [lindex $args $eindex]
+ puts "Subdb001 skipping for env $env"
+ return
+ }
+ # Create the database and open the dictionary
+ set subdb subdb0
+ cleanup $testdir NULL
+ puts "\tSubdb001.a: Non-subdb database and subdb operations"
+ #
+ # Create a db with no subdbs. Add some data. Close. Try to
+ # open/add with a subdb. Should fail.
+ #
+ puts "\tSubdb001.a.0: Create db, add data, close, try subdb"
+ set db [eval {berkdb_open -create -mode 0644} \
+ $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set did [open $dict]
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ if { [is_record_based $method] == 1 } {
+ append gflags " -recno"
+ }
+ while { [gets $did str] != -1 && $count < 5 } {
+ if { [is_record_based $method] == 1 } {
+ global kvals
+
+ set key [expr $count + 1]
+ set kvals($key) $str
+ } else {
+ set key $str
+ }
+ set ret [eval \
+ {$db put} $txn $pflags {$key [chop_data $method $str]}]
+ error_check_good put $ret 0
+
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good \
+ get $ret [list [list $key [pad_data $method $str]]]
+ incr count
+ }
+ close $did
+ error_check_good db_close [$db close] 0
+ set ret [catch {eval {berkdb_open_noerr -create -mode 0644} $args \
+ {$omethod $testfile $subdb}} db]
+ error_check_bad dbopen $ret 0
+ #
+ # Create a db with no subdbs. Add no data. Close. Try to
+ # open/add with a subdb. Should fail.
+ #
+ set testfile $testdir/subdb001a.db
+ puts "\tSubdb001.a.1: Create db, close, try subdb"
+ #
+ # !!!
+ # Using -truncate is illegal when opening for subdbs, but we
+ # can use it here because we are not using subdbs for this
+ # create.
+ #
+ set db [eval {berkdb_open -create -truncate -mode 0644} $args \
+ {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ error_check_good db_close [$db close] 0
+
+ set ret [catch {eval {berkdb_open_noerr -create -mode 0644} $args \
+ {$omethod $testfile $subdb}} db]
+ error_check_bad dbopen $ret 0
+
+ #
+ # Test naming, db and subdb names beginning with -.
+ #
+ puts "\tSubdb001.b: Naming"
+ set cwd [pwd]
+ cd $testdir
+ set testfile1 -subdb001.db
+ set subdb -subdb
+ puts "\tSubdb001.b.0: Create db and subdb with -name, no --"
+ set ret [catch {eval {berkdb_open -create -mode 0644} $args \
+ {$omethod $testfile1 $subdb}} db]
+ error_check_bad dbopen $ret 0
+ puts "\tSubdb001.b.1: Create db and subdb with -name, with --"
+ set db [eval {berkdb_open -create -mode 0644} $args \
+ {$omethod -- $testfile1 $subdb}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ error_check_good db_close [$db close] 0
+
+ cd $cwd
+
+ #
+ # Create 1 db with 1 subdb. Try to create another subdb of
+ # the same name. Should fail.
+ #
+ puts "\tSubdb001.c: Truncate check"
+ set testfile $testdir/subdb001c.db
+ set subdb subdb
+ set stat [catch {eval {berkdb_open_noerr -create -truncate -mode 0644} \
+ $args {$omethod $testfile $subdb}} ret]
+ error_check_bad dbopen $stat 0
+ error_check_good trunc [is_substr $ret \
+ "illegal with multiple databases"] 1
+
+ puts "\tSubdb001.d: Existence check"
+ set testfile $testdir/subdb001d.db
+ set subdb subdb
+ set ret [catch {eval {berkdb_open -create -excl -mode 0644} $args \
+ {$omethod $testfile $subdb}} db]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set ret [catch {eval {berkdb_open_noerr -create -excl -mode 0644} \
+ $args {$omethod $testfile $subdb}} db1]
+ error_check_bad dbopen $ret 0
+ error_check_good db_close [$db close] 0
+
+ return
+}
diff --git a/storage/bdb/test/sdb002.tcl b/storage/bdb/test/sdb002.tcl
new file mode 100644
index 00000000000..4757e12afc7
--- /dev/null
+++ b/storage/bdb/test/sdb002.tcl
@@ -0,0 +1,221 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: sdb002.tcl,v 11.35 2002/08/23 18:01:53 sandstro Exp $
+#
+# TEST subdb002
+# TEST Tests basic subdb functionality
+# TEST Small keys, small data
+# TEST Put/get per key
+# TEST Dump file
+# TEST Close, reopen
+# TEST Dump file
+# TEST
+# TEST Use the first 10,000 entries from the dictionary.
+# TEST Insert each with self as key and data; retrieve each.
+# TEST After all are entered, retrieve all; compare output to original.
+# TEST Close file, reopen, do retrieve and re-verify.
+# TEST Then repeat using an environment.
+proc subdb002 { method {nentries 10000} args } {
+ global passwd
+
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ incr eindex
+ set env [lindex $args $eindex]
+ puts "Subdb002 skipping for env $env"
+ return
+ }
+ set largs $args
+ subdb002_main $method $nentries $largs
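+ # largs accumulates: the second run below adds -chksum, and the
+ # third runs with both -chksum and -encryptaes.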
+ append largs " -chksum "
+ subdb002_main $method $nentries $largs
+ append largs "-encryptaes $passwd "
+ subdb002_main $method $nentries $largs
+}
+
+proc subdb002_main { method nentries largs } {
+ source ./include.tcl
+ global encrypt
+
+ set largs [convert_args $method $largs]
+ set omethod [convert_method $method]
+
+ env_cleanup $testdir
+
+ puts "Subdb002: $method ($largs) basic subdb tests"
+ set testfile $testdir/subdb002.db
+ subdb002_body $method $omethod $nentries $largs $testfile NULL
+
+ # Run convert_encrypt so that old_encrypt will be reset to
+ # the proper value and cleanup will work.
+ convert_encrypt $largs
+ set encargs ""
+ set largs [split_encargs $largs encargs]
+
+ cleanup $testdir NULL
+ if { [is_queue $omethod] == 1 } {
+ set sdb002_env berkdb_env_noerr
+ } else {
+ set sdb002_env berkdb_env
+ }
+ set env [eval {$sdb002_env -create -cachesize {0 10000000 0} \
+ -mode 0644 -txn} -home $testdir $encargs]
+ error_check_good env_open [is_valid_env $env] TRUE
+ puts "Subdb002: $method ($largs) basic subdb tests in an environment"
+
+ # We're in an env--use default path to database rather than specifying
+ # it explicitly.
+ set testfile subdb002.db
+ subdb002_body $method $omethod $nentries $largs $testfile $env
+ error_check_good env_close [$env close] 0
+}
+
+proc subdb002_body { method omethod nentries largs testfile env } {
+ global encrypt
+ global passwd
+ source ./include.tcl
+
+ # Create the database and open the dictionary
+ set subdb subdb0
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+
+ if { [is_queue $omethod] == 1 } {
+ set sdb002_open berkdb_open_noerr
+ } else {
+ set sdb002_open berkdb_open
+ }
+
+ if { $env == "NULL" } {
+ set ret [catch {eval {$sdb002_open -create -mode 0644} $largs \
+ {$omethod $testfile $subdb}} db]
+ } else {
+ set ret [catch {eval {$sdb002_open -create -mode 0644} $largs \
+ {-env $env $omethod $testfile $subdb}} db]
+ }
+
+ #
+ # If -queue method, we need to make sure that trying to
+ # create a subdb fails.
+ if { [is_queue $method] == 1 } {
+ error_check_bad dbopen $ret 0
+ puts "Subdb002: skipping remainder of test for method $method"
+ return
+ }
+
+ error_check_good dbopen $ret 0
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set did [open $dict]
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ if { [is_record_based $method] == 1 } {
+ set checkfunc subdb002_recno.check
+ append gflags " -recno"
+ } else {
+ set checkfunc subdb002.check
+ }
+ puts "\tSubdb002.a: put/get loop"
+ # Here is the loop where we put and get each key/data pair
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ global kvals
+
+ set key [expr $count + 1]
+ set kvals($key) [pad_data $method $str]
+ } else {
+ set key $str
+ }
+ set ret [eval \
+ {$db put} $txn $pflags {$key [chop_data $method $str]}]
+ error_check_good put $ret 0
+
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good \
+ get $ret [list [list $key [pad_data $method $str]]]
+ incr count
+ }
+ close $did
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tSubdb002.b: dump file"
+ dump_file $db $txn $t1 $checkfunc
+ error_check_good db_close [$db close] 0
+
+ # Now compare the keys to see if they match the dictionary (or ints)
+ if { [is_record_based $method] == 1 } {
+ set oid [open $t2 w]
+ for {set i 1} {$i <= $nentries} {incr i} {
+ puts $oid $i
+ }
+ close $oid
+ file rename -force $t1 $t3
+ } else {
+ set q q
+ filehead $nentries $dict $t3
+ filesort $t3 $t2
+ filesort $t1 $t3
+ }
+
+ error_check_good Subdb002:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ puts "\tSubdb002.c: close, open, and dump file"
+ # Now, reopen the file and run the last test again.
+ open_and_dump_subfile $testfile $env $t1 $checkfunc \
+ dump_file_direction "-first" "-next" $subdb
+ if { [is_record_based $method] != 1 } {
+ filesort $t1 $t3
+ }
+
+ error_check_good Subdb002:diff($t2,$t3) \
+ [filecmp $t2 $t3] 0
+
+ # Now, reopen the file and run the last test again in the
+ # reverse direction.
+ puts "\tSubdb002.d: close, open, and dump file in reverse direction"
+ open_and_dump_subfile $testfile $env $t1 $checkfunc \
+ dump_file_direction "-last" "-prev" $subdb
+
+ if { [is_record_based $method] != 1 } {
+ filesort $t1 $t3
+ }
+
+ error_check_good Subdb002:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ puts "\tSubdb002.e: db_dump with subdatabase"
+ set outfile $testdir/subdb002.dump
+ set dumpargs " -f $outfile -s $subdb "
+ if { $encrypt > 0 } {
+ append dumpargs " -P $passwd "
+ }
+ if { $env != "NULL" } {
+ append dumpargs " -h $testdir "
+ }
+ append dumpargs " $testfile"
+ set stat [catch {eval {exec $util_path/db_dump} $dumpargs} ret]
+ error_check_good dbdump.subdb $stat 0
+}
+
+# Check function for Subdb002; keys and data are identical
+proc subdb002.check { key data } {
+ error_check_good "key/data mismatch" $data $key
+}
+
+proc subdb002_recno.check { key data } {
+ global dict
+ global kvals
+
+ error_check_good key"$key"_exists [info exists kvals($key)] 1
+ error_check_good "key/data mismatch, key $key" $data $kvals($key)
+}
diff --git a/storage/bdb/test/sdb003.tcl b/storage/bdb/test/sdb003.tcl
new file mode 100644
index 00000000000..5d1536d8c84
--- /dev/null
+++ b/storage/bdb/test/sdb003.tcl
@@ -0,0 +1,179 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: sdb003.tcl,v 11.24 2002/06/10 15:39:37 sue Exp $
+#
+# TEST subdb003
+# TEST Tests many subdbs
+# TEST Creates many subdbs and puts a small amount of
+# TEST data in each (many defaults to 1000)
+# TEST
+# TEST Use the first 1,000 entries from the dictionary as subdbnames.
+# TEST Insert each with entry as name of subdatabase and a partial list
+# TEST as key/data. After all are entered, retrieve all; compare output
+# TEST to original. Close file, reopen, do retrieve and re-verify.
+proc subdb003 { method {nentries 1000} args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_queue $method] == 1 } {
+ puts "Subdb003: skipping for method $method"
+ return
+ }
+
+ puts "Subdb003: $method ($args) many subdb tests"
+
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/subdb003.db
+ set env NULL
+ } else {
+ set testfile subdb003.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ if { $nentries == 1000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
+ }
+ # Create the database and open the dictionary
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ cleanup $testdir $env
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set fcount 0
+
+ if { [is_record_based $method] == 1 } {
+ set checkfunc subdb003_recno.check
+ append gflags " -recno"
+ } else {
+ set checkfunc subdb003.check
+ }
+
+ # Here is the loop where we put and get each key/data pair
+ set ndataent 10
+ set fdid [open $dict]
+ while { [gets $fdid str] != -1 && $fcount < $nentries } {
+ set subdb $str
+ set db [eval {berkdb_open -create -mode 0644} \
+ $args {$omethod $testfile $subdb}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set count 0
+ set did [open $dict]
+ while { [gets $did str] != -1 && $count < $ndataent } {
+ if { [is_record_based $method] == 1 } {
+ global kvals
+
+ set key [expr $count + 1]
+ set kvals($key) [pad_data $method $str]
+ } else {
+ set key $str
+ }
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} \
+ $txn $pflags {$key [chop_data $method $str]}]
+ error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good get $ret [list [list $key \
+ [pad_data $method $str]]]
+ incr count
+ }
+ close $did
+ incr fcount
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_file $db $txn $t1 $checkfunc
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ # Now compare the keys to see if they match
+ if { [is_record_based $method] == 1 } {
+ set oid [open $t2 w]
+ for {set i 1} {$i <= $ndataent} {incr i} {
+ puts $oid $i
+ }
+ close $oid
+ file rename -force $t1 $t3
+ } else {
+ set q q
+ filehead $ndataent $dict $t3
+ filesort $t3 $t2
+ filesort $t1 $t3
+ }
+
+ error_check_good Subdb003:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ # Now, reopen the file and run the last test again.
+ open_and_dump_subfile $testfile $env $t1 $checkfunc \
+ dump_file_direction "-first" "-next" $subdb
+ if { [is_record_based $method] != 1 } {
+ filesort $t1 $t3
+ }
+
+ error_check_good Subdb003:diff($t2,$t3) \
+ [filecmp $t2 $t3] 0
+
+ # Now, reopen the file and run the last test again in the
+ # reverse direction.
+ open_and_dump_subfile $testfile $env $t1 $checkfunc \
+ dump_file_direction "-last" "-prev" $subdb
+
+ if { [is_record_based $method] != 1 } {
+ filesort $t1 $t3
+ }
+
+ error_check_good Subdb003:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+ if { [expr $fcount % 100] == 0 } {
+ puts -nonewline "$fcount "
+ flush stdout
+ }
+ }
+ close $fdid
+ puts ""
+}
+
+# Check function for Subdb003; keys and data are identical
+proc subdb003.check { key data } {
+ error_check_good "key/data mismatch" $data $key
+}
+
+proc subdb003_recno.check { key data } {
+ global dict
+ global kvals
+
+ error_check_good key"$key"_exists [info exists kvals($key)] 1
+ error_check_good "key/data mismatch, key $key" $data $kvals($key)
+}
diff --git a/storage/bdb/test/sdb004.tcl b/storage/bdb/test/sdb004.tcl
new file mode 100644
index 00000000000..d3d95f1fde0
--- /dev/null
+++ b/storage/bdb/test/sdb004.tcl
@@ -0,0 +1,241 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: sdb004.tcl,v 11.22 2002/07/11 18:53:45 sandstro Exp $
+#
+# TEST subdb004
+# TEST Tests large subdb names
+# TEST subdb name = filecontents,
+# TEST key = filename, data = filecontents
+# TEST Put/get per key
+# TEST Dump file
+# TEST Dump subdbs, verify data and subdb name match
+# TEST
+# TEST Create 1 db with many large subdbs. Use the contents as subdb names.
+# TEST Take the source files and dbtest executable and enter their names as
+# TEST the key with their contents as data. After all are entered, retrieve
+# TEST all; compare output to original. Close file, reopen, do retrieve and
+# TEST re-verify.
+proc subdb004 { method args} {
+ global names
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_queue $method] == 1 || [is_fixed_length $method] == 1 } {
+ puts "Subdb004: skipping for method $method"
+ return
+ }
+
+ puts "Subdb004: $method ($args) \
+ filecontents=subdbname filename=key filecontents=data pairs"
+
+ set txnenv 0
+ set envargs ""
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/subdb004.db
+ set env NULL
+ } else {
+ set testfile subdb004.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set envargs " -env $env "
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ append envargs " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ # Create the database and open the dictionary
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ set t4 $testdir/t4
+
+ cleanup $testdir $env
+ set pflags ""
+ set gflags ""
+ set txn ""
+ if { [is_record_based $method] == 1 } {
+ set checkfunc subdb004_recno.check
+ append gflags "-recno"
+ } else {
+ set checkfunc subdb004.check
+ }
+
+ # Here is the loop where we put and get each key/data pair
+ # Note that the subdatabase name is passed in as a char *, not
+ # in a DBT, so it may not contain nulls; use only source files.
+ set file_list [glob $src_root/*/*.c]
+ set fcount [llength $file_list]
+ if { $txnenv == 1 && $fcount > 100 } {
+ set file_list [lrange $file_list 0 99]
+ set fcount 100
+ }
+
+ set count 0
+ if { [is_record_based $method] == 1 } {
+ set oid [open $t2 w]
+ for {set i 1} {$i <= $fcount} {incr i} {
+ puts $oid $i
+ }
+ close $oid
+ } else {
+ set oid [open $t2.tmp w]
+ foreach f $file_list {
+ puts $oid $f
+ }
+ close $oid
+ filesort $t2.tmp $t2
+ }
+ puts "\tSubdb004.a: Set/Check each subdb"
+ foreach f $file_list {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ set names([expr $count + 1]) $f
+ } else {
+ set key $f
+ }
+ # Should really catch errors
+ set fid [open $f r]
+ fconfigure $fid -translation binary
+ set data [read $fid]
+ set subdb $data
+ close $fid
+ set db [eval {berkdb_open -create -mode 0644} \
+ $args {$omethod $testfile $subdb}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval \
+ {$db put} $txn $pflags {$key [chop_data $method $data]}]
+ error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ # Should really catch errors
+ set fid [open $t4 w]
+ fconfigure $fid -translation binary
+ if [catch {eval {$db get} $gflags {$key}} data] {
+ puts -nonewline $fid $data
+ } else {
+ # Data looks like {{key data}}
+ set key [lindex [lindex $data 0] 0]
+ set data [lindex [lindex $data 0] 1]
+ puts -nonewline $fid $data
+ }
+ close $fid
+
+ error_check_good Subdb004:diff($f,$t4) \
+ [filecmp $f $t4] 0
+
+ incr count
+
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ # puts "\tSubdb004.b: dump file"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_bin_file $db $txn $t1 $checkfunc
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ }
+
+ #
+ # Now for each file, check that the subdb name is the same
+ # as the data in that subdb and that the filename is the key.
+ #
+ puts "\tSubdb004.b: Compare subdb names with key/data"
+ set db [eval {berkdb_open -rdonly} $envargs {$testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set c [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $c $db] TRUE
+
+ for {set d [$c get -first] } { [llength $d] != 0 } \
+ {set d [$c get -next] } {
+ set subdbname [lindex [lindex $d 0] 0]
+ set subdb [eval {berkdb_open} $args {$testfile $subdbname}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Output the subdb name
+ set ofid [open $t3 w]
+ fconfigure $ofid -translation binary
+ if { [string compare "\0" \
+ [string range $subdbname end end]] == 0 } {
+ set slen [expr [string length $subdbname] - 2]
+ set subdbname [string range $subdbname 0 $slen]
+ }
+ puts -nonewline $ofid $subdbname
+ close $ofid
+
+ # Output the data
+ set subc [eval {$subdb cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $subc $subdb] TRUE
+ set d [$subc get -first]
+ error_check_good dbc_get [expr [llength $d] != 0] 1
+ set key [lindex [lindex $d 0] 0]
+ set data [lindex [lindex $d 0] 1]
+
+ set ofid [open $t1 w]
+ fconfigure $ofid -translation binary
+ puts -nonewline $ofid $data
+ close $ofid
+
+ $checkfunc $key $t1
+ $checkfunc $key $t3
+
+ error_check_good Subdb004:diff($t3,$t1) \
+ [filecmp $t3 $t1] 0
+ error_check_good curs_close [$subc close] 0
+ error_check_good db_close [$subdb close] 0
+ }
+ error_check_good curs_close [$c close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ if { [is_record_based $method] != 1 } {
+ fileremove $t2.tmp
+ }
+}
+
+# Check function for subdb004; key should be file name; data should be contents
+proc subdb004.check { binfile tmpfile } {
+ source ./include.tcl
+
+ error_check_good Subdb004:datamismatch($binfile,$tmpfile) \
+ [filecmp $binfile $tmpfile] 0
+}
+proc subdb004_recno.check { binfile tmpfile } {
+ global names
+ source ./include.tcl
+
+ set fname $names($binfile)
+ error_check_good key"$binfile"_exists [info exists names($binfile)] 1
+ error_check_good Subdb004:datamismatch($fname,$tmpfile) \
+ [filecmp $fname $tmpfile] 0
+}
diff --git a/storage/bdb/test/sdb005.tcl b/storage/bdb/test/sdb005.tcl
new file mode 100644
index 00000000000..98cea5b348b
--- /dev/null
+++ b/storage/bdb/test/sdb005.tcl
@@ -0,0 +1,146 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: sdb005.tcl,v 11.18 2002/07/11 18:53:46 sandstro Exp $
+#
+# TEST subdb005
+# TEST Tests cursor operations in subdbs
+# TEST Put/get per key
+# TEST Verify cursor operations work within subdb
+# TEST Verify cursor operations do not work across subdbs
+# TEST
+#
+# We should test this on all btrees, all hash, and a combination thereof
+proc subdb005 {method {nentries 100} args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_queue $method] == 1 } {
+ puts "Subdb005: skipping for method $method"
+ return
+ }
+
+ puts "Subdb005: $method ( $args ) subdb cursor operations test"
+ set txnenv 0
+ set envargs ""
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/subdb005.db
+ set env NULL
+ } else {
+ set testfile subdb005.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set envargs " -env $env "
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ append envargs " -auto_commit "
+ if { $nentries == 100 } {
+ set nentries 20
+ }
+ }
+ set testdir [get_home $env]
+ }
+
+ cleanup $testdir $env
+ set txn ""
+ set psize 8192
+ set duplist {-1 -1 -1 -1 -1}
+ build_all_subdb \
+ $testfile [list $method] $psize $duplist $nentries $args
+ set numdb [llength $duplist]
+ #
+ # Get a cursor in each subdb and move past the end of each
+ # subdb. Make sure we don't end up in another subdb.
+ #
+ puts "\tSubdb005.a: Cursor ops - first/prev and last/next"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ for {set i 0} {$i < $numdb} {incr i} {
+ set db [eval {berkdb_open -unknown} $args {$testfile sub$i.db}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set db_handle($i) $db
+ # Used in 005.c test
+ lappend subdbnames sub$i.db
+
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+ set d [$dbc get -first]
+ error_check_good dbc_get [expr [llength $d] != 0] 1
+
+ # Used in 005.b test
+ set db_key($i) [lindex [lindex $d 0] 0]
+
+ set d [$dbc get -prev]
+ error_check_good dbc_get [expr [llength $d] == 0] 1
+ set d [$dbc get -last]
+ error_check_good dbc_get [expr [llength $d] != 0] 1
+ set d [$dbc get -next]
+ error_check_good dbc_get [expr [llength $d] == 0] 1
+ error_check_good dbc_close [$dbc close] 0
+ }
+ #
+ # Get a key from each subdb and try to get this key in a
+ # different subdb. Make sure it fails
+ #
+ puts "\tSubdb005.b: Get keys in different subdb's"
+ for {set i 0} {$i < $numdb} {incr i} {
+ set n [expr $i + 1]
+ if {$n == $numdb} {
+ set n 0
+ }
+ set db $db_handle($i)
+ if { [is_record_based $method] == 1 } {
+ set d [eval {$db get -recno} $txn {$db_key($n)}]
+ error_check_good \
+ db_get [expr [llength $d] == 0] 1
+ } else {
+ set d [eval {$db get} $txn {$db_key($n)}]
+ error_check_good db_get [expr [llength $d] == 0] 1
+ }
+ }
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ #
+ # Clean up
+ #
+ for {set i 0} {$i < $numdb} {incr i} {
+ error_check_good db_close [$db_handle($i) close] 0
+ }
+
+ #
+ # First make sure the db can only be opened with -unknown if it
+ # is opened read-only, then check the contents of the DB for
+ # subdb names only:  every subdbname is there, nothing else is.
+ #
+ puts "\tSubdb005.c: Check DB is read-only"
+ error_check_bad dbopen [catch \
+ {berkdb_open_noerr -unknown $testfile} ret] 0
+
+ puts "\tSubdb005.d: Check contents of DB for subdb names only"
+ set db [eval {berkdb_open -unknown -rdonly} $envargs {$testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set subdblist [$db get -glob *]
+ foreach kd $subdblist {
+ # subname also used in subdb005.e,f below
+ set subname [lindex $kd 0]
+ set i [lsearch $subdbnames $subname]
+ error_check_good subdb_search [expr $i != -1] 1
+ set subdbnames [lreplace $subdbnames $i $i]
+ }
+ error_check_good subdb_done [llength $subdbnames] 0
+
+ error_check_good db_close [$db close] 0
+ return
+}
diff --git a/storage/bdb/test/sdb006.tcl b/storage/bdb/test/sdb006.tcl
new file mode 100644
index 00000000000..fd6066b08d6
--- /dev/null
+++ b/storage/bdb/test/sdb006.tcl
@@ -0,0 +1,169 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: sdb006.tcl,v 11.20 2002/06/20 19:01:02 sue Exp $
+#
+# TEST subdb006
+# TEST Tests intra-subdb join
+# TEST
+# TEST We'll test 2-way, 3-way, and 4-way joins and figure that if those work,
+# TEST everything else does as well. We'll create test databases called
+# TEST sub1.db, sub2.db, sub3.db, and sub4.db. The number on the database
+# TEST describes the duplication -- duplicates are of the form 0, N, 2N, 3N,
+# TEST ... where N is the number of the database. Primary.db is the primary
+# TEST database, and sub0.db is the database that has no matching duplicates.
+# TEST All of these are within a single database.
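+# TEST (For example, sub3.db holds the duplicates 0, 3, 6, 9, ...)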
+#
+# We should test this on all btrees, all hash, and a combination thereof
+proc subdb006 {method {nentries 100} args } {
+ source ./include.tcl
+ global rand_init
+
+ # NB: these flags are internal only, ok
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_record_based $method] == 1 || [is_rbtree $method] } {
+ puts "\tSubdb006 skipping for method $method."
+ return
+ }
+
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/subdb006.db
+ set env NULL
+ } else {
+ set testfile subdb006.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ if { $nentries == 100 } {
+ # !!!
+ # nentries must be greater than the number
+ # of do_join_subdb calls below.
+ #
+ set nentries 35
+ }
+ }
+ set testdir [get_home $env]
+ }
+ berkdb srand $rand_init
+
+ set oargs $args
+ foreach opt {" -dup" " -dupsort"} {
+ append args $opt
+
+ puts "Subdb006: $method ( $args ) Intra-subdb join"
+ set txn ""
+ #
+ # Get a cursor in each subdb and move past the end of each
+ # subdb. Make sure we don't end up in another subdb.
+ #
+ puts "\tSubdb006.a: Intra-subdb join"
+
+ if { $env != "NULL" } {
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ set psize 8192
+ set duplist {0 50 25 16 12}
+ set numdb [llength $duplist]
+ build_all_subdb $testfile [list $method] $psize \
+ $duplist $nentries $args
+
+ # Build the primary
+ puts "Subdb006: Building the primary database $method"
+ set oflags "-create -mode 0644 [conv $omethod \
+ [berkdb random_int 1 2]]"
+ set db [eval {berkdb_open} $oflags $oargs $testfile primary.db]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ for { set i 0 } { $i < 1000 } { incr i } {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set key [format "%04d" $i]
+ set ret [eval {$db put} $txn {$key stub}]
+ error_check_good "primary put" $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+ error_check_good "primary close" [$db close] 0
+ set did [open $dict]
+ gets $did str
+ do_join_subdb $testfile primary.db "1 0" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "2 0" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "3 0" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "4 0" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "1" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "2" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "3" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "4" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "1 2" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "1 2 3" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "1 2 3 4" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "2 1" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "3 2 1" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "4 3 2 1" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "1 3" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "3 1" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "1 4" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "4 1" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "2 3" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "3 2" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "2 4" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "4 2" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "3 4" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "4 3" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "2 3 4" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "3 4 1" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "4 2 1" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "0 2 1" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "3 2 0" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "4 3 2 1" $str $oargs
+ gets $did str
+ do_join_subdb $testfile primary.db "4 3 0 1" $str $oargs
+
+ close $did
+ }
+}
diff --git a/storage/bdb/test/sdb007.tcl b/storage/bdb/test/sdb007.tcl
new file mode 100644
index 00000000000..0f9488a92a1
--- /dev/null
+++ b/storage/bdb/test/sdb007.tcl
@@ -0,0 +1,132 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: sdb007.tcl,v 11.20 2002/07/11 18:53:46 sandstro Exp $
+#
+# TEST subdb007
+# TEST Tests page size difference errors between subdbs.
+# TEST Test 3 different scenarios for page sizes.
+# TEST 1. Create/open with a default page size, 2nd subdb create with
+# TEST specified different one, should error.
+# TEST 2. Create/open with specific page size, 2nd subdb create with
+# TEST different one, should error.
+# TEST 3. Create/open with specified page size, 2nd subdb create with
+# TEST same specified size, should succeed.
+# TEST (4th combo of using all defaults is a basic test, done elsewhere)
+proc subdb007 { method args } {
+ source ./include.tcl
+
+ set db2args [convert_args -btree $args]
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_queue $method] == 1 } {
+ puts "Subdb007: skipping for method $method"
+ return
+ }
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Subdb007: skipping for specific page sizes"
+ return
+ }
+
+ puts "Subdb007: $method ($args) subdb tests with different page sizes"
+
+ set txnenv 0
+ set envargs ""
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/subdb007.db
+ set env NULL
+ } else {
+ set testfile subdb007.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set envargs " -env $env "
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ append envargs " -auto_commit "
+ append db2args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ set sub1 "sub1"
+ set sub2 "sub2"
+ cleanup $testdir $env
+ set txn ""
+
+ puts "\tSubdb007.a.0: create subdb with default page size"
+ set db [eval {berkdb_open -create -mode 0644} \
+ $args {$omethod $testfile $sub1}]
+ error_check_good subdb [is_valid_db $db] TRUE
+ #
+ # Figure out what the default page size is so that we can
+ # guarantee we create it with a different value.
+ set statret [$db stat]
+ set pgsz 0
+ foreach pair $statret {
+ set fld [lindex $pair 0]
+ if { [string compare $fld {Page size}] == 0 } {
+ set pgsz [lindex $pair 1]
+ }
+ }
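+	# (The default page size is not a fixed constant -- it is typically
+	# derived from the filesystem block size -- which is why we probe it
+	# at runtime here rather than hardcoding a value.)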
+ error_check_good dbclose [$db close] 0
+
+ if { $pgsz == 512 } {
+ set pgsz2 2048
+ } else {
+ set pgsz2 512
+ }
+
+ puts "\tSubdb007.a.1: create 2nd subdb with specified page size"
+ set stat [catch {eval {berkdb_open_noerr -create -btree} \
+ $db2args {-pagesize $pgsz2 $testfile $sub2}} ret]
+ error_check_good subdb:pgsz $stat 1
+ error_check_good subdb:fail [is_substr $ret \
+ "Different pagesize specified"] 1
+
+ set ret [eval {berkdb dbremove} $envargs {$testfile}]
+
+ puts "\tSubdb007.b.0: create subdb with specified page size"
+ set db [eval {berkdb_open -create -mode 0644} \
+ $args {-pagesize $pgsz2 $omethod $testfile $sub1}]
+ error_check_good subdb [is_valid_db $db] TRUE
+ set statret [$db stat]
+ set newpgsz 0
+ foreach pair $statret {
+ set fld [lindex $pair 0]
+ if { [string compare $fld {Page size}] == 0 } {
+ set newpgsz [lindex $pair 1]
+ }
+ }
+ error_check_good pgsize $pgsz2 $newpgsz
+ error_check_good dbclose [$db close] 0
+
+ puts "\tSubdb007.b.1: create 2nd subdb with different page size"
+ set stat [catch {eval {berkdb_open_noerr -create -btree} \
+ $db2args {-pagesize $pgsz $testfile $sub2}} ret]
+ error_check_good subdb:pgsz $stat 1
+ error_check_good subdb:fail [is_substr $ret \
+ "Different pagesize specified"] 1
+
+ set ret [eval {berkdb dbremove} $envargs {$testfile}]
+
+ puts "\tSubdb007.c.0: create subdb with specified page size"
+ set db [eval {berkdb_open -create -mode 0644} \
+ $args {-pagesize $pgsz2 $omethod $testfile $sub1}]
+ error_check_good subdb [is_valid_db $db] TRUE
+ error_check_good dbclose [$db close] 0
+
+ puts "\tSubdb007.c.1: create 2nd subdb with same specified page size"
+ set db [eval {berkdb_open -create -mode 0644} \
+ $args {-pagesize $pgsz2 $omethod $testfile $sub2}]
+ error_check_good subdb [is_valid_db $db] TRUE
+ error_check_good dbclose [$db close] 0
+
+}
diff --git a/storage/bdb/test/sdb008.tcl b/storage/bdb/test/sdb008.tcl
new file mode 100644
index 00000000000..1c46aed2087
--- /dev/null
+++ b/storage/bdb/test/sdb008.tcl
@@ -0,0 +1,121 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: sdb008.tcl,v 11.25 2002/07/11 18:53:46 sandstro Exp $
+# TEST subdb008
+# TEST Tests lorder difference errors between subdbs.
+# TEST Test 3 different scenarios for lorder.
+# TEST 1. Create/open with specific lorder, 2nd subdb create with
+# TEST different one, should error.
+# TEST 2. Create/open with a default lorder, 2nd subdb create with
+# TEST specified different one, should error.
+# TEST 3. Create/open with specified lorder, 2nd subdb create with
+# TEST same specified lorder, should succeed.
+# TEST (4th combo of using all defaults is a basic test, done elsewhere)
+proc subdb008 { method args } {
+ source ./include.tcl
+
+ set db2args [convert_args -btree $args]
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_queue $method] == 1 } {
+ puts "Subdb008: skipping for method $method"
+ return
+ }
+ set txnenv 0
+ set envargs ""
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/subdb008.db
+ set env NULL
+ } else {
+ set testfile subdb008.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set envargs "-env $env"
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ append db2args " -auto_commit "
+ append envargs " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ puts "Subdb008: $method ($args) subdb tests with different lorders"
+
+ set sub1 "sub1"
+ set sub2 "sub2"
+ cleanup $testdir $env
+
+ puts "\tSubdb008.b.0: create subdb with specified lorder"
+ set db [eval {berkdb_open -create -mode 0644} \
+ $args {-lorder 4321 $omethod $testfile $sub1}]
+ error_check_good subdb [is_valid_db $db] TRUE
+ # Figure out what the default lorder is so that we can
+ # guarantee we create it with a different value later.
+ set is_swap [$db is_byteswapped]
+ if { $is_swap } {
+ set other 4321
+ } else {
+ set other 1234
+ }
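+	# (In Berkeley DB, lorder 1234 means little-endian and 4321 means
+	# big-endian, so $other ends up being this host's non-native byte
+	# order, guaranteed to differ from the default.)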
+ error_check_good dbclose [$db close] 0
+
+ puts "\tSubdb008.b.1: create 2nd subdb with different lorder"
+ set stat [catch {eval {berkdb_open_noerr -create $omethod} \
+ $args {-lorder 1234 $testfile $sub2}} ret]
+ error_check_good subdb:lorder $stat 1
+ error_check_good subdb:fail [is_substr $ret \
+ "Different lorder specified"] 1
+
+ set ret [eval {berkdb dbremove} $envargs {$testfile}]
+
+ puts "\tSubdb008.c.0: create subdb with opposite specified lorder"
+ set db [eval {berkdb_open -create -mode 0644} \
+ $args {-lorder 1234 $omethod $testfile $sub1}]
+ error_check_good subdb [is_valid_db $db] TRUE
+ error_check_good dbclose [$db close] 0
+
+ puts "\tSubdb008.c.1: create 2nd subdb with different lorder"
+ set stat [catch {eval {berkdb_open_noerr -create $omethod} \
+ $args {-lorder 4321 $testfile $sub2}} ret]
+ error_check_good subdb:lorder $stat 1
+ error_check_good subdb:fail [is_substr $ret \
+ "Different lorder specified"] 1
+
+ set ret [eval {berkdb dbremove} $envargs {$testfile}]
+
+ puts "\tSubdb008.d.0: create subdb with default lorder"
+ set db [eval {berkdb_open -create -mode 0644} \
+ $args {$omethod $testfile $sub1}]
+ error_check_good subdb [is_valid_db $db] TRUE
+ error_check_good dbclose [$db close] 0
+
+ puts "\tSubdb008.d.1: create 2nd subdb with different lorder"
+ set stat [catch {eval {berkdb_open_noerr -create -btree} \
+ $db2args {-lorder $other $testfile $sub2}} ret]
+ error_check_good subdb:lorder $stat 1
+ error_check_good subdb:fail [is_substr $ret \
+ "Different lorder specified"] 1
+
+ set ret [eval {berkdb dbremove} $envargs {$testfile}]
+
+ puts "\tSubdb008.e.0: create subdb with specified lorder"
+ set db [eval {berkdb_open -create -mode 0644} \
+ $args {-lorder $other $omethod $testfile $sub1}]
+ error_check_good subdb [is_valid_db $db] TRUE
+ error_check_good dbclose [$db close] 0
+
+ puts "\tSubdb008.e.1: create 2nd subdb with same specified lorder"
+ set db [eval {berkdb_open -create -mode 0644} \
+ $args {-lorder $other $omethod $testfile $sub2}]
+ error_check_good subdb [is_valid_db $db] TRUE
+ error_check_good dbclose [$db close] 0
+
+}
diff --git a/storage/bdb/test/sdb009.tcl b/storage/bdb/test/sdb009.tcl
new file mode 100644
index 00000000000..4e4869643ef
--- /dev/null
+++ b/storage/bdb/test/sdb009.tcl
@@ -0,0 +1,108 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: sdb009.tcl,v 11.9 2002/07/11 18:53:46 sandstro Exp $
+#
+# TEST subdb009
+# TEST Test DB->rename() method for subdbs
+proc subdb009 { method args } {
+ global errorCode
+ source ./include.tcl
+
+ set omethod [convert_method $method]
+ set args [convert_args $method $args]
+
+ puts "Subdb009: $method ($args): Test of DB->rename()"
+
+ if { [is_queue $method] == 1 } {
+ puts "\tSubdb009: Skipping for method $method."
+ return
+ }
+
+ set txnenv 0
+ set envargs ""
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/subdb009.db
+ set env NULL
+ } else {
+ set testfile subdb009.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set envargs " -env $env "
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ append envargs " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ set oldsdb OLDDB
+ set newsdb NEWDB
+
+ # Make sure we're starting from a clean slate.
+ cleanup $testdir $env
+ error_check_bad "$testfile exists" [file exists $testfile] 1
+
+ puts "\tSubdb009.a: Create/rename file"
+ puts "\t\tSubdb009.a.1: create"
+ set db [eval {berkdb_open -create -mode 0644}\
+ $omethod $args {$testfile $oldsdb}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # The nature of the key and data are unimportant; use numeric key
+ # so record-based methods don't need special treatment.
+ set txn ""
+ set key 1
+ set data [pad_data $method data]
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ error_check_good dbput [eval {$db put} $txn {$key $data}] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good dbclose [$db close] 0
+
+ puts "\t\tSubdb009.a.2: rename"
+ error_check_good rename_file [eval {berkdb dbrename} $envargs \
+ {$testfile $oldsdb $newsdb}] 0
+
+ puts "\t\tSubdb009.a.3: check"
+ # Open again with create to make sure we've really completely
+ # disassociated the subdb from the old name.
+ set odb [eval {berkdb_open -create -mode 0644}\
+ $omethod $args $testfile $oldsdb]
+ error_check_good odb_open [is_valid_db $odb] TRUE
+ set odbt [$odb get $key]
+ error_check_good odb_close [$odb close] 0
+
+ set ndb [eval {berkdb_open -create -mode 0644}\
+ $omethod $args $testfile $newsdb]
+ error_check_good ndb_open [is_valid_db $ndb] TRUE
+ set ndbt [$ndb get $key]
+ error_check_good ndb_close [$ndb close] 0
+
+	# The DBT from the "old" database should be empty; the one from
+	# the "new" database should not be.
+ error_check_good odbt_empty [llength $odbt] 0
+ error_check_bad ndbt_empty [llength $ndbt] 0
+ error_check_good ndbt [lindex [lindex $ndbt 0] 1] $data
+
+ # Now there's both an old and a new. Rename the "new" to the "old"
+ # and make sure that fails.
+ puts "\tSubdb009.b: Make sure rename fails instead of overwriting"
+ set ret [catch {eval {berkdb dbrename} $envargs $testfile \
+ $oldsdb $newsdb} res]
+ error_check_bad rename_overwrite $ret 0
+ error_check_good rename_overwrite_ret [is_substr $errorCode EEXIST] 1
+
+ puts "\tSubdb009 succeeded."
+}
diff --git a/storage/bdb/test/sdb010.tcl b/storage/bdb/test/sdb010.tcl
new file mode 100644
index 00000000000..51f25976c56
--- /dev/null
+++ b/storage/bdb/test/sdb010.tcl
@@ -0,0 +1,166 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: sdb010.tcl,v 11.14 2002/07/11 18:53:47 sandstro Exp $
+#
+# TEST subdb010
+# TEST Test DB->remove() method and DB->truncate() for subdbs
+proc subdb010 { method args } {
+ global errorCode
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Subdb010: Test of DB->remove() and DB->truncate"
+
+ if { [is_queue $method] == 1 } {
+ puts "\tSubdb010: Skipping for method $method."
+ return
+ }
+
+ set txnenv 0
+ set envargs ""
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/subdb010.db
+ set tfpath $testfile
+ set env NULL
+ } else {
+ set testfile subdb010.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set envargs " -env $env "
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ append envargs " -auto_commit "
+ }
+ set testdir [get_home $env]
+ set tfpath $testdir/$testfile
+ }
+ cleanup $testdir $env
+
+ set txn ""
+ set testdb DATABASE
+ set testdb2 DATABASE2
+
+ set db [eval {berkdb_open -create -mode 0644} $omethod \
+ $args $testfile $testdb]
+ error_check_good db_open [is_valid_db $db] TRUE
+ error_check_good db_close [$db close] 0
+
+ puts "\tSubdb010.a: Test of DB->remove()"
+ error_check_good file_exists_before [file exists $tfpath] 1
+ error_check_good db_remove [eval {berkdb dbremove} $envargs \
+ $testfile $testdb] 0
+
+ # File should still exist.
+ error_check_good file_exists_after [file exists $tfpath] 1
+
+ # But database should not.
+ set ret [catch {eval berkdb_open $omethod $args $testfile $testdb} res]
+	error_check_bad open_failed $ret 0
+ error_check_good open_failed_ret [is_substr $errorCode ENOENT] 1
+
+ puts "\tSubdb010.b: Setup for DB->truncate()"
+ # The nature of the key and data are unimportant; use numeric key
+ # so record-based methods don't need special treatment.
+ set key1 1
+ set key2 2
+ set data1 [pad_data $method data1]
+ set data2 [pad_data $method data2]
+
+ set db [eval {berkdb_open -create -mode 0644} $omethod \
+ $args {$testfile $testdb}]
+ error_check_good db_open [is_valid_db $db] TRUE
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ error_check_good dbput [eval {$db put} $txn {$key1 $data1}] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ set db2 [eval {berkdb_open -create -mode 0644} $omethod \
+ $args $testfile $testdb2]
+ error_check_good db_open [is_valid_db $db2] TRUE
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ error_check_good dbput [eval {$db2 put} $txn {$key2 $data2}] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ error_check_good db_close [$db close] 0
+ error_check_good db_close [$db2 close] 0
+
+ puts "\tSubdb010.c: truncate"
+ #
+ # Return value should be 1, the count of how many items were
+ # destroyed when we truncated.
+ set db [eval {berkdb_open -create -mode 0644} $omethod \
+ $args $testfile $testdb]
+ error_check_good db_open [is_valid_db $db] TRUE
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ error_check_good trunc_subdb [eval {$db truncate} $txn] 1
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ puts "\tSubdb010.d: check"
+ set db [eval {berkdb_open} $args {$testfile $testdb}]
+ error_check_good db_open [is_valid_db $db] TRUE
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+ set kd [$dbc get -first]
+ error_check_good trunc_dbcget [llength $kd] 0
+ error_check_good dbcclose [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ set db2 [eval {berkdb_open} $args {$testfile $testdb2}]
+ error_check_good db_open [is_valid_db $db2] TRUE
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db2 cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db2] TRUE
+ set kd [$dbc get -first]
+ error_check_bad notrunc_dbcget1 [llength $kd] 0
+ set db2kd [list [list $key2 $data2]]
+ error_check_good key2 $kd $db2kd
+ set kd [$dbc get -next]
+ error_check_good notrunc_dbget2 [llength $kd] 0
+ error_check_good dbcclose [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ error_check_good db_close [$db close] 0
+ error_check_good db_close [$db2 close] 0
+ puts "\tSubdb010 succeeded."
+}
diff --git a/storage/bdb/test/sdb011.tcl b/storage/bdb/test/sdb011.tcl
new file mode 100644
index 00000000000..862e32f73ed
--- /dev/null
+++ b/storage/bdb/test/sdb011.tcl
@@ -0,0 +1,143 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: sdb011.tcl,v 11.9 2002/07/11 18:53:47 sandstro Exp $
+#
+# TEST subdb011
+# TEST Test deleting Subdbs with overflow pages
+# TEST Create 1 db with many large subdbs.
+# TEST Test subdatabases with overflow pages.
+proc subdb011 { method {ndups 13} {nsubdbs 10} args} {
+ global names
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_queue $method] == 1 || [is_fixed_length $method] == 1 } {
+ puts "Subdb011: skipping for method $method"
+ return
+ }
+ set txnenv 0
+ set envargs ""
+ set max_files 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/subdb011.db
+ set env NULL
+ set tfpath $testfile
+ } else {
+ set testfile subdb011.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set envargs " -env $env "
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ append envargs " -auto_commit "
+ set max_files 50
+ if { $ndups == 13 } {
+ set ndups 7
+ }
+ }
+ set testdir [get_home $env]
+ set tfpath $testdir/$testfile
+ }
+
+ # Create the database and open the dictionary
+
+ cleanup $testdir $env
+ set txn ""
+
+ # Here is the loop where we put and get each key/data pair
+ set file_list [get_file_list]
+ if { $max_files != 0 && [llength $file_list] > $max_files } {
+ set fend [expr $max_files - 1]
+ set file_list [lrange $file_list 0 $fend]
+ }
+ set flen [llength $file_list]
+ puts "Subdb011: $method ($args) $ndups overflow dups with \
+ $flen filename=key filecontents=data pairs"
+
+ puts "\tSubdb011.a: Create each of $nsubdbs subdbs and dups"
+ set slist {}
+ set i 0
+ set count 0
+ foreach f $file_list {
+ set i [expr $i % $nsubdbs]
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ set names([expr $count + 1]) $f
+ } else {
+ set key $f
+ }
+ # Should really catch errors
+ set fid [open $f r]
+ fconfigure $fid -translation binary
+ set filecont [read $fid]
+ set subdb subdb$i
+ lappend slist $subdb
+ close $fid
+ set db [eval {berkdb_open -create -mode 0644} \
+ $args {$omethod $testfile $subdb}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ for {set dup 0} {$dup < $ndups} {incr dup} {
+ set data $dup:$filecont
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$key \
+ [chop_data $method $data]}]
+ error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+ error_check_good dbclose [$db close] 0
+ incr i
+ incr count
+ }
+
+ puts "\tSubdb011.b: Verify overflow pages"
+ foreach subdb $slist {
+ set db [eval {berkdb_open -create -mode 0644} \
+ $args {$omethod $testfile $subdb}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set stat [$db stat]
+
+ # What everyone else calls overflow pages, hash calls "big
+ # pages", so we need to special-case hash here. (Hash
+ # overflow pages are additional pages after the first in a
+ # bucket.)
+ if { [string compare [$db get_type] hash] == 0 } {
+ error_check_bad overflow \
+ [is_substr $stat "{{Number of big pages} 0}"] 1
+ } else {
+ error_check_bad overflow \
+ [is_substr $stat "{{Overflow pages} 0}"] 1
+ }
+ error_check_good dbclose [$db close] 0
+ }
+
+ puts "\tSubdb011.c: Delete subdatabases"
+ for {set i $nsubdbs} {$i > 0} {set i [expr $i - 1]} {
+ #
+ # Randomly delete a subdatabase
+ set sindex [berkdb random_int 0 [expr $i - 1]]
+ set subdb [lindex $slist $sindex]
+ #
+ # Delete the one we did from the list
+ set slist [lreplace $slist $sindex $sindex]
+ error_check_good file_exists_before [file exists $tfpath] 1
+ error_check_good db_remove [eval {berkdb dbremove} $envargs \
+ {$testfile $subdb}] 0
+ }
+}
+
diff --git a/storage/bdb/test/sdb012.tcl b/storage/bdb/test/sdb012.tcl
new file mode 100644
index 00000000000..9c05d977daf
--- /dev/null
+++ b/storage/bdb/test/sdb012.tcl
@@ -0,0 +1,428 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: sdb012.tcl,v 1.3 2002/08/08 15:38:10 bostic Exp $
+#
+# TEST subdb012
+# TEST Test subdbs with locking and transactions
+# TEST Tests creating and removing subdbs while handles
+# TEST are open works correctly, and in the face of txns.
+#
+proc subdb012 { method args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_queue $method] == 1 } {
+ puts "Subdb012: skipping for method $method"
+ return
+ }
+
+ # If we are using an env, then skip this test. It needs its own.
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ incr eindex
+ set env [lindex $args $eindex]
+ puts "Subdb012 skipping for env $env"
+ return
+ }
+ set encargs ""
+ set largs [split_encargs $args encargs]
+
+ puts "Subdb012: $method ($largs $encargs) subdb txn/locking tests"
+
+ #
+ # sdb012_body takes a txn list containing 4 elements.
+ # {txn command for first subdb
+ # txn command for second subdb
+ # txn command for first subdb removal
+ # txn command for second subdb removal}
+ #
+	# The allowed commands are 'none', 'one', 'auto', 'abort', 'commit'.
+ # 'none' is a special case meaning run without a txn. In the
+ # case where all 4 items are 'none', we run in a lock-only env.
+ # 'one' is a special case meaning we create the subdbs together
+ # in one single transaction. It is indicated as the value for t1,
+ # and the value in t2 indicates if that single txn should be
+ # aborted or committed. It is not used and has no meaning
+ # in the removal case. 'auto' means use the -auto_commit flag
+ # to the operation, and 'abort' and 'commit' do the obvious.
+ #
+ # First test locking w/o txns. If any in tlist are 'none',
+ # all must be none.
+ #
+ # Now run through the txn-based operations
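+	# For example, the tlist {one commit auto abort} creates both
+	# subdbs in a single committed txn, removes the first subdb with
+	# -auto_commit, and removes the second inside a txn that is then
+	# aborted.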
+ set count 0
+ set sdb "Subdb012."
+ set teststr "abcdefghijklmnopqrstuvwxyz"
+ set testlet [split $teststr {}]
+ foreach t1 { none one abort auto commit } {
+ foreach t2 { none abort auto commit } {
+ if { $t1 == "one" } {
+ if { $t2 == "none" || $t2 == "auto"} {
+ continue
+ }
+ }
+ set tlet [lindex $testlet $count]
+ foreach r1 { none abort auto commit } {
+ foreach r2 { none abort auto commit } {
+ set tlist [list $t1 $t2 $r1 $r2]
+ sdb012_body $testdir $omethod $largs \
+ $encargs $sdb$tlet $tlist
+ }
+ }
+ incr count
+ }
+ }
+
+}
+
+proc s012 { method args } {
+ source ./include.tcl
+
+ set omethod [convert_method $method]
+
+ set encargs ""
+ set largs ""
+
+ puts "Subdb012: $method ($largs $encargs) subdb txn/locking tests"
+
+ set sdb "Subdb012."
+ set tlet X
+ set tlist $args
+ error_check_good tlist [llength $tlist] 4
+ sdb012_body $testdir $omethod $largs $encargs $sdb$tlet $tlist
+}
+
+#
+# This proc checks the tlist values and returns the flags
+# that should be used when opening the env. If we are running
+# with no txns, then just -lock, otherwise -txn.
+#
+proc sdb012_subsys { tlist } {
+ set t1 [lindex $tlist 0]
+ #
+ # If we have no txns, all elements of the list should be none.
+ # In that case we only run with locking turned on.
+ # Otherwise, we use the full txn subsystems.
+ #
+ set allnone {none none none none}
+ if { $allnone == $tlist } {
+ set subsys "-lock"
+ } else {
+ set subsys "-txn"
+ }
+ return $subsys
+}
+
+#
+# This proc parses the tlist and returns a list of 4 items that
+# should be used in operations. I.e. it will begin the txns as
+# needed, or return a -auto_commit flag, etc.
+#
+proc sdb012_tflags { env tlist } {
+ set ret ""
+ set t1 ""
+ foreach t $tlist {
+ switch $t {
+ one {
+ set t1 [$env txn]
+ error_check_good txnbegin [is_valid_txn $t1 $env] TRUE
+ lappend ret "-txn $t1"
+ lappend ret "-txn $t1"
+ }
+ auto {
+ lappend ret "-auto_commit"
+ }
+ abort -
+ commit {
+ #
+ # If the previous command was a "one", skip over
+ # this commit/abort. Otherwise start a new txn
+ # for the removal case.
+ #
+ if { $t1 == "" } {
+ set txn [$env txn]
+ error_check_good txnbegin [is_valid_txn $txn \
+ $env] TRUE
+ lappend ret "-txn $txn"
+ } else {
+ set t1 ""
+ }
+ }
+ none {
+ lappend ret ""
+ }
+ default {
+ error "Txn command $t not implemented"
+ }
+ }
+ }
+ return $ret
+}
+
+#
+# This proc parses the tlist and returns a list of 4 items that
+# should be used in the txn conclusion operations. I.e. it will
+# give "" if using auto_commit (i.e. no final txn op), or a single
+# abort/commit if both subdb's are in one txn.
+#
+proc sdb012_top { tflags tlist } {
+ set ret ""
+ set t1 ""
+ #
+ # We know both lists have 4 items. Iterate over them
+ # using multiple value lists so we know which txn goes
+ # with each op.
+ #
+ # The tflags list is needed to extract the txn command
+ # out for the operation. The tlist list is needed to
+ # determine what operation we are doing.
+ #
+ foreach t $tlist tf $tflags {
+ switch $t {
+ one {
+ set t1 [lindex $tf 1]
+ }
+ auto {
+ lappend ret "sdb012_nop"
+ }
+ abort -
+ commit {
+ #
+			# If the previous command was a "one" (i.e. t1
+			# is set), append a no-op for the first operation
+			# and then the real commit/abort.
+ #
+ if { $t1 == "" } {
+ set txn [lindex $tf 1]
+ set top "$txn $t"
+ lappend ret $top
+ } else {
+ set top "$t1 $t"
+ lappend ret "sdb012_nop"
+ lappend ret $top
+ set t1 ""
+ }
+ }
+ none {
+ lappend ret "sdb012_nop"
+ }
+ }
+ }
+ return $ret
+}
+
+proc sdb012_nop { } {
+ return 0
+}
+
+proc sdb012_isabort { tlist item } {
+ set i [lindex $tlist $item]
+ if { $i == "one" } {
+ set i [lindex $tlist [expr $item + 1]]
+ }
+ if { $i == "abort" } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc sdb012_body { testdir omethod largs encargs msg tlist } {
+
+ puts "\t$msg: $tlist"
+ set testfile subdb012.db
+ set subdb1 sub1
+ set subdb2 sub2
+
+ set subsys [sdb012_subsys $tlist]
+ env_cleanup $testdir
+ set env [eval {berkdb_env -create -home} $testdir $subsys $encargs]
+ error_check_good dbenv [is_valid_env $env] TRUE
+ error_check_good test_lock [$env test abort subdb_lock] 0
+
+ #
+ # Convert from our tlist txn commands into real flags we
+ # will pass to commands. Use the multiple values feature
+ # of foreach to do this efficiently.
+ #
+ set tflags [sdb012_tflags $env $tlist]
+ foreach {txn1 txn2 rem1 rem2} $tflags {break}
+ foreach {top1 top2 rop1 rop2} [sdb012_top $tflags $tlist] {break}
+
+# puts "txn1 $txn1, txn2 $txn2, rem1 $rem1, rem2 $rem2"
+# puts "top1 $top1, top2 $top2, rop1 $rop1, rop2 $rop2"
+ puts "\t$msg.0: Create sub databases in env with $subsys"
+ set s1 [eval {berkdb_open -env $env -create -mode 0644} \
+ $largs $txn1 {$omethod $testfile $subdb1}]
+ error_check_good dbopen [is_valid_db $s1] TRUE
+
+ set ret [eval $top1]
+ error_check_good t1_end $ret 0
+
+ set s2 [eval {berkdb_open -env $env -create -mode 0644} \
+ $largs $txn2 {$omethod $testfile $subdb2}]
+ error_check_good dbopen [is_valid_db $s2] TRUE
+
+ puts "\t$msg.1: Subdbs are open; resolve txns if necessary"
+ set ret [eval $top2]
+ error_check_good t2_end $ret 0
+
+ set t1_isabort [sdb012_isabort $tlist 0]
+ set t2_isabort [sdb012_isabort $tlist 1]
+ set r1_isabort [sdb012_isabort $tlist 2]
+ set r2_isabort [sdb012_isabort $tlist 3]
+
+# puts "t1_isabort $t1_isabort, t2_isabort $t2_isabort, r1_isabort $r1_isabort, r2_isabort $r2_isabort"
+
+ puts "\t$msg.2: Subdbs are open; verify removal failures"
+ # Verify removes of subdbs with open subdb's fail
+ #
+ # We should fail no matter what. If we aborted, then the
+ # subdb should not exist. If we didn't abort, we should fail
+ # with DB_LOCK_NOTGRANTED.
+ #
+ # XXX - Do we need -auto_commit for all these failing ones?
+ set r [ catch {berkdb dbremove -env $env $testfile $subdb1} result ]
+ error_check_bad dbremove1_open $r 0
+ if { $t1_isabort } {
+ error_check_good dbremove1_open_ab [is_substr \
+ $result "no such file"] 1
+ } else {
+ error_check_good dbremove1_open [is_substr \
+ $result DB_LOCK_NOTGRANTED] 1
+ }
+
+ set r [ catch {berkdb dbremove -env $env $testfile $subdb2} result ]
+ error_check_bad dbremove2_open $r 0
+ if { $t2_isabort } {
+ error_check_good dbremove2_open_ab [is_substr \
+ $result "no such file"] 1
+ } else {
+ error_check_good dbremove2_open [is_substr \
+ $result DB_LOCK_NOTGRANTED] 1
+ }
+
+ # Verify file remove fails
+ set r [catch {berkdb dbremove -env $env $testfile} result]
+ error_check_bad dbremovef_open $r 0
+
+ #
+	# If both creating txns aborted, the file itself should not exist.
+ #
+ if { $t1_isabort && $t2_isabort } {
+ error_check_good dbremovef_open_ab [is_substr \
+ $result "no such file"] 1
+ } else {
+ error_check_good dbremovef_open [is_substr \
+ $result DB_LOCK_NOTGRANTED] 1
+ }
+
+ puts "\t$msg.3: Close subdb2; verify removals"
+ error_check_good close_s2 [$s2 close] 0
+ set r [ catch {eval {berkdb dbremove -env} \
+ $env $rem2 $testfile $subdb2} result ]
+ if { $t2_isabort } {
+ error_check_bad dbrem2_ab $r 0
+ error_check_good dbrem2_ab [is_substr \
+ $result "no such file"] 1
+ } else {
+ error_check_good dbrem2 $result 0
+ }
+ # Resolve subdb2 removal txn
+ set r [eval $rop2]
+ error_check_good rop2 $r 0
+
+ set r [ catch {berkdb dbremove -env $env $testfile $subdb1} result ]
+ error_check_bad dbremove1.2_open $r 0
+ if { $t1_isabort } {
+ error_check_good dbremove1.2_open_ab [is_substr \
+ $result "no such file"] 1
+ } else {
+ error_check_good dbremove1.2_open [is_substr \
+ $result DB_LOCK_NOTGRANTED] 1
+ }
+
+ # There are three cases here:
+ # 1. if both t1 and t2 aborted, the file shouldn't exist
+ # 2. if only t1 aborted, the file still exists and nothing is open
+ # 3. if neither aborted a remove should fail because the first
+ # subdb is still open
+ # In case 2, don't try the remove, because it should succeed
+ # and we won't be able to test anything else.
+ if { !$t1_isabort || $t2_isabort } {
+ set r [catch {berkdb dbremove -env $env $testfile} result]
+ if { $t1_isabort && $t2_isabort } {
+ error_check_bad dbremovef.2_open $r 0
+ error_check_good dbremove.2_open_ab [is_substr \
+ $result "no such file"] 1
+ } else {
+ error_check_bad dbremovef.2_open $r 0
+ error_check_good dbremove.2_open [is_substr \
+ $result DB_LOCK_NOTGRANTED] 1
+ }
+ }
+
+ puts "\t$msg.4: Close subdb1; verify removals"
+ error_check_good close_s1 [$s1 close] 0
+ set r [ catch {eval {berkdb dbremove -env} \
+ $env $rem1 $testfile $subdb1} result ]
+ if { $t1_isabort } {
+ error_check_bad dbremove1_ab $r 0
+ error_check_good dbremove1_ab [is_substr \
+ $result "no such file"] 1
+ } else {
+ error_check_good dbremove1 $result 0
+ }
+ # Resolve subdb1 removal txn
+ set r [eval $rop1]
+ error_check_good rop1 $r 0
+
+
+ # Verify removal of subdb2. All DB handles are closed now.
+ # So we have two scenarios:
+ # 1. The removal of subdb2 above was successful and subdb2
+ # doesn't exist and we should fail that way.
+ # 2. The removal of subdb2 above was aborted, and this
+ # removal should succeed.
+ #
+ set r [ catch {berkdb dbremove -env $env $testfile $subdb2} result ]
+ if { $r2_isabort && !$t2_isabort } {
+ error_check_good dbremove2.1_ab $result 0
+ } else {
+ error_check_bad dbremove2.1 $r 0
+ error_check_good dbremove2.1 [is_substr \
+ $result "no such file"] 1
+ }
+
+ # Verify removal of subdb1. All DB handles are closed now.
+ # So we have two scenarios:
+ # 1. The removal of subdb1 above was successful and subdb1
+ # doesn't exist and we should fail that way.
+ # 2. The removal of subdb1 above was aborted, and this
+ # removal should succeed.
+ #
+ set r [ catch {berkdb dbremove -env $env $testfile $subdb1} result ]
+ if { $r1_isabort && !$t1_isabort } {
+ error_check_good dbremove1.1 $result 0
+ } else {
+ error_check_bad dbremove_open $r 0
+ error_check_good dbremove.1 [is_substr \
+ $result "no such file"] 1
+ }
+
+ puts "\t$msg.5: All closed; remove file"
+ set r [catch {berkdb dbremove -env $env $testfile} result]
+ if { $t1_isabort && $t2_isabort } {
+ error_check_bad dbremove_final_ab $r 0
+ error_check_good dbremove_file_abstr [is_substr \
+ $result "no such file"] 1
+ } else {
+ error_check_good dbremove_final $r 0
+ }
+ error_check_good envclose [$env close] 0
+}
diff --git a/storage/bdb/test/sdbscript.tcl b/storage/bdb/test/sdbscript.tcl
new file mode 100644
index 00000000000..d1978ccb048
--- /dev/null
+++ b/storage/bdb/test/sdbscript.tcl
@@ -0,0 +1,47 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: sdbscript.tcl,v 11.9 2002/01/11 15:53:36 bostic Exp $
+#
+# Usage: subdbscript testfile subdbnumber factor
+# testfile: name of DB itself
+# subdbnumber: n, subdb indicator, of form sub$n.db
+# factor: Delete over factor'th + n'th from my subdb.
+#
+# I.e. if factor is 10 and n is 0, remove entries 0, 10, 20, ...
+# if factor is 10 and n is 1, remove entries 1, 11, 21, ...
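+#
+# A hypothetical invocation (sdbtest002 drives this via wrap.tcl):
+#	tclsh sdbscript.tcl subdbtest002.db 1 6
+# would delete entries 1, 7, 13, ... from sub1.db.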
+source ./include.tcl
+source $test_path/test.tcl
+
+set usage "subdbscript testfile subdbnumber factor"
+
+# Verify usage
+if { $argc != 3 } {
+ puts stderr "FAIL:[timestamp] Usage: $usage"
+ exit
+}
+
+# Initialize arguments
+set testfile [lindex $argv 0]
+set n [ lindex $argv 1 ]
+set factor [ lindex $argv 2 ]
+
+set db [berkdb_open -unknown $testfile sub$n.db]
+error_check_good db_open [is_valid_db $db] TRUE
+
+set dbc [$db cursor]
+error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+set i 1
+for {set d [$dbc get -first]} {[llength $d] != 0} {set d [$dbc get -next]} {
+ set x [expr $i - $n]
+ if { $x >= 0 && [expr $x % $factor] == 0 } {
+ puts "Deleting $d"
+ error_check_good dbc_del [$dbc del] 0
+ }
+ incr i
+}
+error_check_good db_close [$db close] 0
+
+exit
diff --git a/storage/bdb/test/sdbtest001.tcl b/storage/bdb/test/sdbtest001.tcl
new file mode 100644
index 00000000000..b8b4508c2a4
--- /dev/null
+++ b/storage/bdb/test/sdbtest001.tcl
@@ -0,0 +1,150 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: sdbtest001.tcl,v 11.19 2002/05/22 15:42:42 sue Exp $
+#
+# TEST sdbtest001
+# TEST Tests multiple access methods in one subdb
+# TEST Open several subdbs, each with a different access method
+# TEST Small keys, small data
+# TEST Put/get per key per subdb
+# TEST Dump file, verify per subdb
+# TEST Close, reopen per subdb
+# TEST Dump file, verify per subdb
+# TEST
+# TEST Make several subdbs of different access methods all in one DB.
+# TEST Rotate methods and repeat [#762].
+# TEST Use the first 10,000 entries from the dictionary.
+# TEST Insert each with self as key and data; retrieve each.
+# TEST After all are entered, retrieve all; compare output to original.
+# TEST Close file, reopen, do retrieve and re-verify.
+proc sdbtest001 { {nentries 10000} } {
+ source ./include.tcl
+
+ puts "Subdbtest001: many different subdb access methods in one"
+
+ # Create the database and open the dictionary
+ set testfile $testdir/subdbtest001.db
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ set t4 $testdir/t4
+
+ set txn ""
+ set count 0
+
+ # Set up various methods to rotate through
+ lappend method_list [list "-rrecno" "-rbtree" "-hash" "-recno" "-btree"]
+ lappend method_list [list "-recno" "-hash" "-btree" "-rbtree" "-rrecno"]
+ lappend method_list [list "-btree" "-recno" "-rbtree" "-rrecno" "-hash"]
+ lappend method_list [list "-hash" "-recno" "-rbtree" "-rrecno" "-btree"]
+ lappend method_list [list "-rbtree" "-hash" "-btree" "-rrecno" "-recno"]
+ lappend method_list [list "-rrecno" "-recno"]
+ lappend method_list [list "-recno" "-rrecno"]
+ lappend method_list [list "-hash" "-dhash"]
+ lappend method_list [list "-dhash" "-hash"]
+ lappend method_list [list "-rbtree" "-btree" "-dbtree" "-ddbtree"]
+ lappend method_list [list "-btree" "-rbtree" "-ddbtree" "-dbtree"]
+ lappend method_list [list "-dbtree" "-ddbtree" "-btree" "-rbtree"]
+ lappend method_list [list "-ddbtree" "-dbtree" "-rbtree" "-btree"]
+ set plist [list 512 8192 1024 4096 2048 16384]
+ set mlen [llength $method_list]
+ set plen [llength $plist]
+ while { $plen < $mlen } {
+ set plist [concat $plist $plist]
+ set plen [llength $plist]
+ }
+ set pgsz 0
+ foreach methods $method_list {
+ cleanup $testdir NULL
+ puts "\tSubdbtest001.a: create subdbs of different access methods:"
+ puts "\tSubdbtest001.a: $methods"
+ set nsubdbs [llength $methods]
+ set duplist ""
+ for { set i 0 } { $i < $nsubdbs } { incr i } {
+ lappend duplist -1
+ }
+ set psize [lindex $plist $pgsz]
+ incr pgsz
+ set newent [expr $nentries / $nsubdbs]
+ build_all_subdb $testfile $methods $psize $duplist $newent
+
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ for { set subdb 0 } { $subdb < $nsubdbs } { incr subdb } {
+
+ set method [lindex $methods $subdb]
+ set method [convert_method $method]
+ if { [is_record_based $method] == 1 } {
+ set checkfunc subdbtest001_recno.check
+ } else {
+ set checkfunc subdbtest001.check
+ }
+
+ puts "\tSubdbtest001.b: dump file sub$subdb.db"
+ set db [berkdb_open -unknown $testfile sub$subdb.db]
+ dump_file $db $txn $t1 $checkfunc
+ error_check_good db_close [$db close] 0
+
+ # Now compare the keys to see if they match the
+ # dictionary (or ints)
+ if { [is_record_based $method] == 1 } {
+ set oid [open $t2 w]
+ for {set i 1} {$i <= $newent} {incr i} {
+ puts $oid [expr $subdb * $newent + $i]
+ }
+ close $oid
+ file rename -force $t1 $t3
+ } else {
+ # filehead uses 1-based line numbers
+ set beg [expr $subdb * $newent]
+ incr beg
+ set end [expr $beg + $newent - 1]
+ filehead $end $dict $t3 $beg
+ filesort $t3 $t2
+ filesort $t1 $t3
+ }
+
+ error_check_good Subdbtest001:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ puts "\tSubdbtest001.c: sub$subdb.db: close, open, and dump file"
+ # Now, reopen the file and run the last test again.
+ open_and_dump_subfile $testfile NULL $t1 $checkfunc \
+ dump_file_direction "-first" "-next" sub$subdb.db
+ if { [string compare $method "-recno"] != 0 } {
+ filesort $t1 $t3
+ }
+
+ error_check_good Subdbtest001:diff($t2,$t3) \
+ [filecmp $t2 $t3] 0
+
+ # Now, reopen the file and run the last test again in the
+ # reverse direction.
+ puts "\tSubdbtest001.d: sub$subdb.db: close, open, and dump file in reverse direction"
+ open_and_dump_subfile $testfile NULL $t1 $checkfunc \
+ dump_file_direction "-last" "-prev" sub$subdb.db
+
+ if { [string compare $method "-recno"] != 0 } {
+ filesort $t1 $t3
+ }
+
+ error_check_good Subdbtest001:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+ }
+ }
+}
+
+# Check function for Subdbtest001; keys and data are identical
+proc subdbtest001.check { key data } {
+ error_check_good "key/data mismatch" $data $key
+}
+
+proc subdbtest001_recno.check { key data } {
+	global dict
+	global kvals
+ error_check_good key"$key"_exists [info exists kvals($key)] 1
+ error_check_good "key/data mismatch, key $key" $data $kvals($key)
+}
diff --git a/storage/bdb/test/sdbtest002.tcl b/storage/bdb/test/sdbtest002.tcl
new file mode 100644
index 00000000000..95717413a7b
--- /dev/null
+++ b/storage/bdb/test/sdbtest002.tcl
@@ -0,0 +1,174 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: sdbtest002.tcl,v 11.26 2002/09/05 17:23:07 sandstro Exp $
+#
+# TEST sdbtest002
+# TEST Tests multiple access methods in one subdb access by multiple
+# TEST processes.
+# TEST Open several subdbs, each with a different access method
+# TEST Small keys, small data
+# TEST Put/get per key per subdb
+# TEST Fork off several child procs to each delete selected
+# TEST data from their subdb and then exit
+# TEST Dump file, verify contents of each subdb is correct
+# TEST Close, reopen per subdb
+# TEST Dump file, verify per subdb
+# TEST
+# TEST Make several subdbs of different access methods all in one DB.
+# TEST Fork off some child procs to each manipulate one subdb and when
+# TEST they are finished, verify the contents of the databases.
+# TEST Use the first 10,000 entries from the dictionary.
+# TEST Insert each with self as key and data; retrieve each.
+# TEST After all are entered, retrieve all; compare output to original.
+# TEST Close file, reopen, do retrieve and re-verify.
+proc sdbtest002 { {nentries 10000} } {
+ source ./include.tcl
+
+ puts "Subdbtest002: many different subdb access methods in one"
+
+ # Create the database and open the dictionary
+ set testfile $testdir/subdbtest002.db
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ set t4 $testdir/t4
+
+ set txn ""
+ set count 0
+
+ # Set up various methods to rotate through
+ set methods \
+ [list "-rbtree" "-recno" "-btree" "-btree" "-recno" "-rbtree"]
+ cleanup $testdir NULL
+ puts "\tSubdbtest002.a: create subdbs of different access methods:"
+ puts "\t\t$methods"
+ set psize 4096
+ set nsubdbs [llength $methods]
+ set duplist ""
+ for { set i 0 } { $i < $nsubdbs } { incr i } {
+ lappend duplist -1
+ }
+ set newent [expr $nentries / $nsubdbs]
+
+ #
+ # XXX We need dict sorted to figure out what was deleted
+ # since things are stored sorted in the btree.
+ #
+ filesort $dict $t4
+ set dictorig $dict
+ set dict $t4
+
+ build_all_subdb $testfile $methods $psize $duplist $newent
+
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ set pidlist ""
+ puts "\tSubdbtest002.b: create $nsubdbs procs to delete some keys"
+ for { set subdb 0 } { $subdb < $nsubdbs } { incr subdb } {
+ puts "$tclsh_path\
+ $test_path/sdbscript.tcl $testfile \
+ $subdb $nsubdbs >& $testdir/subdb002.log.$subdb"
+ set p [exec $tclsh_path $test_path/wrap.tcl \
+ sdbscript.tcl \
+ $testdir/subdb002.log.$subdb $testfile $subdb $nsubdbs &]
+ lappend pidlist $p
+ }
+ watch_procs $pidlist 5
+
+ for { set subdb 0 } { $subdb < $nsubdbs } { incr subdb } {
+ set method [lindex $methods $subdb]
+ set method [convert_method $method]
+ if { [is_record_based $method] == 1 } {
+ set checkfunc subdbtest002_recno.check
+ } else {
+ set checkfunc subdbtest002.check
+ }
+
+ puts "\tSubdbtest002.b: dump file sub$subdb.db"
+ set db [berkdb_open -unknown $testfile sub$subdb.db]
+ error_check_good db_open [is_valid_db $db] TRUE
+ dump_file $db $txn $t1 $checkfunc
+ error_check_good db_close [$db close] 0
+ #
+ # This is just so that t2 is there and empty
+ # since we are only appending below.
+ #
+ exec > $t2
+
+ # Now compare the keys to see if they match the dictionary (or ints)
+ if { [is_record_based $method] == 1 } {
+ set oid [open $t2 w]
+ for {set i 1} {$i <= $newent} {incr i} {
+ set x [expr $i - $subdb]
+ if { [expr $x % $nsubdbs] != 0 } {
+ puts $oid [expr $subdb * $newent + $i]
+ }
+ }
+ close $oid
+ file rename -force $t1 $t3
+ } else {
+ set oid [open $t4 r]
+ for {set i 1} {[gets $oid line] >= 0} {incr i} {
+ set farr($i) $line
+ }
+ close $oid
+
+ set oid [open $t2 w]
+ for {set i 1} {$i <= $newent} {incr i} {
+				# Line numbers in farr are 1-based
+ set x [expr $i - $subdb]
+ if { [expr $x % $nsubdbs] != 0 } {
+ set beg [expr $subdb * $newent]
+ set beg [expr $beg + $i]
+ puts $oid $farr($beg)
+ }
+ }
+ close $oid
+ filesort $t1 $t3
+ }
+
+ error_check_good Subdbtest002:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ puts "\tSubdbtest002.c: sub$subdb.db: close, open, and dump file"
+ # Now, reopen the file and run the last test again.
+ open_and_dump_subfile $testfile NULL $t1 $checkfunc \
+ dump_file_direction "-first" "-next" sub$subdb.db
+ if { [string compare $method "-recno"] != 0 } {
+ filesort $t1 $t3
+ }
+
+ error_check_good Subdbtest002:diff($t2,$t3) \
+ [filecmp $t2 $t3] 0
+
+ # Now, reopen the file and run the last test again in the
+ # reverse direction.
+ puts "\tSubdbtest002.d: sub$subdb.db: close, open, and dump file in reverse direction"
+ open_and_dump_subfile $testfile NULL $t1 $checkfunc \
+ dump_file_direction "-last" "-prev" sub$subdb.db
+
+ if { [string compare $method "-recno"] != 0 } {
+ filesort $t1 $t3
+ }
+
+ error_check_good Subdbtest002:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+ }
+ set dict $dictorig
+ return
+}
+
+# Check function for Subdbtest002; keys and data are identical
+proc subdbtest002.check { key data } {
+ error_check_good "key/data mismatch" $data $key
+}
+
+proc subdbtest002_recno.check { key data } {
+	global dict
+	global kvals
+ error_check_good key"$key"_exists [info exists kvals($key)] 1
+ error_check_good "key/data mismatch, key $key" $data $kvals($key)
+}
diff --git a/storage/bdb/test/sdbutils.tcl b/storage/bdb/test/sdbutils.tcl
new file mode 100644
index 00000000000..3221a422e18
--- /dev/null
+++ b/storage/bdb/test/sdbutils.tcl
@@ -0,0 +1,197 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: sdbutils.tcl,v 11.14 2002/06/10 15:39:39 sue Exp $
+#
+proc build_all_subdb { dbname methods psize dups {nentries 100} {dbargs ""}} {
+ set nsubdbs [llength $dups]
+ set mlen [llength $methods]
+ set savearg $dbargs
+ for {set i 0} {$i < $nsubdbs} { incr i } {
+ set m [lindex $methods [expr $i % $mlen]]
+ set dbargs $savearg
+ subdb_build $dbname $nentries [lindex $dups $i] \
+ $i $m $psize sub$i.db $dbargs
+ }
+}
+
+proc subdb_build { name nkeys ndups dup_interval method psize subdb dbargs} {
+ source ./include.tcl
+
+ set dbargs [convert_args $method $dbargs]
+ set omethod [convert_method $method]
+
+ puts "Method: $method"
+
+ set txnenv 0
+ set eindex [lsearch -exact $dbargs "-env"]
+ if { $eindex != -1 } {
+ incr eindex
+ set env [lindex $dbargs $eindex]
+ set txnenv [is_txnenv $env]
+ }
+ # Create the database and open the dictionary
+ set oflags "-create -mode 0644 $omethod \
+ -pagesize $psize $dbargs $name $subdb"
+ set db [eval {berkdb_open} $oflags]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set did [open $dict]
+ set count 0
+ if { $ndups >= 0 } {
+ puts "\tBuilding $method $name $subdb. \
+ $nkeys keys with $ndups duplicates at interval of $dup_interval"
+ }
+ if { $ndups < 0 } {
+ puts "\tBuilding $method $name $subdb. \
+ $nkeys unique keys of pagesize $psize"
+ #
+ # If ndups is < 0, we want unique keys in each subdb,
+ # so skip ahead in the dict by nkeys * iteration
+ #
+ for { set count 0 } \
+ { $count < [expr $nkeys * $dup_interval] } {
+ incr count} {
+ set ret [gets $did str]
+ if { $ret == -1 } {
+ break
+ }
+ }
+ }
+ set txn ""
+ for { set count 0 } { [gets $did str] != -1 && $count < $nkeys } {
+ incr count} {
+ for { set i 0 } { $i < $ndups } { incr i } {
+ set data [format "%04d" [expr $i * $dup_interval]]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$str \
+ [chop_data $method $data]}]
+ error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ if { $ndups == 0 } {
+ set ret [eval {$db put} $txn {$str \
+ [chop_data $method NODUP]}]
+ error_check_good put $ret 0
+ } elseif { $ndups < 0 } {
+ if { [is_record_based $method] == 1 } {
+ global kvals
+
+ set num [expr $nkeys * $dup_interval]
+ set num [expr $num + $count + 1]
+ set ret [eval {$db put} $txn {$num \
+ [chop_data $method $str]}]
+ set kvals($num) [pad_data $method $str]
+ error_check_good put $ret 0
+ } else {
+ set ret [eval {$db put} $txn \
+ {$str [chop_data $method $str]}]
+ error_check_good put $ret 0
+ }
+ }
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+ close $did
+ error_check_good close:$name [$db close] 0
+}
+
+proc do_join_subdb { db primary subdbs key oargs } {
+ source ./include.tcl
+
+ puts "\tJoining: $subdbs on $key"
+
+ # Open all the databases
+ set p [eval {berkdb_open -unknown} $oargs $db $primary]
+ error_check_good "primary open" [is_valid_db $p] TRUE
+
+ set dblist ""
+ set curslist ""
+
+ foreach i $subdbs {
+ set jdb [eval {berkdb_open -unknown} $oargs $db sub$i.db]
+ error_check_good "sub$i.db open" [is_valid_db $jdb] TRUE
+
+ lappend jlist [list $jdb $key]
+ lappend dblist $jdb
+
+ }
+
+ set join_res [eval {$p get_join} $jlist]
+ set ndups [llength $join_res]
+
+	# Calculate how many dups we expect.
+	# We walk the list of subdb indices. If we find a 0, we expect
+	# 0 dups. Otherwise we fold each index into a running value: if
+	# one of the pair divides the other, keep the larger; if not,
+	# take their least common multiple. The expected count is the
+	# number of times that value goes into 50.
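+	# For example, subdbs {2 3}: lcm(2,3) = 6 and 50/6 = 8 matches;
+	# subdbs {2 4}: 2 divides 4, so 50/4 = 12 matches; any 0 in the
+	# list (the subdb with no matching dups) forces 0 matches.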
+ set expected 50
+ set last 1
+ foreach n $subdbs {
+ if { $n == 0 } {
+ set expected 0
+ break
+ }
+ if { $last == $n } {
+ continue
+ }
+
+ if { [expr $last % $n] == 0 || [expr $n % $last] == 0 } {
+ if { $n > $last } {
+ set last $n
+ set expected [expr 50 / $last]
+ }
+ } else {
+ set last [expr $n * $last / [gcd $n $last]]
+ set expected [expr 50 / $last]
+ }
+ }
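+	# (gcd above is assumed to be the test suite's greatest-common-
+	# divisor helper; a minimal Euclidean sketch would be:
+	#	proc gcd { a b } {
+	#		while { $b != 0 } {
+	#			set t $b
+	#			set b [expr $a % $b]
+	#			set a $t
+	#		}
+	#		return $a
+	#	})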
+
+ error_check_good number_of_dups:$subdbs $ndups $expected
+
+ #
+ # If we get here, we have the number expected, now loop
+ # through each and see if it is what we expected.
+ #
+ for { set i 0 } { $i < $ndups } { incr i } {
+ set pair [lindex $join_res $i]
+ set k [lindex $pair 0]
+ foreach j $subdbs {
+ error_check_bad valid_dup:$j:$subdbs $j 0
+ set kval [string trimleft $k 0]
+ if { [string length $kval] == 0 } {
+ set kval 0
+ }
+ error_check_good \
+ valid_dup:$j:$subdbs [expr $kval % $j] 0
+ }
+ }
+
+ error_check_good close_primary [$p close] 0
+ foreach i $dblist {
+ error_check_good close_index:$i [$i close] 0
+ }
+}
+
+proc n_to_subname { n } {
+ if { $n == 0 } {
+		return null.db
+ } else {
+		return sub$n.db
+ }
+}
diff --git a/storage/bdb/test/sec001.tcl b/storage/bdb/test/sec001.tcl
new file mode 100644
index 00000000000..eb4bcc24dd2
--- /dev/null
+++ b/storage/bdb/test/sec001.tcl
@@ -0,0 +1,205 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2001
+# Sleepycat Software. All rights reserved.
+#
+# $Id: sec001.tcl,v 11.7 2002/05/31 16:19:30 sue Exp $
+#
+# TEST sec001
+# TEST Test of security interface
+proc sec001 { } {
+ global errorInfo
+ global errorCode
+
+ source ./include.tcl
+
+ set testfile1 env1.db
+ set testfile2 $testdir/env2.db
+ set subdb1 sub1
+ set subdb2 sub2
+
+ puts "Sec001: Test of basic encryption interface."
+ env_cleanup $testdir
+
+ set passwd1 "passwd1"
+ set passwd1_bad "passwd1_bad"
+ set passwd2 "passwd2"
+ set key "key"
+ set data "data"
+
+ #
+ # This first group tests bad create scenarios and also
+ # tests attempting to use encryption after creating a
+ # non-encrypted env/db to begin with.
+ #
+ set nopass ""
+ puts "\tSec001.a.1: Create db with encryption."
+ set db [berkdb_open -create -encryptaes $passwd1 -btree $testfile2]
+ error_check_good db [is_valid_db $db] TRUE
+ error_check_good dbput [$db put $key $data] 0
+ error_check_good dbclose [$db close] 0
+
+ puts "\tSec001.a.2: Open db without encryption."
+ set stat [catch {berkdb_open_noerr $testfile2} ret]
+ error_check_good db:nocrypto $stat 1
+ error_check_good db:fail [is_substr $ret "no encryption key"] 1
+
+ set ret [berkdb dbremove -encryptaes $passwd1 $testfile2]
+
+ puts "\tSec001.b.1: Create db without encryption or checksum."
+ set db [berkdb_open -create -btree $testfile2]
+ error_check_good db [is_valid_db $db] TRUE
+ error_check_good dbput [$db put $key $data] 0
+ error_check_good dbclose [$db close] 0
+
+ puts "\tSec001.b.2: Open db with encryption."
+ set stat [catch {berkdb_open_noerr -encryptaes $passwd1 $testfile2} ret]
+ error_check_good db:nocrypto $stat 1
+ error_check_good db:fail [is_substr $ret "supplied encryption key"] 1
+
+ set ret [berkdb dbremove $testfile2]
+
+ puts "\tSec001.c.1: Create db with checksum."
+ set db [berkdb_open -create -chksum -btree $testfile2]
+ error_check_good db [is_valid_db $db] TRUE
+ error_check_good dbput [$db put $key $data] 0
+ error_check_good dbclose [$db close] 0
+
+ puts "\tSec001.c.2: Open db with encryption."
+ set stat [catch {berkdb_open_noerr -encryptaes $passwd1 $testfile2} ret]
+ error_check_good db:nocrypto $stat 1
+ error_check_good db:fail [is_substr $ret "supplied encryption key"] 1
+
+ set ret [berkdb dbremove $testfile2]
+
+ puts "\tSec001.d.1: Create subdb with encryption."
+ set db [berkdb_open -create -encryptaes $passwd1 -btree \
+ $testfile2 $subdb1]
+ error_check_good subdb [is_valid_db $db] TRUE
+ error_check_good dbput [$db put $key $data] 0
+ error_check_good dbclose [$db close] 0
+
+ puts "\tSec001.d.2: Create 2nd subdb without encryption."
+ set stat [catch {berkdb_open_noerr -create -btree \
+ $testfile2 $subdb2} ret]
+ error_check_good subdb:nocrypto $stat 1
+ error_check_good subdb:fail [is_substr $ret "no encryption key"] 1
+
+ set ret [berkdb dbremove -encryptaes $passwd1 $testfile2]
+
+ puts "\tSec001.e.1: Create subdb without encryption or checksum."
+ set db [berkdb_open -create -btree $testfile2 $subdb1]
+ error_check_good db [is_valid_db $db] TRUE
+ error_check_good dbput [$db put $key $data] 0
+ error_check_good dbclose [$db close] 0
+
+ puts "\tSec001.e.2: Create 2nd subdb with encryption."
+ set stat [catch {berkdb_open_noerr -create -btree -encryptaes $passwd1 \
+ $testfile2 $subdb2} ret]
+ error_check_good subdb:nocrypto $stat 1
+ error_check_good subdb:fail [is_substr $ret "supplied encryption key"] 1
+
+ env_cleanup $testdir
+
+ puts "\tSec001.f.1: Open env with encryption, empty passwd."
+ set stat [catch {berkdb_env_noerr -create -home $testdir \
+ -encryptaes $nopass} ret]
+ error_check_good env:nopass $stat 1
+ error_check_good env:fail [is_substr $ret "Empty password"] 1
+
+ puts "\tSec001.f.2: Create without encryption algorithm (DB_ENCRYPT_ANY)."
+ set stat [catch {berkdb_env_noerr -create -home $testdir \
+ -encryptany $passwd1} ret]
+ error_check_good env:any $stat 1
+ error_check_good env:fail [is_substr $ret "algorithm not supplied"] 1
+
+ puts "\tSec001.f.3: Create without encryption."
+ set env [berkdb_env -create -home $testdir]
+ error_check_good env [is_valid_env $env] TRUE
+
+ puts "\tSec001.f.4: Open again with encryption."
+ set stat [catch {berkdb_env_noerr -home $testdir \
+ -encryptaes $passwd1} ret]
+ error_check_good env:unencrypted $stat 1
+ error_check_good env:fail [is_substr $ret \
+ "Joining non-encrypted environment"] 1
+
+ error_check_good envclose [$env close] 0
+
+ env_cleanup $testdir
+
+ #
+ # This second group tests creating and opening a secure env.
+ # We test that others can join successfully with the correct
+ # password, and that those with bad or missing passwords cannot.
+ # Also test that we cannot use the db->set_encrypt method when
+ # we've already got a secure dbenv.
+ #
+ puts "\tSec001.g.1: Open with encryption."
+ set env [berkdb_env_noerr -create -home $testdir -encryptaes $passwd1]
+ error_check_good env [is_valid_env $env] TRUE
+
+ puts "\tSec001.g.2: Open again with encryption - same passwd."
+ set env1 [berkdb_env -home $testdir -encryptaes $passwd1]
+ error_check_good env [is_valid_env $env1] TRUE
+ error_check_good envclose [$env1 close] 0
+
+ puts "\tSec001.g.3: Open again with any encryption (DB_ENCRYPT_ANY)."
+ set env1 [berkdb_env -home $testdir -encryptany $passwd1]
+ error_check_good env [is_valid_env $env1] TRUE
+ error_check_good envclose [$env1 close] 0
+
+ puts "\tSec001.g.4: Open with encryption - different length passwd."
+ set stat [catch {berkdb_env_noerr -home $testdir \
+ -encryptaes $passwd1_bad} ret]
+ error_check_good env:$passwd1_bad $stat 1
+ error_check_good env:fail [is_substr $ret "Invalid password"] 1
+
+ puts "\tSec001.g.5: Open with encryption - different passwd."
+ set stat [catch {berkdb_env_noerr -home $testdir \
+ -encryptaes $passwd2} ret]
+ error_check_good env:$passwd2 $stat 1
+ error_check_good env:fail [is_substr $ret "Invalid password"] 1
+
+ puts "\tSec001.g.6: Open env without encryption."
+ set stat [catch {berkdb_env_noerr -home $testdir} ret]
+ error_check_good env:noencrypt $stat 1
+ error_check_good env:fail [is_substr $ret "Encrypted environment"] 1
+
+ puts "\tSec001.g.7: Open database with encryption in env."
+ set stat [catch {berkdb_open_noerr -env $env -btree -create \
+ -encryptaes $passwd2 $testfile1} ret]
+ error_check_good db:$passwd2 $stat 1
+ error_check_good env:fail [is_substr $ret "method not permitted"] 1
+
+ puts "\tSec001.g.8: Close creating env."
+ error_check_good envclose [$env close] 0
+
+ #
+ # This third group tests opening the env after the original env
+ # handle is closed, just to make sure we can reopen it in
+ # the right fashion even if no handles are currently open.
+ #
+ puts "\tSec001.h.1: Reopen without encryption."
+ set stat [catch {berkdb_env_noerr -home $testdir} ret]
+ error_check_good env:noencrypt $stat 1
+ error_check_good env:fail [is_substr $ret "Encrypted environment"] 1
+
+ puts "\tSec001.h.2: Reopen with bad passwd."
+ set stat [catch {berkdb_env_noerr -home $testdir -encryptaes \
+ $passwd1_bad} ret]
+ error_check_good env:$passwd1_bad $stat 1
+ error_check_good env:fail [is_substr $ret "Invalid password"] 1
+
+ puts "\tSec001.h.3: Reopen with encryption."
+ set env [berkdb_env -create -home $testdir -encryptaes $passwd1]
+ error_check_good env [is_valid_env $env] TRUE
+
+ puts "\tSec001.h.4: 2nd Reopen with encryption."
+ set env1 [berkdb_env -home $testdir -encryptaes $passwd1]
+ error_check_good env [is_valid_env $env1] TRUE
+
+ error_check_good envclose [$env1 close] 0
+ error_check_good envclose [$env close] 0
+
+ puts "\tSec001 complete."
+}
diff --git a/storage/bdb/test/sec002.tcl b/storage/bdb/test/sec002.tcl
new file mode 100644
index 00000000000..d790162f1d7
--- /dev/null
+++ b/storage/bdb/test/sec002.tcl
@@ -0,0 +1,143 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2001
+# Sleepycat Software. All rights reserved.
+#
+# $Id: sec002.tcl,v 11.3 2002/04/24 19:04:59 bostic Exp $
+#
+# TEST sec002
+# TEST Test of security interface and catching errors in the
+# TEST face of attackers overwriting parts of existing files.
+proc sec002 { } {
+ global errorInfo
+ global errorCode
+
+ source ./include.tcl
+
+ set testfile1 $testdir/sec002-1.db
+ set testfile2 $testdir/sec002-2.db
+ set testfile3 $testdir/sec002-3.db
+ set testfile4 $testdir/sec002-4.db
+
+ puts "Sec002: Test of encryption interface and corrupted-file detection."
+ env_cleanup $testdir
+
+ set passwd1 "passwd1"
+ set passwd2 "passwd2"
+ set key "key"
+ set data "data"
+ set pagesize 1024
+
+ #
+ # Set up 4 databases: two encrypted with different passwords,
+ # one unencrypted but with checksumming turned on, and one
+ # unencrypted with no checksumming.  Place the exact same data
+ # in each one.
+ #
+ puts "\tSec002.a: Setup databases"
+ set db_cmd "-create -pagesize $pagesize -btree "
+ set db [eval {berkdb_open} -encryptaes $passwd1 $db_cmd $testfile1]
+ error_check_good db [is_valid_db $db] TRUE
+ error_check_good dbput [$db put $key $data] 0
+ error_check_good dbclose [$db close] 0
+
+ set db [eval {berkdb_open} -encryptaes $passwd2 $db_cmd $testfile2]
+ error_check_good db [is_valid_db $db] TRUE
+ error_check_good dbput [$db put $key $data] 0
+ error_check_good dbclose [$db close] 0
+
+ set db [eval {berkdb_open} -chksum $db_cmd $testfile3]
+ error_check_good db [is_valid_db $db] TRUE
+ error_check_good dbput [$db put $key $data] 0
+ error_check_good dbclose [$db close] 0
+
+ set db [eval {berkdb_open} $db_cmd $testfile4]
+ error_check_good db [is_valid_db $db] TRUE
+ error_check_good dbput [$db put $key $data] 0
+ error_check_good dbclose [$db close] 0
+
+ #
+ # First just touch some bits in the file.  We know that in btree
+ # meta pages, bytes 92-459 are unused.  Scribble on them in the
+ # first encrypted file and in both unencrypted files.  We should
+ # get a checksum error for the encrypted and checksummed files,
+ # and no error for the normal file.
+ #
+ set fidlist {}
+ set fid [open $testfile1 r+]
+ lappend fidlist $fid
+ set fid [open $testfile3 r+]
+ lappend fidlist $fid
+ set fid [open $testfile4 r+]
+ lappend fidlist $fid
+
+ puts "\tSec002.b: Overwrite unused space in meta-page"
+ foreach f $fidlist {
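+ # Read the byte at offset 100 (inside the unused meta-page
+ # range noted above), flip all of its bits, and write it
+ # back in place.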
+ fconfigure $f -translation binary
+ seek $f 100 start
+ set byte [read $f 1]
+ binary scan $byte c val
+ set newval [expr ~$val]
+ set newbyte [binary format c $newval]
+ seek $f 100 start
+ puts -nonewline $f $newbyte
+ close $f
+ }
+ puts "\tSec002.c: Reopen modified databases"
+ set stat [catch {berkdb_open_noerr -encryptaes $passwd1 $testfile1} ret]
+ error_check_good db:$testfile1 $stat 1
+ error_check_good db:$testfile1:fail \
+ [is_substr $ret "metadata page checksum error"] 1
+
+ set stat [catch {berkdb_open_noerr -chksum $testfile3} ret]
+ error_check_good db:$testfile3 $stat 1
+ error_check_good db:$testfile3:fail \
+ [is_substr $ret "metadata page checksum error"] 1
+
+ set stat [catch {berkdb_open_noerr $testfile4} db]
+ error_check_good db:$testfile4 $stat 0
+ error_check_good dbclose [$db close] 0
+
+ puts "\tSec002.d: Replace root page in encrypted w/ encrypted"
+ set fid1 [open $testfile1 r+]
+ set fid2 [open $testfile2 r+]
+ seek $fid1 $pagesize start
+ seek $fid2 $pagesize start
+ set root1 [read $fid1 $pagesize]
+ close $fid1
+ puts -nonewline $fid2 $root1
+ close $fid2
+
+ set db [berkdb_open_noerr -encryptaes $passwd2 $testfile2]
+ error_check_good db [is_valid_db $db] TRUE
+ set stat [catch {$db get $key} ret]
+ error_check_good dbget $stat 1
+ error_check_good db:$testfile2:fail \
+ [is_substr $ret "checksum error: catastrophic recovery required"] 1
+ set stat [catch {$db close} ret]
+ error_check_good dbclose $stat 1
+ error_check_good db:$testfile2:fail [is_substr $ret "DB_RUNRECOVERY"] 1
+
+ puts "\tSec002.e: Replace root page in encrypted w/ unencrypted"
+ set fid2 [open $testfile2 r+]
+ set fid4 [open $testfile4 r+]
+ seek $fid2 $pagesize start
+ seek $fid4 $pagesize start
+ set root4 [read $fid4 $pagesize]
+ close $fid4
+ puts -nonewline $fid2 $root4
+ close $fid2
+
+ set db [berkdb_open_noerr -encryptaes $passwd2 $testfile2]
+ error_check_good db [is_valid_db $db] TRUE
+ set stat [catch {$db get $key} ret]
+ error_check_good dbget $stat 1
+ error_check_good db:$testfile2:fail \
+ [is_substr $ret "checksum error: catastrophic recovery required"] 1
+ set stat [catch {$db close} ret]
+ error_check_good dbclose $stat 1
+ error_check_good db:$testfile2:fail [is_substr $ret "DB_RUNRECOVERY"] 1
+
+ cleanup $testdir NULL 1
+ puts "\tSec002 complete."
+}
diff --git a/storage/bdb/test/shelltest.tcl b/storage/bdb/test/shelltest.tcl
new file mode 100644
index 00000000000..6190bac1f8d
--- /dev/null
+++ b/storage/bdb/test/shelltest.tcl
@@ -0,0 +1,88 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: shelltest.tcl,v 1.20 2002/04/19 15:42:20 bostic Exp $
+#
+# TEST scr###
+# TEST The scr### directories are shell scripts that test a variety of
+# TEST things, including things about the distribution itself. These
+# TEST tests won't run on most systems, so don't even try to run them.
+#
+# shelltest.tcl:
+# Code to run shell script tests, to incorporate Java, C++,
+# example compilation, etc. test scripts into the Tcl framework.
+proc shelltest { { run_one 0 }} {
+ source ./include.tcl
+ global shelltest_list
+
+ set SH /bin/sh
+ if { [file executable $SH] != 1 } {
+ puts "Shell tests require valid shell /bin/sh: not found."
+ puts "Skipping shell tests."
+ return 0
+ }
+
+ if { $run_one == 0 } {
+ puts "Running shell script tests..."
+
+ foreach testpair $shelltest_list {
+ set dir [lindex $testpair 0]
+ set test [lindex $testpair 1]
+
+ env_cleanup $testdir
+ shelltest_copy $test_path/$dir $testdir
+ shelltest_run $SH $dir $test $testdir
+ }
+ } else {
+ set run_one [expr $run_one - 1];
+ set dir [lindex [lindex $shelltest_list $run_one] 0]
+ set test [lindex [lindex $shelltest_list $run_one] 1]
+
+ env_cleanup $testdir
+ shelltest_copy $test_path/$dir $testdir
+ shelltest_run $SH $dir $test $testdir
+ }
+}
+
+proc shelltest_copy { fromdir todir } {
+ set globall [glob $fromdir/*]
+
+ foreach f $globall {
+ file copy $f $todir/
+ }
+}
+
+proc shelltest_run { sh srcdir test testdir } {
+ puts "Running shell script $srcdir ($test)..."
+
+ set ret [catch {exec $sh -c "cd $testdir && sh $test" >&@ stdout} res]
+
+ if { $ret != 0 } {
+ puts "FAIL: shell test $srcdir/$test exited abnormally"
+ }
+}
+
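+# Each scrNNN proc runs one shell test: entry NNN (1-based) of the
+# global shelltest_list.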
+proc scr001 {} { shelltest 1 }
+proc scr002 {} { shelltest 2 }
+proc scr003 {} { shelltest 3 }
+proc scr004 {} { shelltest 4 }
+proc scr005 {} { shelltest 5 }
+proc scr006 {} { shelltest 6 }
+proc scr007 {} { shelltest 7 }
+proc scr008 {} { shelltest 8 }
+proc scr009 {} { shelltest 9 }
+proc scr010 {} { shelltest 10 }
+proc scr011 {} { shelltest 11 }
+proc scr012 {} { shelltest 12 }
+proc scr013 {} { shelltest 13 }
+proc scr014 {} { shelltest 14 }
+proc scr015 {} { shelltest 15 }
+proc scr016 {} { shelltest 16 }
+proc scr017 {} { shelltest 17 }
+proc scr018 {} { shelltest 18 }
+proc scr019 {} { shelltest 19 }
+proc scr020 {} { shelltest 20 }
+proc scr021 {} { shelltest 21 }
+proc scr022 {} { shelltest 22 }
diff --git a/storage/bdb/test/si001.tcl b/storage/bdb/test/si001.tcl
new file mode 100644
index 00000000000..1a2247c5f8b
--- /dev/null
+++ b/storage/bdb/test/si001.tcl
@@ -0,0 +1,116 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: si001.tcl,v 1.7 2002/04/29 17:12:02 sandstro Exp $
+#
+# TEST sindex001
+# TEST Basic secondary index put/delete test
+proc sindex001 { methods {nentries 200} {tnum 1} args } {
+ source ./include.tcl
+ global dict nsecondaries
+
+ # Primary method/args.
+ set pmethod [lindex $methods 0]
+ set pargs [convert_args $pmethod $args]
+ set pomethod [convert_method $pmethod]
+
+ # Method/args for all the secondaries. If only one method
+ # was specified, assume the same method and a standard N
+ # secondaries.
+ set methods [lrange $methods 1 end]
+ if { [llength $methods] == 0 } {
+ for { set i 0 } { $i < $nsecondaries } { incr i } {
+ lappend methods $pmethod
+ }
+ }
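+ # For example, a bare {btree} becomes {btree btree} when
+ # nsecondaries is 2 (the framework default).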
+
+ set argses [convert_argses $methods $args]
+ set omethods [convert_methods $methods]
+
+ puts "Sindex00$tnum ($pmethod/$methods) $nentries equal key/data pairs"
+ env_cleanup $testdir
+
+ set pname "primary00$tnum.db"
+ set snamebase "secondary00$tnum"
+
+ # Open an environment
+ # XXX if one is not supplied!
+ set env [berkdb_env -create -home $testdir]
+ error_check_good env_open [is_valid_env $env] TRUE
+
+ # Open the primary.
+ set pdb [eval {berkdb_open -create -env} $env $pomethod $pargs $pname]
+ error_check_good primary_open [is_valid_db $pdb] TRUE
+
+ # Open and associate the secondaries
+ set sdbs {}
+ for { set i 0 } { $i < [llength $omethods] } { incr i } {
+ set sdb [eval {berkdb_open -create -env} $env \
+ [lindex $omethods $i] [lindex $argses $i] $snamebase.$i.db]
+ error_check_good second_open($i) [is_valid_db $sdb] TRUE
+
+ error_check_good db_associate($i) \
+ [$pdb associate [callback_n $i] $sdb] 0
+ lappend sdbs $sdb
+ }
+
+ puts "\tSindex00$tnum.a: Put loop"
+ set did [open $dict]
+ for { set n 0 } { [gets $did str] != -1 && $n < $nentries } { incr n } {
+ if { [is_record_based $pmethod] == 1 } {
+ set key [expr $n + 1]
+ set datum $str
+ } else {
+ set key $str
+ gets $did datum
+ }
+ set keys($n) $key
+ set data($n) [pad_data $pmethod $datum]
+
+ set ret [eval {$pdb put} {$key [chop_data $pmethod $datum]}]
+ error_check_good put($n) $ret 0
+ }
+ close $did
+ check_secondaries $pdb $sdbs $nentries keys data "Sindex00$tnum.a"
+
+ puts "\tSindex00$tnum.b: Put/overwrite loop"
+ for { set n 0 } { $n < $nentries } { incr n } {
+ set newd $data($n).$keys($n)
+ set ret [eval {$pdb put} {$keys($n) [chop_data $pmethod $newd]}]
+ error_check_good put_overwrite($n) $ret 0
+ set data($n) [pad_data $pmethod $newd]
+ }
+ check_secondaries $pdb $sdbs $nentries keys data "Sindex00$tnum.b"
+
+ # Delete the second half of the entries through the primary.
+ # We do the second half so we can just pass keys(0 ... n/2)
+ # to check_secondaries.
+ set half [expr $nentries / 2]
+ puts "\tSindex00$tnum.c: Primary delete loop: deleting $half entries"
+ for { set n $half } { $n < $nentries } { incr n } {
+ set ret [$pdb del $keys($n)]
+ error_check_good pdel($n) $ret 0
+ }
+ check_secondaries $pdb $sdbs $half keys data "Sindex00$tnum.c"
+
+ # Delete half of what's left, through the first secondary.
+ set quar [expr $half / 2]
+ puts "\tSindex00$tnum.d: Secondary delete loop: deleting $quar entries"
+ set sdb [lindex $sdbs 0]
+ set callback [callback_n 0]
+ for { set n $quar } { $n < $half } { incr n } {
+ set skey [$callback $keys($n) [pad_data $pmethod $data($n)]]
+ set ret [$sdb del $skey]
+ error_check_good sdel($n) $ret 0
+ }
+ check_secondaries $pdb $sdbs $quar keys data "Sindex00$tnum.d"
+
+ puts "\tSindex00$tnum.e: Closing/disassociating primary first"
+ error_check_good primary_close [$pdb close] 0
+ foreach sdb $sdbs {
+ error_check_good secondary_close [$sdb close] 0
+ }
+ error_check_good env_close [$env close] 0
+}
diff --git a/storage/bdb/test/si002.tcl b/storage/bdb/test/si002.tcl
new file mode 100644
index 00000000000..46ba86e7560
--- /dev/null
+++ b/storage/bdb/test/si002.tcl
@@ -0,0 +1,167 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: si002.tcl,v 1.6 2002/04/29 17:12:02 sandstro Exp $
+#
+# TEST sindex002
+# TEST Basic cursor-based secondary index put/delete test
+proc sindex002 { methods {nentries 200} {tnum 2} args } {
+ source ./include.tcl
+ global dict nsecondaries
+
+ # Primary method/args.
+ set pmethod [lindex $methods 0]
+ set pargs [convert_args $pmethod $args]
+ set pomethod [convert_method $pmethod]
+
+ # Method/args for all the secondaries. If only one method
+ # was specified, assume the same method and a standard N
+ # secondaries.
+ set methods [lrange $methods 1 end]
+ if { [llength $methods] == 0 } {
+ for { set i 0 } { $i < $nsecondaries } { incr i } {
+ lappend methods $pmethod
+ }
+ }
+
+ set argses [convert_argses $methods $args]
+ set omethods [convert_methods $methods]
+
+ puts "Sindex00$tnum ($pmethod/$methods) $nentries equal key/data pairs"
+ env_cleanup $testdir
+
+ set pname "primary00$tnum.db"
+ set snamebase "secondary00$tnum"
+
+ # Open an environment
+ # XXX if one is not supplied!
+ set env [berkdb_env -create -home $testdir]
+ error_check_good env_open [is_valid_env $env] TRUE
+
+ # Open the primary.
+ set pdb [eval {berkdb_open -create -env} $env $pomethod $pargs $pname]
+ error_check_good primary_open [is_valid_db $pdb] TRUE
+
+ # Open and associate the secondaries
+ set sdbs {}
+ for { set i 0 } { $i < [llength $omethods] } { incr i } {
+ set sdb [eval {berkdb_open -create -env} $env \
+ [lindex $omethods $i] [lindex $argses $i] $snamebase.$i.db]
+ error_check_good second_open($i) [is_valid_db $sdb] TRUE
+
+ error_check_good db_associate($i) \
+ [$pdb associate [callback_n $i] $sdb] 0
+ lappend sdbs $sdb
+ }
+
+ puts "\tSindex00$tnum.a: Cursor put (-keyfirst/-keylast) loop"
+ set did [open $dict]
+ set pdbc [$pdb cursor]
+ error_check_good pdb_cursor [is_valid_cursor $pdbc $pdb] TRUE
+ for { set n 0 } { [gets $did str] != -1 && $n < $nentries } { incr n } {
+ if { [is_record_based $pmethod] == 1 } {
+ set key [expr $n + 1]
+ set datum $str
+ } else {
+ set key $str
+ gets $did datum
+ }
+ set ns($key) $n
+ set keys($n) $key
+ set data($n) [pad_data $pmethod $datum]
+
+ if { $n % 2 == 0 } {
+ set pflag " -keyfirst "
+ } else {
+ set pflag " -keylast "
+ }
+
+ set ret [eval {$pdbc put} $pflag \
+ {$key [chop_data $pmethod $datum]}]
+ error_check_good put($n) $ret 0
+ }
+ close $did
+ error_check_good pdbc_close [$pdbc close] 0
+ check_secondaries $pdb $sdbs $nentries keys data "Sindex00$tnum.a"
+
+ puts "\tSindex00$tnum.b: Cursor put overwrite (-current) loop"
+ set pdbc [$pdb cursor]
+ error_check_good pdb_cursor [is_valid_cursor $pdbc $pdb] TRUE
+ for { set dbt [$pdbc get -first] } { [llength $dbt] > 0 } \
+ { set dbt [$pdbc get -next] } {
+ set key [lindex [lindex $dbt 0] 0]
+ set datum [lindex [lindex $dbt 0] 1]
+ set newd $datum.$key
+ set ret [eval {$pdbc put -current} [chop_data $pmethod $newd]]
+ error_check_good put_overwrite($key) $ret 0
+ set data($ns($key)) [pad_data $pmethod $newd]
+ }
+ error_check_good pdbc_close [$pdbc close] 0
+ check_secondaries $pdb $sdbs $nentries keys data "Sindex00$tnum.b"
+
+ puts "\tSindex00$tnum.c: Secondary c_pget/primary put overwrite loop"
+ # We walk the first secondary, then put-overwrite each primary key/data
+ # pair we find. This doubles as a DBC->c_pget test.
+ set sdb [lindex $sdbs 0]
+ set sdbc [$sdb cursor]
+ error_check_good sdb_cursor [is_valid_cursor $sdbc $sdb] TRUE
+ for { set dbt [$sdbc pget -first] } { [llength $dbt] > 0 } \
+ { set dbt [$sdbc pget -next] } {
+ set pkey [lindex [lindex $dbt 0] 1]
+ set pdatum [lindex [lindex $dbt 0] 2]
+
+ # Entries we have already extended will show up underneath us
+ # in unpredictable places.  Keep track of which pkeys we've
+ # extended, and don't extend them repeatedly.
+ if { [info exists pkeys_done($pkey)] == 1 } {
+ continue
+ } else {
+ set pkeys_done($pkey) 1
+ }
+
+ set newd $pdatum.[string range $pdatum 0 2]
+ set ret [eval {$pdb put} $pkey [chop_data $pmethod $newd]]
+ error_check_good pdb_put($pkey) $ret 0
+ set data($ns($pkey)) [pad_data $pmethod $newd]
+ }
+ error_check_good sdbc_close [$sdbc close] 0
+ check_secondaries $pdb $sdbs $nentries keys data "Sindex00$tnum.c"
+
+ # Delete the second half of the entries through the primary.
+ # We do the second half so we can just pass keys(0 ... n/2)
+ # to check_secondaries.
+ set half [expr $nentries / 2]
+ puts "\tSindex00$tnum.d:\
+ Primary cursor delete loop: deleting $half entries"
+ set pdbc [$pdb cursor]
+ error_check_good pdb_cursor [is_valid_cursor $pdbc $pdb] TRUE
+ set dbt [$pdbc get -first]
+ for { set i 0 } { [llength $dbt] > 0 && $i < $half } { incr i } {
+ error_check_good pdbc_del [$pdbc del] 0
+ set dbt [$pdbc get -next]
+ }
+ error_check_good pdbc_close [$pdbc close] 0
+ cursor_check_secondaries $pdb $sdbs $half "Sindex00$tnum.d"
+
+ # Delete half of what's left, through the first secondary.
+ set quar [expr $half / 2]
+ puts "\tSindex00$tnum.e:\
+ Secondary cursor delete loop: deleting $quar entries"
+ set sdb [lindex $sdbs 0]
+ set sdbc [$sdb cursor]
+ set dbt [$sdbc get -first]
+ for { set i 0 } { [llength $dbt] > 0 && $i < $quar } { incr i } {
+ error_check_good sdbc_del [$sdbc del] 0
+ set dbt [$sdbc get -next]
+ }
+ error_check_good sdbc_close [$sdbc close] 0
+ cursor_check_secondaries $pdb $sdbs $quar "Sindex00$tnum.e"
+
+ foreach sdb $sdbs {
+ error_check_good secondary_close [$sdb close] 0
+ }
+ error_check_good primary_close [$pdb close] 0
+ error_check_good env_close [$env close] 0
+}
diff --git a/storage/bdb/test/si003.tcl b/storage/bdb/test/si003.tcl
new file mode 100644
index 00000000000..1cc8c884e75
--- /dev/null
+++ b/storage/bdb/test/si003.tcl
@@ -0,0 +1,142 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: si003.tcl,v 1.6 2002/04/29 17:12:03 sandstro Exp $
+#
+# TEST sindex003
+# TEST sindex001 with secondaries created and closed mid-test
+# TEST Basic secondary index put/delete test with secondaries
+# TEST created mid-test.
+proc sindex003 { methods {nentries 200} {tnum 3} args } {
+ source ./include.tcl
+ global dict nsecondaries
+
+ # Primary method/args.
+ set pmethod [lindex $methods 0]
+ set pargs [convert_args $pmethod $args]
+ set pomethod [convert_method $pmethod]
+
+ # Method/args for all the secondaries. If only one method
+ # was specified, assume the same method and a standard N
+ # secondaries.
+ set methods [lrange $methods 1 end]
+ if { [llength $methods] == 0 } {
+ for { set i 0 } { $i < $nsecondaries } { incr i } {
+ lappend methods $pmethod
+ }
+ }
+
+ set argses [convert_argses $methods $args]
+ set omethods [convert_methods $methods]
+
+ puts "Sindex00$tnum ($pmethod/$methods) $nentries equal key/data pairs"
+ env_cleanup $testdir
+
+ set pname "primary00$tnum.db"
+ set snamebase "secondary00$tnum"
+
+ # Open an environment
+ # XXX if one is not supplied!
+ set env [eval {berkdb_env -create -home $testdir}]
+ error_check_good env_open [is_valid_env $env] TRUE
+
+ # Open the primary.
+ set pdb [eval {berkdb_open -create -env} $env $pomethod $pargs $pname]
+ error_check_good primary_open [is_valid_db $pdb] TRUE
+
+ puts -nonewline "\tSindex00$tnum.a: Put loop ... "
+ set did [open $dict]
+ for { set n 0 } { [gets $did str] != -1 && $n < $nentries } { incr n } {
+ if { [is_record_based $pmethod] == 1 } {
+ set key [expr $n + 1]
+ set datum $str
+ } else {
+ set key $str
+ gets $did datum
+ }
+ set keys($n) $key
+ set data($n) [pad_data $pmethod $datum]
+
+ set ret [eval {$pdb put} {$key [chop_data $pmethod $datum]}]
+ error_check_good put($n) $ret 0
+ }
+ close $did
+
+ # Open and associate the secondaries
+ set sdbs {}
+ puts "opening secondaries."
+ for { set i 0 } { $i < [llength $omethods] } { incr i } {
+ set sdb [eval {berkdb_open -create -env} $env \
+ [lindex $omethods $i] [lindex $argses $i] $snamebase.$i.db]
+ error_check_good second_open($i) [is_valid_db $sdb] TRUE
+
+ error_check_good db_associate($i) \
+ [$pdb associate -create [callback_n $i] $sdb] 0
+ lappend sdbs $sdb
+ }
+ check_secondaries $pdb $sdbs $nentries keys data "Sindex00$tnum.a"
+
+ puts -nonewline "\tSindex00$tnum.b: Put/overwrite loop ... "
+ for { set n 0 } { $n < $nentries } { incr n } {
+ set newd $data($n).$keys($n)
+ set ret [eval {$pdb put} {$keys($n) [chop_data $pmethod $newd]}]
+ error_check_good put_overwrite($n) $ret 0
+ set data($n) [pad_data $pmethod $newd]
+ }
+
+ # Close the secondaries again.
+ puts "closing secondaries."
+ for { set sdb [lindex $sdbs end] } { [string length $sdb] > 0 } \
+ { set sdb [lindex $sdbs end] } {
+ error_check_good second_close($sdb) [$sdb close] 0
+ set sdbs [lrange $sdbs 0 end-1]
+ check_secondaries \
+ $pdb $sdbs $nentries keys data "Sindex00$tnum.b"
+ }
+
+ # Delete the second half of the entries through the primary.
+ # We do the second half so we can just pass keys(0 ... n/2)
+ # to check_secondaries.
+ set half [expr $nentries / 2]
+ puts -nonewline \
+ "\tSindex00$tnum.c: Primary delete loop: deleting $half entries ..."
+ for { set n $half } { $n < $nentries } { incr n } {
+ set ret [$pdb del $keys($n)]
+ error_check_good pdel($n) $ret 0
+ }
+
+ # Open and associate the secondaries
+ set sdbs {}
+ puts "\n\t\topening secondaries."
+ for { set i 0 } { $i < [llength $omethods] } { incr i } {
+ set sdb [eval {berkdb_open -create -env} $env \
+ [lindex $omethods $i] [lindex $argses $i] \
+ $snamebase.r2.$i.db]
+ error_check_good second_open($i) [is_valid_db $sdb] TRUE
+
+ error_check_good db_associate($i) \
+ [$pdb associate -create [callback_n $i] $sdb] 0
+ lappend sdbs $sdb
+ }
+ check_secondaries $pdb $sdbs $half keys data "Sindex00$tnum.c"
+
+ # Delete half of what's left, through the first secondary.
+ set quar [expr $half / 2]
+ puts "\tSindex00$tnum.d: Secondary delete loop: deleting $quar entries"
+ set sdb [lindex $sdbs 0]
+ set callback [callback_n 0]
+ for { set n $quar } { $n < $half } { incr n } {
+ set skey [$callback $keys($n) [pad_data $pmethod $data($n)]]
+ set ret [$sdb del $skey]
+ error_check_good sdel($n) $ret 0
+ }
+ check_secondaries $pdb $sdbs $quar keys data "Sindex00$tnum.d"
+
+ foreach sdb $sdbs {
+ error_check_good secondary_close [$sdb close] 0
+ }
+ error_check_good primary_close [$pdb close] 0
+ error_check_good env_close [$env close] 0
+}
diff --git a/storage/bdb/test/si004.tcl b/storage/bdb/test/si004.tcl
new file mode 100644
index 00000000000..291100da6b3
--- /dev/null
+++ b/storage/bdb/test/si004.tcl
@@ -0,0 +1,194 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: si004.tcl,v 1.6 2002/04/29 17:12:03 sandstro Exp $
+#
+# TEST sindex004
+# TEST sindex002 with secondaries created and closed mid-test
+# TEST Basic cursor-based secondary index put/delete test, with
+# TEST secondaries created mid-test.
+proc sindex004 { methods {nentries 200} {tnum 4} args } {
+ source ./include.tcl
+ global dict nsecondaries
+
+ # Primary method/args.
+ set pmethod [lindex $methods 0]
+ set pargs [convert_args $pmethod $args]
+ set pomethod [convert_method $pmethod]
+
+ # Method/args for all the secondaries. If only one method
+ # was specified, assume the same method and a standard N
+ # secondaries.
+ set methods [lrange $methods 1 end]
+ if { [llength $methods] == 0 } {
+ for { set i 0 } { $i < $nsecondaries } { incr i } {
+ lappend methods $pmethod
+ }
+ }
+
+ set argses [convert_argses $methods $args]
+ set omethods [convert_methods $methods]
+
+ puts "Sindex00$tnum ($pmethod/$methods) $nentries equal key/data pairs"
+ env_cleanup $testdir
+
+ set pname "primary00$tnum.db"
+ set snamebase "secondary00$tnum"
+
+ # Open an environment
+ # XXX if one is not supplied!
+ set env [berkdb_env -create -home $testdir]
+ error_check_good env_open [is_valid_env $env] TRUE
+
+ # Open the primary.
+ set pdb [eval {berkdb_open -create -env} $env $pomethod $pargs $pname]
+ error_check_good primary_open [is_valid_db $pdb] TRUE
+
+ puts -nonewline \
+ "\tSindex00$tnum.a: Cursor put (-keyfirst/-keylast) loop ... "
+ set did [open $dict]
+ set pdbc [$pdb cursor]
+ error_check_good pdb_cursor [is_valid_cursor $pdbc $pdb] TRUE
+ for { set n 0 } { [gets $did str] != -1 && $n < $nentries } { incr n } {
+ if { [is_record_based $pmethod] == 1 } {
+ set key [expr $n + 1]
+ set datum $str
+ } else {
+ set key $str
+ gets $did datum
+ }
+ set ns($key) $n
+ set keys($n) $key
+ set data($n) [pad_data $pmethod $datum]
+
+ if { $n % 2 == 0 } {
+ set pflag " -keyfirst "
+ } else {
+ set pflag " -keylast "
+ }
+
+ set ret [eval {$pdbc put} $pflag \
+ {$key [chop_data $pmethod $datum]}]
+ error_check_good put($n) $ret 0
+ }
+ close $did
+ error_check_good pdbc_close [$pdbc close] 0
+
+ # Open and associate the secondaries
+ set sdbs {}
+ puts "\n\t\topening secondaries."
+ for { set i 0 } { $i < [llength $omethods] } { incr i } {
+ set sdb [eval {berkdb_open -create -env} $env \
+ [lindex $omethods $i] [lindex $argses $i] $snamebase.$i.db]
+ error_check_good second_open($i) [is_valid_db $sdb] TRUE
+
+ error_check_good db_associate($i) \
+ [$pdb associate -create [callback_n $i] $sdb] 0
+ lappend sdbs $sdb
+ }
+ check_secondaries $pdb $sdbs $nentries keys data "Sindex00$tnum.a"
+
+ puts "\tSindex00$tnum.b: Cursor put overwrite (-current) loop"
+ set pdbc [$pdb cursor]
+ error_check_good pdb_cursor [is_valid_cursor $pdbc $pdb] TRUE
+ for { set dbt [$pdbc get -first] } { [llength $dbt] > 0 } \
+ { set dbt [$pdbc get -next] } {
+ set key [lindex [lindex $dbt 0] 0]
+ set datum [lindex [lindex $dbt 0] 1]
+ set newd $datum.$key
+ set ret [eval {$pdbc put -current} [chop_data $pmethod $newd]]
+ error_check_good put_overwrite($key) $ret 0
+ set data($ns($key)) [pad_data $pmethod $newd]
+ }
+ error_check_good pdbc_close [$pdbc close] 0
+ check_secondaries $pdb $sdbs $nentries keys data "Sindex00$tnum.b"
+
+ puts -nonewline "\tSindex00$tnum.c:\
+ Secondary c_pget/primary put overwrite loop ... "
+ # We walk the first secondary, then put-overwrite each primary key/data
+ # pair we find. This doubles as a DBC->c_pget test.
+ set sdb [lindex $sdbs 0]
+ set sdbc [$sdb cursor]
+ error_check_good sdb_cursor [is_valid_cursor $sdbc $sdb] TRUE
+ for { set dbt [$sdbc pget -first] } { [llength $dbt] > 0 } \
+ { set dbt [$sdbc pget -next] } {
+ set pkey [lindex [lindex $dbt 0] 1]
+ set pdatum [lindex [lindex $dbt 0] 2]
+
+ # Entries we have already extended will show up underneath us
+ # in unpredictable places.  Keep track of which pkeys we've
+ # extended, and don't extend them repeatedly.
+ if { [info exists pkeys_done($pkey)] == 1 } {
+ continue
+ } else {
+ set pkeys_done($pkey) 1
+ }
+
+ set newd $pdatum.[string range $pdatum 0 2]
+ set ret [eval {$pdb put} $pkey [chop_data $pmethod $newd]]
+ error_check_good pdb_put($pkey) $ret 0
+ set data($ns($pkey)) [pad_data $pmethod $newd]
+ }
+ error_check_good sdbc_close [$sdbc close] 0
+
+ # Close the secondaries again.
+ puts "\n\t\tclosing secondaries."
+ for { set sdb [lindex $sdbs end] } { [string length $sdb] > 0 } \
+ { set sdb [lindex $sdbs end] } {
+ error_check_good second_close($sdb) [$sdb close] 0
+ set sdbs [lrange $sdbs 0 end-1]
+ check_secondaries \
+ $pdb $sdbs $nentries keys data "Sindex00$tnum.c"
+ }
+
+ # Delete the second half of the entries through the primary.
+ # We do the second half so we can just pass keys(0 ... n/2)
+ # to check_secondaries.
+ set half [expr $nentries / 2]
+ puts -nonewline "\tSindex00$tnum.d:\
+ Primary cursor delete loop: deleting $half entries ... "
+ set pdbc [$pdb cursor]
+ error_check_good pdb_cursor [is_valid_cursor $pdbc $pdb] TRUE
+ set dbt [$pdbc get -first]
+ for { set i 0 } { [llength $dbt] > 0 && $i < $half } { incr i } {
+ error_check_good pdbc_del [$pdbc del] 0
+ set dbt [$pdbc get -next]
+ }
+ error_check_good pdbc_close [$pdbc close] 0
+
+ set sdbs {}
+ puts "\n\t\topening secondaries."
+ for { set i 0 } { $i < [llength $omethods] } { incr i } {
+ set sdb [eval {berkdb_open -create -env} $env \
+ [lindex $omethods $i] [lindex $argses $i] \
+ $snamebase.r2.$i.db]
+ error_check_good second_open($i) [is_valid_db $sdb] TRUE
+
+ error_check_good db_associate($i) \
+ [$pdb associate -create [callback_n $i] $sdb] 0
+ lappend sdbs $sdb
+ }
+ cursor_check_secondaries $pdb $sdbs $half "Sindex00$tnum.d"
+
+ # Delete half of what's left, through the first secondary.
+ set quar [expr $half / 2]
+ puts "\tSindex00$tnum.e:\
+ Secondary cursor delete loop: deleting $quar entries"
+ set sdb [lindex $sdbs 0]
+ set sdbc [$sdb cursor]
+ set dbt [$sdbc get -first]
+ for { set i 0 } { [llength $dbt] > 0 && $i < $quar } { incr i } {
+ error_check_good sdbc_del [$sdbc del] 0
+ set dbt [$sdbc get -next]
+ }
+ error_check_good sdbc_close [$sdbc close] 0
+ cursor_check_secondaries $pdb $sdbs $quar "Sindex00$tnum.e"
+
+ foreach sdb $sdbs {
+ error_check_good secondary_close [$sdb close] 0
+ }
+ error_check_good primary_close [$pdb close] 0
+ error_check_good env_close [$env close] 0
+}
diff --git a/storage/bdb/test/si005.tcl b/storage/bdb/test/si005.tcl
new file mode 100644
index 00000000000..e5ed49175c9
--- /dev/null
+++ b/storage/bdb/test/si005.tcl
@@ -0,0 +1,179 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: si005.tcl,v 11.4 2002/04/29 17:12:03 sandstro Exp $
+#
+# TEST sindex005
+# TEST Secondary index and join test.
+proc sindex005 { methods {nitems 1000} {tnum 5} args } {
+ source ./include.tcl
+
+ # Primary method/args.
+ set pmethod [lindex $methods 0]
+ set pargs [convert_args $pmethod $args]
+ set pomethod [convert_method $pmethod]
+
+ # Sindex005 does a join within a simulated database schema
+ # in which the primary index maps a record ID to a ZIP code and
+ # name in the form "XXXXXname", and there are two secondaries:
+ # one mapping ZIP to ID, the other mapping name to ID.
+ # The primary may be of any database type; the two secondaries
+ # must be either btree or hash.
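+ #
+ # For example (illustrative values only): record 7 might map to
+ # "23000Smith"; the ZIP secondary then maps "23000" -> 7 and the
+ # name secondary maps "Smith" -> 7.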
+
+ # Method/args for all the secondaries. If only one method
+ # was specified, assume the same method for the two secondaries.
+ set methods [lrange $methods 1 end]
+ if { [llength $methods] == 0 } {
+ for { set i 0 } { $i < 2 } { incr i } {
+ lappend methods $pmethod
+ }
+ } elseif { [llength $methods] != 2 } {
+ puts "FAIL: Sindex00$tnum requires exactly two secondaries."
+ return
+ }
+
+ set argses [convert_argses $methods $args]
+ set omethods [convert_methods $methods]
+
+ puts "Sindex00$tnum ($pmethod/$methods) Secondary index join test."
+ env_cleanup $testdir
+
+ set pname "sindex00$tnum-primary.db"
+ set zipname "sindex00$tnum-zip.db"
+ set namename "sindex00$tnum-name.db"
+
+ # Open an environment
+ # XXX if one is not supplied!
+ set env [berkdb_env -create -home $testdir]
+ error_check_good env_open [is_valid_env $env] TRUE
+
+ # Open the databases.
+ set pdb [eval {berkdb_open -create -env} $env $pomethod $pargs $pname]
+ error_check_good primary_open [is_valid_db $pdb] TRUE
+
+ set zipdb [eval {berkdb_open -create -dup -env} $env \
+ [lindex $omethods 0] [lindex $argses 0] $zipname]
+ error_check_good zip_open [is_valid_db $zipdb] TRUE
+ error_check_good zip_associate [$pdb associate s5_getzip $zipdb] 0
+
+ set namedb [eval {berkdb_open -create -dup -env} $env \
+ [lindex $omethods 1] [lindex $argses 1] $namename]
+ error_check_good name_open [is_valid_db $namedb] TRUE
+ error_check_good name_associate [$pdb associate s5_getname $namedb] 0
+
+ puts "\tSindex00$tnum.a: Populate database with $nitems \"names\""
+ s5_populate $pdb $nitems
+ puts "\tSindex00$tnum.b: Perform a join on each \"name\" and \"ZIP\""
+ s5_jointest $pdb $zipdb $namedb
+
+ error_check_good name_close [$namedb close] 0
+ error_check_good zip_close [$zipdb close] 0
+ error_check_good primary_close [$pdb close] 0
+ error_check_good env_close [$env close] 0
+}
+
+proc s5_jointest { pdb zipdb namedb } {
+ set pdbc [$pdb cursor]
+ error_check_good pdb_cursor [is_valid_cursor $pdbc $pdb] TRUE
+ for { set dbt [$pdbc get -first] } { [llength $dbt] > 0 } \
+ { set dbt [$pdbc get -next] } {
+ set item [lindex [lindex $dbt 0] 1]
+ set retlist [s5_dojoin $item $pdb $zipdb $namedb]
+ }
+}
+
+proc s5_dojoin { item pdb zipdb namedb } {
+ set name [s5_getname "" $item]
+ set zip [s5_getzip "" $item]
+
+ set zipc [$zipdb cursor]
+ error_check_good zipc($item) [is_valid_cursor $zipc $zipdb] TRUE
+
+ set namec [$namedb cursor]
+ error_check_good namec($item) [is_valid_cursor $namec $namedb] TRUE
+
+ set pc [$pdb cursor]
+ error_check_good pc($item) [is_valid_cursor $pc $pdb] TRUE
+
+ set ret [$zipc get -set $zip]
+ set zd [lindex [lindex $ret 0] 1]
+ error_check_good zipset($zip) [s5_getzip "" $zd] $zip
+
+ set ret [$namec get -set $name]
+ set nd [lindex [lindex $ret 0] 1]
+ error_check_good nameset($name) [s5_getname "" $nd] $name
+
+ set joinc [$pdb join $zipc $namec]
+
+ set anyreturned 0
+ for { set dbt [$joinc get] } { [llength $dbt] > 0 } \
+ { set dbt [$joinc get] } {
+ set ritem [lindex [lindex $dbt 0] 1]
+ error_check_good returned_item($item) $ritem $item
+ incr anyreturned
+ }
+ error_check_bad anyreturned($item) $anyreturned 0
+
+ error_check_good joinc_close($item) [$joinc close] 0
+ error_check_good pc_close($item) [$pc close] 0
+ error_check_good namec_close($item) [$namec close] 0
+ error_check_good zipc_close($item) [$zipc close] 0
+}
+
+proc s5_populate { db nitems } {
+ global dict
+
+ set did [open $dict]
+ for { set i 1 } { $i <= $nitems } { incr i } {
+ gets $did word
+ if { [string length $word] < 3 } {
+ gets $did word
+ if { [string length $word] < 3 } {
+ puts "FAIL:\
+ unexpected pair of words < 3 chars long"
+ }
+ }
+ set datalist [s5_name2zips $word]
+ foreach data $datalist {
+ error_check_good db_put($data) [$db put $i $data$word] 0
+ }
+ }
+ close $did
+}
+
+proc s5_getzip { key data } { return [string range $data 0 4] }
+proc s5_getname { key data } { return [string range $data 5 end] }
+
+# The dirty secret of this test is that the ZIP code is a function of the
+# name, so we can generate a database and then verify join results easily
+# without having to consult actual data.
+#
+# Any word passed into this function will generate from 1 to 26 ZIP
+# entries, out of the set {00000, 01000 ... 99000}. The number of entries
+# is just the position in the alphabet of the word's first letter; the
+# entries are then hashed to the set {00, 01 ... 99} N different ways.
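+#
+# For example (a sketch, not executed by the test): a name starting
+# with "c" sits at alphabet position 3, so s5_name2zips "cat" returns
+# a 3-element list of entries drawn from {00000, 01000 ... 99000}.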
+proc s5_name2zips { name } {
+ global alphabet
+
+ set n [expr [string first [string index $name 0] $alphabet] + 1]
+ error_check_bad starts_with_abc($name) $n -1
+
+ set ret {}
+ for { set i 0 } { $i < $n } { incr i } {
+ set b 0
+ for { set j 1 } { $j < [string length $name] } \
+ { incr j } {
+ set b [s5_nhash $name $i $j $b]
+ }
+ lappend ret [format %05u [expr $b % 100]000]
+ }
+ return $ret
+}
+proc s5_nhash { name i j b } {
+ global alphabet
+
+ set c [string first [string index $name $j] $alphabet]
+ return [expr (($b * 991) + ($i * 997) + $c) % 10000000]
+}
diff --git a/storage/bdb/test/si006.tcl b/storage/bdb/test/si006.tcl
new file mode 100644
index 00000000000..3a1dbb3c4f8
--- /dev/null
+++ b/storage/bdb/test/si006.tcl
@@ -0,0 +1,129 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: si006.tcl,v 1.2 2002/05/15 17:18:03 sandstro Exp $
+#
+# TEST sindex006
+# TEST Basic secondary index put/delete test with transactions
+proc sindex006 { methods {nentries 200} {tnum 6} args } {
+ source ./include.tcl
+ global dict nsecondaries
+
+ # Primary method/args.
+ set pmethod [lindex $methods 0]
+ set pargs [convert_args $pmethod $args]
+ set pomethod [convert_method $pmethod]
+
+ # Method/args for all the secondaries. If only one method
+ # was specified, assume the same method and a standard N
+ # secondaries.
+ set methods [lrange $methods 1 end]
+ if { [llength $methods] == 0 } {
+ for { set i 0 } { $i < $nsecondaries } { incr i } {
+ lappend methods $pmethod
+ }
+ }
+
+ set argses [convert_argses $methods $args]
+ set omethods [convert_methods $methods]
+
+ puts "Sindex00$tnum ($pmethod/$methods) $nentries equal key/data pairs"
+ puts " with transactions"
+ env_cleanup $testdir
+
+ set pname "primary00$tnum.db"
+ set snamebase "secondary00$tnum"
+
+ # Open an environment
+ # XXX if one is not supplied!
+ set env [berkdb_env -create -home $testdir -txn]
+ error_check_good env_open [is_valid_env $env] TRUE
+
+ # Open the primary.
+ set pdb [eval {berkdb_open -create -auto_commit -env} $env $pomethod \
+ $pargs $pname]
+ error_check_good primary_open [is_valid_db $pdb] TRUE
+
+ # Open and associate the secondaries
+ set sdbs {}
+ for { set i 0 } { $i < [llength $omethods] } { incr i } {
+ set sdb [eval {berkdb_open -create -auto_commit -env} $env \
+ [lindex $omethods $i] [lindex $argses $i] $snamebase.$i.db]
+ error_check_good second_open($i) [is_valid_db $sdb] TRUE
+
+ error_check_good db_associate($i) \
+ [$pdb associate -auto_commit [callback_n $i] $sdb] 0
+ lappend sdbs $sdb
+ }
+
+ puts "\tSindex00$tnum.a: Put loop"
+ set did [open $dict]
+ for { set n 0 } { [gets $did str] != -1 && $n < $nentries } { incr n } {
+ if { [is_record_based $pmethod] == 1 } {
+ set key [expr $n + 1]
+ set datum $str
+ } else {
+ set key $str
+ gets $did datum
+ }
+ set keys($n) $key
+ set data($n) [pad_data $pmethod $datum]
+
+ set txn [$env txn]
+ set ret [eval {$pdb put} -txn $txn \
+ {$key [chop_data $pmethod $datum]}]
+ error_check_good put($n) $ret 0
+ error_check_good txn_commit($n) [$txn commit] 0
+ }
+ close $did
+ check_secondaries $pdb $sdbs $nentries keys data "Sindex00$tnum.a"
+
+ puts "\tSindex00$tnum.b: Put/overwrite loop"
+ for { set n 0 } { $n < $nentries } { incr n } {
+ set newd $data($n).$keys($n)
+
+ set txn [$env txn]
+ set ret [eval {$pdb put} -txn $txn \
+ {$keys($n) [chop_data $pmethod $newd]}]
+ error_check_good put_overwrite($n) $ret 0
+ set data($n) [pad_data $pmethod $newd]
+ error_check_good txn_commit($n) [$txn commit] 0
+ }
+ check_secondaries $pdb $sdbs $nentries keys data "Sindex00$tnum.b"
+
+ # Delete the second half of the entries through the primary.
+ # We do the second half so we can just pass keys(0 ... n/2)
+ # to check_secondaries.
+ set half [expr $nentries / 2]
+ puts "\tSindex00$tnum.c: Primary delete loop: deleting $half entries"
+ for { set n $half } { $n < $nentries } { incr n } {
+ set txn [$env txn]
+ set ret [$pdb del -txn $txn $keys($n)]
+ error_check_good pdel($n) $ret 0
+ error_check_good txn_commit($n) [$txn commit] 0
+ }
+ check_secondaries $pdb $sdbs $half keys data "Sindex00$tnum.c"
+
+ # Delete half of what's left, through the first secondary.
+ set quar [expr $half / 2]
+ puts "\tSindex00$tnum.d: Secondary delete loop: deleting $quar entries"
+ set sdb [lindex $sdbs 0]
+ set callback [callback_n 0]
+ for { set n $quar } { $n < $half } { incr n } {
+ set skey [$callback $keys($n) [pad_data $pmethod $data($n)]]
+ set txn [$env txn]
+ set ret [$sdb del -txn $txn $skey]
+ error_check_good sdel($n) $ret 0
+ error_check_good txn_commit($n) [$txn commit] 0
+ }
+ check_secondaries $pdb $sdbs $quar keys data "Sindex00$tnum.d"
+
+ puts "\tSindex00$tnum.e: Closing/disassociating primary first"
+ error_check_good primary_close [$pdb close] 0
+ foreach sdb $sdbs {
+ error_check_good secondary_close [$sdb close] 0
+ }
+ error_check_good env_close [$env close] 0
+}
diff --git a/storage/bdb/test/sindex.tcl b/storage/bdb/test/sindex.tcl
new file mode 100644
index 00000000000..fc2a0fc2f31
--- /dev/null
+++ b/storage/bdb/test/sindex.tcl
@@ -0,0 +1,259 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2001-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: sindex.tcl,v 1.8 2002/05/07 17:15:46 krinsky Exp $
+#
+# Secondary index test driver and maintenance routines.
+#
+# Breaking from the usual convention, we put the driver function
+# for the secondary index tests here, in its own file. The reason
+# for this is that it's something which compartmentalizes nicely,
+# has little in common with other driver functions, and
+# is likely to be run on its own from time to time.
+#
+# The secondary index tests themselves live in si0*.tcl.
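+#
+# A typical invocation from the test shell (a sketch; assumes
+# test.tcl has been sourced):
+#     % sindex 1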
+
+# Standard number of secondary indices to create if a single-element
+# list of methods is passed into the secondary index tests.
+global nsecondaries
+set nsecondaries 2
+
+# Run the secondary index tests.
+proc sindex { {verbose 0} args } {
+ global verbose_check_secondaries
+ set verbose_check_secondaries $verbose
+
+ # Run basic tests with a single secondary index and a small number
+ # of keys, then again with a larger number of keys. (Note that
+ # we can't go above 5000, since we use two items from our
+ # 10K-word list for each key/data pair.)
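+ # (Assumption based on the framework's naming convention: the
+ # d/dd prefixes below select duplicate and sorted-duplicate
+ # variants of the base access methods.)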
+ foreach n { 200 5000 } {
+ foreach pm { btree hash recno frecno queue queueext } {
+ foreach sm { dbtree dhash ddbtree ddhash btree hash } {
+ sindex001 [list $pm $sm $sm] $n
+ sindex002 [list $pm $sm $sm] $n
+ # Skip tests 3 & 4 for large lists;
+ # they're not that interesting.
+ if { $n < 1000 } {
+ sindex003 [list $pm $sm $sm] $n
+ sindex004 [list $pm $sm $sm] $n
+ }
+
+ sindex006 [list $pm $sm $sm] $n
+ }
+ }
+ }
+
+ # Run the secondary index join test.  (There's no point in running
+ # this with both lengths; the primary is unhappy for now with fixed-
+ # length records (XXX), and we need unsorted dups in the secondaries.)
+ foreach pm { btree hash recno } {
+ foreach sm { btree hash } {
+ sindex005 [list $pm $sm $sm] 1000
+ }
+ sindex005 [list $pm btree hash] 1000
+ sindex005 [list $pm hash btree] 1000
+ }
+
+ # Run test with 50 secondaries.
+ foreach pm { btree hash } {
+ set methlist [list $pm]
+ for { set i 0 } { $i < 50 } { incr i } {
+ # XXX this should incorporate hash after #3726
+ if { $i % 2 == 0 } {
+ lappend methlist "dbtree"
+ } else {
+ lappend methlist "ddbtree"
+ }
+ }
+ sindex001 $methlist 500
+ sindex002 $methlist 500
+ sindex003 $methlist 500
+ sindex004 $methlist 500
+ }
+}
+
+# The callback function we use for each given secondary in most tests
+# is a simple function of its place in the list of secondaries (0-based)
+# and the access method (since recnos may need different callbacks).
+#
+# !!!
+# Note that callbacks 0-3 return unique secondary keys if the input data
+# are unique; callbacks 4 and higher may not, so don't use them with
+# the normal wordlist and secondaries that don't support dups.
+# The callbacks that incorporate a key don't work properly with recno
+# access methods, at least not in the current test framework (the
+# error_check_good lines test for e.g. 1foo, when the database has
+# e.g. 0x010x000x000x00foo).
+proc callback_n { n } {
+ switch $n {
+ 0 { return _s_reversedata }
+ 1 { return _s_noop }
+ 2 { return _s_concatkeydata }
+ 3 { return _s_concatdatakey }
+ 4 { return _s_reverseconcat }
+ 5 { return _s_truncdata }
+ 6 { return _s_alwayscocacola }
+ }
+ return _s_noop
+}
+
+proc _s_reversedata { a b } { return [reverse $b] }
+proc _s_truncdata { a b } { return [string range $b 1 end] }
+proc _s_concatkeydata { a b } { return $a$b }
+proc _s_concatdatakey { a b } { return $b$a }
+proc _s_reverseconcat { a b } { return [reverse $a$b] }
+proc _s_alwayscocacola { a b } { return "Coca-Cola" }
+proc _s_noop { a b } { return $b }
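+
+# For example, with key "dog" and datum "cat":
+#     _s_reversedata   dog cat  => tac
+#     _s_concatkeydata dog cat  => dogcat
+#     _s_concatdatakey dog cat  => catdog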
+
+# Should the check_secondary routines print lots of output?
+set verbose_check_secondaries 0
+
+# Given a primary database handle, a list of secondary handles, a
+# number of entries, and arrays of keys and data, verify that all
+# databases have what they ought to.
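+#
+# A typical call, as made by the si0* tests:
+#     check_secondaries $pdb $sdbs $nentries keys data "Sindex001.a"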
+proc check_secondaries { pdb sdbs nentries keyarr dataarr {pref "Check"} } {
+ upvar $keyarr keys
+ upvar $dataarr data
+ global verbose_check_secondaries
+
+ # Make sure each key/data pair is in the primary.
+ if { $verbose_check_secondaries } {
+ puts "\t\t$pref.1: Each key/data pair is in the primary"
+ }
+ for { set i 0 } { $i < $nentries } { incr i } {
+ error_check_good pdb_get($i) [$pdb get $keys($i)] \
+ [list [list $keys($i) $data($i)]]
+ }
+
+ for { set j 0 } { $j < [llength $sdbs] } { incr j } {
+ # Make sure each key/data pair is in this secondary.
+ if { $verbose_check_secondaries } {
+ puts "\t\t$pref.2:\
+ Each skey/key/data tuple is in secondary #$j"
+ }
+ for { set i 0 } { $i < $nentries } { incr i } {
+ set sdb [lindex $sdbs $j]
+ set skey [[callback_n $j] $keys($i) $data($i)]
+ error_check_good sdb($j)_pget($i) \
+ [$sdb pget -get_both $skey $keys($i)] \
+ [list [list $skey $keys($i) $data($i)]]
+ }
+
+ # Make sure this secondary contains only $nentries
+ # items.
+ if { $verbose_check_secondaries } {
+ puts "\t\t$pref.3: Secondary #$j has $nentries items"
+ }
+ set dbc [$sdb cursor]
+ error_check_good dbc($i) \
+ [is_valid_cursor $dbc $sdb] TRUE
+ for { set k 0 } { [llength [$dbc get -next]] > 0 } \
+ { incr k } { }
+ error_check_good numitems($i) $k $nentries
+ error_check_good dbc($i)_close [$dbc close] 0
+ }
+
+ if { $verbose_check_secondaries } {
+ puts "\t\t$pref.4: Primary has $nentries items"
+ }
+ set dbc [$pdb cursor]
+ error_check_good pdbc [is_valid_cursor $dbc $pdb] TRUE
+ for { set k 0 } { [llength [$dbc get -next]] > 0 } { incr k } { }
+ error_check_good numitems $k $nentries
+ error_check_good pdbc_close [$dbc close] 0
+}
+
+# Given a primary database handle and a list of secondary handles, walk
+# through the primary and make sure all the secondaries are correct,
+# then walk through the secondaries and make sure the primary is correct.
+#
+# This is slightly less rigorous than the normal check_secondaries--we
+# use it whenever we don't have up-to-date "keys" and "data" arrays.
+proc cursor_check_secondaries { pdb sdbs nentries { pref "Check" } } {
+ global verbose_check_secondaries
+
+ # Make sure each key/data pair in the primary is in each secondary.
+ set pdbc [$pdb cursor]
+ error_check_good ccs_pdbc [is_valid_cursor $pdbc $pdb] TRUE
+ set i 0
+ if { $verbose_check_secondaries } {
+ puts "\t\t$pref.1:\
+ Key/data in primary => key/data in secondaries"
+ }
+
+ for { set dbt [$pdbc get -first] } { [llength $dbt] > 0 } \
+ { set dbt [$pdbc get -next] } {
+ incr i
+ set pkey [lindex [lindex $dbt 0] 0]
+ set pdata [lindex [lindex $dbt 0] 1]
+ for { set j 0 } { $j < [llength $sdbs] } { incr j } {
+ set sdb [lindex $sdbs $j]
+ set sdbt [$sdb pget -get_both \
+ [[callback_n $j] $pkey $pdata] $pkey]
+ error_check_good pkey($pkey,$j) \
+ [lindex [lindex $sdbt 0] 1] $pkey
+ error_check_good pdata($pdata,$j) \
+ [lindex [lindex $sdbt 0] 2] $pdata
+ }
+ }
+ error_check_good ccs_pdbc_close [$pdbc close] 0
+ error_check_good primary_has_nentries $i $nentries
+
+ for { set j 0 } { $j < [llength $sdbs] } { incr j } {
+ if { $verbose_check_secondaries } {
+ puts "\t\t$pref.2:\
+ Key/data in secondary #$j => key/data in primary"
+ }
+ set sdb [lindex $sdbs $j]
+ set sdbc [$sdb cursor]
+ error_check_good ccs_sdbc($j) [is_valid_cursor $sdbc $sdb] TRUE
+ set i 0
+ for { set dbt [$sdbc pget -first] } { [llength $dbt] > 0 } \
+ { set dbt [$sdbc pget -next] } {
+ incr i
+ set pkey [lindex [lindex $dbt 0] 1]
+ set pdata [lindex [lindex $dbt 0] 2]
+ error_check_good pdb_get($pkey/$pdata,$j) \
+ [$pdb get -get_both $pkey $pdata] \
+ [list [list $pkey $pdata]]
+ }
+ error_check_good secondary($j)_has_nentries $i $nentries
+
+ # To exercise pget -last/pget -prev, we do it backwards too.
+ set i 0
+ for { set dbt [$sdbc pget -last] } { [llength $dbt] > 0 } \
+ { set dbt [$sdbc pget -prev] } {
+ incr i
+ set pkey [lindex [lindex $dbt 0] 1]
+ set pdata [lindex [lindex $dbt 0] 2]
+ error_check_good pdb_get_bkwds($pkey/$pdata,$j) \
+ [$pdb get -get_both $pkey $pdata] \
+ [list [list $pkey $pdata]]
+ }
+ error_check_good secondary($j)_has_nentries_bkwds $i $nentries
+
+ error_check_good ccs_sdbc_close($j) [$sdbc close] 0
+ }
+}
+
+# The secondary index tests take a list of the access methods that
+# each database ought to use.  Convert it in one pass into a list of
+# converted args ("argses") and open methods ("omethods"), one entry
+# per method in the list.
+proc convert_argses { methods largs } {
+ set ret {}
+ foreach m $methods {
+ lappend ret [convert_args $m $largs]
+ }
+ return $ret
+}
+proc convert_methods { methods } {
+ set ret {}
+ foreach m $methods {
+ lappend ret [convert_method $m]
+ }
+ return $ret
+}
diff --git a/storage/bdb/test/sysscript.tcl b/storage/bdb/test/sysscript.tcl
new file mode 100644
index 00000000000..810b0df6cef
--- /dev/null
+++ b/storage/bdb/test/sysscript.tcl
@@ -0,0 +1,282 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: sysscript.tcl,v 11.17 2002/07/29 17:05:24 sue Exp $
+#
+# System integration test script.
+# This script runs a single process that tests the full functionality of
+# the system. The database under test contains nfiles files. Each process
+# randomly generates a key and some data. Both keys and data are bimodally
+# distributed between small values (1-10 characters) and large values (the
+# average length is indicated via the key_avg and data_avg command line
+# parameters).
+# The process then decides on a replication factor between 1 and nfiles.
+# It writes the key and data to that many files and tacks on the file ids
+# of the files it writes to the data string. For example, let's say that
+# I randomly generate the key dog and data cat. Then I pick a replication
+# factor of 3. I pick 3 files from the set of n (say 1, 3, and 5). I then
+# rewrite the data as 1:3:5:cat. I begin a transaction, add the key/data
+# pair to each file and then commit. Notice that I may generate replication
+# of the form 1:3:3:cat in which case I simply add a duplicate to file 3.
+#
+# Usage: sysscript dir nfiles key_avg data_avg method
+#
+# dir: DB_HOME directory
+# nfiles: number of files in the set
+# key_avg: average big key size
+# data_avg: average big data size
+# method: access method to use for the files
+
+source ./include.tcl
+source $test_path/test.tcl
+source $test_path/testutils.tcl
+
+set mypid [pid]
+
+set usage "sysscript dir nfiles key_avg data_avg method"
+
+# Verify usage
+if { $argc != 5 } {
+ puts stderr "FAIL:[timestamp] Usage: $usage"
+ exit
+}
+
+puts [concat "Argc: " $argc " Argv: " $argv]
+
+# Initialize arguments
+set dir [lindex $argv 0]
+set nfiles [ lindex $argv 1 ]
+set key_avg [ lindex $argv 2 ]
+set data_avg [ lindex $argv 3 ]
+set method [ lindex $argv 4 ]
+
+# Initialize seed
+global rand_init
+berkdb srand $rand_init
+
+puts "Beginning execution for $mypid"
+puts "$dir DB_HOME"
+puts "$nfiles files"
+puts "$key_avg average key length"
+puts "$data_avg average data length"
+
+flush stdout
+
+# Create local environment
+set dbenv [berkdb_env -txn -home $dir]
+set err [catch {error_check_good $mypid:dbenv [is_substr $dbenv env] 1} ret]
+if {$err != 0} {
+ puts $ret
+ return
+}
+
+# Now open the files
+for { set i 0 } { $i < $nfiles } { incr i } {
+ set file test044.$i.db
+ set db($i) [berkdb open -auto_commit -env $dbenv $method $file]
+ set err [catch {error_check_bad $mypid:dbopen $db($i) NULL} ret]
+ if {$err != 0} {
+ puts $ret
+ return
+ }
+ set err [catch {error_check_bad $mypid:dbopen [is_substr $db($i) \
+ error] 1} ret]
+ if {$err != 0} {
+ puts $ret
+ return
+ }
+}
+
+set record_based [is_record_based $method]
+while { 1 } {
+ # Decide if we're going to create a big key or a small key
+ # We give small keys a 70% chance.
+ if { [berkdb random_int 1 10] < 8 } {
+ set k [random_data 5 0 0 $record_based]
+ } else {
+ set k [random_data $key_avg 0 0 $record_based]
+ }
+ set data [chop_data $method [random_data $data_avg 0 0]]
+
+ set txn [$dbenv txn]
+ set err [catch {error_check_good $mypid:txn_begin [is_substr $txn \
+ $dbenv.txn] 1} ret]
+ if {$err != 0} {
+ puts $ret
+ return
+ }
+
+ # Open cursors
+ for { set f 0 } {$f < $nfiles} {incr f} {
+ set cursors($f) [$db($f) cursor -txn $txn]
+ set err [catch {error_check_good $mypid:cursor_open \
+ [is_substr $cursors($f) $db($f)] 1} ret]
+ if {$err != 0} {
+ puts $ret
+ return
+ }
+ }
+ set aborted 0
+
+ # Check to see if key is already in database
+ set found 0
+ for { set i 0 } { $i < $nfiles } { incr i } {
+ set r [$db($i) get -txn $txn $k]
+ if { $r == "-1" } {
+ for {set f 0 } {$f < $nfiles} {incr f} {
+ set err [catch {error_check_good \
+ $mypid:cursor_close \
+ [$cursors($f) close] 0} ret]
+ if {$err != 0} {
+ puts $ret
+ return
+ }
+ }
+ set err [catch {error_check_good $mypid:txn_abort \
+ [$txn abort] 0} ret]
+ if {$err != 0} {
+ puts $ret
+ return
+ }
+ set aborted 1
+ set found 2
+ break
+ } elseif { $r != "Key $k not found." } {
+ set found 1
+ break
+ }
+ }
+ switch $found {
+ 2 {
+ # Transaction aborted, no need to do anything.
+ }
+ 0 {
+ # Key was not found, decide how much to replicate
+ # and then create a list of that many file IDs.
+ set repl [berkdb random_int 1 $nfiles]
+ set fset ""
+ for { set i 0 } { $i < $repl } {incr i} {
+ set f [berkdb random_int 0 [expr $nfiles - 1]]
+ lappend fset $f
+ set data [chop_data $method $f:$data]
+ }
+
+ foreach i $fset {
+ set r [$db($i) put -txn $txn $k $data]
+ if {$r == "-1"} {
+ for {set f 0 } {$f < $nfiles} {incr f} {
+ set err [catch {error_check_good \
+ $mypid:cursor_close \
+ [$cursors($f) close] 0} ret]
+ if {$err != 0} {
+ puts $ret
+ return
+ }
+ }
+ set err [catch {error_check_good \
+ $mypid:txn_abort [$txn abort] 0} ret]
+ if {$err != 0} {
+ puts $ret
+ return
+ }
+ set aborted 1
+ break
+ }
+ }
+ }
+ 1 {
+ # Key was found. Make sure that all the data values
+ # look good.
+ set f [zero_list $nfiles]
+ set data $r
+ while { [set ndx [string first : $r]] != -1 } {
+ set fnum [string range $r 0 [expr $ndx - 1]]
+ if { [lindex $f $fnum] == 0 } {
+				set flag -set
+				set full [record $cursors($fnum) get -set $k]
+			} else {
+				set flag -next
+				set full [record $cursors($fnum) get -next]
+ }
+ if {[llength $full] == 0} {
+ for {set f 0 } {$f < $nfiles} {incr f} {
+ set err [catch {error_check_good \
+ $mypid:cursor_close \
+ [$cursors($f) close] 0} ret]
+ if {$err != 0} {
+ puts $ret
+ return
+ }
+ }
+ set err [catch {error_check_good \
+ $mypid:txn_abort [$txn abort] 0} ret]
+ if {$err != 0} {
+ puts $ret
+ return
+ }
+ set aborted 1
+ break
+ }
+ set err [catch {error_check_bad \
+ $mypid:curs_get($k,$data,$fnum,$flag) \
+ [string length $full] 0} ret]
+ if {$err != 0} {
+ puts $ret
+ return
+ }
+ set key [lindex [lindex $full 0] 0]
+ set rec [pad_data $method [lindex [lindex $full 0] 1]]
+ set err [catch {error_check_good \
+ $mypid:dbget_$fnum:key $key $k} ret]
+ if {$err != 0} {
+ puts $ret
+ return
+ }
+ set err [catch {error_check_good \
+ $mypid:dbget_$fnum:data($k) $rec $data} ret]
+ if {$err != 0} {
+ puts $ret
+ return
+ }
+ set f [lreplace $f $fnum $fnum 1]
+ incr ndx
+ set r [string range $r $ndx end]
+ }
+ }
+ }
+ if { $aborted == 0 } {
+ for {set f 0 } {$f < $nfiles} {incr f} {
+ set err [catch {error_check_good $mypid:cursor_close \
+ [$cursors($f) close] 0} ret]
+ if {$err != 0} {
+ puts $ret
+ return
+ }
+ }
+ set err [catch {error_check_good $mypid:commit [$txn commit] \
+ 0} ret]
+ if {$err != 0} {
+ puts $ret
+ return
+ }
+ }
+}
+
+# Close files
+for { set i 0 } { $i < $nfiles} { incr i } {
+ set r [$db($i) close]
+ set err [catch {error_check_good $mypid:db_close:$i $r 0} ret]
+ if {$err != 0} {
+ puts $ret
+ return
+ }
+}
+
+# Close the environment
+$dbenv close
+
+puts "[timestamp] [pid] Complete"
+flush stdout
+
+filecheck $file 0
diff --git a/storage/bdb/test/test.tcl b/storage/bdb/test/test.tcl
new file mode 100644
index 00000000000..10ee9425b7a
--- /dev/null
+++ b/storage/bdb/test/test.tcl
@@ -0,0 +1,1863 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test.tcl,v 11.225 2002/09/10 18:51:38 sue Exp $
+
+source ./include.tcl
+
+# Load DB's TCL API.
+load $tcllib
+
+if { [file exists $testdir] != 1 } {
+ file mkdir $testdir
+}
+
+global __debug_print
+global __debug_on
+global __debug_test
+global util_path
+
+#
+# Test if utilities work to figure out the path. Most systems
+# use ., but QNX has a problem with execvp of shell scripts which
+# causes it to break.
+#
+set stat [catch {exec ./db_printlog -?} ret]
+if { [string first "exec format error" $ret] != -1 } {
+ set util_path ./.libs
+} else {
+ set util_path .
+}
+set __debug_print 0
+set encrypt 0
+set old_encrypt 0
+set passwd test_passwd
+
+# This is where the test numbering and parameters now live.
+source $test_path/testparams.tcl
+
+# Error stream that (should!) always go to the console, even if we're
+# redirecting to ALL.OUT.
+set consoleerr stderr
+
+foreach sub $subs {
+ if { [info exists num_test($sub)] != 1 } {
+ puts stderr "Subsystem $sub has no number of tests specified in\
+ testparams.tcl; skipping."
+ continue
+ }
+ set end $num_test($sub)
+ for { set i 1 } { $i <= $end } {incr i} {
+ set name [format "%s%03d.tcl" $sub $i]
+ source $test_path/$name
+ }
+}
+
+source $test_path/archive.tcl
+source $test_path/byteorder.tcl
+source $test_path/dbm.tcl
+source $test_path/hsearch.tcl
+source $test_path/join.tcl
+source $test_path/logtrack.tcl
+source $test_path/ndbm.tcl
+source $test_path/parallel.tcl
+source $test_path/reputils.tcl
+source $test_path/sdbutils.tcl
+source $test_path/shelltest.tcl
+source $test_path/sindex.tcl
+source $test_path/testutils.tcl
+source $test_path/upgrade.tcl
+
+set dict $test_path/wordlist
+set alphabet "abcdefghijklmnopqrstuvwxyz"
+set datastr "abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz"
+
+# Random number seed.
+global rand_init
+set rand_init 101301
+
+# Default record length and padding character for
+# fixed record length access method(s)
+set fixed_len 20
+set fixed_pad 0
+
+set recd_debug 0
+set log_log_record_types 0
+set ohandles {}
+
+# Normally, we're not running an all-tests-in-one-env run. This matters
+# for error stream/error prefix settings in berkdb_open.
+global is_envmethod
+set is_envmethod 0
+
+# For testing locker id wrap around.
+global lock_curid
+global lock_maxid
+set lock_curid 0
+set lock_maxid 2147483647
+global txn_curid
+global txn_maxid
+set txn_curid 2147483648
+set txn_maxid 4294967295
+
+# Set up any OS-specific values
+global tcl_platform
+set is_windows_test [is_substr $tcl_platform(os) "Win"]
+set is_hp_test [is_substr $tcl_platform(os) "HP-UX"]
+set is_qnx_test [is_substr $tcl_platform(os) "QNX"]
+
+# From here on out, test.tcl contains the procs that are used to
+# run all or part of the test suite.
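+#
+# Typical invocations (an illustrative sketch; run from a tclsh that
+# has sourced this file):
+#	run_std			;# standard regression run, logged to ALL.OUT
+#	r btree			;# all access-method tests for btree
+#	run_method -hash 1 5	;# test001-test005 with the hash method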
+
+proc run_std { args } {
+ global num_test
+ source ./include.tcl
+
+ set exflgs [eval extractflags $args]
+ set args [lindex $exflgs 0]
+ set flags [lindex $exflgs 1]
+
+ set display 1
+ set run 1
+ set am_only 0
+ set no_am 0
+ set std_only 1
+ set rflags {--}
+ foreach f $flags {
+ switch $f {
+ A {
+ set std_only 0
+ }
+ M {
+ set no_am 1
+ puts "run_std: all but access method tests."
+ }
+ m {
+ set am_only 1
+ puts "run_std: access method tests only."
+ }
+ n {
+ set display 1
+ set run 0
+ set rflags [linsert $rflags 0 "-n"]
+ }
+ }
+ }
+
+ if { $std_only == 1 } {
+ fileremove -f ALL.OUT
+
+ set o [open ALL.OUT a]
+ if { $run == 1 } {
+ puts -nonewline "Test suite run started at: "
+ puts [clock format [clock seconds] -format "%H:%M %D"]
+ puts [berkdb version -string]
+
+ puts -nonewline $o "Test suite run started at: "
+ puts $o [clock format [clock seconds] -format "%H:%M %D"]
+ puts $o [berkdb version -string]
+ }
+ close $o
+ }
+
+ set test_list {
+ {"environment" "env"}
+ {"archive" "archive"}
+ {"locking" "lock"}
+ {"logging" "log"}
+ {"memory pool" "memp"}
+ {"mutex" "mutex"}
+ {"transaction" "txn"}
+ {"deadlock detection" "dead"}
+ {"subdatabase" "sdb"}
+ {"byte-order" "byte"}
+ {"recno backing file" "rsrc"}
+ {"DBM interface" "dbm"}
+ {"NDBM interface" "ndbm"}
+ {"Hsearch interface" "hsearch"}
+ {"secondary index" "sindex"}
+ }
+
+ if { $am_only == 0 } {
+
+ foreach pair $test_list {
+ set msg [lindex $pair 0]
+ set cmd [lindex $pair 1]
+ puts "Running $msg tests"
+ if [catch {exec $tclsh_path \
+ << "source $test_path/test.tcl; r $rflags $cmd" \
+ >>& ALL.OUT } res] {
+ set o [open ALL.OUT a]
+ puts $o "FAIL: $cmd test"
+ close $o
+ }
+ }
+
+ # Run recovery tests.
+ #
+ # XXX These too are broken into separate tclsh instantiations
+ # so we don't require so much memory, but I think it's cleaner
+ # and more useful to do it down inside proc r than here,
+ # since "r recd" gets done a lot and needs to work.
+ #
+ # Note that we still wrap the test in an exec so that
+ # its output goes to ALL.OUT. run_recd will wrap each test
+ # so that both error streams go to stdout (which here goes
+ # to ALL.OUT); information that run_recd wishes to print
+ # to the "real" stderr, but outside the wrapping for each test,
+ # such as which tests are being skipped, it can still send to
+ # stderr.
+ puts "Running recovery tests"
+ if [catch {
+ exec $tclsh_path \
+ << "source $test_path/test.tcl; r $rflags recd" \
+ 2>@ stderr >> ALL.OUT
+ } res] {
+ set o [open ALL.OUT a]
+ puts $o "FAIL: recd tests"
+ close $o
+ }
+
+ # Run join test
+ #
+ # XXX
+ # Broken up into separate tclsh instantiations so we don't
+ # require so much memory.
+ puts "Running join test"
+ foreach i "join1 join2 join3 join4 join5 join6" {
+ if [catch {exec $tclsh_path \
+ << "source $test_path/test.tcl; r $rflags $i" \
+ >>& ALL.OUT } res] {
+ set o [open ALL.OUT a]
+ puts $o "FAIL: $i test"
+ close $o
+ }
+ }
+ }
+
+ if { $no_am == 0 } {
+ # Access method tests.
+ #
+ # XXX
+ # Broken up into separate tclsh instantiations so we don't
+ # require so much memory.
+ foreach i \
+ "btree hash queue queueext recno rbtree frecno rrecno" {
+ puts "Running $i tests"
+ for { set j 1 } { $j <= $num_test(test) } {incr j} {
+ if { $run == 0 } {
+ set o [open ALL.OUT a]
+ run_method -$i $j $j $display $run $o
+ close $o
+ }
+ if { $run } {
+ if [catch {exec $tclsh_path \
+ << "source $test_path/test.tcl; \
+ run_method -$i $j $j $display $run"\
+ >>& ALL.OUT } res] {
+ set o [open ALL.OUT a]
+ puts $o "FAIL:\
+ [format "test%03d" $j] $i"
+ close $o
+ }
+ }
+ }
+ }
+ }
+
+ # If not actually running, no need to check for failure.
+ # If running in the context of the larger 'run_all' we don't
+ # check for failure here either.
+ if { $run == 0 || $std_only == 0 } {
+ return
+ }
+
+ set failed [check_failed_run ALL.OUT]
+
+ set o [open ALL.OUT a]
+ if { $failed == 0 } {
+ puts "Regression Tests Succeeded"
+ puts $o "Regression Tests Succeeded"
+ } else {
+ puts "Regression Tests Failed; see ALL.OUT for log"
+ puts $o "Regression Tests Failed"
+ }
+
+ puts -nonewline "Test suite run completed at: "
+ puts [clock format [clock seconds] -format "%H:%M %D"]
+ puts -nonewline $o "Test suite run completed at: "
+ puts $o [clock format [clock seconds] -format "%H:%M %D"]
+ close $o
+}
+
+proc check_failed_run { file {text "^FAIL"}} {
+ set failed 0
+ set o [open $file r]
+ while { [gets $o line] >= 0 } {
+ set ret [regexp $text $line]
+ if { $ret != 0 } {
+ set failed 1
+ }
+ }
+ close $o
+
+ return $failed
+}
+
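+# Dispatch one named group of tests, e.g. "r btree" or "r recd"; the
+# -n flag displays what would run without running it (see the switch
+# below for the full set of group names).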
+proc r { args } {
+ global num_test
+ source ./include.tcl
+
+ set exflgs [eval extractflags $args]
+ set args [lindex $exflgs 0]
+ set flags [lindex $exflgs 1]
+
+ set display 1
+ set run 1
+ set saveflags "--"
+ foreach f $flags {
+ switch $f {
+ n {
+ set display 1
+ set run 0
+ set saveflags "-n $saveflags"
+ }
+ }
+ }
+
+ if {[catch {
+ set sub [ lindex $args 0 ]
+ switch $sub {
+ byte {
+ if { $display } {
+ puts "run_test byteorder"
+ }
+ if { $run } {
+ check_handles
+ run_test byteorder
+ }
+ }
+ archive -
+ dbm -
+ hsearch -
+ ndbm -
+ shelltest -
+ sindex {
+ if { $display } { puts "r $sub" }
+ if { $run } {
+ check_handles
+ $sub
+ }
+ }
+ bigfile -
+ dead -
+ env -
+ lock -
+ log -
+ memp -
+ mutex -
+ rsrc -
+ sdbtest -
+ txn {
+ if { $display } { run_subsystem $sub 1 0 }
+ if { $run } {
+ run_subsystem $sub
+ }
+ }
+ join {
+ eval r $saveflags join1
+ eval r $saveflags join2
+ eval r $saveflags join3
+ eval r $saveflags join4
+ eval r $saveflags join5
+ eval r $saveflags join6
+ }
+ join1 {
+ if { $display } { puts jointest }
+ if { $run } {
+ check_handles
+ jointest
+ }
+ }
+ joinbench {
+ puts "[timestamp]"
+ eval r $saveflags join1
+ eval r $saveflags join2
+ puts "[timestamp]"
+ }
+ join2 {
+ if { $display } { puts "jointest 512" }
+ if { $run } {
+ check_handles
+ jointest 512
+ }
+ }
+ join3 {
+ if { $display } {
+ puts "jointest 8192 0 -join_item"
+ }
+ if { $run } {
+ check_handles
+ jointest 8192 0 -join_item
+ }
+ }
+ join4 {
+ if { $display } { puts "jointest 8192 2" }
+ if { $run } {
+ check_handles
+ jointest 8192 2
+ }
+ }
+ join5 {
+ if { $display } { puts "jointest 8192 3" }
+ if { $run } {
+ check_handles
+ jointest 8192 3
+ }
+ }
+ join6 {
+ if { $display } { puts "jointest 512 3" }
+ if { $run } {
+ check_handles
+ jointest 512 3
+ }
+ }
+ recd {
+ check_handles
+ run_recds $run $display [lrange $args 1 end]
+ }
+ rep {
+ for { set j 1 } { $j <= $num_test(test) } \
+ { incr j } {
+ if { $display } {
+ puts "eval run_test \
+ run_repmethod 0 $j $j"
+ }
+ if { $run } {
+ eval run_test \
+ run_repmethod 0 $j $j
+ }
+ }
+ for { set i 1 } \
+ { $i <= $num_test(rep) } {incr i} {
+ set test [format "%s%03d" $sub $i]
+ if { $i == 2 } {
+ if { $run } {
+ puts "Skipping rep002 \
+ (waiting on SR #6195)"
+ }
+ continue
+ }
+ if { $display } {
+ puts "run_test $test"
+ }
+ if { $run } {
+ run_test $test
+ }
+ }
+ }
+ rpc {
+ if { $display } { puts "r $sub" }
+ global rpc_svc svc_list
+	set old_rpc_svc $rpc_svc
+ foreach rpc_svc $svc_list {
+ if { !$run || \
+ ![file exist $util_path/$rpc_svc] } {
+ continue
+ }
+ run_subsystem rpc
+ if { [catch {run_rpcmethod -txn} ret] != 0 } {
+ puts $ret
+ }
+ run_test run_rpcmethod
+ }
+	set rpc_svc $old_rpc_svc
+ }
+ sec {
+ if { $display } {
+ run_subsystem $sub 1 0
+ }
+ if { $run } {
+ run_subsystem $sub 0 1
+ }
+ for { set j 1 } { $j <= $num_test(test) } \
+ { incr j } {
+ if { $display } {
+ puts "eval run_test \
+ run_secmethod $j $j"
+ puts "eval run_test \
+ run_secenv $j $j"
+ }
+ if { $run } {
+ eval run_test \
+ run_secmethod $j $j
+ eval run_test \
+ run_secenv $j $j
+ }
+ }
+ }
+ sdb {
+ if { $display } {
+ puts "eval r $saveflags sdbtest"
+ for { set j 1 } \
+ { $j <= $num_test(sdb) } \
+ { incr j } {
+ puts "eval run_test \
+ subdb $j $j"
+ }
+ }
+ if { $run } {
+ eval r $saveflags sdbtest
+ for { set j 1 } \
+ { $j <= $num_test(sdb) } \
+ { incr j } {
+ eval run_test subdb $j $j
+ }
+ }
+ }
+ btree -
+ rbtree -
+ hash -
+ queue -
+ queueext -
+ recno -
+ frecno -
+ rrecno {
+ eval run_method [lindex $args 0] \
+ 1 0 $display $run [lrange $args 1 end]
+ }
+
+ default {
+ error \
+ "FAIL:[timestamp] r: $args: unknown command"
+ }
+ }
+ flush stdout
+ flush stderr
+ } res] != 0} {
+ global errorInfo;
+
+ set fnl [string first "\n" $errorInfo]
+ set theError [string range $errorInfo 0 [expr $fnl - 1]]
+ if {[string first FAIL $errorInfo] == -1} {
+ error "FAIL:[timestamp] r: $args: $theError"
+ } else {
+ error $theError;
+ }
+ }
+}
+
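+# Run every numbered test for one subsystem prefix, e.g.
+#	run_subsystem lock	;# lock001 .. lockNNN
+# where the test count comes from num_test(lock) in testparams.tcl.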
+proc run_subsystem { prefix { display 0 } { run 1} } {
+ global num_test
+ if { [info exists num_test($prefix)] != 1 } {
+		puts stderr "Subsystem $prefix has no number of tests specified in\
+ testparams.tcl; skipping."
+ return
+ }
+ for { set i 1 } { $i <= $num_test($prefix) } {incr i} {
+ set name [format "%s%03d" $prefix $i]
+ if { $display } {
+ puts "eval $name"
+ }
+ if { $run } {
+ check_handles
+ catch {eval $name}
+ }
+ }
+}
+
+proc run_test { testname args } {
+ source ./include.tcl
+ foreach method "hash queue queueext recno rbtree frecno rrecno btree" {
+ check_handles
+ eval $testname -$method $args
+ verify_dir $testdir "" 1
+ }
+}
+
+proc run_method { method {start 1} {stop 0} {display 0} {run 1} \
+ { outfile stdout } args } {
+ global __debug_on
+ global __debug_print
+ global num_test
+ global parms
+ source ./include.tcl
+
+ if { $stop == 0 } {
+ set stop $num_test(test)
+ }
+ if { $run == 1 } {
+ puts $outfile "run_method: $method $start $stop $args"
+ }
+
+ if {[catch {
+ for { set i $start } { $i <= $stop } {incr i} {
+ set name [format "test%03d" $i]
+ if { [info exists parms($name)] != 1 } {
+ puts stderr "[format Test%03d $i] disabled in\
+ testparams.tcl; skipping."
+ continue
+ }
+ if { $display } {
+ puts -nonewline $outfile "eval $name $method"
+ puts -nonewline $outfile " $parms($name) $args"
+ puts $outfile " ; verify_dir $testdir \"\" 1"
+ }
+ if { $run } {
+ check_handles $outfile
+ puts $outfile "[timestamp]"
+ eval $name $method $parms($name) $args
+ if { $__debug_print != 0 } {
+ puts $outfile ""
+ }
+ # verify all databases the test leaves behind
+ verify_dir $testdir "" 1
+ if { $__debug_on != 0 } {
+ debug
+ }
+ }
+ flush stdout
+ flush stderr
+ }
+ } res] != 0} {
+ global errorInfo;
+
+ set fnl [string first "\n" $errorInfo]
+ set theError [string range $errorInfo 0 [expr $fnl - 1]]
+ if {[string first FAIL $errorInfo] == -1} {
+ error "FAIL:[timestamp]\
+ run_method: $method $i: $theError"
+ } else {
+ error $theError;
+ }
+ }
+}
+
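+# Run method tests against an RPC server (one of the berkeley_db_*svc
+# programs), starting the server locally or via rsh as needed and
+# killing it when done.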
+proc run_rpcmethod { method {start 1} {stop 0} {largs ""} } {
+ global __debug_on
+ global __debug_print
+ global num_test
+ global parms
+ global is_envmethod
+ global rpc_svc
+ source ./include.tcl
+
+ if { $stop == 0 } {
+ set stop $num_test(test)
+ }
+ puts "run_rpcmethod: $method $start $stop $largs"
+
+ set save_largs $largs
+ if { [string compare $rpc_server "localhost"] == 0 } {
+ set dpid [exec $util_path/$rpc_svc -h $rpc_testdir &]
+ } else {
+ set dpid [exec rsh $rpc_server $rpc_path/$rpc_svc \
+ -h $rpc_testdir &]
+ }
+ puts "\tRun_rpcmethod.a: starting server, pid $dpid"
+ tclsleep 10
+ remote_cleanup $rpc_server $rpc_testdir $testdir
+
+ set home [file tail $rpc_testdir]
+
+ set is_envmethod 1
+ set use_txn 0
+ if { [string first "txn" $method] != -1 } {
+ set use_txn 1
+ }
+ if { $use_txn == 1 } {
+ if { $start == 1 } {
+ set ntxns 32
+ } else {
+ set ntxns $start
+ }
+ set i 1
+ check_handles
+ remote_cleanup $rpc_server $rpc_testdir $testdir
+ set env [eval {berkdb_env -create -mode 0644 -home $home \
+ -server $rpc_server -client_timeout 10000} -txn]
+ error_check_good env_open [is_valid_env $env] TRUE
+
+ set stat [catch {eval txn001_suba $ntxns $env} res]
+ if { $stat == 0 } {
+ set stat [catch {eval txn001_subb $ntxns $env} res]
+ }
+ error_check_good envclose [$env close] 0
+ set stat [catch {eval txn003} res]
+ } else {
+ set stat [catch {
+ for { set i $start } { $i <= $stop } {incr i} {
+ check_handles
+ set name [format "test%03d" $i]
+ if { [info exists parms($name)] != 1 } {
+ puts stderr "[format Test%03d $i]\
+ disabled in testparams.tcl;\
+ skipping."
+ continue
+ }
+ remote_cleanup $rpc_server $rpc_testdir $testdir
+ #
+ # Set server cachesize to 1Mb. Otherwise some
+ # tests won't fit (like test084 -btree).
+ #
+ set env [eval {berkdb_env -create -mode 0644 \
+ -home $home -server $rpc_server \
+ -client_timeout 10000 \
+ -cachesize {0 1048576 1}}]
+ error_check_good env_open \
+ [is_valid_env $env] TRUE
+ append largs " -env $env "
+
+ puts "[timestamp]"
+ eval $name $method $parms($name) $largs
+ if { $__debug_print != 0 } {
+ puts ""
+ }
+ if { $__debug_on != 0 } {
+ debug
+ }
+ flush stdout
+ flush stderr
+ set largs $save_largs
+ error_check_good envclose [$env close] 0
+ }
+ } res]
+ }
+ if { $stat != 0} {
+ global errorInfo;
+
+ set fnl [string first "\n" $errorInfo]
+ set theError [string range $errorInfo 0 [expr $fnl - 1]]
+ tclkill $dpid
+ if {[string first FAIL $errorInfo] == -1} {
+ error "FAIL:[timestamp]\
+ run_rpcmethod: $method $i: $theError"
+ } else {
+ error $theError;
+ }
+ }
+ set is_envmethod 0
+ tclkill $dpid
+}
+
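+# Like run_rpcmethod, but does not start or stop the RPC server; the
+# server is assumed to be running already.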
+proc run_rpcnoserver { method {start 1} {stop 0} {largs ""} } {
+ global __debug_on
+ global __debug_print
+ global num_test
+ global parms
+ global is_envmethod
+ source ./include.tcl
+
+ if { $stop == 0 } {
+ set stop $num_test(test)
+ }
+ puts "run_rpcnoserver: $method $start $stop $largs"
+
+ set save_largs $largs
+ remote_cleanup $rpc_server $rpc_testdir $testdir
+ set home [file tail $rpc_testdir]
+
+ set is_envmethod 1
+ set use_txn 0
+ if { [string first "txn" $method] != -1 } {
+ set use_txn 1
+ }
+ if { $use_txn == 1 } {
+ if { $start == 1 } {
+ set ntxns 32
+ } else {
+ set ntxns $start
+ }
+ set i 1
+ check_handles
+ remote_cleanup $rpc_server $rpc_testdir $testdir
+ set env [eval {berkdb_env -create -mode 0644 -home $home \
+ -server $rpc_server -client_timeout 10000} -txn]
+ error_check_good env_open [is_valid_env $env] TRUE
+
+ set stat [catch {eval txn001_suba $ntxns $env} res]
+ if { $stat == 0 } {
+ set stat [catch {eval txn001_subb $ntxns $env} res]
+ }
+ error_check_good envclose [$env close] 0
+ } else {
+ set stat [catch {
+ for { set i $start } { $i <= $stop } {incr i} {
+ check_handles
+ set name [format "test%03d" $i]
+ if { [info exists parms($name)] != 1 } {
+ puts stderr "[format Test%03d $i]\
+ disabled in testparams.tcl;\
+ skipping."
+ continue
+ }
+ remote_cleanup $rpc_server $rpc_testdir $testdir
+ #
+ # Set server cachesize to 1Mb. Otherwise some
+ # tests won't fit (like test084 -btree).
+ #
+ set env [eval {berkdb_env -create -mode 0644 \
+ -home $home -server $rpc_server \
+ -client_timeout 10000 \
+ -cachesize {0 1048576 1} }]
+ error_check_good env_open \
+ [is_valid_env $env] TRUE
+ append largs " -env $env "
+
+ puts "[timestamp]"
+ eval $name $method $parms($name) $largs
+ if { $__debug_print != 0 } {
+ puts ""
+ }
+ if { $__debug_on != 0 } {
+ debug
+ }
+ flush stdout
+ flush stderr
+ set largs $save_largs
+ error_check_good envclose [$env close] 0
+ }
+ } res]
+ }
+	set is_envmethod 0
+	if { $stat != 0} {
+		global errorInfo;
+
+		set fnl [string first "\n" $errorInfo]
+		set theError [string range $errorInfo 0 [expr $fnl - 1]]
+		if {[string first FAIL $errorInfo] == -1} {
+			error "FAIL:[timestamp]\
+			    run_rpcnoserver: $method $i: $theError"
+		} else {
+			error $theError;
+		}
+	}
+}
+
+#
+# Run method tests in secure mode.
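+# For example, "run_secmethod btree 1 5" runs test001-test005 for the
+# btree method with "-encryptaes $passwd" appended to the test args
+# (an illustrative call).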
+#
+proc run_secmethod { method {start 1} {stop 0} {display 0} {run 1} \
+ { outfile stdout } args } {
+ global passwd
+
+ append largs " -encryptaes $passwd "
+ eval run_method $method $start $stop $display $run $outfile $largs
+}
+
+#
+# Run method tests, each in its own, new secure environment.
+#
+proc run_secenv { method {start 1} {stop 0} {largs ""} } {
+ global __debug_on
+ global __debug_print
+ global is_envmethod
+ global num_test
+ global parms
+ global passwd
+ source ./include.tcl
+
+ if { $stop == 0 } {
+ set stop $num_test(test)
+ }
+ puts "run_secenv: $method $start $stop $largs"
+
+ set save_largs $largs
+ env_cleanup $testdir
+ set is_envmethod 1
+ set stat [catch {
+ for { set i $start } { $i <= $stop } {incr i} {
+ check_handles
+ set env [eval {berkdb_env -create -mode 0644 \
+ -home $testdir -encryptaes $passwd \
+ -cachesize {0 1048576 1}}]
+ error_check_good env_open [is_valid_env $env] TRUE
+ append largs " -env $env "
+
+ puts "[timestamp]"
+ set name [format "test%03d" $i]
+ if { [info exists parms($name)] != 1 } {
+ puts stderr "[format Test%03d $i] disabled in\
+ testparams.tcl; skipping."
+ continue
+ }
+
+ #
+ # Run each test multiple times in the secure env.
+ # Once with a secure env + clear database
+ # Once with a secure env + secure database
+ #
+ eval $name $method $parms($name) $largs
+ append largs " -encrypt "
+ eval $name $method $parms($name) $largs
+
+ if { $__debug_print != 0 } {
+ puts ""
+ }
+ if { $__debug_on != 0 } {
+ debug
+ }
+ flush stdout
+ flush stderr
+ set largs $save_largs
+ error_check_good envclose [$env close] 0
+ error_check_good envremove [berkdb envremove \
+ -home $testdir -encryptaes $passwd] 0
+ }
+ } res]
+	set is_envmethod 0
+	if { $stat != 0} {
+		global errorInfo;
+
+		set fnl [string first "\n" $errorInfo]
+		set theError [string range $errorInfo 0 [expr $fnl - 1]]
+		if {[string first FAIL $errorInfo] == -1} {
+			error "FAIL:[timestamp]\
+			    run_secenv: $method $i: $theError"
+		} else {
+			error $theError;
+		}
+	}
+}
+
+#
+# Run a single replication test in master and client envs.
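+# For example (hypothetical parameters):
+#	run_reptest btree 1 10 2 1
+# runs test001 for btree with a 10% message drop rate, 2 clients and
+# delete verification.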
+#
+proc run_reptest { method test {droppct 0} {nclients 1} {do_del 0} \
+ {do_sec 0} {do_oob 0} {largs "" } } {
+ source ./include.tcl
+ global __debug_on
+ global __debug_print
+ global __debug_test
+ global is_envmethod
+ global num_test
+ global parms
+ global passwd
+ global rand_init
+
+ berkdb srand $rand_init
+ set c [string index $test 0]
+ if { $c == "s" } {
+ set i [string range $test 1 end]
+ set name [format "subdb%03d" $i]
+ } else {
+ set i $test
+ set name [format "test%03d" $i]
+	}
+	if { [info exists parms($name)] != 1 } {
+		puts stderr "[format Test%03d $i] disabled in\
+		    testparams.tcl; skipping."
+		return
+	}
+	puts "run_reptest: $method $name"
+
+ env_cleanup $testdir
+ set is_envmethod 1
+ set stat [catch {
+ if { $do_sec } {
+ set envargs "-encryptaes $passwd"
+ append largs " -encrypt "
+ } else {
+ set envargs ""
+ }
+ check_handles
+ #
+ # This will set up the master and client envs
+ # and will return us the args to pass to the
+ # test.
+ set largs [repl_envsetup \
+ $envargs $largs $test $nclients $droppct $do_oob]
+
+ puts "[timestamp]"
+ puts -nonewline \
+ "Repl: $name: dropping $droppct%, $nclients clients "
+ if { $do_del } {
+ puts -nonewline " with delete verification;"
+ } else {
+ puts -nonewline " no delete verification;"
+ }
+ if { $do_sec } {
+ puts -nonewline " with security;"
+ } else {
+ puts -nonewline " no security;"
+ }
+ if { $do_oob } {
+ puts -nonewline " with out-of-order msgs;"
+ } else {
+ puts -nonewline " no out-of-order msgs;"
+ }
+ puts ""
+
+ eval $name $method $parms($name) $largs
+
+ if { $__debug_print != 0 } {
+ puts ""
+ }
+ if { $__debug_on != 0 } {
+ debug $__debug_test
+ }
+ flush stdout
+ flush stderr
+ repl_envprocq $i $nclients $do_oob
+ repl_envver0 $i $method $nclients
+ if { $do_del } {
+ repl_verdel $i $method $nclients
+ }
+ repl_envclose $i $envargs
+ } res]
+ if { $stat != 0} {
+ global errorInfo;
+
+ set fnl [string first "\n" $errorInfo]
+ set theError [string range $errorInfo 0 [expr $fnl - 1]]
+ if {[string first FAIL $errorInfo] == -1} {
+ error "FAIL:[timestamp]\
+ run_reptest: $method $i: $theError"
+ } else {
+ error $theError;
+ }
+ }
+ set is_envmethod 0
+}
+
+#
+# Run replication method tests in master and client envs, choosing
+# drop rates, client counts and crypto/oob/delete options at random.
+#
+proc run_repmethod { method {numcl 0} {start 1} {stop 0} {display 0}
+ {run 1} {outfile stdout} {largs ""} } {
+ source ./include.tcl
+ global __debug_on
+ global __debug_print
+ global __debug_test
+ global is_envmethod
+ global num_test
+ global parms
+ global passwd
+ global rand_init
+
+ set stopsdb $num_test(sdb)
+ if { $stop == 0 } {
+ set stop $num_test(test)
+ } else {
+ if { $stopsdb > $stop } {
+ set stopsdb $stop
+ }
+ }
+ berkdb srand $rand_init
+
+ #
+ # We want to run replication both normally and with crypto.
+ # So run it once and then run again with crypto.
+ #
+ set save_largs $largs
+ env_cleanup $testdir
+
+ if { $display == 1 } {
+ for { set i $start } { $i <= $stop } { incr i } {
+ puts $outfile "eval run_repmethod $method \
+ 0 $i $i 0 1 stdout $largs"
+ }
+ }
+ if { $run == 1 } {
+ set is_envmethod 1
+ #
+		# Use weighted lists for drop percentage and number of
+		# clients because we really don't want to evenly weight
+		# all numbers of clients. Favor smaller numbers but test
+		# more clients occasionally.
+ set drop_list { 0 0 0 0 0 1 1 5 5 10 20 }
+ set drop_len [expr [llength $drop_list] - 1]
+ set client_list { 1 1 2 1 1 1 2 2 3 1 }
+ set cl_len [expr [llength $client_list] - 1]
+ set stat [catch {
+ for { set i $start } { $i <= $stopsdb } {incr i} {
+ if { $numcl == 0 } {
+ set clindex [berkdb random_int 0 $cl_len]
+ set nclients [lindex $client_list $clindex]
+ } else {
+ set nclients $numcl
+ }
+ set drindex [berkdb random_int 0 $drop_len]
+ set droppct [lindex $drop_list $drindex]
+ set do_sec [berkdb random_int 0 1]
+ set do_oob [berkdb random_int 0 1]
+ set do_del [berkdb random_int 0 1]
+
+ if { $do_sec } {
+ set envargs "-encryptaes $passwd"
+ append largs " -encrypt "
+ } else {
+ set envargs ""
+ }
+ check_handles
+ #
+ # This will set up the master and client envs
+ # and will return us the args to pass to the
+ # test.
+ set largs [repl_envsetup $envargs $largs \
+ $i $nclients $droppct $do_oob]
+
+ puts "[timestamp]"
+ set name [format "subdb%03d" $i]
+ if { [info exists parms($name)] != 1 } {
+ puts stderr "[format Subdb%03d $i] \
+ disabled in\
+ testparams.tcl; skipping."
+ continue
+ }
+ puts -nonewline "Repl: $name: dropping $droppct%, \
+ $nclients clients "
+ if { $do_del } {
+ puts -nonewline " with delete verification;"
+ } else {
+ puts -nonewline " no delete verification;"
+ }
+ if { $do_sec } {
+ puts -nonewline " with security;"
+ } else {
+ puts -nonewline " no security;"
+ }
+ if { $do_oob } {
+ puts -nonewline " with out-of-order msgs;"
+ } else {
+ puts -nonewline " no out-of-order msgs;"
+ }
+ puts ""
+
+ eval $name $method $parms($name) $largs
+
+ if { $__debug_print != 0 } {
+ puts ""
+ }
+ if { $__debug_on != 0 } {
+ debug $__debug_test
+ }
+ flush stdout
+ flush stderr
+ repl_envprocq $i $nclients $do_oob
+ repl_envver0 $i $method $nclients
+ if { $do_del } {
+ repl_verdel $i $method $nclients
+ }
+ repl_envclose $i $envargs
+ set largs $save_largs
+ }
+ } res]
+ if { $stat != 0} {
+ global errorInfo;
+
+ set fnl [string first "\n" $errorInfo]
+ set theError [string range $errorInfo 0 [expr $fnl - 1]]
+ if {[string first FAIL $errorInfo] == -1} {
+ error "FAIL:[timestamp]\
+ run_repmethod: $method $i: $theError"
+ } else {
+ error $theError;
+ }
+ }
+ set stat [catch {
+ for { set i $start } { $i <= $stop } {incr i} {
+ if { $numcl == 0 } {
+ set clindex [berkdb random_int 0 $cl_len]
+ set nclients [lindex $client_list $clindex]
+ } else {
+ set nclients $numcl
+ }
+ set drindex [berkdb random_int 0 $drop_len]
+ set droppct [lindex $drop_list $drindex]
+ set do_sec [berkdb random_int 0 1]
+ set do_oob [berkdb random_int 0 1]
+ set do_del [berkdb random_int 0 1]
+
+ if { $do_sec } {
+ set envargs "-encryptaes $passwd"
+ append largs " -encrypt "
+ } else {
+ set envargs ""
+ }
+ check_handles
+ #
+ # This will set up the master and client envs
+ # and will return us the args to pass to the
+ # test.
+ set largs [repl_envsetup $envargs $largs \
+ $i $nclients $droppct $do_oob]
+
+ puts "[timestamp]"
+ set name [format "test%03d" $i]
+ if { [info exists parms($name)] != 1 } {
+ puts stderr "[format Test%03d $i] \
+ disabled in\
+ testparams.tcl; skipping."
+ continue
+ }
+ puts -nonewline "Repl: $name: dropping $droppct%, \
+ $nclients clients "
+ if { $do_del } {
+ puts -nonewline " with delete verification;"
+ } else {
+ puts -nonewline " no delete verification;"
+ }
+ if { $do_sec } {
+ puts -nonewline " with security;"
+ } else {
+ puts -nonewline " no security;"
+ }
+ if { $do_oob } {
+ puts -nonewline " with out-of-order msgs;"
+ } else {
+ puts -nonewline " no out-of-order msgs;"
+ }
+ puts ""
+
+ eval $name $method $parms($name) $largs
+
+ if { $__debug_print != 0 } {
+ puts ""
+ }
+ if { $__debug_on != 0 } {
+ debug $__debug_test
+ }
+ flush stdout
+ flush stderr
+ repl_envprocq $i $nclients $do_oob
+ repl_envver0 $i $method $nclients
+ if { $do_del } {
+ repl_verdel $i $method $nclients
+ }
+ repl_envclose $i $envargs
+ set largs $save_largs
+ }
+ } res]
+ if { $stat != 0} {
+ global errorInfo;
+
+ set fnl [string first "\n" $errorInfo]
+ set theError [string range $errorInfo 0 [expr $fnl - 1]]
+ if {[string first FAIL $errorInfo] == -1} {
+ error "FAIL:[timestamp]\
+ run_repmethod: $method $i: $theError"
+ } else {
+ error $theError;
+ }
+ }
+ set is_envmethod 0
+ }
+}
+
+#
+# Run method tests, each in its own, new environment. (As opposed to
+# run_envmethod1 which runs all the tests in a single environment.)
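+# For example, "run_envmethod -btree 1 3" creates a fresh transactional
+# environment for each of test001-test003 (an illustrative call).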
+#
+proc run_envmethod { method {start 1} {stop 0} {display 0} {run 1} \
+ {outfile stdout } { largs "" } } {
+ global __debug_on
+ global __debug_print
+ global __debug_test
+ global is_envmethod
+ global num_test
+ global parms
+ source ./include.tcl
+
+ set stopsdb $num_test(sdb)
+ if { $stop == 0 } {
+ set stop $num_test(test)
+ } else {
+ if { $stopsdb > $stop } {
+ set stopsdb $stop
+ }
+ }
+
+ set save_largs $largs
+ env_cleanup $testdir
+
+ if { $display == 1 } {
+ for { set i $start } { $i <= $stop } { incr i } {
+ puts $outfile "eval run_envmethod $method \
+ $i $i 0 1 stdout $largs"
+ }
+ }
+
+ if { $run == 1 } {
+ set is_envmethod 1
+ #
+ # Run both subdb and normal tests for as long as there are
+ # some of each type. Start with the subdbs:
+ set stat [catch {
+ for { set i $start } { $i <= $stopsdb } {incr i} {
+ check_handles
+ set env [eval {berkdb_env -create -txn \
+ -mode 0644 -home $testdir}]
+ error_check_good env_open \
+ [is_valid_env $env] TRUE
+ append largs " -env $env "
+
+ puts "[timestamp]"
+ set name [format "subdb%03d" $i]
+ if { [info exists parms($name)] != 1 } {
+ puts stderr \
+ "[format Subdb%03d $i] disabled in\
+ testparams.tcl; skipping."
+ continue
+ }
+ eval $name $method $parms($name) $largs
+
+ error_check_good envclose [$env close] 0
+ error_check_good envremove [berkdb envremove \
+ -home $testdir] 0
+ flush stdout
+ flush stderr
+ set largs $save_largs
+ }
+ } res]
+ if { $stat != 0} {
+ global errorInfo;
+
+ set fnl [string first "\n" $errorInfo]
+ set theError [string range $errorInfo 0 [expr $fnl - 1]]
+ if {[string first FAIL $errorInfo] == -1} {
+ error "FAIL:[timestamp]\
+ run_envmethod: $method $i: $theError"
+ } else {
+ error $theError;
+ }
+ }
+ # Subdb tests are done, now run through the regular tests:
+ set stat [catch {
+ for { set i $start } { $i <= $stop } {incr i} {
+ check_handles
+ set env [eval {berkdb_env -create -txn \
+ -mode 0644 -home $testdir}]
+ error_check_good env_open \
+ [is_valid_env $env] TRUE
+ append largs " -env $env "
+
+ puts "[timestamp]"
+ set name [format "test%03d" $i]
+ if { [info exists parms($name)] != 1 } {
+ puts stderr \
+ "[format Test%03d $i] disabled in\
+ testparams.tcl; skipping."
+ continue
+ }
+ eval $name $method $parms($name) $largs
+
+ if { $__debug_print != 0 } {
+ puts ""
+ }
+ if { $__debug_on != 0 } {
+ debug $__debug_test
+ }
+ flush stdout
+ flush stderr
+ set largs $save_largs
+ error_check_good envclose [$env close] 0
+ error_check_good envremove [berkdb envremove \
+ -home $testdir] 0
+ }
+ } res]
+ if { $stat != 0} {
+ global errorInfo;
+
+ set fnl [string first "\n" $errorInfo]
+ set theError [string range $errorInfo 0 [expr $fnl - 1]]
+ if {[string first FAIL $errorInfo] == -1} {
+ error "FAIL:[timestamp]\
+ run_envmethod: $method $i: $theError"
+ } else {
+ error $theError;
+ }
+ }
+ set is_envmethod 0
+ }
+}
+
+proc subdb { method {start 1} {stop 0} {display 0} {run 1} \
+ {outfile stdout} args} {
+ global num_test testdir
+ global parms
+
+ for { set i $start } { $i <= $stop } {incr i} {
+ set name [format "subdb%03d" $i]
+ if { [info exists parms($name)] != 1 } {
+ puts stderr "[format Subdb%03d $i] disabled in\
+ testparams.tcl; skipping."
+ continue
+ }
+ if { $display } {
+ puts -nonewline $outfile "eval $name $method"
+ puts -nonewline $outfile " $parms($name) $args;"
+ puts $outfile "verify_dir $testdir \"\" 1"
+ }
+ if { $run } {
+ check_handles $outfile
+ eval $name $method $parms($name) $args
+ verify_dir $testdir "" 1
+ }
+ flush stdout
+ flush stderr
+ }
+}
+
+proc run_recd { method {start 1} {stop 0} {run 1} {display 0} args } {
+ global __debug_on
+ global __debug_print
+ global __debug_test
+ global parms
+ global num_test
+ global log_log_record_types
+ source ./include.tcl
+
+ if { $stop == 0 } {
+ set stop $num_test(recd)
+ }
+ if { $run == 1 } {
+ puts "run_recd: $method $start $stop $args"
+ }
+
+ if {[catch {
+ for { set i $start } { $i <= $stop } {incr i} {
+ set name [format "recd%03d" $i]
+ if { [info exists parms($name)] != 1 } {
+ puts stderr "[format Recd%03d $i] disabled in\
+ testparams.tcl; skipping."
+ continue
+ }
+ if { $display } {
+ puts "eval $name $method $parms($name) $args"
+ }
+ if { $run } {
+ check_handles
+ puts "[timestamp]"
+			# By redirecting the child's stdout to our own
+			# stdout, we make exec print output rather than
+			# simply returning it.
+ # By redirecting stderr to stdout too, we make
+ # sure everything winds up in the ALL.OUT file.
+ set ret [catch { exec $tclsh_path << \
+ "source $test_path/test.tcl; \
+ set log_log_record_types \
+ $log_log_record_types; eval $name \
+ $method $parms($name) $args" \
+ >&@ stdout
+ } res]
+
+ # Don't die if the test failed; we want
+ # to just proceed.
+ if { $ret != 0 } {
+ puts "FAIL:[timestamp] $res"
+ }
+
+ if { $__debug_print != 0 } {
+ puts ""
+ }
+ if { $__debug_on != 0 } {
+ debug $__debug_test
+ }
+ flush stdout
+ flush stderr
+ }
+ }
+ } res] != 0} {
+ global errorInfo;
+
+ set fnl [string first "\n" $errorInfo]
+ set theError [string range $errorInfo 0 [expr $fnl - 1]]
+ if {[string first FAIL $errorInfo] == -1} {
+ error "FAIL:[timestamp]\
+ run_recd: $method $i: $theError"
+ } else {
+ error $theError;
+ }
+ }
+}
+
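+# Run the recovery tests for every access method, with log-record-type
+# tracking enabled so that logtrack_summary can report coverage of the
+# log record types.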
+proc run_recds { {run 1} {display 0} args } {
+ global log_log_record_types
+
+ set log_log_record_types 1
+ logtrack_init
+ foreach method \
+ "btree rbtree hash queue queueext recno frecno rrecno" {
+ check_handles
+ if { [catch {eval \
+ run_recd -$method 1 0 $run $display $args} ret ] != 0 } {
+ puts $ret
+ }
+ }
+ if { $run } {
+ logtrack_summary
+ }
+ set log_log_record_types 0
+}
+
+proc run_all { args } {
+ global num_test
+ source ./include.tcl
+
+ fileremove -f ALL.OUT
+
+ set exflgs [eval extractflags $args]
+ set flags [lindex $exflgs 1]
+ set display 1
+ set run 1
+ set am_only 0
+ set parallel 0
+ set nparalleltests 0
+ set rflags {--}
+ foreach f $flags {
+ switch $f {
+ m {
+ set am_only 1
+ }
+ n {
+ set display 1
+ set run 0
+ set rflags [linsert $rflags 0 "-n"]
+ }
+ }
+ }
+
+ set o [open ALL.OUT a]
+ if { $run == 1 } {
+ puts -nonewline "Test suite run started at: "
+ puts [clock format [clock seconds] -format "%H:%M %D"]
+ puts [berkdb version -string]
+
+ puts -nonewline $o "Test suite run started at: "
+ puts $o [clock format [clock seconds] -format "%H:%M %D"]
+ puts $o [berkdb version -string]
+ }
+ close $o
+ #
+ # First run standard tests. Send in a -A to let run_std know
+ # that it is part of the "run_all" run, so that it doesn't
+ # print out start/end times.
+ #
+ lappend args -A
+ eval {run_std} $args
+
+ set test_pagesizes [get_test_pagesizes]
+ set args [lindex $exflgs 0]
+ set save_args $args
+
+ foreach pgsz $test_pagesizes {
+ set args $save_args
+ append args " -pagesize $pgsz -chksum"
+ if { $am_only == 0 } {
+ # Run recovery tests.
+ #
+ # XXX These don't actually work at multiple pagesizes;
+ # disable them for now.
+ #
+ # XXX These too are broken into separate tclsh
+ # instantiations so we don't require so much
+ # memory, but I think it's cleaner
+ # and more useful to do it down inside proc r than here,
+ # since "r recd" gets done a lot and needs to work.
+ #
+ # XXX See comment in run_std for why this only directs
+ # stdout and not stderr. Don't worry--the right stuff
+ # happens.
+ #puts "Running recovery tests with pagesize $pgsz"
+ #if [catch {exec $tclsh_path \
+ # << "source $test_path/test.tcl; \
+ # r $rflags recd $args" \
+ # 2>@ stderr >> ALL.OUT } res] {
+ # set o [open ALL.OUT a]
+ # puts $o "FAIL: recd test:"
+ # puts $o $res
+ # close $o
+ #}
+ }
+
+ # Access method tests.
+ #
+ # XXX
+ # Broken up into separate tclsh instantiations so
+ # we don't require so much memory.
+ foreach i \
+ "btree rbtree hash queue queueext recno frecno rrecno" {
+ puts "Running $i tests with pagesize $pgsz"
+ for { set j 1 } { $j <= $num_test(test) } {incr j} {
+ if { $run == 0 } {
+ set o [open ALL.OUT a]
+ eval {run_method -$i $j $j $display \
+ $run $o} $args
+ close $o
+ }
+ if { $run } {
+ if [catch {exec $tclsh_path \
+ << "source $test_path/test.tcl; \
+ eval {run_method -$i $j $j \
+ $display $run stdout} $args" \
+ >>& ALL.OUT } res] {
+ set o [open ALL.OUT a]
+ puts $o \
+ "FAIL: [format \
+ "test%03d" $j] $i"
+ close $o
+ }
+ }
+ }
+
+ #
+ # Run subdb tests with varying pagesizes too.
+ #
+ for { set j 1 } { $j <= $num_test(sdb) } {incr j} {
+ if { $run == 0 } {
+ set o [open ALL.OUT a]
+ eval {subdb -$i $j $j $display \
+ $run $o} $args
+ close $o
+ }
+ if { $run == 1 } {
+ if [catch {exec $tclsh_path \
+ << "source $test_path/test.tcl; \
+ eval {subdb -$i $j $j $display \
+ $run stdout} $args" \
+ >>& ALL.OUT } res] {
+ set o [open ALL.OUT a]
+ puts $o "FAIL: subdb -$i $j $j"
+ close $o
+ }
+ }
+ }
+ }
+ }
+ set args $save_args
+ #
+ # Run access method tests at default page size in one env.
+ #
+ foreach i "btree rbtree hash queue queueext recno frecno rrecno" {
+ puts "Running $i tests in a txn env"
+ for { set j 1 } { $j <= $num_test(test) } { incr j } {
+ if { $run == 0 } {
+ set o [open ALL.OUT a]
+ run_envmethod -$i $j $j $display \
+ $run $o $args
+ close $o
+ }
+ if { $run } {
+ if [catch {exec $tclsh_path \
+ << "source $test_path/test.tcl; \
+ run_envmethod -$i $j $j \
+ $display $run stdout $args" \
+ >>& ALL.OUT } res] {
+ set o [open ALL.OUT a]
+ puts $o \
+ "FAIL: run_envmethod $i $j $j"
+ close $o
+ }
+ }
+ }
+ }
+ #
+ # Run tests using proc r. The replication tests have been
+ # moved from run_std to run_all.
+ #
+ set test_list {
+ {"replication" "rep"}
+ {"security" "sec"}
+ }
+ #
+ # If configured for RPC, then run rpc tests too.
+ #
+ if { [file exists ./berkeley_db_svc] ||
+ [file exists ./berkeley_db_cxxsvc] ||
+ [file exists ./berkeley_db_javasvc] } {
+ append test_list {{"RPC" "rpc"}}
+ }
+
+ foreach pair $test_list {
+ set msg [lindex $pair 0]
+ set cmd [lindex $pair 1]
+ puts "Running $msg tests"
+ if [catch {exec $tclsh_path \
+ << "source $test_path/test.tcl; \
+ r $rflags $cmd $args" >>& ALL.OUT } res] {
+ set o [open ALL.OUT a]
+ puts $o "FAIL: $cmd test"
+ close $o
+ }
+ }
+
+ # If not actually running, no need to check for failure.
+ if { $run == 0 } {
+ return
+ }
+
+	set failed [check_failed_run ALL.OUT]
+
+ set o [open ALL.OUT a]
+ if { $failed == 0 } {
+ puts "Regression Tests Succeeded"
+ puts $o "Regression Tests Succeeded"
+ } else {
+ puts "Regression Tests Failed; see ALL.OUT for log"
+ puts $o "Regression Tests Failed"
+ }
+
+ puts -nonewline "Test suite run completed at: "
+ puts [clock format [clock seconds] -format "%H:%M %D"]
+ puts -nonewline $o "Test suite run completed at: "
+ puts $o [clock format [clock seconds] -format "%H:%M %D"]
+ close $o
+}
+
+#
+# Run method tests in one environment. (As opposed to run_envmethod
+# which runs each test in its own, new environment.)
+#
+proc run_envmethod1 { method {start 1} {stop 0} {display 0} {run 1} \
+ { outfile stdout } args } {
+ global __debug_on
+ global __debug_print
+ global __debug_test
+ global is_envmethod
+ global num_test
+ global parms
+ source ./include.tcl
+
+ set stopsdb $num_test(sdb)
+ if { $stop == 0 } {
+ set stop $num_test(test)
+ } else {
+ if { $stopsdb > $stop } {
+ set stopsdb $stop
+ }
+ }
+ if { $run == 1 } {
+ puts "run_envmethod1: $method $start $stop $args"
+ }
+
+ set is_envmethod 1
+ if { $run == 1 } {
+ check_handles
+ env_cleanup $testdir
+ error_check_good envremove [berkdb envremove -home $testdir] 0
+ set env [eval {berkdb_env -create -cachesize {0 10000000 0}} \
+ {-mode 0644 -home $testdir}]
+ error_check_good env_open [is_valid_env $env] TRUE
+ append largs " -env $env "
+ }
+
+ if { $display } {
+ # The envmethod1 tests can't be split up, since they share
+ # an env.
+ puts $outfile "eval run_envmethod1 $method $args"
+ }
+
+ set stat [catch {
+ for { set i $start } { $i <= $stopsdb } {incr i} {
+ set name [format "subdb%03d" $i]
+ if { [info exists parms($name)] != 1 } {
+ puts stderr "[format Subdb%03d $i] disabled in\
+ testparams.tcl; skipping."
+ continue
+ }
+ if { $run } {
+ puts $outfile "[timestamp]"
+ eval $name $method $parms($name) $largs
+ if { $__debug_print != 0 } {
+ puts $outfile ""
+ }
+ if { $__debug_on != 0 } {
+ debug $__debug_test
+ }
+ }
+ flush stdout
+ flush stderr
+ }
+ } res]
+ if { $stat != 0} {
+ global errorInfo;
+
+ set fnl [string first "\n" $errorInfo]
+ set theError [string range $errorInfo 0 [expr $fnl - 1]]
+ if {[string first FAIL $errorInfo] == -1} {
+ error "FAIL:[timestamp]\
+ run_envmethod: $method $i: $theError"
+ } else {
+ error $theError;
+ }
+ }
+ set stat [catch {
+ for { set i $start } { $i <= $stop } {incr i} {
+ set name [format "test%03d" $i]
+ if { [info exists parms($name)] != 1 } {
+ puts stderr "[format Test%03d $i] disabled in\
+ testparams.tcl; skipping."
+ continue
+ }
+ if { $run } {
+ puts $outfile "[timestamp]"
+ eval $name $method $parms($name) $largs
+ if { $__debug_print != 0 } {
+ puts $outfile ""
+ }
+ if { $__debug_on != 0 } {
+ debug $__debug_test
+ }
+ }
+ flush stdout
+ flush stderr
+ }
+ } res]
+ if { $stat != 0} {
+ global errorInfo;
+
+ set fnl [string first "\n" $errorInfo]
+ set theError [string range $errorInfo 0 [expr $fnl - 1]]
+ if {[string first FAIL $errorInfo] == -1} {
+ error "FAIL:[timestamp]\
+ run_envmethod1: $method $i: $theError"
+ } else {
+ error $theError;
+ }
+ }
+ if { $run == 1 } {
+ error_check_good envclose [$env close] 0
+ check_handles $outfile
+ }
+ set is_envmethod 0
+
+}
+
+# We want to test all of 512B, 8KB, and 32KB pages, but chances are one
+# of these is the default pagesize. We don't want to run all the AM tests
+# twice, so figure out what the default page size is, then return the
+# other two.
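+# For example, a build whose default page size is 8192 gets {512 32768}
+# back, matching the switch below.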
+proc get_test_pagesizes { } {
+ # Create an in-memory database.
+ set db [berkdb_open -create -btree]
+ error_check_good gtp_create [is_valid_db $db] TRUE
+ set statret [$db stat]
+ set pgsz 0
+ foreach pair $statret {
+ set fld [lindex $pair 0]
+ if { [string compare $fld {Page size}] == 0 } {
+ set pgsz [lindex $pair 1]
+ }
+ }
+
+ error_check_good gtp_close [$db close] 0
+
+ error_check_bad gtp_pgsz $pgsz 0
+ switch $pgsz {
+ 512 { return {8192 32768} }
+ 8192 { return {512 32768} }
+ 32768 { return {512 8192} }
+ default { return {512 8192 32768} }
+ }
+ error_check_good NOTREACHED 0 1
+}
diff --git a/storage/bdb/test/test001.tcl b/storage/bdb/test/test001.tcl
new file mode 100644
index 00000000000..f0b562bbf24
--- /dev/null
+++ b/storage/bdb/test/test001.tcl
@@ -0,0 +1,247 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test001.tcl,v 11.28 2002/08/08 15:38:11 bostic Exp $
+#
+# TEST test001
+# TEST Small keys/data
+# TEST Put/get per key
+# TEST Dump file
+# TEST Close, reopen
+# TEST Dump file
+# TEST
+# TEST Use the first 10,000 entries from the dictionary.
+# TEST Insert each with self as key and reversed self as data; retrieve each.
+# TEST After all are entered, retrieve all; compare output to original.
+# TEST Close file, reopen, do retrieve and re-verify.
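+#
+# Typically driven via run_method; a direct call such as
+# "test001 btree 1000" (an illustrative invocation) runs the loop with
+# 1000 dictionary entries and no external env.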
+proc test001 { method {nentries 10000} {start 0} {tnum "01"} {noclean 0} args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ # Create the database and open the dictionary
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ # If we are not using an external env, then test setting
+ # the database cache size and using multiple caches.
+ set txnenv 0
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum.db
+ append args " -cachesize {0 1048576 3} "
+ set env NULL
+ } else {
+ set testfile test0$tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
+ }
+ puts "Test0$tnum: $method ($args) $nentries equal key/data pairs"
+ if { $start != 0 } {
+ # Sadly enough, we are using start in two different ways.
+		# In test090, it is used to test really big record numbers
+ # in queue. In replication, it is used to be able to run
+ # different iterations of this test using different key/data
+ # pairs. We try to hide all that magic here.
+ puts "\tStarting at $start"
+
+ if { $tnum != 90 } {
+ set did [open $dict]
+ for { set nlines 0 } { [gets $did str] != -1 } \
+ { incr nlines} {
+ }
+ close $did
+ if { $start + $nentries > $nlines } {
+ set start [expr $nlines - $nentries]
+ }
+ }
+ }
+
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ if { $noclean == 0 } {
+ cleanup $testdir $env
+ }
+ set db [eval {berkdb_open \
+ -create -mode 0644} $args $omethod $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set did [open $dict]
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+
+ if { [is_record_based $method] == 1 } {
+ set checkfunc test001_recno.check
+ append gflags " -recno"
+ } else {
+ set checkfunc test001.check
+ }
+ puts "\tTest0$tnum.a: put/get loop"
+ # Here is the loop where we put and get each key/data pair
+ set count 0
+ if { $start != 0 && $tnum != 90 } {
+ # Skip over "start" entries
+ for { set count 0 } { $count < $start } { incr count } {
+ gets $did str
+ }
+ set count 0
+ }
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ global kvals
+
+ set key [expr $count + 1 + $start]
+ if { 0xffffffff > 0 && $key > 0xffffffff } {
+ set key [expr $key - 0x100000000]
+ }
+ if { $key == 0 || $key - 0xffffffff == 1 } {
+ incr key
+ incr count
+ }
+ set kvals($key) [pad_data $method $str]
+ } else {
+ set key $str
+ set str [reverse $str]
+ }
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval \
+ {$db put} $txn $pflags {$key [chop_data $method $str]}]
+ error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ if { $count % 50 == 0 } {
+ error_check_good txn_checkpoint($count) \
+ [$env txn_checkpoint] 0
+ }
+ }
+
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good \
+ get $ret [list [list $key [pad_data $method $str]]]
+
+ # Test DB_GET_BOTH for success
+ set ret [$db get -get_both $key [pad_data $method $str]]
+ error_check_good \
+ getboth $ret [list [list $key [pad_data $method $str]]]
+
+ # Test DB_GET_BOTH for failure
+ set ret [$db get -get_both $key [pad_data $method BAD$str]]
+ error_check_good getbothBAD [llength $ret] 0
+
+ incr count
+ }
+ close $did
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tTest0$tnum.b: dump file"
+ dump_file $db $txn $t1 $checkfunc
+ #
+ # dump_file should just have been "get" calls, so
+ # aborting a get should really be a no-op. Abort
+ # just for the fun of it.
+ if { $txnenv == 1 } {
+ error_check_good txn [$t abort] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ # Now compare the keys to see if they match the dictionary (or ints)
+ if { [is_record_based $method] == 1 } {
+ set oid [open $t2 w]
+ # If this is test 90, we're checking wrap and we really
+		# only added nentries items starting at start. However,
+		# if this isn't 90, then we started at start and added an
+		# additional nentries items.
+ if { $tnum == 90 } {
+ for {set i 1} {$i <= $nentries} {incr i} {
+ set j [expr $i + $start]
+ if { 0xffffffff > 0 && $j > 0xffffffff } {
+ set j [expr $j - 0x100000000]
+ }
+ if { $j == 0 } {
+ incr i
+ incr j
+ }
+ puts $oid $j
+ }
+ } else {
+ for { set i 1 } { $i <= $nentries + $start } {incr i} {
+ puts $oid $i
+ }
+ }
+ close $oid
+ } else {
+ set q q
+ # We assume that when this is used with start != 0, the
+ # test database accumulates data
+ filehead [expr $nentries + $start] $dict $t2
+ }
+ filesort $t2 $t3
+ file rename -force $t3 $t2
+ filesort $t1 $t3
+
+ error_check_good Test0$tnum:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ puts "\tTest0$tnum.c: close, open, and dump file"
+ # Now, reopen the file and run the last test again.
+ open_and_dump_file $testfile $env $t1 $checkfunc \
+ dump_file_direction "-first" "-next"
+ if { [string compare $omethod "-recno"] != 0 } {
+ filesort $t1 $t3
+ }
+
+ error_check_good Test0$tnum:diff($t2,$t3) \
+ [filecmp $t2 $t3] 0
+
+ # Now, reopen the file and run the last test again in the
+ # reverse direction.
+ puts "\tTest0$tnum.d: close, open, and dump file in reverse direction"
+ open_and_dump_file $testfile $env $t1 $checkfunc \
+ dump_file_direction "-last" "-prev"
+
+ if { [string compare $omethod "-recno"] != 0 } {
+ filesort $t1 $t3
+ }
+
+ error_check_good Test0$tnum:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+}
+
+# Check function for test001; data should be the reverse of the key
+proc test001.check { key data } {
+ error_check_good "key/data mismatch" $data [reverse $key]
+}
+
+proc test001_recno.check { key data } {
+ global dict
+ global kvals
+
+ error_check_good key"$key"_exists [info exists kvals($key)] 1
+ error_check_good "key/data mismatch, key $key" $data $kvals($key)
+}
diff --git a/storage/bdb/test/test002.tcl b/storage/bdb/test/test002.tcl
new file mode 100644
index 00000000000..bc28994d6a7
--- /dev/null
+++ b/storage/bdb/test/test002.tcl
@@ -0,0 +1,161 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test002.tcl,v 11.19 2002/05/22 15:42:43 sue Exp $
+#
+# TEST test002
+# TEST Small keys/medium data
+# TEST Put/get per key
+# TEST Dump file
+# TEST Close, reopen
+# TEST Dump file
+# TEST
+# TEST Use the first 10,000 entries from the dictionary.
+# TEST Insert each with self as key and a fixed, medium length data string;
+# TEST retrieve each. After all are entered, retrieve all; compare output
+# TEST to original. Close file, reopen, do retrieve and re-verify.
+
+proc test002 { method {nentries 10000} args } {
+ global datastr
+ global pad_datastr
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test002.db
+ set env NULL
+ } else {
+ set testfile test002.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
+ }
+ # Create the database and open the dictionary
+ puts "Test002: $method ($args) $nentries key <fixed data> pairs"
+
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ cleanup $testdir $env
+ set db [eval {berkdb_open \
+ -create -mode 0644} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set did [open $dict]
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ # Here is the loop where we put and get each key/data pair
+
+ if { [is_record_based $method] == 1 } {
+ append gflags "-recno"
+ }
+ set pad_datastr [pad_data $method $datastr]
+ puts "\tTest002.a: put/get loop"
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ } else {
+ set key $str
+ }
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn $pflags {$key [chop_data $method $datastr]}]
+ error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ set ret [eval {$db get} $gflags {$key}]
+
+ error_check_good get $ret [list [list $key [pad_data $method $datastr]]]
+ incr count
+ }
+ close $did
+
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tTest002.b: dump file"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_file $db $txn $t1 test002.check
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ # Now compare the keys to see if they match the dictionary
+ if { [is_record_based $method] == 1 } {
+ set oid [open $t2 w]
+		for {set i 1} {$i <= $nentries} {incr i} {
+ puts $oid $i
+ }
+ close $oid
+ filesort $t2 $t3
+ file rename -force $t3 $t2
+ } else {
+ set q q
+ filehead $nentries $dict $t3
+ filesort $t3 $t2
+ }
+ filesort $t1 $t3
+
+ error_check_good Test002:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ # Now, reopen the file and run the last test again.
+ puts "\tTest002.c: close, open, and dump file"
+ open_and_dump_file $testfile $env $t1 test002.check \
+ dump_file_direction "-first" "-next"
+
+ if { [string compare $omethod "-recno"] != 0 } {
+ filesort $t1 $t3
+ }
+ error_check_good Test002:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ # Now, reopen the file and run the last test again in reverse direction.
+ puts "\tTest002.d: close, open, and dump file in reverse direction"
+ open_and_dump_file $testfile $env $t1 test002.check \
+ dump_file_direction "-last" "-prev"
+
+ if { [string compare $omethod "-recno"] != 0 } {
+ filesort $t1 $t3
+ }
+ error_check_good Test002:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+}
+
+# Check function for test002; data should match the fixed data string
+proc test002.check { key data } {
+ global pad_datastr
+ error_check_good "data mismatch for key $key" $data $pad_datastr
+}
diff --git a/storage/bdb/test/test003.tcl b/storage/bdb/test/test003.tcl
new file mode 100644
index 00000000000..c7bfe6c15ad
--- /dev/null
+++ b/storage/bdb/test/test003.tcl
@@ -0,0 +1,210 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test003.tcl,v 11.25 2002/05/22 18:32:18 sue Exp $
+#
+# TEST test003
+# TEST Small keys/large data
+# TEST Put/get per key
+# TEST Dump file
+# TEST Close, reopen
+# TEST Dump file
+# TEST
+# TEST Take the source files and dbtest executable and enter their names
+# TEST as the key with their contents as data. After all are entered,
+# TEST retrieve all; compare output to original. Close file, reopen, do
+# TEST retrieve and re-verify.
+proc test003 { method args} {
+ global names
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if {[is_fixed_length $method] == 1} {
+ puts "Test003 skipping for method $method"
+ return
+ }
+ puts "Test003: $method ($args) filename=key filecontents=data pairs"
+
+ # Create the database and open the dictionary
+ set limit 0
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test003.db
+ set env NULL
+ } else {
+ set testfile test003.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ set limit 100
+ }
+ set testdir [get_home $env]
+ }
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ set t4 $testdir/t4
+
+ cleanup $testdir $env
+ set db [eval {berkdb_open \
+ -create -mode 0644} $args $omethod $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set pflags ""
+ set gflags ""
+ set txn ""
+ if { [is_record_based $method] == 1 } {
+ set checkfunc test003_recno.check
+ append gflags "-recno"
+ } else {
+ set checkfunc test003.check
+ }
+
+ # Here is the loop where we put and get each key/data pair
+ set file_list [get_file_list]
+ if { $limit } {
+ if { [llength $file_list] > $limit } {
+ set file_list [lrange $file_list 1 $limit]
+ }
+ }
+ set len [llength $file_list]
+ puts "\tTest003.a: put/get loop $len entries"
+ set count 0
+ foreach f $file_list {
+ if { [string compare [file type $f] "file"] != 0 } {
+ continue
+ }
+
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ set names([expr $count + 1]) $f
+ } else {
+ set key $f
+ }
+
+ # Should really catch errors
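+		# A guarded open might look like the sketch below (left
+		# disabled here so the test's behavior is unchanged):
+		#   if { [catch {open $f r} fid] } {
+		#           puts "\tTest003: skipping unreadable file $f"
+		#           continue
+		#   }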
+ set fid [open $f r]
+ fconfigure $fid -translation binary
+ set data [read $fid]
+ close $fid
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} \
+ $txn $pflags {$key [chop_data $method $data]}]
+ error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ # Should really catch errors
+ set fid [open $t4 w]
+ fconfigure $fid -translation binary
+ if [catch {eval {$db get} $gflags {$key}} data] {
+ puts -nonewline $fid $data
+ } else {
+ # Data looks like {{key data}}
+ set key [lindex [lindex $data 0] 0]
+ set data [lindex [lindex $data 0] 1]
+ puts -nonewline $fid [pad_data $method $data]
+ }
+ close $fid
+
+ error_check_good \
+ Test003:diff($f,$t4) [filecmp $f $t4] 0
+
+ incr count
+ }
+
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tTest003.b: dump file"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_bin_file $db $txn $t1 $checkfunc
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ # Now compare the keys to see if they match the entries in the
+ # current directory
+ if { [is_record_based $method] == 1 } {
+ set oid [open $t2 w]
+		for {set i 1} {$i <= $count} {incr i} {
+ puts $oid $i
+ }
+ close $oid
+ file rename -force $t1 $t3
+ } else {
+ set oid [open $t2.tmp w]
+ foreach f $file_list {
+ if { [string compare [file type $f] "file"] != 0 } {
+ continue
+ }
+ puts $oid $f
+ }
+ close $oid
+ filesort $t2.tmp $t2
+ fileremove $t2.tmp
+ filesort $t1 $t3
+ }
+
+ error_check_good \
+ Test003:diff($t3,$t2) [filecmp $t3 $t2] 0
+
+ # Now, reopen the file and run the last test again.
+ puts "\tTest003.c: close, open, and dump file"
+ open_and_dump_file $testfile $env $t1 $checkfunc \
+ dump_bin_file_direction "-first" "-next"
+
+ if { [is_record_based $method] == 1 } {
+ filesort $t1 $t3 -n
+ }
+
+ error_check_good \
+ Test003:diff($t3,$t2) [filecmp $t3 $t2] 0
+
+ # Now, reopen the file and run the last test again in reverse direction.
+ puts "\tTest003.d: close, open, and dump file in reverse direction"
+ open_and_dump_file $testfile $env $t1 $checkfunc \
+ dump_bin_file_direction "-last" "-prev"
+
+ if { [is_record_based $method] == 1 } {
+ filesort $t1 $t3 -n
+ }
+
+ error_check_good \
+ Test003:diff($t3,$t2) [filecmp $t3 $t2] 0
+}
+
+# Check function for test003; key should be file name; data should be contents
+proc test003.check { binfile tmpfile } {
+ source ./include.tcl
+
+ error_check_good Test003:datamismatch($binfile,$tmpfile) \
+ [filecmp $binfile $tmpfile] 0
+}
+proc test003_recno.check { binfile tmpfile } {
+ global names
+ source ./include.tcl
+
+	error_check_good key"$binfile"_exists [info exists names($binfile)] 1
+	set fname $names($binfile)
+ error_check_good Test003:datamismatch($fname,$tmpfile) \
+ [filecmp $fname $tmpfile] 0
+}
diff --git a/storage/bdb/test/test004.tcl b/storage/bdb/test/test004.tcl
new file mode 100644
index 00000000000..7bea6f88eca
--- /dev/null
+++ b/storage/bdb/test/test004.tcl
@@ -0,0 +1,169 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test004.tcl,v 11.21 2002/05/22 18:32:35 sue Exp $
+#
+# TEST test004
+# TEST Small keys/medium data
+# TEST Put/get per key
+# TEST Sequential (cursor) get/delete
+# TEST
+# TEST Check that cursor operations work. Create a database.
+# TEST Read through the database sequentially using cursors and
+# TEST delete each element.
+proc test004 { method {nentries 10000} {reopen 4} {build_only 0} args} {
+ source ./include.tcl
+
+ set do_renumber [is_rrecno $method]
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ set tnum test00$reopen
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/$tnum.db
+ set env NULL
+ } else {
+ set testfile $tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
+ }
+
+ puts -nonewline "$tnum:\
+ $method ($args) $nentries delete small key; medium data pairs"
+ if {$reopen == 5} {
+		puts " (with close)"
+ } else {
+ puts ""
+ }
+
+ # Create the database and open the dictionary
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ cleanup $testdir $env
+ set db [eval {berkdb_open -create -mode 0644} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set did [open $dict]
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ if { [is_record_based $method] == 1 } {
+ append gflags " -recno"
+ }
+
+ # Here is the loop where we put and get each key/data pair
+ set kvals ""
+ puts "\tTest00$reopen.a: put/get loop"
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ lappend kvals $str
+ } else {
+ set key $str
+ }
+
+ set datastr [ make_data_str $str ]
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn $pflags \
+ {$key [chop_data $method $datastr]}]
+ error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good "$tnum:put" $ret \
+ [list [list $key [pad_data $method $datastr]]]
+ incr count
+ }
+ close $did
+ if { $build_only == 1 } {
+ return $db
+ }
+ if { $reopen == 5 } {
+ error_check_good db_close [$db close] 0
+
+ set db [eval {berkdb_open} $args {$testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ }
+ puts "\tTest00$reopen.b: get/delete loop"
+ # Now we will get each key from the DB and compare the results
+ # to the original, then delete it.
+ set outf [open $t1 w]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set c [eval {$db cursor} $txn]
+
+ set count 0
+ for {set d [$c get -first] } { [llength $d] != 0 } {
+ set d [$c get -next] } {
+ set k [lindex [lindex $d 0] 0]
+ set d2 [lindex [lindex $d 0] 1]
+ if { [is_record_based $method] == 1 } {
+ set datastr \
+ [make_data_str [lindex $kvals [expr $k - 1]]]
+ } else {
+ set datastr [make_data_str $k]
+ }
+ error_check_good $tnum:$k $d2 [pad_data $method $datastr]
+ puts $outf $k
+ $c del
+ if { [is_record_based $method] == 1 && \
+ $do_renumber == 1 } {
+ set kvals [lreplace $kvals 0 0]
+ }
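+		# With a renumbering recno, deleting the current record
+		# shifts the later records down by one, so the tcl shadow
+		# list drops its head to stay aligned with record numbers.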
+ incr count
+ }
+ close $outf
+ error_check_good curs_close [$c close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ # Now compare the keys to see if they match the dictionary
+ if { [is_record_based $method] == 1 } {
+ error_check_good test00$reopen:keys_deleted $count $nentries
+ } else {
+ set q q
+ filehead $nentries $dict $t3
+ filesort $t3 $t2
+ filesort $t1 $t3
+ error_check_good Test00$reopen:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+ }
+
+ error_check_good db_close [$db close] 0
+}
diff --git a/storage/bdb/test/test005.tcl b/storage/bdb/test/test005.tcl
new file mode 100644
index 00000000000..f3e37f2149d
--- /dev/null
+++ b/storage/bdb/test/test005.tcl
@@ -0,0 +1,19 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test005.tcl,v 11.7 2002/01/11 15:53:40 bostic Exp $
+#
+# TEST test005
+# TEST Small keys/medium data
+# TEST Put/get per key
+# TEST Close, reopen
+# TEST Sequential (cursor) get/delete
+# TEST
+# TEST Check that cursor operations work. Create a database; close
+# TEST it and reopen it. Then read through the database sequentially
+# TEST using cursors and delete each element.
+proc test005 { method {nentries 10000} args } {
+ eval {test004 $method $nentries 5 0} $args
+}
diff --git a/storage/bdb/test/test006.tcl b/storage/bdb/test/test006.tcl
new file mode 100644
index 00000000000..fbaebfe8ac8
--- /dev/null
+++ b/storage/bdb/test/test006.tcl
@@ -0,0 +1,150 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test006.tcl,v 11.19 2002/05/22 15:42:44 sue Exp $
+#
+# TEST test006
+# TEST Small keys/medium data
+# TEST Put/get per key
+# TEST Keyed delete and verify
+# TEST
+# TEST Keyed delete test.
+# TEST Create database.
+# TEST Go through database, deleting all entries by key.
+proc test006 { method {nentries 10000} {reopen 0} {tnum 6} args} {
+ source ./include.tcl
+
+ set do_renumber [is_rrecno $method]
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { $tnum < 10 } {
+ set tname Test00$tnum
+ set dbname test00$tnum
+ } else {
+ set tname Test0$tnum
+ set dbname test0$tnum
+ }
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/$dbname.db
+ set env NULL
+ } else {
+ set testfile $dbname.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
+ }
+ puts -nonewline "$tname: $method ($args) "
+ puts -nonewline "$nentries equal small key; medium data pairs"
+ if {$reopen == 1} {
+ puts " (with close)"
+ } else {
+ puts ""
+ }
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+ if { [is_record_based $method] == 1 } {
+ append gflags " -recno"
+ }
+
+ # Here is the loop where we put and get each key/data pair
+
+ cleanup $testdir $env
+ set db [eval {berkdb_open \
+ -create -mode 0644} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set did [open $dict]
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1 ]
+ } else {
+ set key $str
+ }
+
+ set datastr [make_data_str $str]
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} \
+ $txn $pflags {$key [chop_data $method $datastr]}]
+ error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good "$tname: put $datastr got $ret" \
+ $ret [list [list $key [pad_data $method $datastr]]]
+ incr count
+ }
+ close $did
+
+ if { $reopen == 1 } {
+ error_check_good db_close [$db close] 0
+
+ set db [eval {berkdb_open} $args {$testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ }
+
+ # Now we will get each key from the DB and compare the results
+ # to the original, then delete it.
+ set count 0
+ set did [open $dict]
+ set key 0
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { $do_renumber == 1 } {
+ set key 1
+ } elseif { [is_record_based $method] == 1 } {
+ incr key
+ } else {
+ set key $str
+ }
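+		# With a renumbering recno, each delete shifts the
+		# remaining records down, so record 1 is always the next
+		# one to fetch and delete.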
+
+ set datastr [make_data_str $str]
+
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good "$tname: get $datastr got $ret" \
+ $ret [list [list $key [pad_data $method $datastr]]]
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db del} $txn {$key}]
+ error_check_good db_del:$key $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ incr count
+ }
+ close $did
+
+ error_check_good db_close [$db close] 0
+}
diff --git a/storage/bdb/test/test007.tcl b/storage/bdb/test/test007.tcl
new file mode 100644
index 00000000000..1e99d107a2d
--- /dev/null
+++ b/storage/bdb/test/test007.tcl
@@ -0,0 +1,19 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test007.tcl,v 11.8 2002/01/11 15:53:40 bostic Exp $
+#
+# TEST test007
+# TEST Small keys/medium data
+# TEST Put/get per key
+# TEST Close, reopen
+# TEST Keyed delete
+# TEST
+# TEST Check that delete operations work. Create a database; close
+# TEST the database and reopen it. Then issue a delete by key for
+# TEST each entry.
+proc test007 { method {nentries 10000} {tnum 7} args} {
+ eval {test006 $method $nentries 1 $tnum} $args
+}
diff --git a/storage/bdb/test/test008.tcl b/storage/bdb/test/test008.tcl
new file mode 100644
index 00000000000..0af97a40110
--- /dev/null
+++ b/storage/bdb/test/test008.tcl
@@ -0,0 +1,200 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test008.tcl,v 11.23 2002/05/22 15:42:45 sue Exp $
+#
+# TEST test008
+# TEST Small keys/large data
+# TEST Put/get per key
+# TEST Loop through keys by steps (which change)
+# TEST ... delete each key at step
+# TEST ... add each key back
+# TEST ... change step
+# TEST Confirm that overflow pages are getting reused
+# TEST
+# TEST Take the source files and dbtest executable and enter their names as
+# TEST the key with their contents as data. After all are entered, begin
+# TEST looping through the entries; deleting some pairs and then re-adding them.
+proc test008 { method {reopen 8} {debug 0} args} {
+ source ./include.tcl
+
+ set tnum test00$reopen
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_record_based $method] == 1 } {
+ puts "Test00$reopen skipping for method $method"
+ return
+ }
+
+ puts -nonewline "$tnum: $method filename=key filecontents=data pairs"
+ if {$reopen == 9} {
+		puts " (with close)"
+ } else {
+ puts ""
+ }
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/$tnum.db
+ set env NULL
+ } else {
+ set testfile $tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ set t4 $testdir/t4
+
+ cleanup $testdir $env
+
+ set db [eval {berkdb_open -create -mode 0644} \
+ $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+
+ # Here is the loop where we put and get each key/data pair
+ set file_list [get_file_list]
+
+ set count 0
+ puts "\tTest00$reopen.a: Initial put/get loop"
+ foreach f $file_list {
+ set names($count) $f
+ set key $f
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ put_file $db $txn $pflags $f
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ get_file $db $txn $gflags $f $t4
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ error_check_good Test00$reopen:diff($f,$t4) \
+ [filecmp $f $t4] 0
+
+ incr count
+ }
+
+ if {$reopen == 9} {
+ error_check_good db_close [$db close] 0
+
+ set db [eval {berkdb_open} $args $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ }
+
+	# Now we will step through the keys again (by increments) and
+ # delete all the entries, then re-insert them.
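+	# With step i=2, for example, every other key is deleted and
+	# then re-added; varying the step size is what exercises reuse
+	# of the freed overflow pages.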
+
+ puts "\tTest00$reopen.b: Delete re-add loop"
+ foreach i "1 2 4 8 16" {
+ for {set ndx 0} {$ndx < $count} { incr ndx $i} {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set r [eval {$db del} $txn {$names($ndx)}]
+ error_check_good db_del:$names($ndx) $r 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+ for {set ndx 0} {$ndx < $count} { incr ndx $i} {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ put_file $db $txn $pflags $names($ndx)
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+ }
+
+ if {$reopen == 9} {
+ error_check_good db_close [$db close] 0
+ set db [eval {berkdb_open} $args $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ }
+
+ # Now, reopen the file and make sure the key/data pairs look right.
+ puts "\tTest00$reopen.c: Dump contents forward"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_bin_file $db $txn $t1 test008.check
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ set oid [open $t2.tmp w]
+ foreach f $file_list {
+ puts $oid $f
+ }
+ close $oid
+ filesort $t2.tmp $t2
+ fileremove $t2.tmp
+ filesort $t1 $t3
+
+ error_check_good Test00$reopen:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ # Now, reopen the file and run the last test again in reverse direction.
+ puts "\tTest00$reopen.d: Dump contents backward"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_bin_file_direction $db $txn $t1 test008.check "-last" "-prev"
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ filesort $t1 $t3
+
+ error_check_good Test00$reopen:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+ error_check_good close:$db [$db close] 0
+}
+
+proc test008.check { binfile tmpfile } {
+ global tnum
+ source ./include.tcl
+
+ error_check_good diff($binfile,$tmpfile) \
+ [filecmp $binfile $tmpfile] 0
+}
diff --git a/storage/bdb/test/test009.tcl b/storage/bdb/test/test009.tcl
new file mode 100644
index 00000000000..7ef46d8c818
--- /dev/null
+++ b/storage/bdb/test/test009.tcl
@@ -0,0 +1,18 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test009.tcl,v 11.8 2002/05/22 15:42:45 sue Exp $
+#
+# TEST test009
+# TEST Small keys/large data
+# TEST Same as test008; close and reopen database
+# TEST
+# TEST Check that we reuse overflow pages. Create database with lots of
+# TEST big key/data pairs. Go through and delete and add keys back
+# TEST randomly. Then close the DB and make sure that we have everything
+# TEST we think we should.
+proc test009 { method args} {
+ eval {test008 $method 9 0} $args
+}
diff --git a/storage/bdb/test/test010.tcl b/storage/bdb/test/test010.tcl
new file mode 100644
index 00000000000..0b5f5531795
--- /dev/null
+++ b/storage/bdb/test/test010.tcl
@@ -0,0 +1,176 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test010.tcl,v 11.20 2002/06/11 14:09:56 sue Exp $
+#
+# TEST test010
+# TEST Duplicate test
+# TEST Small key/data pairs.
+# TEST
+# TEST Use the first 10,000 entries from the dictionary.
+# TEST Insert each with self as key and data; add duplicate records for each.
+# TEST After all are entered, retrieve all; verify output.
+# TEST Close file, reopen, do retrieve and re-verify.
+# TEST This does not work for recno
+proc test010 { method {nentries 10000} {ndups 5} {tnum 10} args } {
+ source ./include.tcl
+
+ set omethod $method
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_record_based $method] == 1 || \
+ [is_rbtree $method] == 1 } {
+ puts "Test0$tnum skipping for method $method"
+ return
+ }
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum.db
+ set env NULL
+ } else {
+ set testfile test0$tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ reduce_dups nentries ndups
+ }
+ set testdir [get_home $env]
+ }
+ puts "Test0$tnum: $method ($args) $nentries \
+ small $ndups dup key/data pairs"
+
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+
+ cleanup $testdir $env
+
+ set db [eval {berkdb_open \
+ -create -mode 0644 -dup} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set did [open $dict]
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ # Here is the loop where we put and get each key/data pair
+ while { [gets $did str] != -1 && $count < $nentries } {
+ for { set i 1 } { $i <= $ndups } { incr i } {
+ set datastr $i:$str
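+			# Dups are stored as "id:word" so data_of and
+			# id_of can split them apart in the scan below.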
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} \
+ $txn $pflags {$str [chop_data $method $datastr]}]
+ error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ # Now retrieve all the keys matching this key
+ set x 1
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ for {set ret [$dbc get "-set" $str]} \
+ {[llength $ret] != 0} \
+ {set ret [$dbc get "-next"] } {
+ if {[llength $ret] == 0} {
+ break
+ }
+ set k [lindex [lindex $ret 0] 0]
+ if { [string compare $k $str] != 0 } {
+ break
+ }
+ set datastr [lindex [lindex $ret 0] 1]
+ set d [data_of $datastr]
+ error_check_good "Test0$tnum:get" $d $str
+ set id [ id_of $datastr ]
+ error_check_good "Test0$tnum:dup#" $id $x
+ incr x
+ }
+ error_check_good "Test0$tnum:ndups:$str" [expr $x - 1] $ndups
+ error_check_good cursor_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ incr count
+ }
+ close $did
+
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tTest0$tnum.a: Checking file for correct duplicates"
+ set dlist ""
+ for { set i 1 } { $i <= $ndups } {incr i} {
+ lappend dlist $i
+ }
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dup_check $db $txn $t1 $dlist
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ # Now compare the keys to see if they match the dictionary entries
+ set q q
+ filehead $nentries $dict $t3
+ filesort $t3 $t2
+ filesort $t1 $t3
+
+ error_check_good Test0$tnum:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ error_check_good db_close [$db close] 0
+ set db [eval {berkdb_open} $args $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ puts "\tTest0$tnum.b: Checking file for correct duplicates after close"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dup_check $db $txn $t1 $dlist
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ # Now compare the keys to see if they match the dictionary entries
+ filesort $t1 $t3
+ error_check_good Test0$tnum:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ error_check_good db_close [$db close] 0
+}
diff --git a/storage/bdb/test/test011.tcl b/storage/bdb/test/test011.tcl
new file mode 100644
index 00000000000..63e2203efe4
--- /dev/null
+++ b/storage/bdb/test/test011.tcl
@@ -0,0 +1,470 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test011.tcl,v 11.27 2002/06/11 14:09:56 sue Exp $
+#
+# TEST test011
+# TEST Duplicate test
+# TEST Small key/data pairs.
+# TEST Test DB_KEYFIRST, DB_KEYLAST, DB_BEFORE and DB_AFTER.
+# TEST To test off-page duplicates, run with small pagesize.
+# TEST
+# TEST Use the first 10,000 entries from the dictionary.
+# TEST Insert each with self as key and data; add duplicate records for each.
+# TEST Then do some key_first/key_last add_before, add_after operations.
+# TEST This does not work for recno
+# TEST
+# TEST To test if dups work when they fall off the main page, run this with
+# TEST a very tiny page size.
+proc test011 { method {nentries 10000} {ndups 5} {tnum 11} args } {
+ global dlist
+ global rand_init
+ source ./include.tcl
+
+ set dlist ""
+
+ if { [is_rbtree $method] == 1 } {
+ puts "Test0$tnum skipping for method $method"
+ return
+ }
+ if { [is_record_based $method] == 1 } {
+ test011_recno $method $nentries $tnum $args
+ return
+ }
+ if {$ndups < 5} {
+ set ndups 5
+ }
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ berkdb srand $rand_init
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum.db
+ set env NULL
+ } else {
+ set testfile test0$tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ reduce_dups nentries ndups
+ }
+ set testdir [get_home $env]
+ }
+
+ puts -nonewline "Test0$tnum: $method $nentries small $ndups dup "
+ puts "key/data pairs, cursor ops"
+
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ cleanup $testdir $env
+
+ set db [eval {berkdb_open -create \
+ -mode 0644} [concat $args "-dup"] {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set did [open $dict]
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ # Here is the loop where we put and get each key/data pair
+ # We will add dups with values 1, 3, ... $ndups. Then we'll add
+ # 0 and $ndups+1 using keyfirst/keylast. We'll add 2 and 4 using
+ # add before and add after.
+ puts "\tTest0$tnum.a: put and get duplicate keys."
+ set i ""
+ for { set i 1 } { $i <= $ndups } { incr i 2 } {
+ lappend dlist $i
+ }
+ set maxodd $i
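+	# With the default ndups of 5 this seeds the odd dups 1 3 5 and
+	# leaves maxodd at 7; steps d-g below then splice 0, 6, 2 and 4
+	# into the expected slots.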
+ while { [gets $did str] != -1 && $count < $nentries } {
+ for { set i 1 } { $i <= $ndups } { incr i 2 } {
+ set datastr $i:$str
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn $pflags {$str $datastr}]
+ error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ # Now retrieve all the keys matching this key
+ set x 1
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ for {set ret [$dbc get "-set" $str ]} \
+ {[llength $ret] != 0} \
+ {set ret [$dbc get "-next"] } {
+ if {[llength $ret] == 0} {
+ break
+ }
+ set k [lindex [lindex $ret 0] 0]
+ if { [string compare $k $str] != 0 } {
+ break
+ }
+ set datastr [lindex [lindex $ret 0] 1]
+ set d [data_of $datastr]
+
+ error_check_good Test0$tnum:put $d $str
+ set id [ id_of $datastr ]
+ error_check_good Test0$tnum:dup# $id $x
+ incr x 2
+ }
+ error_check_good Test0$tnum:numdups $x $maxodd
+ error_check_good curs_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ incr count
+ }
+ close $did
+
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tTest0$tnum.b: \
+ traverse entire file checking duplicates before close."
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dup_check $db $txn $t1 $dlist
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ # Now compare the keys to see if they match the dictionary entries
+ set q q
+ filehead $nentries $dict $t3
+ filesort $t3 $t2
+ filesort $t1 $t3
+
+ error_check_good Test0$tnum:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ error_check_good db_close [$db close] 0
+
+ set db [eval {berkdb_open} $args $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ puts "\tTest0$tnum.c: \
+ traverse entire file checking duplicates after close."
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dup_check $db $txn $t1 $dlist
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ # Now compare the keys to see if they match the dictionary entries
+ filesort $t1 $t3
+ error_check_good Test0$tnum:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ puts "\tTest0$tnum.d: Testing key_first functionality"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ add_dup $db $txn $nentries "-keyfirst" 0 0
+ set dlist [linsert $dlist 0 0]
+ dup_check $db $txn $t1 $dlist
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ puts "\tTest0$tnum.e: Testing key_last functionality"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ add_dup $db $txn $nentries "-keylast" [expr $maxodd - 1] 0
+ lappend dlist [expr $maxodd - 1]
+ dup_check $db $txn $t1 $dlist
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ puts "\tTest0$tnum.f: Testing add_before functionality"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ add_dup $db $txn $nentries "-before" 2 3
+ set dlist [linsert $dlist 2 2]
+ dup_check $db $txn $t1 $dlist
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ puts "\tTest0$tnum.g: Testing add_after functionality"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ add_dup $db $txn $nentries "-after" 4 4
+ set dlist [linsert $dlist 4 4]
+ dup_check $db $txn $t1 $dlist
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ error_check_good db_close [$db close] 0
+}
+
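+# add_dup -- insert one duplicate per dictionary word using the given
+# cursor put flag; dataval is the dup id encoded into the datum and
+# iter controls how many -next hops (iter - 1) are made past the
+# -set position before the put.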
+proc add_dup {db txn nentries flag dataval iter} {
+ source ./include.tcl
+
+ set dbc [eval {$db cursor} $txn]
+ set did [open $dict]
+ set count 0
+ while { [gets $did str] != -1 && $count < $nentries } {
+ set datastr $dataval:$str
+ set ret [$dbc get "-set" $str]
+ error_check_bad "cget(SET)" [is_substr $ret Error] 1
+ for { set i 1 } { $i < $iter } { incr i } {
+ set ret [$dbc get "-next"]
+ error_check_bad "cget(NEXT)" [is_substr $ret Error] 1
+ }
+
+ if { [string compare $flag "-before"] == 0 ||
+ [string compare $flag "-after"] == 0 } {
+ set ret [$dbc put $flag $datastr]
+ } else {
+ set ret [$dbc put $flag $str $datastr]
+ }
+ error_check_good "$dbc put $flag" $ret 0
+ incr count
+ }
+ close $did
+ $dbc close
+}
+
+proc test011_recno { method {nentries 10000} {tnum 11} largs } {
+ global dlist
+ source ./include.tcl
+
+ set largs [convert_args $method $largs]
+ set omethod [convert_method $method]
+ set renum [is_rrecno $method]
+
+ puts "Test0$tnum: \
+ $method ($largs) $nentries test cursor insert functionality"
+
+ # Create the database and open the dictionary
+ set eindex [lsearch -exact $largs "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ set txnenv 0
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum.db
+ set env NULL
+ } else {
+ set testfile test0$tnum.db
+ incr eindex
+ set env [lindex $largs $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append largs " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
+ }
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ cleanup $testdir $env
+
+ if {$renum == 1} {
+ append largs " -renumber"
+ }
+ set db [eval {berkdb_open \
+ -create -mode 0644} $largs {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set did [open $dict]
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ # The basic structure of the test is that we pick a random key
+	# in the database, overwrite it, and add items before and after it. The
+ # trickiness is that with RECNO, these are not duplicates, they
+ # are creating new keys. Therefore, every time we do this, the
+ # keys assigned to other values change. For this reason, we'll
+ # keep the database in tcl as a list and insert properly into
+ # it to verify that the right thing is happening. If we do not
+ # have renumber set, then the BEFORE and AFTER calls should fail.
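+	# For example (sketch, assuming -renumber): with records
+	# {1:a 2:b 3:c}, a cursor put -before at key 2 makes the new
+	# datum record 2 and renumbers b and c to keys 3 and 4, which
+	# the tcl side mirrors with: set dlist [linsert $dlist 2 $new]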
+
+ # Seed the database with an initial record
+ gets $did str
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {1 [chop_data $method $str]}]
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good put $ret 0
+ set count 1
+
+ set dlist "NULL $str"
+
+ # Open a cursor
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ puts "\tTest0$tnum.a: put and get entries"
+ while { [gets $did str] != -1 && $count < $nentries } {
+ # Pick a random key
+ set key [berkdb random_int 1 $count]
+ set ret [$dbc get -set $key]
+ set k [lindex [lindex $ret 0] 0]
+ set d [lindex [lindex $ret 0] 1]
+ error_check_good cget:SET:key $k $key
+ error_check_good \
+ cget:SET $d [pad_data $method [lindex $dlist $key]]
+
+ # Current
+ set ret [$dbc put -current [chop_data $method $str]]
+ error_check_good cput:$key $ret 0
+ set dlist [lreplace $dlist $key $key [pad_data $method $str]]
+
+ # Before
+ if { [gets $did str] == -1 } {
+ continue;
+ }
+
+ if { $renum == 1 } {
+ set ret [$dbc put \
+ -before [chop_data $method $str]]
+ error_check_good cput:$key:BEFORE $ret $key
+ set dlist [linsert $dlist $key $str]
+ incr count
+
+ # After
+ if { [gets $did str] == -1 } {
+ continue;
+ }
+ set ret [$dbc put \
+ -after [chop_data $method $str]]
+ error_check_good cput:$key:AFTER $ret [expr $key + 1]
+ set dlist [linsert $dlist [expr $key + 1] $str]
+ incr count
+ }
+
+ # Now verify that the keys are in the right place
+ set i 0
+ for {set ret [$dbc get "-set" $key]} \
+ {[string length $ret] != 0 && $i < 3} \
+ {set ret [$dbc get "-next"] } {
+ set check_key [expr $key + $i]
+
+ set k [lindex [lindex $ret 0] 0]
+ error_check_good cget:$key:loop $k $check_key
+
+ set d [lindex [lindex $ret 0] 1]
+ error_check_good cget:data $d \
+ [pad_data $method [lindex $dlist $check_key]]
+ incr i
+ }
+ }
+ close $did
+ error_check_good cclose [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ # Create check key file.
+ set oid [open $t2 w]
+ for {set i 1} {$i <= $count} {incr i} {
+ puts $oid $i
+ }
+ close $oid
+
+ puts "\tTest0$tnum.b: dump file"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_file $db $txn $t1 test011_check
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good Test0$tnum:diff($t2,$t1) \
+ [filecmp $t2 $t1] 0
+
+ error_check_good db_close [$db close] 0
+
+ puts "\tTest0$tnum.c: close, open, and dump file"
+ open_and_dump_file $testfile $env $t1 test011_check \
+ dump_file_direction "-first" "-next"
+ error_check_good Test0$tnum:diff($t2,$t1) \
+ [filecmp $t2 $t1] 0
+
+ puts "\tTest0$tnum.d: close, open, and dump file in reverse direction"
+ open_and_dump_file $testfile $env $t1 test011_check \
+ dump_file_direction "-last" "-prev"
+
+ filesort $t1 $t3 -n
+ error_check_good Test0$tnum:diff($t2,$t3) \
+ [filecmp $t2 $t3] 0
+}
+
+proc test011_check { key data } {
+ global dlist
+
+ error_check_good "get key $key" $data [lindex $dlist $key]
+}
diff --git a/storage/bdb/test/test012.tcl b/storage/bdb/test/test012.tcl
new file mode 100644
index 00000000000..e7237d27267
--- /dev/null
+++ b/storage/bdb/test/test012.tcl
@@ -0,0 +1,139 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test012.tcl,v 11.20 2002/05/22 15:42:46 sue Exp $
+#
+# TEST test012
+# TEST Large keys/small data
+# TEST Same as test003 except use big keys (source files and
+# TEST executables) and small data (the file/executable names).
+# TEST
+# TEST Take the source files and dbtest executable and enter their contents
+# TEST as the key with their names as data. After all are entered, retrieve
+# TEST all; compare output to original. Close file, reopen, do retrieve and
+# TEST re-verify.
+proc test012 { method args} {
+ global names
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_record_based $method] == 1 } {
+ puts "Test012 skipping for method $method"
+ return
+ }
+
+ puts "Test012: $method ($args) filename=data filecontents=key pairs"
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test012.db
+ set env NULL
+ } else {
+ set testfile test012.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ set t4 $testdir/t4
+
+ cleanup $testdir $env
+
+ set db [eval {berkdb_open \
+ -create -mode 0644} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+
+ # Here is the loop where we put and get each key/data pair
+ set file_list [get_file_list]
+
+ puts "\tTest012.a: put/get loop"
+ set count 0
+ foreach f $file_list {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ put_file_as_key $db $txn $pflags $f
+
+ set kd [get_file_as_key $db $txn $gflags $f]
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ incr count
+ }
+
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tTest012.b: dump file"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_binkey_file $db $txn $t1 test012.check
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ # Now compare the data to see if they match the .o and dbtest files
+ set oid [open $t2.tmp w]
+ foreach f $file_list {
+ puts $oid $f
+ }
+ close $oid
+ filesort $t2.tmp $t2
+ fileremove $t2.tmp
+ filesort $t1 $t3
+
+ error_check_good Test012:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ # Now, reopen the file and run the last test again.
+ puts "\tTest012.c: close, open, and dump file"
+ open_and_dump_file $testfile $env $t1 test012.check \
+ dump_binkey_file_direction "-first" "-next"
+
+ filesort $t1 $t3
+
+ error_check_good Test012:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ # Now, reopen the file and run the last test again in reverse direction.
+ puts "\tTest012.d: close, open, and dump file in reverse direction"
+	open_and_dump_file $testfile $env $t1 test012.check \
+ dump_binkey_file_direction "-last" "-prev"
+
+ filesort $t1 $t3
+
+ error_check_good Test012:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+}
+
+# Check function for test012; key should be file contents; data should be the file name
+proc test012.check { binfile tmpfile } {
+ source ./include.tcl
+
+ error_check_good Test012:diff($binfile,$tmpfile) \
+ [filecmp $binfile $tmpfile] 0
+}
diff --git a/storage/bdb/test/test013.tcl b/storage/bdb/test/test013.tcl
new file mode 100644
index 00000000000..96d7757b0d8
--- /dev/null
+++ b/storage/bdb/test/test013.tcl
@@ -0,0 +1,241 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test013.tcl,v 11.23 2002/05/22 15:42:46 sue Exp $
+#
+# TEST test013
+# TEST Partial put test
+# TEST Overwrite entire records using partial puts.
+# TEST Make sure that the NOOVERWRITE flag works.
+# TEST
+# TEST 1. Insert 10000 keys and retrieve them (equal key/data pairs).
+# TEST 2. Attempt to overwrite keys with NO_OVERWRITE set (expect error).
+# TEST 3. Actually overwrite each one with its datum reversed.
+# TEST
+# TEST No partial testing here.
+proc test013 { method {nentries 10000} args } {
+ global errorCode
+ global errorInfo
+ global fixed_pad
+ global fixed_len
+
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test013.db
+ set env NULL
+ } else {
+ set testfile test013.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
+ }
+ puts "Test013: $method ($args) $nentries equal key/data pairs, put test"
+
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ cleanup $testdir $env
+
+ set db [eval {berkdb_open \
+ -create -mode 0644} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set did [open $dict]
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ if { [is_record_based $method] == 1 } {
+ set checkfunc test013_recno.check
+ append gflags " -recno"
+ global kvals
+ } else {
+ set checkfunc test013.check
+ }
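+	# For recno methods, kvals remembers each record's original
+	# (padded) word so test013_recno.check can compare against its
+	# uppercased form after the overwrite pass.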
+ puts "\tTest013.a: put/get loop"
+ # Here is the loop where we put and get each key/data pair
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ set kvals($key) [pad_data $method $str]
+ } else {
+ set key $str
+ }
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} \
+ $txn $pflags {$key [chop_data $method $str]}]
+ error_check_good put $ret 0
+
+ set ret [eval {$db get} $gflags $txn {$key}]
+ error_check_good \
+ get $ret [list [list $key [pad_data $method $str]]]
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ incr count
+ }
+ close $did
+
+ # Now we will try to overwrite each datum, but set the
+ # NOOVERWRITE flag.
+ puts "\tTest013.b: overwrite values with NOOVERWRITE flag."
+ set did [open $dict]
+ set count 0
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ } else {
+ set key $str
+ }
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn $pflags \
+ {-nooverwrite $key [chop_data $method $str]}]
+ error_check_good put [is_substr $ret "DB_KEYEXIST"] 1
+
+ # Value should be unchanged.
+ set ret [eval {$db get} $txn $gflags {$key}]
+ error_check_good \
+ get $ret [list [list $key [pad_data $method $str]]]
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ incr count
+ }
+ close $did
+
+ # Now we will replace each item with its datum capitalized.
+ puts "\tTest013.c: overwrite values with capitalized datum"
+ set did [open $dict]
+ set count 0
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ } else {
+ set key $str
+ }
+ set rstr [string toupper $str]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set r [eval {$db put} \
+ $txn $pflags {$key [chop_data $method $rstr]}]
+ error_check_good put $r 0
+
+ # Value should be changed.
+ set ret [eval {$db get} $txn $gflags {$key}]
+ error_check_good \
+ get $ret [list [list $key [pad_data $method $rstr]]]
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ incr count
+ }
+ close $did
+
+ # Now make sure that everything looks OK
+ puts "\tTest013.d: check entire file contents"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_file $db $txn $t1 $checkfunc
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ # Now compare the keys to see if they match the dictionary (or ints)
+ if { [is_record_based $method] == 1 } {
+ set oid [open $t2 w]
+ for {set i 1} {$i <= $nentries} {incr i} {
+ puts $oid $i
+ }
+ close $oid
+ file rename -force $t1 $t3
+ } else {
+ set q q
+ filehead $nentries $dict $t3
+ filesort $t3 $t2
+ filesort $t1 $t3
+ }
+
+ error_check_good \
+ Test013:diff($t3,$t2) [filecmp $t3 $t2] 0
+
+ puts "\tTest013.e: close, open, and dump file"
+ # Now, reopen the file and run the last test again.
+ open_and_dump_file $testfile $env $t1 $checkfunc \
+ dump_file_direction "-first" "-next"
+
+ if { [is_record_based $method] == 0 } {
+ filesort $t1 $t3
+ }
+
+ error_check_good \
+ Test013:diff($t3,$t2) [filecmp $t3 $t2] 0
+
+ # Now, reopen the file and run the last test again in the
+ # reverse direction.
+ puts "\tTest013.f: close, open, and dump file in reverse direction"
+ open_and_dump_file $testfile $env $t1 $checkfunc \
+ dump_file_direction "-last" "-prev"
+
+ if { [is_record_based $method] == 0 } {
+ filesort $t1 $t3
+ }
+
+ error_check_good \
+ Test013:diff($t3,$t2) [filecmp $t3 $t2] 0
+}
+
+# Check function for test013; data should be the key in upper case
+proc test013.check { key data } {
+ error_check_good \
+ "key/data mismatch for $key" $data [string toupper $key]
+}
+
+proc test013_recno.check { key data } {
+ global dict
+ global kvals
+
+ error_check_good key"$key"_exists [info exists kvals($key)] 1
+ error_check_good \
+ "data mismatch for $key" $data [string toupper $kvals($key)]
+}
diff --git a/storage/bdb/test/test014.tcl b/storage/bdb/test/test014.tcl
new file mode 100644
index 00000000000..00d69d3352e
--- /dev/null
+++ b/storage/bdb/test/test014.tcl
@@ -0,0 +1,253 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test014.tcl,v 11.24 2002/05/22 15:42:46 sue Exp $
+#
+# TEST test014
+# TEST Exercise partial puts on short data
+# TEST Run 5 combinations of numbers of characters to replace,
+# TEST and number of times to increase the size by.
+# TEST
+# TEST Partial put test, small data, replacing with same size. The data set
+# TEST consists of the first nentries of the dictionary. We will insert them
+# TEST (and retrieve them) as we do in test 1 (equal key/data pairs). Then
+# TEST we'll try to perform partial puts of some characters at the beginning,
+# TEST some at the end, and some at the middle.
+proc test014 { method {nentries 10000} args } {
+ set fixed 0
+ set args [convert_args $method $args]
+
+ if { [is_fixed_length $method] == 1 } {
+ set fixed 1
+ }
+
+ puts "Test014: $method ($args) $nentries equal key/data pairs, put test"
+
+ # flagp indicates whether this is a postpend or a
+ # normal partial put
+ set flagp 0
+
+ eval {test014_body $method $flagp 1 1 $nentries} $args
+ eval {test014_body $method $flagp 1 4 $nentries} $args
+ eval {test014_body $method $flagp 2 4 $nentries} $args
+ eval {test014_body $method $flagp 1 128 $nentries} $args
+ eval {test014_body $method $flagp 2 16 $nentries} $args
+ if { $fixed == 0 } {
+ eval {test014_body $method $flagp 0 1 $nentries} $args
+ eval {test014_body $method $flagp 0 4 $nentries} $args
+ eval {test014_body $method $flagp 0 128 $nentries} $args
+
+ # POST-PENDS :
+ # partial put data after the end of the existent record
+ # chars: number of empty spaces that will be padded with null
+ # increase: is the length of the str to be appended (after pad)
+ #
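+		# Worked example (assuming the "P" fill used in
+		# test014_body): postpending with chars=2 and increase=3
+		# turns "abc" into "abc\0\0PPP".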
+ set flagp 1
+ eval {test014_body $method $flagp 1 1 $nentries} $args
+ eval {test014_body $method $flagp 4 1 $nentries} $args
+ eval {test014_body $method $flagp 128 1 $nentries} $args
+ eval {test014_body $method $flagp 1 4 $nentries} $args
+ eval {test014_body $method $flagp 1 128 $nentries} $args
+ }
+ puts "Test014 complete."
+}
+
+proc test014_body { method flagp chars increase {nentries 10000} args } {
+ source ./include.tcl
+
+ set omethod [convert_method $method]
+
+ if { [is_fixed_length $method] == 1 && $chars != $increase } {
+ puts "Test014: $method: skipping replace\
+ $chars chars with string $increase times larger."
+ return
+ }
+
+ if { $flagp == 1} {
+ puts "Test014: Postpending string of len $increase with \
+ gap $chars."
+ } else {
+ puts "Test014: Replace $chars chars with string \
+ $increase times larger"
+ }
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test014.db
+ set env NULL
+ } else {
+ set testfile test014.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
+ }
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ cleanup $testdir $env
+
+ set db [eval {berkdb_open \
+ -create -mode 0644} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set gflags ""
+ set pflags ""
+ set txn ""
+ set count 0
+
+ if { [is_record_based $method] == 1 } {
+ append gflags " -recno"
+ }
+
+ puts "\tTest014.a: put/get loop"
+ # Here is the loop where we put and get each key/data pair
+ # We will do the initial put and then three Partial Puts
+ # for the beginning, middle and end of the string.
+ set did [open $dict]
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ } else {
+ set key $str
+ }
+ if { $flagp == 1 } {
+ # this is for postpend only
+ global dvals
+
+ # initial put
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$key $str}]
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good dbput $ret 0
+
+ set offset [string length $str]
+
+ # increase is the actual number of new bytes
+ # to be postpended (besides the null padding)
+ set data [repeat "P" $increase]
+
+ # chars is the amount of padding in between
+ # the old data and the new
+ set len [expr $offset + $chars + $increase]
+ set dvals($key) [binary format \
+ a[set offset]x[set chars]a[set increase] \
+ $str $data]
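+			# binary format a<len>x<gap>a<inc> lays out the
+			# old string, $chars NUL pad bytes, then the new
+			# "P" bytes -- the value DB should hold after the
+			# partial put below.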
+ set offset [expr $offset + $chars]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put -partial [list $offset 0]} \
+ $txn {$key $data}]
+ error_check_good dbput:post $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ } else {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ partial_put $method $db $txn \
+ $gflags $key $str $chars $increase
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+ incr count
+ }
+ close $did
+
+ # Now make sure that everything looks OK
+ puts "\tTest014.b: check entire file contents"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_file $db $txn $t1 test014.check
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ # Now compare the keys to see if they match the dictionary (or ints)
+ if { [is_record_based $method] == 1 } {
+ set oid [open $t2 w]
+		for {set i 1} {$i <= $nentries} {incr i} {
+ puts $oid $i
+ }
+ close $oid
+ file rename -force $t1 $t3
+ } else {
+ set q q
+ filehead $nentries $dict $t3
+ filesort $t3 $t2
+ filesort $t1 $t3
+ }
+
+ error_check_good \
+ Test014:diff($t3,$t2) [filecmp $t3 $t2] 0
+
+ puts "\tTest014.c: close, open, and dump file"
+ # Now, reopen the file and run the last test again.
+ open_and_dump_file $testfile $env \
+ $t1 test014.check dump_file_direction "-first" "-next"
+
+ if { [string compare $omethod "-recno"] != 0 } {
+ filesort $t2 $t3
+ file rename -force $t3 $t2
+ filesort $t1 $t3
+ }
+
+ error_check_good \
+ Test014:diff($t3,$t2) [filecmp $t3 $t2] 0
+ # Now, reopen the file and run the last test again in the
+ # reverse direction.
+ puts "\tTest014.d: close, open, and dump file in reverse direction"
+ open_and_dump_file $testfile $env $t1 \
+ test014.check dump_file_direction "-last" "-prev"
+
+ if { [string compare $omethod "-recno"] != 0 } {
+ filesort $t2 $t3
+ file rename -force $t3 $t2
+ filesort $t1 $t3
+ }
+
+ error_check_good \
+ Test014:diff($t3,$t2) [filecmp $t3 $t2] 0
+}
+
+# Check function for test014; data should match the value recorded in dvals
+proc test014.check { key data } {
+ global dvals
+
+ error_check_good key"$key"_exists [info exists dvals($key)] 1
+ error_check_good "data mismatch for key $key" $data $dvals($key)
+}
diff --git a/storage/bdb/test/test015.tcl b/storage/bdb/test/test015.tcl
new file mode 100644
index 00000000000..f129605a405
--- /dev/null
+++ b/storage/bdb/test/test015.tcl
@@ -0,0 +1,276 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test015.tcl,v 11.27 2002/05/31 16:57:25 sue Exp $
+#
+# TEST test015
+# TEST Partial put test
+# TEST Partial put test where the key does not initially exist.
+proc test015 { method {nentries 7500} { start 0 } args } {
+ global fixed_len testdir
+
+ set low_range 50
+ set mid_range 100
+ set high_range 1000
+
+ if { [is_fixed_length $method] } {
+ set low_range [expr $fixed_len/2 - 2]
+ set mid_range [expr $fixed_len/2]
+ set high_range $fixed_len
+ }
+
+ set t_table {
+ { 1 { 1 1 1 } }
+ { 2 { 1 1 5 } }
+ { 3 { 1 1 $low_range } }
+ { 4 { 1 $mid_range 1 } }
+ { 5 { $mid_range $high_range 5 } }
+ { 6 { 1 $mid_range $low_range } }
+ }
+
+ puts "Test015: \
+ $method ($args) $nentries equal key/data pairs, partial put test"
+ test015_init
+ if { $start == 0 } {
+ set start { 1 2 3 4 5 6 }
+ }
+ foreach entry $t_table {
+ set this [lindex $entry 0]
+ if { [lsearch $start $this] == -1 } {
+ continue
+ }
+ puts -nonewline "$this: "
+ eval [concat test015_body $method [lindex $entry 1] \
+ $nentries $args]
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ incr eindex
+ set env [lindex $args $eindex]
+ set testdir [get_home $env]
+ }
+		puts "Verifying testdir $testdir"
+
+ error_check_good verify [verify_dir $testdir "\tTest015.e: "] 0
+ }
+}
+
+proc test015_init { } {
+ global rand_init
+
+ berkdb srand $rand_init
+}
+
+proc test015_body { method off_low off_hi rcount {nentries 10000} args } {
+ global dvals
+ global fixed_len
+ global testdir
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ set checkfunc test015.check
+
+	if { [is_fixed_length $method] && \
+	    [string compare $omethod "-recno"] == 0} {
+		# Fixed-length recno records get the fixed-record check.
+		set checkfunc test015.fixed.check
+	}
+
+	puts "Put $rcount strings at random offsets between $off_low and $off_hi"
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test015.db
+ set env NULL
+ } else {
+ set testfile test015.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries > 5000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
+ }
+ set retdir $testdir
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ cleanup $testdir $env
+
+ set db [eval {berkdb_open \
+ -create -mode 0644} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ puts "\tTest015.a: put/get loop for $nentries entries"
+
+ # Here is the loop where we put and get each key/data pair
+ # Each put is a partial put of a record that does not exist.
+ set did [open $dict]
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ if { [string length $str] > $fixed_len } {
+ continue
+ }
+ set key [expr $count + 1]
+ } else {
+ set key $str
+ }
+
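+		# The first branch below is disabled (if { 0 }) and kept
+		# for reference; only the else branch ever runs.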
+ if { 0 } {
+ set data [replicate $str $rcount]
+ set off [ berkdb random_int $off_low $off_hi ]
+ set offn [expr $off + 1]
+ if { [is_fixed_length $method] && \
+ [expr [string length $data] + $off] >= $fixed_len} {
+ set data [string range $data 0 [expr $fixed_len-$offn]]
+ }
+ set dvals($key) [partial_shift $data $off right]
+ } else {
+ set data [chop_data $method [replicate $str $rcount]]
+
+ # This is a hack. In DB we will store the records with
+ # some padding, but these will get lost if we just return
+ # them in TCL. As a result, we're going to have to hack
+ # get to check for 0 padding and return a list consisting
+ # of the number of 0's and the actual data.
+ set off [ berkdb random_int $off_low $off_hi ]
+
+ # There is no string concatenation function in Tcl
+ # (although there is one in TclX), so we have to resort
+ # to this hack. Ugh.
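+			# E.g. off=3 and slen=5 give the format spec
+			# "x3a5": three NUL pad bytes followed by the five
+			# data bytes (illustrative values).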
+ set slen [string length $data]
+ if {[is_fixed_length $method] && \
+ $slen > $fixed_len - $off} {
+				set slen [expr $fixed_len - $off]
+ }
+ set a "a"
+ set dvals($key) [pad_data \
+ $method [eval "binary format x$off$a$slen" {$data}]]
+ }
+ if {[is_fixed_length $method] && \
+ [string length $data] > ($fixed_len - $off)} {
+ set slen [expr $fixed_len - $off]
+ set data [eval "binary format a$slen" {$data}]
+ }
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn \
+ {-partial [list $off [string length $data]] $key $data}]
+ error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ incr count
+ }
+ close $did
+
+ # Now make sure that everything looks OK
+ puts "\tTest015.b: check entire file contents"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_file $db $txn $t1 $checkfunc
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ # Now compare the keys to see if they match the dictionary (or ints)
+ if { [is_record_based $method] == 1 } {
+ set oid [open $t2 w]
+		for {set i 1} {$i <= $nentries} {incr i} {
+ puts $oid $i
+ }
+ close $oid
+ filesort $t2 $t3
+ file rename -force $t3 $t2
+ filesort $t1 $t3
+ } else {
+ set q q
+ filehead $nentries $dict $t3
+ filesort $t3 $t2
+ filesort $t1 $t3
+ }
+
+ error_check_good Test015:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ puts "\tTest015.c: close, open, and dump file"
+ # Now, reopen the file and run the last test again.
+ open_and_dump_file $testfile $env $t1 \
+ $checkfunc dump_file_direction "-first" "-next"
+
+ if { [string compare $omethod "-recno"] != 0 } {
+ filesort $t1 $t3
+ }
+
+ error_check_good Test015:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ # Now, reopen the file and run the last test again in the
+ # reverse direction.
+ puts "\tTest015.d: close, open, and dump file in reverse direction"
+ open_and_dump_file $testfile $env $t1 \
+ $checkfunc dump_file_direction "-last" "-prev"
+
+ if { [string compare $omethod "-recno"] != 0 } {
+ filesort $t1 $t3
+ }
+
+ error_check_good Test015:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ unset dvals
+}
+
+# Check function for test015; data is compared byte-by-byte (including
+# any NUL padding) against the expected value in dvals
+proc test015.check { key data } {
+ global dvals
+
+ error_check_good key"$key"_exists [info exists dvals($key)] 1
+ binary scan $data "c[string length $data]" a
+ binary scan $dvals($key) "c[string length $dvals($key)]" b
+ error_check_good "mismatch on padding for key $key" $a $b
+}
+
+proc test015.fixed.check { key data } {
+ global dvals
+ global fixed_len
+
+ error_check_good key"$key"_exists [info exists dvals($key)] 1
+ if { [string length $data] > $fixed_len } {
+ error_check_bad \
+ "data length:[string length $data] \
+ for fixed:$fixed_len" 1 1
+ }
+	error_check_good compare_data($data,$dvals($key)) \
+	    $dvals($key) $data
+}
diff --git a/storage/bdb/test/test016.tcl b/storage/bdb/test/test016.tcl
new file mode 100644
index 00000000000..af289f866f4
--- /dev/null
+++ b/storage/bdb/test/test016.tcl
@@ -0,0 +1,207 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test016.tcl,v 11.23 2002/05/22 15:42:46 sue Exp $
+#
+# TEST test016
+# TEST Partial put test
+# TEST Partial put where the datum gets shorter as a result of the put.
+# TEST
+# TEST Partial put test where partial puts make the record smaller.
+# TEST Use the first 10,000 entries from the dictionary.
+# TEST Insert each with self as key and a fixed, medium length data string;
+# TEST retrieve each. After all are entered, go back and do partial puts,
+# TEST replacing a random-length string with the key value.
+# TEST Then verify.
+
+proc test016 { method {nentries 10000} args } {
+ global datastr
+ global dvals
+ global rand_init
+ source ./include.tcl
+
+ berkdb srand $rand_init
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_fixed_length $method] == 1 } {
+ puts "Test016: skipping for method $method"
+ return
+ }
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test016.db
+ set env NULL
+ } else {
+ set testfile test016.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
+ }
+ puts "Test016: $method ($args) $nentries partial put shorten"
+
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ cleanup $testdir $env
+ set db [eval {berkdb_open \
+ -create -mode 0644} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ if { [is_record_based $method] == 1 } {
+ append gflags " -recno"
+ }
+
+ # Here is the loop where we put and get each key/data pair
+ puts "\tTest016.a: put/get loop"
+ set did [open $dict]
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ } else {
+ set key $str
+ }
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} \
+ $txn $pflags {$key [chop_data $method $datastr]}]
+ error_check_good put $ret 0
+
+ set ret [eval {$db get} $txn $gflags {$key}]
+ error_check_good \
+ get $ret [list [list $key [pad_data $method $datastr]]]
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ incr count
+ }
+ close $did
+
+ # Next we will do a partial put replacement, making the data
+ # shorter
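+	# Illustrative sketch (hypothetical values): with datastr
+	# "abcdefgh", repl_off 1, and repl_len 5, a put of "CAT" with
+	# -partial {1 5} turns the record into "aCATgh" -- shorter
+	# whenever repl_len exceeds the length of the replacement key.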
+ puts "\tTest016.b: partial put loop"
+ set did [open $dict]
+ set count 0
+ set len [string length $datastr]
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ } else {
+ set key $str
+ }
+
+ set repl_len [berkdb random_int [string length $key] $len]
+ set repl_off [berkdb random_int 0 [expr $len - $repl_len] ]
+ set s1 [string range $datastr 0 [ expr $repl_off - 1] ]
+ set s2 [string toupper $key]
+ set s3 [string range $datastr [expr $repl_off + $repl_len] end ]
+ set dvals($key) [pad_data $method $s1$s2$s3]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {-partial \
+ [list $repl_off $repl_len] $key [chop_data $method $s2]}]
+ error_check_good put $ret 0
+ set ret [eval {$db get} $txn $gflags {$key}]
+ error_check_good \
+ put $ret [list [list $key [pad_data $method $s1$s2$s3]]]
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ incr count
+ }
+ close $did
+
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tTest016.c: dump file"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_file $db $txn $t1 test016.check
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ # Now compare the keys to see if they match the dictionary
+ if { [is_record_based $method] == 1 } {
+ set oid [open $t2 w]
+		for {set i 1} {$i <= $nentries} {incr i} {
+ puts $oid $i
+ }
+ close $oid
+ file rename -force $t1 $t3
+ } else {
+ set q q
+ filehead $nentries $dict $t3
+ filesort $t3 $t2
+ filesort $t1 $t3
+ }
+
+ error_check_good Test016:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ # Now, reopen the file and run the last test again.
+ puts "\tTest016.d: close, open, and dump file"
+ open_and_dump_file $testfile $env $t1 test016.check \
+ dump_file_direction "-first" "-next"
+
+ if { [ is_record_based $method ] == 0 } {
+ filesort $t1 $t3
+ }
+ error_check_good Test016:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ # Now, reopen the file and run the last test again in reverse direction.
+ puts "\tTest016.e: close, open, and dump file in reverse direction"
+ open_and_dump_file $testfile $env $t1 test016.check \
+ dump_file_direction "-last" "-prev"
+
+ if { [ is_record_based $method ] == 0 } {
+ filesort $t1 $t3
+ }
+ error_check_good Test016:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+}
+
+# Check function for test016; data should be whatever is set in dvals
+proc test016.check { key data } {
+ global datastr
+ global dvals
+
+ error_check_good key"$key"_exists [info exists dvals($key)] 1
+ error_check_good "data mismatch for key $key" $data $dvals($key)
+}
diff --git a/storage/bdb/test/test017.tcl b/storage/bdb/test/test017.tcl
new file mode 100644
index 00000000000..1f99aa328fb
--- /dev/null
+++ b/storage/bdb/test/test017.tcl
@@ -0,0 +1,306 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test017.tcl,v 11.23 2002/06/20 19:01:02 sue Exp $
+#
+# TEST test017
+# TEST Basic offpage duplicate test.
+# TEST
+# TEST Run duplicates with small page size so that we test off page duplicates.
+# TEST Then after we have an off-page database, test with overflow pages too.
+proc test017 { method {contents 0} {ndups 19} {tnum 17} args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_record_based $method] == 1 || [is_rbtree $method] == 1 } {
+ puts "Test0$tnum skipping for method $method"
+ return
+ }
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ incr pgindex
+ if { [lindex $args $pgindex] > 8192 } {
+ puts "Test0$tnum: Skipping for large pagesizes"
+ return
+ }
+ }
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum.db
+ set env NULL
+ } else {
+ set testfile test0$tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ set t4 $testdir/t4
+
+ cleanup $testdir $env
+
+ set db [eval {berkdb_open \
+ -create -mode 0644 -dup} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ set file_list [get_file_list 1]
+ if { $txnenv == 1 } {
+ set flen [llength $file_list]
+ reduce_dups flen ndups
+ set file_list [lrange $file_list 0 $flen]
+ }
+ puts "Test0$tnum: $method ($args) Off page duplicate tests with $ndups duplicates"
+
+ set ovfl ""
+ # Here is the loop where we put and get each key/data pair
+ puts -nonewline "\tTest0$tnum.a: Creating duplicates with "
+ if { $contents != 0 } {
+ puts "file contents as key/data"
+ } else {
+ puts "file name as key/data"
+ }
+ foreach f $file_list {
+ if { $contents != 0 } {
+ set fid [open $f r]
+ fconfigure $fid -translation binary
+ #
+ # Prepend file name to guarantee uniqueness
+ set filecont [read $fid]
+ set str $f:$filecont
+ close $fid
+ } else {
+ set str $f
+ }
+ for { set i 1 } { $i <= $ndups } { incr i } {
+ set datastr $i:$str
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} \
+ $txn $pflags {$str [chop_data $method $datastr]}]
+ error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ #
+ # Save 10% files for overflow test
+ #
+ if { $contents == 0 && [expr $count % 10] == 0 } {
+ lappend ovfl $f
+ }
+ # Now retrieve all the keys matching this key
+ set ret [$db get $str]
+ error_check_bad $f:dbget_dups [llength $ret] 0
+ error_check_good $f:dbget_dups1 [llength $ret] $ndups
+ set x 1
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ for {set ret [$dbc get "-set" $str]} \
+ {[llength $ret] != 0} \
+ {set ret [$dbc get "-next"] } {
+ set k [lindex [lindex $ret 0] 0]
+ if { [string compare $k $str] != 0 } {
+ break
+ }
+ set datastr [lindex [lindex $ret 0] 1]
+ set d [data_of $datastr]
+ if {[string length $d] == 0} {
+ break
+ }
+ error_check_good "Test0$tnum:get" $d $str
+ set id [ id_of $datastr ]
+ error_check_good "Test0$tnum:$f:dup#" $id $x
+ incr x
+ }
+ error_check_good "Test0$tnum:ndups:$str" [expr $x - 1] $ndups
+ error_check_good cursor_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ incr count
+ }
+
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tTest0$tnum.b: Checking file for correct duplicates"
+ set dlist ""
+ for { set i 1 } { $i <= $ndups } {incr i} {
+ lappend dlist $i
+ }
+ set oid [open $t2.tmp w]
+ set o1id [open $t4.tmp w]
+ foreach f $file_list {
+ for {set i 1} {$i <= $ndups} {incr i} {
+ puts $o1id $f
+ }
+ puts $oid $f
+ }
+ close $oid
+ close $o1id
+ filesort $t2.tmp $t2
+ filesort $t4.tmp $t4
+ fileremove $t2.tmp
+ fileremove $t4.tmp
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dup_check $db $txn $t1 $dlist
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ if {$contents == 0} {
+ filesort $t1 $t3
+
+ error_check_good Test0$tnum:diff($t3,$t2) [filecmp $t3 $t2] 0
+
+ # Now compare the keys to see if they match the file names
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_file $db $txn $t1 test017.check
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ filesort $t1 $t3
+
+ error_check_good Test0$tnum:diff($t3,$t4) [filecmp $t3 $t4] 0
+ }
+
+ error_check_good db_close [$db close] 0
+ set db [eval {berkdb_open} $args $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ puts "\tTest0$tnum.c: Checking file for correct duplicates after close"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dup_check $db $txn $t1 $dlist
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ if {$contents == 0} {
+ # Now compare the keys to see if they match the filenames
+ filesort $t1 $t3
+ error_check_good Test0$tnum:diff($t3,$t2) [filecmp $t3 $t2] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ puts "\tTest0$tnum.d: Verify off page duplicates and overflow status"
+ set db [eval {berkdb_open} $args $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set stat [$db stat]
+ if { [is_btree $method] } {
+ error_check_bad stat:offpage \
+ [is_substr $stat "{{Internal pages} 0}"] 1
+ }
+ if {$contents == 0} {
+ # This check doesn't work in hash, since overflow
+ # pages count extra pages in buckets as well as true
+ # P_OVERFLOW pages.
+ if { [is_hash $method] == 0 } {
+ error_check_good overflow \
+ [is_substr $stat "{{Overflow pages} 0}"] 1
+ }
+ } else {
+ error_check_bad overflow \
+ [is_substr $stat "{{Overflow pages} 0}"] 1
+ }
+
+ #
+ # If doing overflow test, do that now. Else we are done.
+ # Add overflow pages by adding a large entry to a duplicate.
+ #
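+	# The entry built below repeats the file contents four times,
+	# which for these files typically runs well past the page sizes
+	# used here (8192 bytes at most) and forces true overflow pages.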
+ if { [llength $ovfl] == 0} {
+ error_check_good db_close [$db close] 0
+ return
+ }
+
+ puts "\tTest0$tnum.e: Add overflow duplicate entries"
+ set ovfldup [expr $ndups + 1]
+ foreach f $ovfl {
+ #
+ # This is just like put_file, but prepends the dup number
+ #
+ set fid [open $f r]
+ fconfigure $fid -translation binary
+ set fdata [read $fid]
+ close $fid
+ set data $ovfldup:$fdata:$fdata:$fdata:$fdata
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn $pflags {$f $data}]
+ error_check_good ovfl_put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ puts "\tTest0$tnum.f: Verify overflow duplicate entries"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dup_check $db $txn $t1 $dlist $ovfldup
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ filesort $t1 $t3
+ error_check_good Test0$tnum:diff($t3,$t2) [filecmp $t3 $t2] 0
+
+ set stat [$db stat]
+ if { [is_hash [$db get_type]] } {
+ error_check_bad overflow1_hash [is_substr $stat \
+ "{{Number of big pages} 0}"] 1
+ } else {
+ error_check_bad \
+ overflow1 [is_substr $stat "{{Overflow pages} 0}"] 1
+ }
+ error_check_good db_close [$db close] 0
+}
+
+# Check function; verify data contains key
+proc test017.check { key data } {
+ error_check_good "data mismatch for key $key" $key [data_of $data]
+}
diff --git a/storage/bdb/test/test018.tcl b/storage/bdb/test/test018.tcl
new file mode 100644
index 00000000000..8fc8a14e95e
--- /dev/null
+++ b/storage/bdb/test/test018.tcl
@@ -0,0 +1,16 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test018.tcl,v 11.6 2002/01/11 15:53:43 bostic Exp $
+#
+# TEST test018
+# TEST Offpage duplicate test
+# TEST Key_{first,last,before,after} offpage duplicates.
+# TEST Run duplicates with small page size so that we test off page
+# TEST duplicates.
+proc test018 { method {nentries 10000} args} {
+ puts "Test018: Off page duplicate tests"
+ eval {test011 $method $nentries 19 18 -pagesize 512} $args
+}
diff --git a/storage/bdb/test/test019.tcl b/storage/bdb/test/test019.tcl
new file mode 100644
index 00000000000..aa3a58a0bcd
--- /dev/null
+++ b/storage/bdb/test/test019.tcl
@@ -0,0 +1,131 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test019.tcl,v 11.21 2002/05/22 15:42:47 sue Exp $
+#
+# TEST test019
+# TEST Partial get test.
+proc test019 { method {nentries 10000} args } {
+ global fixed_len
+ global rand_init
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test019.db
+ set env NULL
+ } else {
+ set testfile test019.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
+ }
+ puts "Test019: $method ($args) $nentries partial get test"
+
+ cleanup $testdir $env
+
+ set db [eval {berkdb_open \
+ -create -mode 0644} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set did [open $dict]
+ berkdb srand $rand_init
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ if { [is_record_based $method] == 1 } {
+ append gflags " -recno"
+ }
+
+ puts "\tTest019.a: put/get loop"
+ for { set i 0 } { [gets $did str] != -1 && $i < $nentries } \
+ { incr i } {
+
+ if { [is_record_based $method] == 1 } {
+ set key [expr $i + 1]
+ } else {
+ set key $str
+ }
+ set repl [berkdb random_int $fixed_len 100]
+ set data [chop_data $method [replicate $str $repl]]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {-nooverwrite $key $data}]
+ error_check_good dbput:$key $ret 0
+
+ set ret [eval {$db get} $txn $gflags {$key}]
+ error_check_good \
+ dbget:$key $ret [list [list $key [pad_data $method $data]]]
+ set kvals($key) $repl
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+ close $did
+
+ puts "\tTest019.b: partial get loop"
+ set did [open $dict]
+ for { set i 0 } { [gets $did str] != -1 && $i < $nentries } \
+ { incr i } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $i + 1]
+ } else {
+ set key $str
+ }
+ set data [pad_data $method [replicate $str $kvals($key)]]
+
+ set maxndx [expr [string length $data] - 1]
+
+ set beg [berkdb random_int 0 [expr $maxndx - 1]]
+ set len [berkdb random_int 0 [expr $maxndx * 2]]
+
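+		# A get with -partial {beg len} returns up to len bytes of
+		# the record starting at offset beg; len may deliberately
+		# run past the end of the record here, in which case only
+		# the available bytes come back.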
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db get} \
+ $txn {-partial [list $beg $len]} $gflags {$key}]
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+		# Verify that the partial get returned exactly the
+		# requested range; string range clamps at the end of
+		# the record, mirroring how the get is truncated.
+ set k [lindex [lindex $ret 0] 0]
+ set d [lindex [lindex $ret 0] 1]
+ error_check_good dbget_key $k $key
+
+ error_check_good dbget_data $d \
+ [string range $data $beg [expr $beg + $len - 1]]
+
+ }
+ error_check_good db_close [$db close] 0
+ close $did
+}
diff --git a/storage/bdb/test/test020.tcl b/storage/bdb/test/test020.tcl
new file mode 100644
index 00000000000..9b6d939acad
--- /dev/null
+++ b/storage/bdb/test/test020.tcl
@@ -0,0 +1,137 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test020.tcl,v 11.17 2002/05/22 15:42:47 sue Exp $
+#
+# TEST test020
+# TEST In-Memory database tests.
+proc test020 { method {nentries 10000} args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+ if { [is_queueext $method] == 1 || \
+ [is_rbtree $method] == 1 } {
+ puts "Test020 skipping for method $method"
+ return
+ }
+ # Create the database and open the dictionary
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # Check if we are using an env.
+ if { $eindex == -1 } {
+ set env NULL
+ } else {
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
+ }
+ puts "Test020: $method ($args) $nentries equal key/data pairs"
+
+ cleanup $testdir $env
+ set db [eval {berkdb_open \
+ -create -mode 0644} $args {$omethod}]
+ error_check_good dbopen [is_valid_db $db] TRUE
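+	# Note that berkdb_open is given no file name, so the database
+	# lives only in memory and vanishes when it is closed.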
+ set did [open $dict]
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ if { [is_record_based $method] == 1 } {
+ set checkfunc test020_recno.check
+ append gflags " -recno"
+ } else {
+ set checkfunc test020.check
+ }
+ puts "\tTest020.a: put/get loop"
+ # Here is the loop where we put and get each key/data pair
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ global kvals
+
+ set key [expr $count + 1]
+ set kvals($key) [pad_data $method $str]
+ } else {
+ set key $str
+ }
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} \
+ $txn $pflags {$key [chop_data $method $str]}]
+ error_check_good put $ret 0
+ set ret [eval {$db get} $txn $gflags {$key}]
+ error_check_good \
+ get $ret [list [list $key [pad_data $method $str]]]
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ incr count
+ }
+ close $did
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tTest020.b: dump file"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_file $db $txn $t1 $checkfunc
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ # Now compare the keys to see if they match the dictionary (or ints)
+ if { [is_record_based $method] == 1 } {
+ set oid [open $t2 w]
+		for {set i 1} {$i <= $nentries} {incr i} {
+ puts $oid $i
+ }
+ close $oid
+ file rename -force $t1 $t3
+ } else {
+ set q q
+ filehead $nentries $dict $t3
+ filesort $t3 $t2
+ filesort $t1 $t3
+ }
+
+ error_check_good Test020:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+}
+
+# Check function for test020; keys and data are identical
+proc test020.check { key data } {
+ error_check_good "key/data mismatch" $data $key
+}
+
+proc test020_recno.check { key data } {
+ global dict
+ global kvals
+
+ error_check_good key"$key"_exists [info exists kvals($key)] 1
+ error_check_good "data mismatch: key $key" $data $kvals($key)
+}
diff --git a/storage/bdb/test/test021.tcl b/storage/bdb/test/test021.tcl
new file mode 100644
index 00000000000..56936da389a
--- /dev/null
+++ b/storage/bdb/test/test021.tcl
@@ -0,0 +1,162 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test021.tcl,v 11.15 2002/05/22 15:42:47 sue Exp $
+#
+# TEST test021
+# TEST Btree range tests.
+# TEST
+# TEST Use the first 10,000 entries from the dictionary.
+# TEST Insert each with self, reversed as key and self as data.
+# TEST After all are entered, retrieve each using a cursor SET_RANGE, and
+# TEST getting about 20 keys sequentially after it (in some cases we'll
+# TEST run out towards the end of the file).
+proc test021 { method {nentries 10000} args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test021.db
+ set env NULL
+ } else {
+ set testfile test021.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
+ }
+ puts "Test021: $method ($args) $nentries equal key/data pairs"
+
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ cleanup $testdir $env
+ set db [eval {berkdb_open \
+ -create -mode 0644} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set did [open $dict]
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ if { [is_record_based $method] == 1 } {
+ set checkfunc test021_recno.check
+ append gflags " -recno"
+ } else {
+ set checkfunc test021.check
+ }
+ puts "\tTest021.a: put loop"
+ # Here is the loop where we put each key/data pair
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ global kvals
+
+ set key [expr $count + 1]
+ set kvals($key) [pad_data $method $str]
+ } else {
+ set key [reverse $str]
+ }
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set r [eval {$db put} \
+ $txn $pflags {$key [chop_data $method $str]}]
+ error_check_good db_put $r 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ incr count
+ }
+ close $did
+
+ # Now we will get each key from the DB and retrieve about 20
+ # records after it.
+ error_check_good db_close [$db close] 0
+
+ puts "\tTest021.b: test ranges"
+ set db [eval {berkdb_open -rdonly} $args $omethod $testfile ]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Open a cursor
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_substr $dbc $db] 1
+
+ set did [open $dict]
+ set i 0
+ while { [gets $did str] != -1 && $i < $count } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $i + 1]
+ } else {
+ set key [reverse $str]
+ }
+
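+		# -set_range positions the cursor at the smallest key that
+		# is >= the given key, so looking up an existing key lands
+		# on that key itself.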
+ set r [$dbc get -set_range $key]
+ error_check_bad dbc_get:$key [string length $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ $checkfunc $k $d
+
+ for { set nrecs 0 } { $nrecs < 20 } { incr nrecs } {
+ set r [$dbc get "-next"]
+ # no error checking because we may run off the end
+ # of the database
+ if { [llength $r] == 0 } {
+				continue
+ }
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ $checkfunc $k $d
+ }
+ incr i
+ }
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+ close $did
+}
+
+# Check function for test021; keys and data are reversed
+proc test021.check { key data } {
+ error_check_good "key/data mismatch for $key" $data [reverse $key]
+}
+
+proc test021_recno.check { key data } {
+ global dict
+ global kvals
+
+ error_check_good key"$key"_exists [info exists kvals($key)] 1
+ error_check_good "data mismatch: key $key" $data $kvals($key)
+}
diff --git a/storage/bdb/test/test022.tcl b/storage/bdb/test/test022.tcl
new file mode 100644
index 00000000000..d25d7ecdffe
--- /dev/null
+++ b/storage/bdb/test/test022.tcl
@@ -0,0 +1,62 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test022.tcl,v 11.14 2002/05/22 15:42:48 sue Exp $
+#
+# TEST test022
+# TEST Test of DB->getbyteswapped().
+proc test022 { method args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Test022 ($args) $omethod: DB->getbyteswapped()"
+
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile1 "$testdir/test022a.db"
+ set testfile2 "$testdir/test022b.db"
+ set env NULL
+ } else {
+ set testfile1 "test022a.db"
+ set testfile2 "test022b.db"
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ # Create two databases, one in each byte order.
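+	# -lorder 1234 requests little-endian storage and -lorder 4321
+	# big-endian; whichever differs from the host's native order is
+	# the byteswapped one.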
+ set db1 [eval {berkdb_open -create \
+ -mode 0644} $omethod $args {-lorder 1234} $testfile1]
+ error_check_good db1_open [is_valid_db $db1] TRUE
+
+ set db2 [eval {berkdb_open -create \
+ -mode 0644} $omethod $args {-lorder 4321} $testfile2]
+ error_check_good db2_open [is_valid_db $db2] TRUE
+
+ # Call DB->get_byteswapped on both of them.
+ set db1_order [$db1 is_byteswapped]
+ set db2_order [$db2 is_byteswapped]
+
+ # Make sure that both answers are either 1 or 0,
+ # and that exactly one of them is 1.
+ error_check_good is_byteswapped_sensible_1 \
+ [expr ($db1_order == 1 && $db2_order == 0) || \
+ ($db1_order == 0 && $db2_order == 1)] 1
+
+ error_check_good db1_close [$db1 close] 0
+ error_check_good db2_close [$db2 close] 0
+ puts "\tTest022 complete."
+}
diff --git a/storage/bdb/test/test023.tcl b/storage/bdb/test/test023.tcl
new file mode 100644
index 00000000000..c37539a0f55
--- /dev/null
+++ b/storage/bdb/test/test023.tcl
@@ -0,0 +1,221 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test023.tcl,v 11.18 2002/05/22 15:42:48 sue Exp $
+#
+# TEST test023
+# TEST Duplicate test
+# TEST Exercise deletes and cursor operations within a duplicate set.
+# TEST Add a key with duplicates (first time on-page, second time off-page)
+# TEST Number the dups.
+# TEST Delete dups and make sure that CURRENT/NEXT/PREV work correctly.
+proc test023 { method args } {
+ global alphabet
+ global dupnum
+ global dupstr
+ global errorInfo
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+ puts "Test023: $method delete duplicates/check cursor operations"
+ if { [is_record_based $method] == 1 || \
+ [is_rbtree $method] == 1 } {
+ puts "Test023: skipping for method $omethod"
+ return
+ }
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test023.db
+ set env NULL
+ } else {
+ set testfile test023.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ set t1 $testdir/t1
+ cleanup $testdir $env
+ set db [eval {berkdb_open \
+ -create -mode 0644 -dup} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+
+ foreach i { onpage offpage } {
+ if { $i == "onpage" } {
+ set dupstr DUP
+ } else {
+ set dupstr [repeat $alphabet 50]
+ }
+ puts "\tTest023.a: Insert key w/$i dups"
+ set key "duplicate_val_test"
+ for { set count 0 } { $count < 20 } { incr count } {
+ set ret \
+ [eval {$db put} $txn $pflags {$key $count$dupstr}]
+ error_check_good db_put $ret 0
+ }
+
+ # Now let's get all the items and make sure they look OK.
+ puts "\tTest023.b: Check initial duplicates"
+ set dupnum 0
+ dump_file $db $txn $t1 test023.check
+
+		# Delete a few specific items (FIRST, LAST, and one in
+		# the middle).  Make sure that a get -current on the
+		# deleted item comes back empty and that NEXT and PREV
+		# do the right things.
+
+ set ret [$dbc get -set $key]
+ error_check_bad dbc_get:SET [llength $ret] 0
+
+ puts "\tTest023.c: Delete first and try gets"
+ # This should be the first duplicate
+ error_check_good \
+ dbc_get:SET $ret [list [list duplicate_val_test 0$dupstr]]
+
+ # Now delete it.
+ set ret [$dbc del]
+ error_check_good dbc_del:FIRST $ret 0
+
+ # Now current should fail
+ set ret [$dbc get -current]
+ error_check_good dbc_get:CURRENT $ret [list [list [] []]]
+
+ # Now Prev should fail
+ set ret [$dbc get -prev]
+ error_check_good dbc_get:prev0 [llength $ret] 0
+
+ # Now 10 nexts should work to get us in the middle
+ for { set j 1 } { $j <= 10 } { incr j } {
+ set ret [$dbc get -next]
+ error_check_good \
+ dbc_get:next [llength [lindex $ret 0]] 2
+ error_check_good \
+ dbc_get:next [lindex [lindex $ret 0] 1] $j$dupstr
+ }
+
+ puts "\tTest023.d: Delete middle and try gets"
+ # Now do the delete on the current key.
+ set ret [$dbc del]
+ error_check_good dbc_del:10 $ret 0
+
+ # Now current should fail
+ set ret [$dbc get -current]
+ error_check_good \
+ dbc_get:deleted $ret [list [list [] []]]
+
+ # Prev and Next should work
+ set ret [$dbc get -next]
+ error_check_good dbc_get:next [llength [lindex $ret 0]] 2
+ error_check_good \
+ dbc_get:next [lindex [lindex $ret 0] 1] 11$dupstr
+
+ set ret [$dbc get -prev]
+ error_check_good dbc_get:next [llength [lindex $ret 0]] 2
+ error_check_good \
+ dbc_get:next [lindex [lindex $ret 0] 1] 9$dupstr
+
+ # Now go to the last one
+ for { set j 11 } { $j <= 19 } { incr j } {
+ set ret [$dbc get -next]
+ error_check_good \
+ dbc_get:next [llength [lindex $ret 0]] 2
+ error_check_good \
+ dbc_get:next [lindex [lindex $ret 0] 1] $j$dupstr
+ }
+
+ puts "\tTest023.e: Delete last and try gets"
+ # Now do the delete on the current key.
+ set ret [$dbc del]
+ error_check_good dbc_del:LAST $ret 0
+
+ # Now current should fail
+ set ret [$dbc get -current]
+ error_check_good \
+ dbc_get:deleted $ret [list [list [] []]]
+
+ # Next should fail
+ set ret [$dbc get -next]
+ error_check_good dbc_get:next19 [llength $ret] 0
+
+ # Prev should work
+ set ret [$dbc get -prev]
+ error_check_good dbc_get:next [llength [lindex $ret 0]] 2
+ error_check_good \
+ dbc_get:next [lindex [lindex $ret 0] 1] 18$dupstr
+
+ # Now overwrite the current one, then count the number
+ # of data items to make sure that we have the right number.
+
+ puts "\tTest023.f: Count keys, overwrite current, count again"
+		# At this point we should have 17 items (the initial
+		# 20 minus 3 deletes).
+ set dbc2 [eval {$db cursor} $txn]
+ error_check_good db_cursor:2 [is_substr $dbc2 $db] 1
+
+ set count_check 0
+ for { set rec [$dbc2 get -first] } {
+ [llength $rec] != 0 } { set rec [$dbc2 get -next] } {
+ incr count_check
+ }
+ error_check_good numdups $count_check 17
+
+ set ret [$dbc put -current OVERWRITE]
+ error_check_good dbc_put:current $ret 0
+
+ set count_check 0
+ for { set rec [$dbc2 get -first] } {
+ [llength $rec] != 0 } { set rec [$dbc2 get -next] } {
+ incr count_check
+ }
+ error_check_good numdups $count_check 17
+ error_check_good dbc2_close [$dbc2 close] 0
+
+ # Done, delete all the keys for next iteration
+ set ret [eval {$db del} $txn {$key}]
+ error_check_good db_delete $ret 0
+
+ # database should be empty
+
+ set ret [$dbc get -first]
+ error_check_good first_after_empty [llength $ret] 0
+ }
+
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+}
+
+# Check function for test023; data must be the dup number followed by dupstr
+proc test023.check { key data } {
+ global dupnum
+ global dupstr
+ error_check_good "bad key" $key duplicate_val_test
+ error_check_good "data mismatch for $key" $data $dupnum$dupstr
+ incr dupnum
+}
diff --git a/storage/bdb/test/test024.tcl b/storage/bdb/test/test024.tcl
new file mode 100644
index 00000000000..bbdc8fb2253
--- /dev/null
+++ b/storage/bdb/test/test024.tcl
@@ -0,0 +1,268 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test024.tcl,v 11.19 2002/05/22 15:42:48 sue Exp $
+#
+# TEST test024
+# TEST Record number retrieval test.
+# TEST Test the Btree and Record number get-by-number functionality.
+proc test024 { method {nentries 10000} args} {
+ source ./include.tcl
+ global rand_init
+
+ set do_renumber [is_rrecno $method]
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Test024: $method ($args)"
+
+ if { [string compare $omethod "-hash"] == 0 } {
+ puts "Test024 skipping for method HASH"
+ return
+ }
+
+ berkdb srand $rand_init
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test024.db
+ set env NULL
+ } else {
+ set testfile test024.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
+ }
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+
+ cleanup $testdir $env
+
+ # Read the first nentries dictionary elements and reverse them.
+ # Keep a list of these (these will be the keys).
+ puts "\tTest024.a: initialization"
+ set keys ""
+ set did [open $dict]
+ set count 0
+ while { [gets $did str] != -1 && $count < $nentries } {
+ lappend keys [reverse $str]
+ incr count
+ }
+ close $did
+
+ # Generate sorted order for the keys
+ set sorted_keys [lsort $keys]
+ # Create the database
+ if { [string compare $omethod "-btree"] == 0 } {
+ set db [eval {berkdb_open -create \
+ -mode 0644 -recnum} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ } else {
+ set db [eval {berkdb_open -create \
+ -mode 0644} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ }
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+
+ if { [is_record_based $method] == 1 } {
+ set gflags " -recno"
+ }
+
+ puts "\tTest024.b: put/get loop"
+ foreach k $keys {
+ if { [is_record_based $method] == 1 } {
+ set key [lsearch $sorted_keys $k]
+ incr key
+ } else {
+ set key $k
+ }
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} \
+ $txn $pflags {$key [chop_data $method $k]}]
+ error_check_good put $ret 0
+ set ret [eval {$db get} $txn $gflags {$key}]
+ error_check_good \
+ get $ret [list [list $key [pad_data $method $k]]]
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tTest024.c: dump file"
+
+ # Put sorted keys in file
+ set oid [open $t1 w]
+ foreach k $sorted_keys {
+ puts $oid [pad_data $method $k]
+ }
+ close $oid
+
+ # Instead of using dump_file; get all the keys by keynum
+ set oid [open $t2 w]
+ if { [string compare $omethod "-btree"] == 0 } {
+ set do_renumber 1
+ }
+
+ set gflags " -recno"
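+	# On a btree opened with -recnum, a get with the -recno flag
+	# looks records up by logical record number (1-based position in
+	# sorted key order) rather than by key.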
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ for { set k 1 } { $k <= $count } { incr k } {
+ set ret [eval {$db get} $txn $gflags {$k}]
+ puts $oid [lindex [lindex $ret 0] 1]
+ error_check_good recnum_get [lindex [lindex $ret 0] 1] \
+ [pad_data $method [lindex $sorted_keys [expr $k - 1]]]
+ }
+ close $oid
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ error_check_good Test024.c:diff($t1,$t2) \
+ [filecmp $t1 $t2] 0
+
+ # Now, reopen the file and run the last test again.
+ puts "\tTest024.d: close, open, and dump file"
+ set db [eval {berkdb_open -rdonly} $args $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set oid [open $t2 w]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ for { set k 1 } { $k <= $count } { incr k } {
+ set ret [eval {$db get} $txn $gflags {$k}]
+ puts $oid [lindex [lindex $ret 0] 1]
+ error_check_good recnum_get [lindex [lindex $ret 0] 1] \
+ [pad_data $method [lindex $sorted_keys [expr $k - 1]]]
+ }
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ close $oid
+ error_check_good db_close [$db close] 0
+ error_check_good Test024.d:diff($t1,$t2) \
+ [filecmp $t1 $t2] 0
+
+ # Now, reopen the file and run the last test again in reverse direction.
+ puts "\tTest024.e: close, open, and dump file in reverse direction"
+ set db [eval {berkdb_open -rdonly} $args $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ # Put sorted keys in file
+ set rsorted ""
+ foreach k $sorted_keys {
+ set rsorted [linsert $rsorted 0 $k]
+ }
+ set oid [open $t1 w]
+ foreach k $rsorted {
+ puts $oid [pad_data $method $k]
+ }
+ close $oid
+
+ set oid [open $t2 w]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ for { set k $count } { $k > 0 } { incr k -1 } {
+ set ret [eval {$db get} $txn $gflags {$k}]
+ puts $oid [lindex [lindex $ret 0] 1]
+ error_check_good recnum_get [lindex [lindex $ret 0] 1] \
+ [pad_data $method [lindex $sorted_keys [expr $k - 1]]]
+ }
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ close $oid
+ error_check_good db_close [$db close] 0
+ error_check_good Test024.e:diff($t1,$t2) \
+ [filecmp $t1 $t2] 0
+
+ # Now try deleting elements and making sure they work
+ puts "\tTest024.f: delete test"
+ set db [eval {berkdb_open} $args $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ while { $count > 0 } {
+ set kndx [berkdb random_int 1 $count]
+ set kval [lindex $keys [expr $kndx - 1]]
+ set recno [expr [lsearch $sorted_keys $kval] + 1]
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ if { [is_record_based $method] == 1 } {
+ set ret [eval {$db del} $txn {$recno}]
+ } else {
+ set ret [eval {$db del} $txn {$kval}]
+ }
+ error_check_good delete $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ # Remove the key from the key list
+ set ndx [expr $kndx - 1]
+ set keys [lreplace $keys $ndx $ndx]
+
+ if { $do_renumber == 1 } {
+ set r [expr $recno - 1]
+ set sorted_keys [lreplace $sorted_keys $r $r]
+ }
+
+ # Check that the keys after it have been renumbered
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ if { $do_renumber == 1 && $recno != $count } {
+ set r [expr $recno - 1]
+ set ret [eval {$db get} $txn $gflags {$recno}]
+ error_check_good get_after_del \
+ [lindex [lindex $ret 0] 1] [lindex $sorted_keys $r]
+ }
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ # Decrement count
+ incr count -1
+ }
+ error_check_good db_close [$db close] 0
+}
diff --git a/storage/bdb/test/test025.tcl b/storage/bdb/test/test025.tcl
new file mode 100644
index 00000000000..180a1aa2939
--- /dev/null
+++ b/storage/bdb/test/test025.tcl
@@ -0,0 +1,146 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test025.tcl,v 11.19 2002/05/24 15:24:54 sue Exp $
+#
+# TEST test025
+# TEST DB_APPEND flag test.
+proc test025 { method {nentries 10000} {start 0 } {tnum "25" } args} {
+ global kvals
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+ puts "Test0$tnum: $method ($args)"
+
+ if { [string compare $omethod "-btree"] == 0 } {
+ puts "Test0$tnum skipping for method BTREE"
+ return
+ }
+ if { [string compare $omethod "-hash"] == 0 } {
+ puts "Test0$tnum skipping for method HASH"
+ return
+ }
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum.db
+ set env NULL
+ } else {
+ set testfile test0$tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
+ }
+ set t1 $testdir/t1
+
+ cleanup $testdir $env
+ set db [eval {berkdb_open \
+ -create -mode 0644} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set did [open $dict]
+
+ puts "\tTest0$tnum.a: put/get loop"
+ set gflags " -recno"
+ set pflags " -append"
+ set txn ""
+ set checkfunc test025_check
+
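+	# A put with -append takes only a data item, allocates the next
+	# record number for it, and returns that number; the db_put
+	# checks below compare that return value to the expected key.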
+ # Here is the loop where we put and get each key/data pair
+ set count $start
+ set nentries [expr $start + $nentries]
+ if { $count != 0 } {
+ gets $did str
+ set k [expr $count + 1]
+ set kvals($k) [pad_data $method $str]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$k [chop_data $method $str]}]
+ error_check_good db_put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ incr count
+ }
+
+ while { [gets $did str] != -1 && $count < $nentries } {
+ set k [expr $count + 1]
+ set kvals($k) [pad_data $method $str]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn $pflags {[chop_data $method $str]}]
+ error_check_good db_put $ret $k
+
+ set ret [eval {$db get} $txn $gflags {$k}]
+ error_check_good \
+ get $ret [list [list $k [pad_data $method $str]]]
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ # The recno key will be count + 1, so when we hit
+ # UINT32_MAX - 1, reset to 0.
+ if { $count == [expr 0xfffffffe] } {
+ set count 0
+ } else {
+ incr count
+ }
+ }
+ close $did
+
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tTest0$tnum.b: dump file"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_file $db $txn $t1 $checkfunc
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ puts "\tTest0$tnum.c: close, open, and dump file"
+ # Now, reopen the file and run the last test again.
+ open_and_dump_file $testfile $env $t1 $checkfunc \
+ dump_file_direction -first -next
+
+ # Now, reopen the file and run the last test again in the
+ # reverse direction.
+ puts "\tTest0$tnum.d: close, open, and dump file in reverse direction"
+ open_and_dump_file $testfile $env $t1 $checkfunc \
+ dump_file_direction -last -prev
+}
+
+proc test025_check { key data } {
+ global kvals
+
+ error_check_good key"$key"_exists [info exists kvals($key)] 1
+ error_check_good " key/data mismatch for |$key|" $data $kvals($key)
+}
diff --git a/storage/bdb/test/test026.tcl b/storage/bdb/test/test026.tcl
new file mode 100644
index 00000000000..ce65e925d35
--- /dev/null
+++ b/storage/bdb/test/test026.tcl
@@ -0,0 +1,155 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test026.tcl,v 11.20 2002/06/11 14:09:56 sue Exp $
+#
+# TEST test026
+# TEST Small keys/medium data w/duplicates
+# TEST Put/get per key.
+# TEST Loop through keys -- delete each key
+# TEST ... test that cursors delete duplicates correctly
+# TEST
+# TEST Keyed delete test through cursor. If ndups is small, this will
+# TEST test on-page dups; if it is large, it will test off-page dups.
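+#
+# Whether a duplicate set stays on-page depends on whether it still
+# fits on a leaf page; test027 reruns this test with 100 dups per key
+# to force the off-page case.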
+proc test026 { method {nentries 2000} {ndups 5} {tnum 26} args} {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_record_based $method] == 1 || \
+ [is_rbtree $method] == 1 } {
+ puts "Test0$tnum skipping for method $method"
+ return
+ }
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum.db
+ set env NULL
+ } else {
+ set testfile test0$tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the defaults down a bit.
+ # If we are wanting a lot of dups, set that
+ # down a bit or repl testing takes very long.
+ #
+ if { $nentries == 2000 } {
+ set nentries 100
+ }
+ reduce_dups nentries ndups
+ }
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+ puts "Test0$tnum: $method ($args) $nentries keys\
+ with $ndups dups; cursor delete test"
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ # Here is the loop where we put and get each key/data pair
+
+ puts "\tTest0$tnum.a: Put loop"
+ set db [eval {berkdb_open -create \
+ -mode 0644} $args {$omethod -dup $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set did [open $dict]
+ while { [gets $did str] != -1 && $count < [expr $nentries * $ndups] } {
+ set datastr [ make_data_str $str ]
+ for { set j 1 } { $j <= $ndups} {incr j} {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} \
+ $txn $pflags {$str [chop_data $method $j$datastr]}]
+ error_check_good db_put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ incr count
+ }
+ }
+ close $did
+
+ error_check_good db_close [$db close] 0
+ set db [eval {berkdb_open} $args $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Now we will sequentially traverse the database getting each
+ # item and deleting it.
+ set count 0
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_substr $dbc $db] 1
+
+ puts "\tTest0$tnum.b: Get/delete loop"
+ set i 1
+ for { set ret [$dbc get -first] } {
+ [string length $ret] != 0 } {
+ set ret [$dbc get -next] } {
+
+ set key [lindex [lindex $ret 0] 0]
+ set data [lindex [lindex $ret 0] 1]
+ if { $i == 1 } {
+ set curkey $key
+ }
+ error_check_good seq_get:key $key $curkey
+ error_check_good \
+ seq_get:data $data [pad_data $method $i[make_data_str $key]]
+
+ if { $i == $ndups } {
+ set i 1
+ } else {
+ incr i
+ }
+
+ # Now delete the key
+ set ret [$dbc del]
+ error_check_good db_del:$key $ret 0
+ }
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ puts "\tTest0$tnum.c: Verify empty file"
+ # Double check that file is now empty
+ set db [eval {berkdb_open} $args $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_substr $dbc $db] 1
+ set ret [$dbc get -first]
+ error_check_good get_on_empty [string length $ret] 0
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+}
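+
+# The heart of the test above is the get/delete cursor loop: with -dup
+# set, one key holds several data items, and [$dbc del] removes only the
+# duplicate the cursor is positioned on, so walking with -first/-next and
+# deleting as we go empties the file one duplicate at a time.  A hedged
+# sketch of that pattern (proc name illustrative; not called by the suite):
+proc sketch_cursor_delete_all { db } {
+ set dbc [$db cursor]
+ for { set ret [$dbc get -first] } { [llength $ret] != 0 } \
+     { set ret [$dbc get -next] } {
+ # Delete the item under the cursor, then step forward.
+ error_check_good dbc_del [$dbc del] 0
+ }
+ error_check_good dbc_close [$dbc close] 0
+}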
diff --git a/storage/bdb/test/test027.tcl b/storage/bdb/test/test027.tcl
new file mode 100644
index 00000000000..a0f6dfa4dcb
--- /dev/null
+++ b/storage/bdb/test/test027.tcl
@@ -0,0 +1,17 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test027.tcl,v 11.7 2002/01/11 15:53:45 bostic Exp $
+#
+# TEST test027
+# TEST Off-page duplicate test
+# TEST Test026 with parameters to force off-page duplicates.
+# TEST
+# TEST Check that delete operations work. Create a database; close
+# TEST the database and reopen it. Then issue a delete by key for
+# TEST each entry.
+proc test027 { method {nentries 100} args} {
+ eval {test026 $method $nentries 100 27} $args
+}
diff --git a/storage/bdb/test/test028.tcl b/storage/bdb/test/test028.tcl
new file mode 100644
index 00000000000..a546744fdac
--- /dev/null
+++ b/storage/bdb/test/test028.tcl
@@ -0,0 +1,222 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test028.tcl,v 11.20 2002/07/01 15:03:45 krinsky Exp $
+#
+# TEST test028
+# TEST Cursor delete test
+# TEST Test put operations after deleting through a cursor.
+proc test028 { method args } {
+ global dupnum
+ global dupstr
+ global alphabet
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Test028: $method put after cursor delete test"
+
+ if { [is_rbtree $method] == 1 } {
+ puts "Test028 skipping for method $method"
+ return
+ }
+ if { [is_record_based $method] == 1 } {
+ set key 10
+ } else {
+ append args " -dup"
+ set key "put_after_cursor_del"
+ }
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test028.db
+ set env NULL
+ } else {
+ set testfile test028.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ set t1 $testdir/t1
+ cleanup $testdir $env
+ set db [eval {berkdb_open \
+ -create -mode 0644} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set ndups 20
+ set txn ""
+ set pflags ""
+ set gflags ""
+
+ if { [is_record_based $method] == 1 } {
+ set gflags " -recno"
+ }
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_substr $dbc $db] 1
+
+ foreach i { offpage onpage } {
+ foreach b { bigitem smallitem } {
+ if { $i == "onpage" } {
+ if { $b == "bigitem" } {
+ set dupstr [repeat $alphabet 100]
+ } else {
+ set dupstr DUP
+ }
+ } else {
+ if { $b == "bigitem" } {
+ set dupstr [repeat $alphabet 100]
+ } else {
+ set dupstr [repeat $alphabet 50]
+ }
+ }
+
+ if { $b == "bigitem" } {
+ set dupstr [repeat $dupstr 10]
+ }
+ puts "\tTest028: $i/$b"
+
+ puts "\tTest028.a: Insert key with single data item"
+ set ret [eval {$db put} \
+ $txn $pflags {$key [chop_data $method $dupstr]}]
+ error_check_good db_put $ret 0
+
+ # Now let's get the item and make sure it's OK.
+ puts "\tTest028.b: Check initial entry"
+ set ret [eval {$db get} $txn $gflags {$key}]
+ error_check_good db_get \
+ $ret [list [list $key [pad_data $method $dupstr]]]
+
+ # Now try a put with NOOVERWRITE set (this should return an error)
+ puts "\tTest028.c: No_overwrite test"
+ set ret [eval {$db put} $txn \
+ {-nooverwrite $key [chop_data $method $dupstr]}]
+ error_check_good \
+ db_put [is_substr $ret "DB_KEYEXIST"] 1
+
+ # Now delete the item with a cursor
+ puts "\tTest028.d: Delete test"
+ set ret [$dbc get -set $key]
+ error_check_bad dbc_get:SET [llength $ret] 0
+
+ set ret [$dbc del]
+ error_check_good dbc_del $ret 0
+
+ puts "\tTest028.e: Reput the item"
+ set ret [eval {$db put} $txn \
+ {-nooverwrite $key [chop_data $method $dupstr]}]
+ error_check_good db_put $ret 0
+
+ puts "\tTest028.f: Retrieve the item"
+ set ret [eval {$db get} $txn $gflags {$key}]
+ error_check_good db_get $ret \
+ [list [list $key [pad_data $method $dupstr]]]
+
+ # Delete the key to set up for next test
+ set ret [eval {$db del} $txn {$key}]
+ error_check_good db_del $ret 0
+
+ # Now repeat the above set of tests with
+ # duplicates (if not RECNO).
+ if { [is_record_based $method] == 1 } {
+ continue;
+ }
+
+ puts "\tTest028.g: Insert key with duplicates"
+ for { set count 0 } { $count < $ndups } { incr count } {
+ set ret [eval {$db put} $txn \
+ {$key [chop_data $method $count$dupstr]}]
+ error_check_good db_put $ret 0
+ }
+
+ puts "\tTest028.h: Check dups"
+ set dupnum 0
+ dump_file $db $txn $t1 test028.check
+
+ # Try no_overwrite
+ puts "\tTest028.i: No_overwrite test"
+ set ret [eval {$db put} \
+ $txn {-nooverwrite $key $dupstr}]
+ error_check_good \
+ db_put [is_substr $ret "DB_KEYEXIST"] 1
+
+ # Now delete all the elements with a cursor
+ puts "\tTest028.j: Cursor Deletes"
+ set count 0
+ for { set ret [$dbc get -set $key] } {
+ [string length $ret] != 0 } {
+ set ret [$dbc get -next] } {
+ set k [lindex [lindex $ret 0] 0]
+ set d [lindex [lindex $ret 0] 1]
+ error_check_good db_seq(key) $k $key
+ error_check_good db_seq(data) $d $count$dupstr
+ set ret [$dbc del]
+ error_check_good dbc_del $ret 0
+ incr count
+ if { $count == [expr $ndups - 1] } {
+ puts "\tTest028.k:\
+ Duplicate No_Overwrite test"
+ set ret [eval {$db put} $txn \
+ {-nooverwrite $key $dupstr}]
+ error_check_good db_put [is_substr \
+ $ret "DB_KEYEXIST"] 1
+ }
+ }
+
+ # Make sure all the items are gone
+ puts "\tTest028.l: Get after delete"
+ set ret [$dbc get -set $key]
+ error_check_good get_after_del [string length $ret] 0
+
+ puts "\tTest028.m: Reput the item"
+ set ret [eval {$db put} \
+ $txn {-nooverwrite $key 0$dupstr}]
+ error_check_good db_put $ret 0
+ for { set count 1 } { $count < $ndups } { incr count } {
+ set ret [eval {$db put} $txn \
+ {$key $count$dupstr}]
+ error_check_good db_put $ret 0
+ }
+
+ puts "\tTest028.n: Retrieve the item"
+ set dupnum 0
+ dump_file $db $txn $t1 test028.check
+
+ # Clean out in prep for next test
+ set ret [eval {$db del} $txn {$key}]
+ error_check_good db_del $ret 0
+ }
+ }
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+}
+
+# Check function for test028; keys and data are identical
+proc test028.check { key data } {
+ global dupnum
+ global dupstr
+ error_check_good "Bad key" $key put_after_cursor_del
+ error_check_good "data mismatch for $key" $data $dupnum$dupstr
+ incr dupnum
+}
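+
+# The no-overwrite checks above hinge on one rule: a put with -nooverwrite
+# returns DB_KEYEXIST while any data item remains under the key, and
+# succeeds again once the key is gone.  A hedged sketch of that round trip
+# (proc name illustrative; not called by the suite):
+proc sketch_nooverwrite { db key data } {
+ error_check_good first_put [$db put -nooverwrite $key $data] 0
+ # A second no-overwrite put on the same key must report KEYEXIST.
+ set ret [$db put -nooverwrite $key $data]
+ error_check_good keyexist [is_substr $ret "DB_KEYEXIST"] 1
+ # After a full delete of the key the no-overwrite put succeeds again.
+ error_check_good del [$db del $key] 0
+ error_check_good reput [$db put -nooverwrite $key $data] 0
+}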
diff --git a/storage/bdb/test/test029.tcl b/storage/bdb/test/test029.tcl
new file mode 100644
index 00000000000..8e4b8aa6e41
--- /dev/null
+++ b/storage/bdb/test/test029.tcl
@@ -0,0 +1,245 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test029.tcl,v 11.20 2002/06/29 13:44:44 bostic Exp $
+#
+# TEST test029
+# TEST Test the Btree and Record number renumbering.
+proc test029 { method {nentries 10000} args} {
+ source ./include.tcl
+
+ set do_renumber [is_rrecno $method]
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Test029: $method ($args)"
+
+ if { [string compare $omethod "-hash"] == 0 } {
+ puts "Test029 skipping for method HASH"
+ return
+ }
+ if { [is_record_based $method] == 1 && $do_renumber != 1 } {
+ puts "Test029 skipping for method RECNO (w/out renumbering)"
+ return
+ }
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test029.db
+ set env NULL
+ } else {
+ set testfile test029.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ # Do not set nentries down to 100 until we
+ # fix SR #5958.
+ set nentries 1000
+ }
+ }
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ # Read the first nentries dictionary elements and reverse them.
+ # Keep a list of these (these will be the keys).
+ puts "\tTest029.a: initialization"
+ set keys ""
+ set did [open $dict]
+ set count 0
+ while { [gets $did str] != -1 && $count < $nentries } {
+ lappend keys [reverse $str]
+ incr count
+ }
+ close $did
+
+ # Generate sorted order for the keys
+ set sorted_keys [lsort $keys]
+
+ # Save the first and last keys
+ set last_key [lindex $sorted_keys end]
+ set last_keynum [llength $sorted_keys]
+
+ set first_key [lindex $sorted_keys 0]
+ set first_keynum 1
+
+ # Create the database
+ if { [string compare $omethod "-btree"] == 0 } {
+ set db [eval {berkdb_open -create \
+ -mode 0644 -recnum} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ } else {
+ set db [eval {berkdb_open -create \
+ -mode 0644} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ }
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+
+ if { [is_record_based $method] == 1 } {
+ append gflags " -recno"
+ }
+
+ puts "\tTest029.b: put/get loop"
+ foreach k $keys {
+ if { [is_record_based $method] == 1 } {
+ set key [lsearch $sorted_keys $k]
+ incr key
+ } else {
+ set key $k
+ }
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} \
+ $txn $pflags {$key [chop_data $method $k]}]
+ error_check_good dbput $ret 0
+
+ set ret [eval {$db get} $txn $gflags {$key}]
+ error_check_good dbget [lindex [lindex $ret 0] 1] $k
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ # Now delete the first key in the database
+ puts "\tTest029.c: delete and verify renumber"
+
+ # Delete the first key in the file
+ if { [is_record_based $method] == 1 } {
+ set key $first_keynum
+ } else {
+ set key $first_key
+ }
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db del} $txn {$key}]
+ error_check_good db_del $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ # Now we are ready to retrieve records based on
+ # record number
+ if { [string compare $omethod "-btree"] == 0 } {
+ append gflags " -recno"
+ }
+
+ # First try to get the old last key (shouldn't exist)
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db get} $txn $gflags {$last_keynum}]
+ error_check_good get_after_del $ret [list]
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ # Now try to get what we think should be the last key
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db get} $txn $gflags {[expr $last_keynum - 1]}]
+ error_check_good \
+ getn_last_after_del [lindex [lindex $ret 0] 1] $last_key
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ # Create a cursor; we need it for the next test and we
+ # need it for recno here.
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+
+ # OK, now re-put the first key and make sure that we
+ # renumber the last key appropriately.
+ if { [string compare $omethod "-btree"] == 0 } {
+ set ret [eval {$db put} $txn \
+ {$key [chop_data $method $first_key]}]
+ error_check_good db_put $ret 0
+ } else {
+ # Recno
+ set ret [$dbc get -first]
+ set ret [eval {$dbc put} $pflags {-before $first_key}]
+ error_check_bad dbc_put:DB_BEFORE $ret 0
+ }
+
+ # Now check that the last record matches the last record number
+ set ret [eval {$db get} $txn $gflags {$last_keynum}]
+ error_check_good \
+ getn_last_after_put [lindex [lindex $ret 0] 1] $last_key
+
+ # Now delete the first key in the database using a cursor
+ puts "\tTest029.d: delete with cursor and verify renumber"
+
+ set ret [$dbc get -first]
+ error_check_good dbc_first $ret [list [list $key $first_key]]
+
+ # Now delete at the cursor
+ set ret [$dbc del]
+ error_check_good dbc_del $ret 0
+
+ # Now check the record numbers of the last keys again.
+ # First try to get the old last key (shouldn't exist)
+ set ret [eval {$db get} $txn $gflags {$last_keynum}]
+ error_check_good get_last_after_cursor_del:$ret $ret [list]
+
+ # Now try to get what we think should be the last key
+ set ret [eval {$db get} $txn $gflags {[expr $last_keynum - 1]}]
+ error_check_good \
+ getn_after_cursor_del [lindex [lindex $ret 0] 1] $last_key
+
+ # Re-put the first key and make sure that we renumber the last
+ # key appropriately.
+ puts "\tTest029.e: put with cursor and verify renumber"
+ if { [string compare $omethod "-btree"] == 0 } {
+ set ret [eval {$dbc put} \
+ $pflags {-current $first_key}]
+ error_check_good dbc_put:DB_CURRENT $ret 0
+ } else {
+ set ret [eval {$dbc put} $pflags {-before $first_key}]
+ error_check_bad dbc_put:DB_BEFORE $ret 0
+ }
+
+ # Now check that the last record matches the last record number
+ set ret [eval {$db get} $txn $gflags {$last_keynum}]
+ error_check_good \
+ get_after_cursor_reput [lindex [lindex $ret 0] 1] $last_key
+
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+}
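+
+# Renumbering is the point of the checks above: deleting record 1 shifts
+# every later record down by one, so the former last record answers to
+# record number N - 1, and re-inserting in front shifts it back to N.  A
+# hedged sketch of the delete half (proc name illustrative; assumes a
+# renumbering recno database or a btree opened with -recnum):
+proc sketch_renumber { db last_keynum } {
+ error_check_good del_first [$db del 1] 0
+ # The old last record number no longer exists ...
+ error_check_good gone [llength [$db get -recno $last_keynum]] 0
+ # ... because that record now answers to last_keynum - 1.
+ set ret [$db get -recno [expr $last_keynum - 1]]
+ error_check_bad moved_down [llength $ret] 0
+}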
diff --git a/storage/bdb/test/test030.tcl b/storage/bdb/test/test030.tcl
new file mode 100644
index 00000000000..d91359f07a0
--- /dev/null
+++ b/storage/bdb/test/test030.tcl
@@ -0,0 +1,231 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test030.tcl,v 11.18 2002/05/22 15:42:50 sue Exp $
+#
+# TEST test030
+# TEST Test DB_NEXT_DUP Functionality.
+proc test030 { method {nentries 10000} args } {
+ global rand_init
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_record_based $method] == 1 ||
+ [is_rbtree $method] == 1 } {
+ puts "Test030 skipping for method $method"
+ return
+ }
+ berkdb srand $rand_init
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test030.db
+ set cntfile $testdir/cntfile.db
+ set env NULL
+ } else {
+ set testfile test030.db
+ set cntfile cntfile.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
+ }
+
+ puts "Test030: $method ($args) $nentries DB_NEXT_DUP testing"
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ cleanup $testdir $env
+
+ set db [eval {berkdb_open -create \
+ -mode 0644 -dup} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Use a second DB to keep track of how many duplicates
+ # we enter per key
+
+ set cntdb [eval {berkdb_open -create \
+ -mode 0644} $args {-btree $cntfile}]
+ error_check_good dbopen:cntfile [is_valid_db $cntdb] TRUE
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ # Here is the loop where we put and get each key/data pair
+ # We will add between 1 and 10 dups with values 1 ... ndup
+ # We'll verify each addition.
+
+ set did [open $dict]
+ puts "\tTest030.a: put and get duplicate keys."
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+
+ while { [gets $did str] != -1 && $count < $nentries } {
+ set ndup [berkdb random_int 1 10]
+
+ for { set i 1 } { $i <= $ndup } { incr i 1 } {
+ set ctxn ""
+ if { $txnenv == 1 } {
+ set ct [$env txn]
+ error_check_good txn \
+ [is_valid_txn $ct $env] TRUE
+ set ctxn "-txn $ct"
+ }
+ set ret [eval {$cntdb put} \
+ $ctxn $pflags {$str [chop_data $method $ndup]}]
+ error_check_good put_cnt $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$ct commit] 0
+ }
+ set datastr $i:$str
+ set ret [eval {$db put} \
+ $txn $pflags {$str [chop_data $method $datastr]}]
+ error_check_good put $ret 0
+ }
+
+ # Now retrieve all the keys matching this key
+ set x 0
+ for {set ret [$dbc get -set $str]} \
+ {[llength $ret] != 0} \
+ {set ret [$dbc get -nextdup] } {
+ incr x
+
+ if { [llength $ret] == 0 } {
+ break
+ }
+
+ set k [lindex [lindex $ret 0] 0]
+ if { [string compare $k $str] != 0 } {
+ break
+ }
+
+ set datastr [lindex [lindex $ret 0] 1]
+ set d [data_of $datastr]
+ error_check_good Test030:put $d $str
+
+ set id [ id_of $datastr ]
+ error_check_good Test030:dup# $id $x
+ }
+ error_check_good Test030:numdups $x $ndup
+ incr count
+ }
+ close $did
+
+ # Verify on sequential pass of entire file
+ puts "\tTest030.b: sequential check"
+
+ # We can't just set lastkey to a null string, since that might
+ # be a key now!
+ set lastkey "THIS STRING WILL NEVER BE A KEY"
+
+ for {set ret [$dbc get -first]} \
+ {[llength $ret] != 0} \
+ {set ret [$dbc get -next] } {
+
+ # Outer loop should always get a new key
+
+ set k [lindex [lindex $ret 0] 0]
+ error_check_bad outer_get_loop:key $k $lastkey
+
+ set datastr [lindex [lindex $ret 0] 1]
+ set d [data_of $datastr]
+ set id [ id_of $datastr ]
+
+ error_check_good outer_get_loop:data $d $k
+ error_check_good outer_get_loop:id $id 1
+
+ set lastkey $k
+ # Figure out how many dups we should have
+ if { $txnenv == 1 } {
+ set ct [$env txn]
+ error_check_good txn [is_valid_txn $ct $env] TRUE
+ set ctxn "-txn $ct"
+ }
+ set ret [eval {$cntdb get} $ctxn $pflags {$k}]
+ set ndup [lindex [lindex $ret 0] 1]
+ if { $txnenv == 1 } {
+ error_check_good txn [$ct commit] 0
+ }
+
+ set howmany 1
+ for { set ret [$dbc get -nextdup] } \
+ { [llength $ret] != 0 } \
+ { set ret [$dbc get -nextdup] } {
+ incr howmany
+
+ set k [lindex [lindex $ret 0] 0]
+ error_check_good inner_get_loop:key $k $lastkey
+
+ set datastr [lindex [lindex $ret 0] 1]
+ set d [data_of $datastr]
+ set id [ id_of $datastr ]
+
+ error_check_good inner_get_loop:data $d $k
+ error_check_good inner_get_loop:id $id $howmany
+
+ }
+ error_check_good ndups_found $howmany $ndup
+ }
+
+ # Verify on key lookup
+ puts "\tTest030.c: keyed check"
+ set cnt_dbc [$cntdb cursor]
+ for {set ret [$cnt_dbc get -first]} \
+ {[llength $ret] != 0} \
+ {set ret [$cnt_dbc get -next] } {
+ set k [lindex [lindex $ret 0] 0]
+
+ set howmany [lindex [lindex $ret 0] 1]
+ error_check_bad cnt_seq:data [string length $howmany] 0
+
+ set i 0
+ for {set ret [$dbc get -set $k]} \
+ {[llength $ret] != 0} \
+ {set ret [$dbc get -nextdup] } {
+ incr i
+
+ set k [lindex [lindex $ret 0] 0]
+
+ set datastr [lindex [lindex $ret 0] 1]
+ set d [data_of $datastr]
+ set id [ id_of $datastr ]
+
+ error_check_good inner_get_loop:data $d $k
+ error_check_good inner_get_loop:id $id $i
+ }
+ error_check_good keyed_count $i $howmany
+
+ }
+ error_check_good cnt_curs_close [$cnt_dbc close] 0
+ error_check_good db_curs_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good cnt_file_close [$cntdb close] 0
+ error_check_good db_file_close [$db close] 0
+}
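+
+# DB_NEXT_DUP (-nextdup) only returns items that share the current key,
+# so positioning with -set and stepping with -nextdup visits exactly one
+# duplicate set, which is how both verification passes above bound their
+# inner loops.  A minimal sketch (proc name illustrative; not called by
+# the suite):
+proc sketch_count_dups { db key } {
+ set dbc [$db cursor]
+ set n 0
+ for { set ret [$dbc get -set $key] } { [llength $ret] != 0 } \
+     { set ret [$dbc get -nextdup] } {
+ incr n
+ }
+ error_check_good dbc_close [$dbc close] 0
+ return $n
+}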
diff --git a/storage/bdb/test/test031.tcl b/storage/bdb/test/test031.tcl
new file mode 100644
index 00000000000..0006deb2d99
--- /dev/null
+++ b/storage/bdb/test/test031.tcl
@@ -0,0 +1,230 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test031.tcl,v 11.24 2002/06/26 06:22:44 krinsky Exp $
+#
+# TEST test031
+# TEST Duplicate sorting functionality
+# TEST Make sure DB_NODUPDATA works.
+# TEST
+# TEST Use the first 10,000 entries from the dictionary.
+# TEST Insert each with self as key and "ndups" duplicates
+# TEST For the data field, prepend random five-char strings (see test032)
+# TEST so that we force the duplicate sorting code to do something.
+# TEST Along the way, test that we cannot insert duplicate duplicates
+# TEST using DB_NODUPDATA.
+# TEST
+# TEST By setting ndups large, we can make this an off-page test
+# TEST After all are entered, retrieve all; verify output.
+# TEST Close file, reopen, do retrieve and re-verify.
+# TEST This does not work for recno
+proc test031 { method {nentries 10000} {ndups 5} {tnum 31} args } {
+ global alphabet
+ global rand_init
+ source ./include.tcl
+
+ berkdb srand $rand_init
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum.db
+ set checkdb $testdir/checkdb.db
+ set env NULL
+ } else {
+ set testfile test0$tnum.db
+ set checkdb checkdb.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ reduce_dups nentries ndups
+ }
+ set testdir [get_home $env]
+ }
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ cleanup $testdir $env
+
+ puts "Test0$tnum: \
+ $method ($args) $nentries small $ndups sorted dup key/data pairs"
+ if { [is_record_based $method] == 1 || \
+ [is_rbtree $method] == 1 } {
+ puts "Test0$tnum skipping for method $omethod"
+ return
+ }
+ set db [eval {berkdb_open -create \
+ -mode 0644} $args {$omethod -dup -dupsort $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set did [open $dict]
+
+ set check_db [eval {berkdb_open \
+ -create -mode 0644} $args {-hash $checkdb}]
+ error_check_good dbopen:check_db [is_valid_db $check_db] TRUE
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ # Here is the loop where we put and get each key/data pair
+ puts "\tTest0$tnum.a: Put/get loop, check nodupdata"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good cursor_open [is_valid_cursor $dbc $db] TRUE
+ while { [gets $did str] != -1 && $count < $nentries } {
+ # Re-initialize random string generator
+ randstring_init $ndups
+
+ set dups ""
+ for { set i 1 } { $i <= $ndups } { incr i } {
+ set pref [randstring]
+ set dups $dups$pref
+ set datastr $pref:$str
+ if { $i == 2 } {
+ set nodupstr $datastr
+ }
+ set ret [eval {$db put} \
+ $txn $pflags {$str [chop_data $method $datastr]}]
+ error_check_good put $ret 0
+ }
+
+ # Test DB_NODUPDATA using the DB handle
+ set ret [eval {$db put -nodupdata} \
+ $txn $pflags {$str [chop_data $method $nodupstr]}]
+ error_check_good db_nodupdata [is_substr $ret "DB_KEYEXIST"] 1
+
+ set ret [eval {$check_db put} \
+ $txn $pflags {$str [chop_data $method $dups]}]
+ error_check_good checkdb_put $ret 0
+
+ # Now retrieve all the keys matching this key
+ set x 0
+ set lastdup ""
+ # Test DB_NODUPDATA using cursor handle
+ set ret [$dbc get -set $str]
+ error_check_bad dbc_get [llength $ret] 0
+ set datastr [lindex [lindex $ret 0] 1]
+ error_check_bad dbc_data [string length $datastr] 0
+ set ret [eval {$dbc put -nodupdata} \
+ {$str [chop_data $method $datastr]}]
+ error_check_good dbc_nodupdata [is_substr $ret "DB_KEYEXIST"] 1
+
+ for {set ret [$dbc get -set $str]} \
+ {[llength $ret] != 0} \
+ {set ret [$dbc get -nextdup] } {
+ set k [lindex [lindex $ret 0] 0]
+ if { [string compare $k $str] != 0 } {
+ break
+ }
+ set datastr [lindex [lindex $ret 0] 1]
+ if {[string length $datastr] == 0} {
+ break
+ }
+ if {[string compare \
+ $lastdup [pad_data $method $datastr]] > 0} {
+ error_check_good \
+ sorted_dups($lastdup,$datastr) 0 1
+ }
+ incr x
+ set lastdup $datastr
+ }
+ error_check_good "Test0$tnum:ndups:$str" $x $ndups
+ incr count
+ }
+ error_check_good cursor_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ close $did
+
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tTest0$tnum.b: Checking file for correct duplicates"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good cursor_open(2) [is_valid_cursor $dbc $db] TRUE
+
+ set lastkey "THIS WILL NEVER BE A KEY VALUE"
+ # No need to delete the sentinel $lastkey; it was never stored.
+ set firsttimethru 1
+ for {set ret [$dbc get -first]} \
+ {[llength $ret] != 0} \
+ {set ret [$dbc get -next] } {
+ set k [lindex [lindex $ret 0] 0]
+ set d [lindex [lindex $ret 0] 1]
+ error_check_bad data_check:$d [string length $d] 0
+
+ if { [string compare $k $lastkey] != 0 } {
+ # Remove last key from the checkdb
+ if { $firsttimethru != 1 } {
+ error_check_good check_db:del:$lastkey \
+ [eval {$check_db del} $txn {$lastkey}] 0
+ }
+ set firsttimethru 0
+ set lastdup ""
+ set lastkey $k
+ set dups [lindex [lindex [eval {$check_db get} \
+ $txn {$k}] 0] 1]
+ error_check_good check_db:get:$k \
+ [string length $dups] [expr $ndups * 4]
+ }
+
+ if { [string compare $lastdup $d] > 0 } {
+ error_check_good dup_check:$k:$d 0 1
+ }
+ set lastdup $d
+
+ set pref [string range $d 0 3]
+ set ndx [string first $pref $dups]
+ error_check_good valid_duplicate [expr $ndx >= 0] 1
+ set a [string range $dups 0 [expr $ndx - 1]]
+ set b [string range $dups [expr $ndx + 4] end]
+ set dups $a$b
+ }
+ # Remove last key from the checkdb
+ if { [string length $lastkey] != 0 } {
+ error_check_good check_db:del:$lastkey \
+ [eval {$check_db del} $txn {$lastkey}] 0
+ }
+
+ # Make sure there is nothing left in check_db
+
+ set check_c [eval {$check_db cursor} $txn]
+ set ret [$check_c get -first]
+ error_check_good check_c:get:$ret [llength $ret] 0
+ error_check_good check_c:close [$check_c close] 0
+
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good check_db:close [$check_db close] 0
+ error_check_good db_close [$db close] 0
+}
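+
+# With -dup -dupsort the duplicates of a key are kept in sorted order and
+# DB_NODUPDATA rejects an exact key/data pair that is already present, on
+# both the DB and cursor handles.  A hedged sketch (proc name
+# illustrative; not called by the suite):
+proc sketch_nodupdata { db key data } {
+ error_check_good put [$db put $key $data] 0
+ # Re-adding the identical pair with -nodupdata must fail.
+ set ret [$db put -nodupdata $key $data]
+ error_check_good nodupdata [is_substr $ret "DB_KEYEXIST"] 1
+}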
diff --git a/storage/bdb/test/test032.tcl b/storage/bdb/test/test032.tcl
new file mode 100644
index 00000000000..2076b744851
--- /dev/null
+++ b/storage/bdb/test/test032.tcl
@@ -0,0 +1,231 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test032.tcl,v 11.23 2002/06/11 14:09:57 sue Exp $
+#
+# TEST test032
+# TEST DB_GET_BOTH, DB_GET_BOTH_RANGE
+# TEST
+# TEST Use the first 10,000 entries from the dictionary. Insert each with
+# TEST self as key and "ndups" duplicates. For the data field, prepend the
+# TEST letters of the alphabet in a random order so we force the duplicate
+# TEST sorting code to do something. By setting ndups large, we can make
+# TEST this an off-page test.
+# TEST
+# TEST Test the DB_GET_BOTH functionality by retrieving each dup in the file
+# TEST explicitly. Test the DB_GET_BOTH_RANGE functionality by retrieving
+# TEST the unique key prefix (cursor only). Finally test the failure case.
+proc test032 { method {nentries 10000} {ndups 5} {tnum 32} args } {
+ global alphabet rand_init
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ berkdb srand $rand_init
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum.db
+ set checkdb $testdir/checkdb.db
+ set env NULL
+ } else {
+ set testfile test0$tnum.db
+ set checkdb checkdb.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ reduce_dups nentries ndups
+ }
+ set testdir [get_home $env]
+ }
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ cleanup $testdir $env
+
+ puts "Test0$tnum:\
+ $method ($args) $nentries small sorted $ndups dup key/data pairs"
+ if { [is_record_based $method] == 1 || \
+ [is_rbtree $method] == 1 } {
+ puts "Test0$tnum skipping for method $omethod"
+ return
+ }
+ set db [eval {berkdb_open -create -mode 0644 \
+ $omethod -dup -dupsort} $args {$testfile} ]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set did [open $dict]
+
+ set check_db [eval {berkdb_open \
+ -create -mode 0644} $args {-hash $checkdb}]
+ error_check_good dbopen:check_db [is_valid_db $check_db] TRUE
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ # Here is the loop where we put and get each key/data pair
+ puts "\tTest0$tnum.a: Put/get loop"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good cursor_open [is_valid_cursor $dbc $db] TRUE
+ while { [gets $did str] != -1 && $count < $nentries } {
+ # Re-initialize random string generator
+ randstring_init $ndups
+
+ set dups ""
+ for { set i 1 } { $i <= $ndups } { incr i } {
+ set pref [randstring]
+ set dups $dups$pref
+ set datastr $pref:$str
+ set ret [eval {$db put} \
+ $txn $pflags {$str [chop_data $method $datastr]}]
+ error_check_good put $ret 0
+ }
+ set ret [eval {$check_db put} \
+ $txn $pflags {$str [chop_data $method $dups]}]
+ error_check_good checkdb_put $ret 0
+
+ # Now retrieve all the keys matching this key
+ set x 0
+ set lastdup ""
+ for {set ret [$dbc get -set $str]} \
+ {[llength $ret] != 0} \
+ {set ret [$dbc get -nextdup] } {
+ set k [lindex [lindex $ret 0] 0]
+ if { [string compare $k $str] != 0 } {
+ break
+ }
+ set datastr [lindex [lindex $ret 0] 1]
+ if {[string length $datastr] == 0} {
+ break
+ }
+ if {[string compare $lastdup $datastr] > 0} {
+ error_check_good \
+ sorted_dups($lastdup,$datastr) 0 1
+ }
+ incr x
+ set lastdup $datastr
+ }
+
+ error_check_good "Test0$tnum:ndups:$str" $x $ndups
+ incr count
+ }
+ error_check_good cursor_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ close $did
+
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tTest0$tnum.b: Checking file for correct duplicates (no cursor)"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set check_c [eval {$check_db cursor} $txn]
+ error_check_good check_c_open(2) \
+ [is_valid_cursor $check_c $check_db] TRUE
+
+ for {set ndx 0} {$ndx < [expr 4 * $ndups]} {incr ndx 4} {
+ for {set ret [$check_c get -first]} \
+ {[llength $ret] != 0} \
+ {set ret [$check_c get -next] } {
+ set k [lindex [lindex $ret 0] 0]
+ set d [lindex [lindex $ret 0] 1]
+ error_check_bad data_check:$d [string length $d] 0
+
+ set pref [string range $d $ndx [expr $ndx + 3]]
+ set data $pref:$k
+ set ret [eval {$db get} $txn {-get_both $k $data}]
+ error_check_good \
+ get_both_data:$k $ret [list [list $k $data]]
+ }
+ }
+
+ $db sync
+
+ # Now repeat the above test using cursor ops
+ puts "\tTest0$tnum.c: Checking file for correct duplicates (cursor)"
+ set dbc [eval {$db cursor} $txn]
+ error_check_good cursor_open [is_valid_cursor $dbc $db] TRUE
+
+ for {set ndx 0} {$ndx < [expr 4 * $ndups]} {incr ndx 4} {
+ for {set ret [$check_c get -first]} \
+ {[llength $ret] != 0} \
+ {set ret [$check_c get -next] } {
+ set k [lindex [lindex $ret 0] 0]
+ set d [lindex [lindex $ret 0] 1]
+ error_check_bad data_check:$d [string length $d] 0
+
+ set pref [string range $d $ndx [expr $ndx + 3]]
+ set data $pref:$k
+ set ret [eval {$dbc get} {-get_both $k $data}]
+ error_check_good \
+ curs_get_both_data:$k $ret [list [list $k $data]]
+
+ set ret [eval {$dbc get} {-get_both_range $k $pref}]
+ error_check_good \
+ curs_get_both_range:$k $ret [list [list $k $data]]
+ }
+ }
+
+ # Now check the error case
+ puts "\tTest0$tnum.d: Check error case (no cursor)"
+ for {set ret [$check_c get -first]} \
+ {[llength $ret] != 0} \
+ {set ret [$check_c get -next] } {
+ set k [lindex [lindex $ret 0] 0]
+ set d [lindex [lindex $ret 0] 1]
+ error_check_bad data_check:$d [string length $d] 0
+
+ set data XXX$k
+ set ret [eval {$db get} $txn {-get_both $k $data}]
+ error_check_good error_case:$k [llength $ret] 0
+ }
+
+ # Now check the error case
+ puts "\tTest0$tnum.e: Check error case (cursor)"
+ for {set ret [$check_c get -first]} \
+ {[llength $ret] != 0} \
+ {set ret [$check_c get -next] } {
+ set k [lindex [lindex $ret 0] 0]
+ set d [lindex [lindex $ret 0] 1]
+ error_check_bad data_check:$d [string length $d] 0
+
+ set data XXX$k
+ set ret [eval {$dbc get} {-get_both $k $data}]
+ error_check_good error_case:$k [llength $ret] 0
+ }
+
+ error_check_good check_c:close [$check_c close] 0
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good check_db:close [$check_db close] 0
+ error_check_good db_close [$db close] 0
+}
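+
+# The two flags exercised above differ in how exact they are: -get_both
+# demands an exact key/data match, while -get_both_range (cursor only)
+# lands on the smallest duplicate greater than or equal to the supplied
+# data, which is why the unique four-char prefix alone finds the full
+# duplicate.  A hedged sketch (proc name illustrative; not called by the
+# suite):
+proc sketch_get_both { db dbc key data prefix } {
+ # Exact match: the full pair comes back, or an empty list if absent.
+ error_check_good exact [$db get -get_both $key $data] \
+     [list [list $key $data]]
+ # Range match: the prefix is enough to reach the full duplicate.
+ error_check_good range [$dbc get -get_both_range $key $prefix] \
+     [list [list $key $data]]
+}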
diff --git a/storage/bdb/test/test033.tcl b/storage/bdb/test/test033.tcl
new file mode 100644
index 00000000000..a7796ce99d6
--- /dev/null
+++ b/storage/bdb/test/test033.tcl
@@ -0,0 +1,176 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test033.tcl,v 11.24 2002/08/08 15:38:11 bostic Exp $
+#
+# TEST test033
+# TEST DB_GET_BOTH without comparison function
+# TEST
+# TEST Use the first 10,000 entries from the dictionary. Insert each with
+# TEST self as key and data; add duplicate records for each. After all are
+# TEST entered, retrieve all and verify output using DB_GET_BOTH (on DB and
+# TEST DBC handles) and DB_GET_BOTH_RANGE (on a DBC handle) on existent and
+# TEST nonexistent keys.
+# TEST
+# TEST XXX
+# TEST This does not work for rbtree.
+proc test033 { method {nentries 10000} {ndups 5} {tnum 33} args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+ if { [is_rbtree $method] == 1 } {
+ puts "Test0$tnum skipping for method $method"
+ return
+ }
+
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum.db
+ set env NULL
+ } else {
+ set testfile test0$tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ reduce_dups nentries ndups
+ }
+ set testdir [get_home $env]
+ }
+
+ puts "Test0$tnum: $method ($args) $nentries small $ndups dup key/data pairs"
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ cleanup $testdir $env
+
+ # Duplicate data entries are not allowed in record based methods.
+ if { [is_record_based $method] == 1 } {
+ set db [eval {berkdb_open -create -mode 0644 \
+ $omethod} $args {$testfile}]
+ } else {
+ set db [eval {berkdb_open -create -mode 0644 \
+ $omethod -dup} $args {$testfile}]
+ }
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+
+ # Allocate a cursor for DB_GET_BOTH_RANGE.
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good cursor_open [is_valid_cursor $dbc $db] TRUE
+
+ puts "\tTest0$tnum.a: Put/get loop."
+ # Here is the loop where we put and get each key/data pair
+ set count 0
+ set did [open $dict]
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ set ret [eval {$db put} $txn $pflags \
+ {$key [chop_data $method $str]}]
+ error_check_good put $ret 0
+ } else {
+ for { set i 1 } { $i <= $ndups } { incr i } {
+ set datastr $i:$str
+ set ret [eval {$db put} \
+ $txn $pflags {$str [chop_data $method $datastr]}]
+ error_check_good db_put $ret 0
+ }
+ }
+
+ # Now retrieve all the keys matching this key and dup
+ # for non-record based AMs.
+ if { [is_record_based $method] == 1 } {
+ test033_recno.check $db $dbc $method $str $txn $key
+ } else {
+ test033_check $db $dbc $method $str $txn $ndups
+ }
+ incr count
+ }
+
+ close $did
+
+ puts "\tTest0$tnum.b: Verifying DB_GET_BOTH after creation."
+ set count 0
+ set did [open $dict]
+ while { [gets $did str] != -1 && $count < $nentries } {
+ # Now retrieve all the keys matching this key
+ # for non-record based AMs.
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ test033_recno.check $db $dbc $method $str $txn $key
+ } else {
+ test033_check $db $dbc $method $str $txn $ndups
+ }
+ incr count
+ }
+ close $did
+
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+}
+
+# No testing of dups is done on record-based methods.
+proc test033_recno.check {db dbc method str txn key} {
+ set ret [eval {$db get} $txn {-recno $key}]
+ error_check_good "db_get:$method" \
+ [lindex [lindex $ret 0] 1] [pad_data $method $str]
+ set ret [$dbc get -get_both $key [pad_data $method $str]]
+ error_check_good "db_get_both:$method" \
+ [lindex [lindex $ret 0] 1] [pad_data $method $str]
+}
+
+# Testing of non-record-based methods includes duplicates
+# and get_both_range.
+proc test033_check {db dbc method str txn ndups} {
+ for {set i 1} {$i <= $ndups } { incr i } {
+ set datastr $i:$str
+
+ set ret [eval {$db get} $txn {-get_both $str $datastr}]
+ error_check_good "db_get_both:dup#" \
+ [lindex [lindex $ret 0] 1] $datastr
+
+ set ret [$dbc get -get_both $str $datastr]
+ error_check_good "dbc_get_both:dup#" \
+ [lindex [lindex $ret 0] 1] $datastr
+
+ set ret [$dbc get -get_both_range $str $datastr]
+ error_check_good "dbc_get_both_range:dup#" \
+ [lindex [lindex $ret 0] 1] $datastr
+ }
+
+ # Now retrieve non-existent dup (i is ndups + 1)
+ set datastr $i:$str
+ set ret [eval {$db get} $txn {-get_both $str $datastr}]
+ error_check_good db_get_both:dupfailure [llength $ret] 0
+ set ret [$dbc get -get_both $str $datastr]
+ error_check_good dbc_get_both:dupfailure [llength $ret] 0
+ set ret [$dbc get -get_both_range $str $datastr]
+ error_check_good dbc_get_both_range [llength $ret] 0
+}
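+
+# The failure half of the check above matters as much as the success
+# half: asking for a duplicate that was never inserted (i is ndups + 1 at
+# that point) must yield an empty list, not an error.  A hedged sketch
+# (proc name illustrative; not called by the suite):
+proc sketch_get_both_missing { db key bogus } {
+ # A key/data pair that does not exist comes back as an empty list.
+ error_check_good missing_dup \
+     [llength [$db get -get_both $key $bogus]] 0
+}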
diff --git a/storage/bdb/test/test034.tcl b/storage/bdb/test/test034.tcl
new file mode 100644
index 00000000000..647ad940815
--- /dev/null
+++ b/storage/bdb/test/test034.tcl
@@ -0,0 +1,17 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1998-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test034.tcl,v 11.8 2002/01/11 15:53:46 bostic Exp $
+#
+# TEST test034
+# TEST test032 with off-page duplicates
+# TEST DB_GET_BOTH, DB_GET_BOTH_RANGE functionality with off-page duplicates.
+proc test034 { method {nentries 10000} args} {
+ # Test with off-page duplicates
+ eval {test032 $method $nentries 20 34 -pagesize 512} $args
+
+ # Test with multiple pages of off-page duplicates
+ eval {test032 $method [expr $nentries / 10] 100 34 -pagesize 512} $args
+}
diff --git a/storage/bdb/test/test035.tcl b/storage/bdb/test/test035.tcl
new file mode 100644
index 00000000000..06796b1e9aa
--- /dev/null
+++ b/storage/bdb/test/test035.tcl
@@ -0,0 +1,16 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test035.tcl,v 11.8 2002/07/22 17:00:39 sue Exp $
+#
+# TEST test035
+# TEST Test033 with off-page duplicates
+# TEST DB_GET_BOTH functionality with off-page duplicates.
+proc test035 { method {nentries 10000} args} {
+ # Test with off-page duplicates
+ eval {test033 $method $nentries 20 35 -pagesize 512} $args
+ # Test with multiple pages of off-page duplicates
+ eval {test033 $method [expr $nentries / 10] 100 35 -pagesize 512} $args
+}
diff --git a/storage/bdb/test/test036.tcl b/storage/bdb/test/test036.tcl
new file mode 100644
index 00000000000..4e54f363ff8
--- /dev/null
+++ b/storage/bdb/test/test036.tcl
@@ -0,0 +1,173 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test036.tcl,v 11.18 2002/05/22 15:42:51 sue Exp $
+#
+# TEST test036
+# TEST Test KEYFIRST and KEYLAST when the key doesn't exist
+# TEST Put nentries key/data pairs (from the dictionary) using a cursor
+# TEST and KEYFIRST and KEYLAST (this tests the case where we use cursor
+# TEST put for non-existent keys).
+proc test036 { method {nentries 10000} args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+ if { [is_record_based $method] == 1 } {
+ puts "Test036 skipping for method recno"
+ return
+ }
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test036.db
+ set env NULL
+ } else {
+ set testfile test036.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
+ }
+
+ puts "Test036: $method ($args) $nentries equal key/data pairs"
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ cleanup $testdir $env
+ set db [eval {berkdb_open \
+ -create -mode 0644} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set did [open $dict]
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ if { [is_record_based $method] == 1 } {
+ set checkfunc test036_recno.check
+ append gflags " -recno"
+ } else {
+ set checkfunc test036.check
+ }
+ puts "\tTest036.a: put/get loop KEYFIRST"
+ # Here is the loop where we put and get each key/data pair
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good cursor [is_valid_cursor $dbc $db] TRUE
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ global kvals
+
+ set key [expr $count + 1]
+ set kvals($key) $str
+ } else {
+ set key $str
+ }
+ set ret [eval {$dbc put} $pflags {-keyfirst $key $str}]
+ error_check_good put $ret 0
+
+ set ret [eval {$db get} $txn $gflags {$key}]
+ error_check_good get [lindex [lindex $ret 0] 1] $str
+ incr count
+ }
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ puts "\tTest036.a: put/get loop KEYLAST"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good cursor [is_valid_cursor $dbc $db] TRUE
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ global kvals
+
+ set key [expr $count + 1]
+ set kvals($key) $str
+ } else {
+ set key $str
+ }
+ set ret [eval {$dbc put} $pflags {-keylast $key $str}]
+ error_check_good put $ret 0
+
+ set ret [eval {$db get} $txn $gflags {$key}]
+ error_check_good get [lindex [lindex $ret 0] 1] $str
+ incr count
+ }
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ close $did
+
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tTest036.c: dump file"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_file $db $txn $t1 $checkfunc
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ # Now compare the keys to see if they match the dictionary (or ints)
+ if { [is_record_based $method] == 1 } {
+ set oid [open $t2 w]
+ for {set i 1} {$i <= $nentries} {incr i} {
+ puts $oid $i
+ }
+ close $oid
+ file rename -force $t1 $t3
+ } else {
+ set q q
+ filehead $nentries $dict $t3
+ filesort $t3 $t2
+ filesort $t1 $t3
+ }
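+
+ # The sorted dump ($t3) and the reference file ($t2) are presumably
+ # meant to be compared at this point, as in the suite's other dump
+ # tests; the usual check would be:
+ #	error_check_good Test036:diff($t3,$t2) [filecmp $t3 $t2] 0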
+
+}
+
+# Check function for test036; keys and data are identical
+proc test036.check { key data } {
+ error_check_good "key/data mismatch" $data $key
+}
+
+proc test036_recno.check { key data } {
+ global dict
+ global kvals
+
+ error_check_good key"$key"_exists [info exists kvals($key)] 1
+ error_check_good "key/data mismatch, key $key" $data $kvals($key)
+}
diff --git a/storage/bdb/test/test037.tcl b/storage/bdb/test/test037.tcl
new file mode 100644
index 00000000000..0b2e2989949
--- /dev/null
+++ b/storage/bdb/test/test037.tcl
@@ -0,0 +1,196 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test037.tcl,v 11.18 2002/03/15 16:30:54 sue Exp $
+#
+# TEST test037
+# TEST Test DB_RMW
+proc test037 { method {nentries 100} args } {
+ global encrypt
+
+ source ./include.tcl
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then skip this test. It needs its own.
+ if { $eindex != -1 } {
+ incr eindex
+ set env [lindex $args $eindex]
+ puts "Test037 skipping for env $env"
+ return
+ }
+
+ puts "Test037: RMW $method"
+
+ set args [convert_args $method $args]
+ set encargs ""
+ set args [split_encargs $args encargs]
+ set omethod [convert_method $method]
+
+ # Create the database
+ env_cleanup $testdir
+ set testfile test037.db
+
+ set local_env \
+ [eval {berkdb_env -create -mode 0644 -txn} $encargs -home $testdir]
+ error_check_good dbenv [is_valid_env $local_env] TRUE
+
+ set db [eval {berkdb_open \
+ -env $local_env -create -mode 0644 $omethod} $args {$testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set did [open $dict]
+ set count 0
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+
+ if { [is_record_based $method] == 1 } {
+ append gflags " -recno"
+ }
+
+ puts "\tTest037.a: Creating database"
+ # Here is the loop where we put and get each key/data pair
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ global kvals
+
+ set key [expr $count + 1]
+ set kvals($key) [pad_data $method $str]
+ } else {
+ set key $str
+ }
+ set ret [eval {$db put} \
+ $txn $pflags {$key [chop_data $method $str]}]
+ error_check_good put $ret 0
+
+ set ret [eval {$db get} $txn $gflags {$key}]
+ error_check_good get \
+ [lindex [lindex $ret 0] 1] [pad_data $method $str]
+ incr count
+ }
+ close $did
+ error_check_good dbclose [$db close] 0
+ error_check_good envclose [$local_env close] 0
+
+ puts "\tTest037.b: Setting up environments"
+
+ # Open local environment
+ set env_cmd [concat berkdb_env -create -txn $encargs -home $testdir]
+ set local_env [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $local_env] TRUE
+
+ # Open local transaction
+ set local_txn [$local_env txn]
+ error_check_good txn_open [is_valid_txn $local_txn $local_env] TRUE
+
+ # Open remote environment
+ set f1 [open |$tclsh_path r+]
+ puts $f1 "source $test_path/test.tcl"
+
+ set remote_env [send_cmd $f1 $env_cmd]
+ error_check_good remote:env_open [is_valid_env $remote_env] TRUE
+
+ # Open remote transaction
+ set remote_txn [send_cmd $f1 "$remote_env txn"]
+ error_check_good \
+ remote:txn_open [is_valid_txn $remote_txn $remote_env] TRUE
+
+ # Now run the get test without RMW. Gets on one site should not
+ # lock out gets on another.
+
+ # Open databases and dictionary
+ puts "\tTest037.c: Opening databases"
+ set did [open $dict]
+ set rkey 0
+
+ set db [berkdb_open -auto_commit -env $local_env $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set rdb [send_cmd $f1 \
+ "berkdb_open -auto_commit -env $remote_env -mode 0644 $testfile"]
+ error_check_good remote:dbopen [is_valid_db $rdb] TRUE
+
+ puts "\tTest037.d: Testing without RMW"
+
+ # Now, get a key and try to "get" it from both DBs.
+ error_check_bad "gets on new open" [gets $did str] -1
+ incr rkey
+ if { [is_record_based $method] == 1 } {
+ set key $rkey
+ } else {
+ set key $str
+ }
+
+ set rec [eval {$db get -txn $local_txn} $gflags {$key}]
+ error_check_good local_get [lindex [lindex $rec 0] 1] \
+ [pad_data $method $str]
+
+ set r [send_timed_cmd $f1 0 "$rdb get -txn $remote_txn $gflags $key"]
+ error_check_good remote_send $r 0
+
+ # Now sleep before releasing local record lock
+ tclsleep 5
+ error_check_good local_commit [$local_txn commit] 0
+
+ # Now get the remote result
+ set remote_time [rcv_result $f1]
+ error_check_good no_rmw_get:remote_time [expr $remote_time <= 1] 1
+
+ # Commit the remote
+ set r [send_cmd $f1 "$remote_txn commit"]
+ error_check_good remote_commit $r 0
+
+ puts "\tTest037.e: Testing with RMW"
+
+ # Open local transaction
+ set local_txn [$local_env txn]
+ error_check_good \
+ txn_open [is_valid_txn $local_txn $local_env] TRUE
+
+ # Open remote transaction
+ set remote_txn [send_cmd $f1 "$remote_env txn"]
+ error_check_good remote:txn_open \
+ [is_valid_txn $remote_txn $remote_env] TRUE
+
+ # Now, get a key and try to "get" it from both DBs.
+ error_check_bad "gets on new open" [gets $did str] -1
+ incr rkey
+ if { [is_record_based $method] == 1 } {
+ set key $rkey
+ } else {
+ set key $str
+ }
+
+ set rec [eval {$db get -txn $local_txn -rmw} $gflags {$key}]
+ error_check_good \
+ local_get [lindex [lindex $rec 0] 1] [pad_data $method $str]
+
+ set r [send_timed_cmd $f1 0 "$rdb get -txn $remote_txn $gflags $key"]
+ error_check_good remote_send $r 0
+
+ # Now sleep before releasing local record lock
+ tclsleep 5
+ error_check_good local_commit [$local_txn commit] 0
+
+ # Now get the remote result
+ set remote_time [rcv_result $f1]
+ error_check_good rmw_get:remote_time [expr $remote_time > 4] 1
+
+ # Commit the remote
+ set r [send_cmd $f1 "$remote_txn commit"]
+ error_check_good remote_commit $r 0
+
+ # Close everything up: remote first
+ set r [send_cmd $f1 "$rdb close"]
+ error_check_good remote_db_close $r 0
+
+ set r [send_cmd $f1 "$remote_env close"]
+
+ # Close locally
+ error_check_good db_close [$db close] 0
+ $local_env close
+ close $did
+ close $f1
+}
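+
+# The timing assertions above encode the locking difference under test: a
+# plain get takes a read lock, which the remote reader can share, so its
+# get returns almost at once; a get with -rmw takes a write lock, so the
+# remote get blocks until the local transaction commits (hence the > 4
+# second wait against a 5 second sleep).  A hedged sketch of the RMW read
+# (proc name illustrative; not called by the suite):
+proc sketch_rmw_get { env db key } {
+ set t [$env txn]
+ # -rmw acquires a write lock on the record at read time.
+ set ret [$db get -txn $t -rmw $key]
+ error_check_good commit [$t commit] 0
+ return $ret
+}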
diff --git a/storage/bdb/test/test038.tcl b/storage/bdb/test/test038.tcl
new file mode 100644
index 00000000000..3babde8fe0b
--- /dev/null
+++ b/storage/bdb/test/test038.tcl
@@ -0,0 +1,227 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test038.tcl,v 11.23 2002/06/11 14:09:57 sue Exp $
+#
+# TEST test038
+# TEST DB_GET_BOTH, DB_GET_BOTH_RANGE on deleted items
+# TEST
+# TEST Use the first 10,000 entries from the dictionary. Insert each with
+# TEST self as key and "ndups" duplicates. For the data field, prepend the
+# TEST letters of the alphabet in a random order so we force the duplicate
+# TEST sorting code to do something. By setting ndups large, we can make
+# TEST this an off-page test.
+# TEST
+# TEST Test the DB_GET_BOTH and DB_GET_BOTH_RANGE functionality by retrieving
+# TEST each dup in the file explicitly. Then remove each duplicate and try
+# TEST the retrieval again.
+proc test038 { method {nentries 10000} {ndups 5} {tnum 38} args } {
+ global alphabet
+ global rand_init
+ source ./include.tcl
+
+ berkdb srand $rand_init
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_record_based $method] == 1 || \
+ [is_rbtree $method] == 1 } {
+ puts "Test0$tnum skipping for method $method"
+ return
+ }
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum.db
+ set checkdb $testdir/checkdb.db
+ set env NULL
+ } else {
+ set testfile test0$tnum.db
+ set checkdb checkdb.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ reduce_dups nentries ndups
+ }
+ set testdir [get_home $env]
+ }
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ cleanup $testdir $env
+
+ puts "Test0$tnum: \
+ $method ($args) $nentries small sorted dup key/data pairs"
+ set db [eval {berkdb_open -create -mode 0644 \
+ $omethod -dup -dupsort} $args {$testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set did [open $dict]
+
+ set check_db [eval {berkdb_open \
+ -create -mode 0644 -hash} $args {$checkdb}]
+ error_check_good dbopen:check_db [is_valid_db $check_db] TRUE
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ # Here is the loop where we put and get each key/data pair
+ puts "\tTest0$tnum.a: Put/get loop"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good cursor_open [is_valid_cursor $dbc $db] TRUE
+ while { [gets $did str] != -1 && $count < $nentries } {
+ set dups ""
+ for { set i 1 } { $i <= $ndups } { incr i } {
+ set pref \
+ [string index $alphabet [berkdb random_int 0 25]]
+ set pref $pref[string \
+ index $alphabet [berkdb random_int 0 25]]
+ while { [string first $pref $dups] != -1 } {
+ set pref [string toupper $pref]
+ if { [string first $pref $dups] != -1 } {
+ set pref [string index $alphabet \
+ [berkdb random_int 0 25]]
+ set pref $pref[string index $alphabet \
+ [berkdb random_int 0 25]]
+ }
+ }
+ if { [string length $dups] == 0 } {
+ set dups $pref
+ } else {
+ set dups "$dups $pref"
+ }
+ set datastr $pref:$str
+ set ret [eval {$db put} \
+ $txn $pflags {$str [chop_data $method $datastr]}]
+ error_check_good put $ret 0
+ }
+ set ret [eval {$check_db put} \
+ $txn $pflags {$str [chop_data $method $dups]}]
+ error_check_good checkdb_put $ret 0
+
+ # Now retrieve all the keys matching this key
+ set x 0
+ set lastdup ""
+ for {set ret [$dbc get -set $str]} \
+ {[llength $ret] != 0} \
+ {set ret [$dbc get -nextdup] } {
+ set k [lindex [lindex $ret 0] 0]
+ if { [string compare $k $str] != 0 } {
+ break
+ }
+ set datastr [lindex [lindex $ret 0] 1]
+ if {[string length $datastr] == 0} {
+ break
+ }
+ if {[string compare $lastdup $datastr] > 0} {
+ error_check_good sorted_dups($lastdup,$datastr)\
+ 0 1
+ }
+ incr x
+ set lastdup $datastr
+ }
+ error_check_good "Test0$tnum:ndups:$str" $x $ndups
+ incr count
+ }
+ error_check_good cursor_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ close $did
+
+	# Now check the duplicates, then delete them and recheck.
+ puts "\tTest0$tnum.b: Checking and Deleting duplicates"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good cursor_open [is_valid_cursor $dbc $db] TRUE
+ set check_c [eval {$check_db cursor} $txn]
+ error_check_good cursor_open [is_valid_cursor $check_c $check_db] TRUE
+
+ for {set ndx 0} {$ndx < $ndups} {incr ndx} {
+ for {set ret [$check_c get -first]} \
+ {[llength $ret] != 0} \
+ {set ret [$check_c get -next] } {
+ set k [lindex [lindex $ret 0] 0]
+ set d [lindex [lindex $ret 0] 1]
+ error_check_bad data_check:$d [string length $d] 0
+
+ set nn [expr $ndx * 3]
+ set pref [string range $d $nn [expr $nn + 1]]
+ set data $pref:$k
+ set ret [$dbc get -get_both $k $data]
+ error_check_good \
+ get_both_key:$k [lindex [lindex $ret 0] 0] $k
+ error_check_good \
+ get_both_data:$k [lindex [lindex $ret 0] 1] $data
+
+ set ret [$dbc get -get_both_range $k $pref]
+ error_check_good \
+ get_both_key:$k [lindex [lindex $ret 0] 0] $k
+ error_check_good \
+ get_both_data:$k [lindex [lindex $ret 0] 1] $data
+
+ set ret [$dbc del]
+ error_check_good del $ret 0
+
+ set ret [eval {$db get} $txn {-get_both $k $data}]
+ error_check_good error_case:$k [llength $ret] 0
+
+ # We should either not find anything (if deleting the
+ # largest duplicate in the set) or a duplicate that
+ # sorts larger than the one we deleted.
+ set ret [$dbc get -get_both_range $k $pref]
+ if { [llength $ret] != 0 } {
+			set datastr [lindex [lindex $ret 0] 1]
+ if {[string compare \
+ $pref [lindex [lindex $ret 0] 1]] >= 0} {
+ error_check_good \
+ error_case_range:sorted_dups($pref,$datastr) 0 1
+ }
+ }
+
+ if {$ndx != 0} {
+ set n [expr ($ndx - 1) * 3]
+ set pref [string range $d $n [expr $n + 1]]
+ set data $pref:$k
+ set ret \
+ [eval {$db get} $txn {-get_both $k $data}]
+ error_check_good error_case:$k [llength $ret] 0
+ }
+ }
+ }
+
+ error_check_good check_c:close [$check_c close] 0
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ error_check_good check_db:close [$check_db close] 0
+ error_check_good db_close [$db close] 0
+}
diff --git a/storage/bdb/test/test039.tcl b/storage/bdb/test/test039.tcl
new file mode 100644
index 00000000000..2bbc83ebe05
--- /dev/null
+++ b/storage/bdb/test/test039.tcl
@@ -0,0 +1,216 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test039.tcl,v 11.20 2002/06/11 14:09:57 sue Exp $
+#
+# TEST test039
+# TEST DB_GET_BOTH/DB_GET_BOTH_RANGE on deleted items without comparison
+# TEST function.
+# TEST
+# TEST Use the first 10,000 entries from the dictionary. Insert each with
+# TEST self as key and "ndups" duplicates. For the data field, prepend the
+# TEST letters of the alphabet in a random order so we force the duplicate
+# TEST sorting code to do something. By setting ndups large, we can make
+# TEST this an off-page test.
+# TEST
+# TEST Test the DB_GET_BOTH and DB_GET_BOTH_RANGE functionality by retrieving
+# TEST each dup in the file explicitly. Then remove each duplicate and try
+# TEST the retrieval again.
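+#
+# Unlike test038, the database here is opened with -dup but not -dupsort:
+# duplicates stay in insertion order, and without a comparison function
+# DB_GET_BOTH_RANGE is expected to behave as an exact match, just like
+# DB_GET_BOTH (see the sketch in test038.tcl).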
+proc test039 { method {nentries 10000} {ndups 5} {tnum 39} args } {
+ global alphabet
+ global rand_init
+ source ./include.tcl
+
+ berkdb srand $rand_init
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_record_based $method] == 1 || \
+ [is_rbtree $method] == 1 } {
+ puts "Test0$tnum skipping for method $method"
+ return
+ }
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum.db
+ set checkdb $testdir/checkdb.db
+ set env NULL
+ } else {
+ set testfile test0$tnum.db
+ set checkdb checkdb.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ reduce_dups nentries ndups
+ }
+ set testdir [get_home $env]
+ }
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ cleanup $testdir $env
+
+	puts "Test0$tnum: $method $nentries \
+	    small key/data pairs with $ndups unsorted dups"
+
+ set db [eval {berkdb_open -create -mode 0644 \
+ $omethod -dup} $args {$testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set did [open $dict]
+
+ set check_db [eval \
+ {berkdb_open -create -mode 0644 -hash} $args {$checkdb}]
+ error_check_good dbopen:check_db [is_valid_db $check_db] TRUE
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ # Here is the loop where we put and get each key/data pair
+ puts "\tTest0$tnum.a: Put/get loop"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good cursor_open [is_valid_cursor $dbc $db] TRUE
+ while { [gets $did str] != -1 && $count < $nentries } {
+ set dups ""
+ for { set i 1 } { $i <= $ndups } { incr i } {
+ set pref \
+ [string index $alphabet [berkdb random_int 0 25]]
+ set pref $pref[string \
+ index $alphabet [berkdb random_int 0 25]]
+ while { [string first $pref $dups] != -1 } {
+ set pref [string toupper $pref]
+ if { [string first $pref $dups] != -1 } {
+ set pref [string index $alphabet \
+ [berkdb random_int 0 25]]
+ set pref $pref[string index $alphabet \
+ [berkdb random_int 0 25]]
+ }
+ }
+ if { [string length $dups] == 0 } {
+ set dups $pref
+ } else {
+ set dups "$dups $pref"
+ }
+ set datastr $pref:$str
+ set ret [eval {$db put} \
+ $txn $pflags {$str [chop_data $method $datastr]}]
+ error_check_good put $ret 0
+ }
+ set ret [eval {$check_db put} \
+ $txn $pflags {$str [chop_data $method $dups]}]
+ error_check_good checkdb_put $ret 0
+
+ # Now retrieve all the keys matching this key
+ set x 0
+ set lastdup ""
+ for {set ret [$dbc get -set $str]} \
+ {[llength $ret] != 0} \
+ {set ret [$dbc get -nextdup] } {
+ set k [lindex [lindex $ret 0] 0]
+ if { [string compare $k $str] != 0 } {
+ break
+ }
+ set datastr [lindex [lindex $ret 0] 1]
+ if {[string length $datastr] == 0} {
+ break
+ }
+ set xx [expr $x * 3]
+ set check_data \
+ [string range $dups $xx [expr $xx + 1]]:$k
+ error_check_good retrieve $datastr $check_data
+ incr x
+ }
+ error_check_good "Test0$tnum:ndups:$str" $x $ndups
+ incr count
+ }
+ error_check_good cursor_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ close $did
+
+	# Now check the duplicates, then delete them and recheck.
+ puts "\tTest0$tnum.b: Checking and Deleting duplicates"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good cursor_open [is_valid_cursor $dbc $db] TRUE
+ set check_c [eval {$check_db cursor} $txn]
+ error_check_good cursor_open [is_valid_cursor $check_c $check_db] TRUE
+
+ for {set ndx 0} {$ndx < $ndups} {incr ndx} {
+ for {set ret [$check_c get -first]} \
+ {[llength $ret] != 0} \
+ {set ret [$check_c get -next] } {
+ set k [lindex [lindex $ret 0] 0]
+ set d [lindex [lindex $ret 0] 1]
+ error_check_bad data_check:$d [string length $d] 0
+
+ set nn [expr $ndx * 3]
+ set pref [string range $d $nn [expr $nn + 1]]
+ set data $pref:$k
+ set ret [$dbc get -get_both $k $data]
+ error_check_good \
+ get_both_key:$k [lindex [lindex $ret 0] 0] $k
+ error_check_good \
+ get_both_data:$k [lindex [lindex $ret 0] 1] $data
+
+ set ret [$dbc del]
+ error_check_good del $ret 0
+
+ set ret [$dbc get -get_both $k $data]
+ error_check_good get_both:$k [llength $ret] 0
+
+ set ret [$dbc get -get_both_range $k $data]
+ error_check_good get_both_range:$k [llength $ret] 0
+
+ if {$ndx != 0} {
+ set n [expr ($ndx - 1) * 3]
+ set pref [string range $d $n [expr $n + 1]]
+ set data $pref:$k
+ set ret [$dbc get -get_both $k $data]
+ error_check_good error_case:$k [llength $ret] 0
+ }
+ }
+ }
+
+ error_check_good check_c:close [$check_c close] 0
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ error_check_good check_db:close [$check_db close] 0
+ error_check_good db_close [$db close] 0
+}
diff --git a/storage/bdb/test/test040.tcl b/storage/bdb/test/test040.tcl
new file mode 100644
index 00000000000..1856f78fc2e
--- /dev/null
+++ b/storage/bdb/test/test040.tcl
@@ -0,0 +1,17 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1998-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test040.tcl,v 11.6 2002/01/11 15:53:47 bostic Exp $
+#
+# TEST test040
+# TEST Test038 with off-page duplicates
+# TEST DB_GET_BOTH functionality with off-page duplicates.
+proc test040 { method {nentries 10000} args} {
+ # Test with off-page duplicates
+ eval {test038 $method $nentries 20 40 -pagesize 512} $args
+
+ # Test with multiple pages of off-page duplicates
+ eval {test038 $method [expr $nentries / 10] 100 40 -pagesize 512} $args
+}
diff --git a/storage/bdb/test/test041.tcl b/storage/bdb/test/test041.tcl
new file mode 100644
index 00000000000..fdcbdbef3d7
--- /dev/null
+++ b/storage/bdb/test/test041.tcl
@@ -0,0 +1,17 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test041.tcl,v 11.6 2002/01/11 15:53:47 bostic Exp $
+#
+# TEST test041
+# TEST Test039 with off-page duplicates
+# TEST DB_GET_BOTH functionality with off-page duplicates.
+proc test041 { method {nentries 10000} args} {
+ # Test with off-page duplicates
+ eval {test039 $method $nentries 20 41 -pagesize 512} $args
+
+ # Test with multiple pages of off-page duplicates
+ eval {test039 $method [expr $nentries / 10] 100 41 -pagesize 512} $args
+}
diff --git a/storage/bdb/test/test042.tcl b/storage/bdb/test/test042.tcl
new file mode 100644
index 00000000000..9f444b8349c
--- /dev/null
+++ b/storage/bdb/test/test042.tcl
@@ -0,0 +1,191 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test042.tcl,v 11.37 2002/09/05 17:23:07 sandstro Exp $
+#
+# TEST test042
+# TEST Concurrent Data Store test (CDB)
+# TEST
+# TEST Multiprocess DB test; verify that locking is working for the
+# TEST concurrent access method product.
+# TEST
+# TEST Use the first "nentries" words from the dictionary. Insert each with
+# TEST self as key and a fixed, medium length data string. Then fire off
+# TEST multiple processes that bang on the database. Each one should try to
+# TEST read and write random keys. When they rewrite, they'll append their
+# TEST pid to the data string (sometimes doing a rewrite sometimes doing a
+# TEST partial put). Some will use cursors to traverse through a few keys
+# TEST before finding one to write.
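+#
+# For illustration, a minimal sketch of the Concurrent Data Store pieces
+# of the Tcl API this test exercises; the names are assumptions, and
+# -update is taken to request a DB_WRITECURSOR write cursor:
+#
+#	set env [berkdb_env -create -cdb -home $testdir]
+#	set db [berkdb_open -env $env -create -btree cdb.db]
+#	set dbc [$db cursor -update]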
+
+proc test042 { method {nentries 1000} args } {
+ global encrypt
+
+ #
+ # If we are using an env, then skip this test. It needs its own.
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ incr eindex
+ set env [lindex $args $eindex]
+ puts "Test042 skipping for env $env"
+ return
+ }
+
+ set args [convert_args $method $args]
+ if { $encrypt != 0 } {
+ puts "Test042 skipping for security"
+ return
+ }
+ test042_body $method $nentries 0 $args
+ test042_body $method $nentries 1 $args
+}
+
+proc test042_body { method nentries alldb args } {
+ source ./include.tcl
+
+ if { $alldb } {
+ set eflag "-cdb -cdb_alldb"
+ } else {
+ set eflag "-cdb"
+ }
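+	# With -cdb_alldb, CDB locking is done across all the databases in
+	# the environment rather than per database.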
+ puts "Test042: CDB Test ($eflag) $method $nentries"
+
+ # Set initial parameters
+ set do_exit 0
+ set iter 10000
+ set procs 5
+
+ # Process arguments
+ set oargs ""
+ for { set i 0 } { $i < [llength $args] } {incr i} {
+ switch -regexp -- [lindex $args $i] {
+ -dir { incr i; set testdir [lindex $args $i] }
+ -iter { incr i; set iter [lindex $args $i] }
+ -procs { incr i; set procs [lindex $args $i] }
+ -exit { set do_exit 1 }
+ default { append oargs " " [lindex $args $i] }
+ }
+ }
+
+ # Create the database and open the dictionary
+ set testfile test042.db
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+
+ env_cleanup $testdir
+
+ set env [eval {berkdb_env -create} $eflag -home $testdir]
+ error_check_good dbenv [is_valid_env $env] TRUE
+
+ # Env is created, now set up database
+ test042_dbinit $env $nentries $method $oargs $testfile 0
+ if { $alldb } {
+ for { set i 1 } {$i < $procs} {incr i} {
+ test042_dbinit $env $nentries $method $oargs \
+ $testfile $i
+ }
+ }
+
+ # Remove old mpools and Open/create the lock and mpool regions
+ error_check_good env:close:$env [$env close] 0
+ set ret [berkdb envremove -home $testdir]
+ error_check_good env_remove $ret 0
+
+ set env [eval {berkdb_env -create} $eflag -home $testdir]
+ error_check_good dbenv [is_valid_widget $env env] TRUE
+
+ if { $do_exit == 1 } {
+ return
+ }
+
+ # Now spawn off processes
+ berkdb debug_check
+ puts "\tTest042.b: forking off $procs children"
+ set pidlist {}
+
+ for { set i 0 } {$i < $procs} {incr i} {
+ if { $alldb } {
+ set tf $testfile$i
+ } else {
+ set tf ${testfile}0
+ }
+ puts "exec $tclsh_path $test_path/wrap.tcl \
+ mdbscript.tcl $testdir/test042.$i.log \
+ $method $testdir $tf $nentries $iter $i $procs &"
+ set p [exec $tclsh_path $test_path/wrap.tcl \
+ mdbscript.tcl $testdir/test042.$i.log $method \
+ $testdir $tf $nentries $iter $i $procs &]
+ lappend pidlist $p
+ }
+ puts "Test042: $procs independent processes now running"
+ watch_procs $pidlist
+
+ # Check for test failure
+ set e [eval findfail [glob $testdir/test042.*.log]]
+ error_check_good "FAIL: error message(s) in log files" $e 0
+
+ # Test is done, blow away lock and mpool region
+ reset_env $env
+}
+
+# If we are renumbering, then each time we delete an item, the number of
+# items in the file is temporarily decreased, so the highest record numbers
+# do not exist. To make sure this doesn't happen, we never generate the
+# highest few record numbers as keys.
+#
+# For record-based methods, record numbers begin at 1, while for other keys,
+# we begin at 0 to index into an array.
+proc rand_key { method nkeys renum procs} {
+ if { $renum == 1 } {
+ return [berkdb random_int 1 [expr $nkeys - $procs]]
+ } elseif { [is_record_based $method] == 1 } {
+ return [berkdb random_int 1 $nkeys]
+ } else {
+ return [berkdb random_int 0 [expr $nkeys - 1]]
+ }
+}
+
+proc test042_dbinit { env nentries method oargs tf ext } {
+ global datastr
+ source ./include.tcl
+
+ set omethod [convert_method $method]
+ set db [eval {berkdb_open -env $env -create \
+ -mode 0644 $omethod} $oargs {$tf$ext}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set did [open $dict]
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ # Here is the loop where we put each key/data pair
+ puts "\tTest042.a: put loop $tf$ext"
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ } else {
+ set key $str
+ }
+ set ret [eval {$db put} \
+ $txn $pflags {$key [chop_data $method $datastr]}]
+ error_check_good put:$db $ret 0
+ incr count
+ }
+ close $did
+ error_check_good close:$db [$db close] 0
+}
diff --git a/storage/bdb/test/test043.tcl b/storage/bdb/test/test043.tcl
new file mode 100644
index 00000000000..eea7ec86d54
--- /dev/null
+++ b/storage/bdb/test/test043.tcl
@@ -0,0 +1,202 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test043.tcl,v 11.17 2002/05/22 15:42:52 sue Exp $
+#
+# TEST test043
+# TEST Recno renumbering and implicit creation test
+# TEST Test the Record number implicit creation and renumbering options.
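+#
+# For illustration, a minimal sketch of implicit creation in a
+# renumbering recno database; the file name is an assumption:
+#
+#	set db [berkdb_open -create -recno -renumber myrecno.db]
+#	# Writing record 5 into an empty database implicitly creates
+#	# records 1 through 4; fetching one of them returns an empty
+#	# result (DB_KEYEMPTY).
+#	$db put 5 mydata
+#	$db get -recno 3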
+proc test043 { method {nentries 10000} args} {
+ source ./include.tcl
+
+ set do_renumber [is_rrecno $method]
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Test043: $method ($args)"
+
+ if { [is_record_based $method] != 1 } {
+ puts "Test043 skipping for method $method"
+ return
+ }
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test043.db
+ set env NULL
+ } else {
+ set testfile test043.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ # Create the database
+ set db [eval {berkdb_open -create -mode 0644} $args \
+ {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set pflags ""
+ set gflags " -recno"
+ set txn ""
+
+ # First test implicit creation and retrieval
+ set count 1
+ set interval 5
+ if { $nentries < $interval } {
+ set nentries [expr $interval + 1]
+ }
+ puts "\tTest043.a: insert keys at $interval record intervals"
+ while { $count <= $nentries } {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} \
+ $txn $pflags {$count [chop_data $method $count]}]
+ error_check_good "$db put $count" $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ set last $count
+ incr count $interval
+ }
+
+ puts "\tTest043.b: get keys using DB_FIRST/DB_NEXT"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good "$db cursor" [is_valid_cursor $dbc $db] TRUE
+
+ set check 1
+ for { set rec [$dbc get -first] } { [llength $rec] != 0 } {
+ set rec [$dbc get -next] } {
+ set k [lindex [lindex $rec 0] 0]
+ set d [pad_data $method [lindex [lindex $rec 0] 1]]
+ error_check_good "$dbc get key==data" [pad_data $method $k] $d
+ error_check_good "$dbc get sequential" $k $check
+ if { $k > $nentries } {
+ error_check_good "$dbc get key too large" $k $nentries
+ }
+ incr check $interval
+ }
+
+ # Now make sure that we get DB_KEYEMPTY for non-existent keys
+ puts "\tTest043.c: Retrieve non-existent keys"
+ global errorInfo
+
+ set check 1
+ for { set rec [$dbc get -first] } { [llength $rec] != 0 } {
+ set rec [$dbc get -next] } {
+ set k [lindex [lindex $rec 0] 0]
+
+ set ret [eval {$db get} $txn $gflags {[expr $k + 1]}]
+ error_check_good "$db \
+ get [expr $k + 1]" $ret [list]
+
+ incr check $interval
+ # Make sure we don't do a retrieve past the end of file
+ if { $check >= $last } {
+ break
+ }
+ }
+
+ # Now try deleting and make sure the right thing happens.
+ puts "\tTest043.d: Delete tests"
+ set rec [$dbc get -first]
+ error_check_bad "$dbc get -first" [llength $rec] 0
+ error_check_good "$dbc get -first key" [lindex [lindex $rec 0] 0] 1
+ error_check_good "$dbc get -first data" \
+ [lindex [lindex $rec 0] 1] [pad_data $method 1]
+
+ # Delete the first item
+ error_check_good "$dbc del" [$dbc del] 0
+
+ # Retrieving 1 should always fail
+ set ret [eval {$db get} $txn $gflags {1}]
+ error_check_good "$db get 1" $ret [list]
+
+ # Now, retrieving other keys should work; keys will vary depending
+ # upon renumbering.
+ if { $do_renumber == 1 } {
+ set count [expr 0 + $interval]
+ set max [expr $nentries - 1]
+ } else {
+ set count [expr 1 + $interval]
+ set max $nentries
+ }
+
+ while { $count <= $max } {
+ set rec [eval {$db get} $txn $gflags {$count}]
+ if { $do_renumber == 1 } {
+ set data [expr $count + 1]
+ } else {
+ set data $count
+ }
+ error_check_good "$db get $count" \
+ [pad_data $method $data] [lindex [lindex $rec 0] 1]
+ incr count $interval
+ }
+ set max [expr $count - $interval]
+
+ puts "\tTest043.e: Verify LAST/PREV functionality"
+ set count $max
+ for { set rec [$dbc get -last] } { [llength $rec] != 0 } {
+ set rec [$dbc get -prev] } {
+ set k [lindex [lindex $rec 0] 0]
+ set d [lindex [lindex $rec 0] 1]
+ if { $do_renumber == 1 } {
+ set data [expr $k + 1]
+ } else {
+ set data $k
+ }
+ error_check_good \
+ "$dbc get key==data" [pad_data $method $data] $d
+ error_check_good "$dbc get sequential" $k $count
+ if { $k > $nentries } {
+ error_check_good "$dbc get key too large" $k $nentries
+ }
+ set count [expr $count - $interval]
+ if { $count < 1 } {
+ break
+ }
+ }
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+}
diff --git a/storage/bdb/test/test044.tcl b/storage/bdb/test/test044.tcl
new file mode 100644
index 00000000000..67cf3ea24b8
--- /dev/null
+++ b/storage/bdb/test/test044.tcl
@@ -0,0 +1,262 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test044.tcl,v 11.32 2002/07/16 20:53:04 bostic Exp $
+#
+# TEST test044
+# TEST Small system integration tests
+# TEST Test proper functioning of the checkpoint daemon,
+# TEST recovery, transactions, etc.
+# TEST
+# TEST System integration DB test: verify that locking, recovery, checkpoint,
+# TEST and all the other utilities basically work.
+# TEST
+# TEST The test consists of $nprocs processes operating on $nfiles files. A
+# TEST transaction consists of adding the same key/data pair to some random
+# TEST number of these files. We generate a bimodal distribution in key size
+# TEST with 70% of the keys being small (1-10 characters) and the remaining
+# TEST 30% of the keys being large (uniform distribution about mean $key_avg).
+# TEST If we generate a key, we first check to make sure that the key is not
+# TEST already in the dataset. If it is, we do a lookup.
+#
+# XXX
+# This test uses grow-only files currently!
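+#
+# For illustration, one way to draw a key length from the bimodal
+# distribution described above; this is a sketch only, the real work
+# is done by sysscript.tcl:
+#
+#	if { [berkdb random_int 1 10] <= 7 } {
+#		set len [berkdb random_int 1 10]
+#	} else {
+#		set len [berkdb random_int 1 [expr $key_avg * 2]]
+#	}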
+proc test044 { method {nprocs 5} {nfiles 10} {cont 0} args } {
+ source ./include.tcl
+ global encrypt
+ global rand_init
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ berkdb srand $rand_init
+
+ # If we are using an env, then skip this test. It needs its own.
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ incr eindex
+ set env [lindex $args $eindex]
+ puts "Test044 skipping for env $env"
+ return
+ }
+ if { $encrypt != 0 } {
+ puts "Test044 skipping for security"
+ return
+ }
+
+ puts "Test044: system integration test db $method $nprocs processes \
+ on $nfiles files"
+
+ # Parse options
+ set otherargs ""
+ set key_avg 10
+ set data_avg 20
+ set do_exit 0
+ for { set i 0 } { $i < [llength $args] } {incr i} {
+ switch -regexp -- [lindex $args $i] {
+ -key_avg { incr i; set key_avg [lindex $args $i] }
+ -data_avg { incr i; set data_avg [lindex $args $i] }
+ -testdir { incr i; set testdir [lindex $args $i] }
+ -x.* { set do_exit 1 }
+ default {
+ lappend otherargs [lindex $args $i]
+ }
+ }
+ }
+
+ if { $cont == 0 } {
+ # Create the database and open the dictionary
+ env_cleanup $testdir
+
+ # Create an environment
+ puts "\tTest044.a: creating environment and $nfiles files"
+ set dbenv [berkdb_env -create -txn -home $testdir]
+ error_check_good env_open [is_valid_env $dbenv] TRUE
+
+ # Create a bunch of files
+ set m $method
+
+ for { set i 0 } { $i < $nfiles } { incr i } {
+ if { $method == "all" } {
+ switch [berkdb random_int 1 2] {
+ 1 { set m -btree }
+ 2 { set m -hash }
+ }
+ } else {
+ set m $omethod
+ }
+
+ set db [eval {berkdb_open -env $dbenv -create \
+ -mode 0644 $m} $otherargs {test044.$i.db}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ error_check_good db_close [$db close] 0
+ }
+ }
+
+	# Close the environment; it is only opened above when $cont is 0.
+	if { $cont == 0 } {
+		error_check_good env_close [$dbenv close] 0
+	}
+
+ if { $do_exit == 1 } {
+ return
+ }
+
+ # Database is created, now fork off the kids.
+ puts "\tTest044.b: forking off $nprocs processes and utilities"
+ set cycle 1
+ set ncycles 3
+ while { $cycle <= $ncycles } {
+ set dbenv [berkdb_env -create -txn -home $testdir]
+ error_check_good env_open [is_valid_env $dbenv] TRUE
+
+ # Fire off deadlock detector and checkpointer
+ puts "Beginning cycle $cycle"
+ set ddpid [exec $util_path/db_deadlock -h $testdir -t 5 &]
+ set cppid [exec $util_path/db_checkpoint -h $testdir -p 2 &]
+ puts "Deadlock detector: $ddpid Checkpoint daemon $cppid"
+
+ set pidlist {}
+ for { set i 0 } {$i < $nprocs} {incr i} {
+ set p [exec $tclsh_path \
+ $test_path/sysscript.tcl $testdir \
+ $nfiles $key_avg $data_avg $omethod \
+ >& $testdir/test044.$i.log &]
+ lappend pidlist $p
+ }
+ set sleep [berkdb random_int 300 600]
+ puts \
+"[timestamp] $nprocs processes running $pidlist for $sleep seconds"
+ tclsleep $sleep
+
+ # Now simulate a crash
+ puts "[timestamp] Crashing"
+
+ #
+ # The environment must remain open until this point to get
+ # proper sharing (using the paging file) on Win/9X. [#2342]
+ #
+ error_check_good env_close [$dbenv close] 0
+
+ tclkill $ddpid
+ tclkill $cppid
+
+ foreach p $pidlist {
+ tclkill $p
+ }
+
+ # Check for test failure
+ set e [eval findfail [glob $testdir/test044.*.log]]
+ error_check_good "FAIL: error message(s) in log files" $e 0
+
+ # Now run recovery
+ test044_verify $testdir $nfiles
+ incr cycle
+ }
+}
+
+proc test044_usage { } {
+	puts -nonewline {test044 method [nprocs] [nfiles] [cont] }
+	puts {[-key_avg size] [-data_avg size] [-testdir dir] -x}
+}
+
+proc test044_verify { dir nfiles } {
+ source ./include.tcl
+
+ # Save everything away in case something breaks
+# for { set f 0 } { $f < $nfiles } {incr f} {
+# file copy -force $dir/test044.$f.db $dir/test044.$f.save1
+# }
+# foreach f [glob $dir/log.*] {
+# if { [is_substr $f save] == 0 } {
+# file copy -force $f $f.save1
+# }
+# }
+
+ # Run recovery and then read through all the database files to make
+ # sure that they all look good.
+
+ puts "\tTest044.verify: Running recovery and verifying file contents"
+ set stat [catch {exec $util_path/db_recover -h $dir} result]
+ if { $stat == 1 } {
+ error "FAIL: Recovery error: $result."
+ }
+
+ # Save everything away in case something breaks
+# for { set f 0 } { $f < $nfiles } {incr f} {
+# file copy -force $dir/test044.$f.db $dir/test044.$f.save2
+# }
+# foreach f [glob $dir/log.*] {
+# if { [is_substr $f save] == 0 } {
+# file copy -force $f $f.save2
+# }
+# }
+
+ for { set f 0 } { $f < $nfiles } { incr f } {
+ set db($f) [berkdb_open $dir/test044.$f.db]
+ error_check_good $f:dbopen [is_valid_db $db($f)] TRUE
+
+ set cursors($f) [$db($f) cursor]
+ error_check_bad $f:cursor_open $cursors($f) NULL
+ error_check_good \
+ $f:cursor_open [is_substr $cursors($f) $db($f)] 1
+ }
+
+ for { set f 0 } { $f < $nfiles } { incr f } {
+ for {set d [$cursors($f) get -first] } \
+ { [string length $d] != 0 } \
+ { set d [$cursors($f) get -next] } {
+
+ set k [lindex [lindex $d 0] 0]
+ set d [lindex [lindex $d 0] 1]
+
+ set flist [zero_list $nfiles]
+ set r $d
+ while { [set ndx [string first : $r]] != -1 } {
+ set fnum [string range $r 0 [expr $ndx - 1]]
+ if { [lindex $flist $fnum] == 0 } {
+ set fl "-set"
+ } else {
+ set fl "-next"
+ }
+
+ if { $fl != "-set" || $fnum != $f } {
+ if { [string compare $fl "-set"] == 0} {
+ set full [$cursors($fnum) \
+ get -set $k]
+ } else {
+ set full [$cursors($fnum) \
+ get -next]
+ }
+ set key [lindex [lindex $full 0] 0]
+ set rec [lindex [lindex $full 0] 1]
+ error_check_good \
+ $f:dbget_$fnum:key $key $k
+ error_check_good \
+ $f:dbget_$fnum:data $rec $d
+ }
+
+ set flist [lreplace $flist $fnum $fnum 1]
+ incr ndx
+ set r [string range $r $ndx end]
+ }
+ }
+ }
+
+ for { set f 0 } { $f < $nfiles } { incr f } {
+ error_check_good $cursors($f) [$cursors($f) close] 0
+ error_check_good db_close:$f [$db($f) close] 0
+ }
+}
diff --git a/storage/bdb/test/test045.tcl b/storage/bdb/test/test045.tcl
new file mode 100644
index 00000000000..3825135facd
--- /dev/null
+++ b/storage/bdb/test/test045.tcl
@@ -0,0 +1,128 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test045.tcl,v 11.24 2002/02/07 17:50:10 sue Exp $
+#
+# TEST test045
+# TEST Small random tester
+# TEST Runs a number of random add/delete/retrieve operations.
+# TEST Tests both successful conditions and error conditions.
+# TEST
+# TEST Run the random db tester on the specified access method.
+#
+# Options are:
+# -adds <maximum number of keys before you disable adds>
+# -cursors <number of cursors>
+# -dataavg <average data size>
+# -delete <minimum number of keys before you disable deletes>
+# -dups <allow duplicates in file>
+# -errpct <Induce errors errpct of the time>
+# -init <initial number of entries in database>
+# -keyavg <average key size>
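+#
+# For illustration, a typical invocation exercising a few of these
+# options; the values are hypothetical:
+#
+#	test045 btree 5000 -cursors 3 -dups 1 -errpct 10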
+proc test045 { method {nops 10000} args } {
+ source ./include.tcl
+ global encrypt
+
+ #
+ # If we are using an env, then skip this test. It needs its own.
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ incr eindex
+ set env [lindex $args $eindex]
+ puts "Test045 skipping for env $env"
+ return
+ }
+ set args [convert_args $method $args]
+ if { $encrypt != 0 } {
+ puts "Test045 skipping for security"
+ return
+ }
+ set omethod [convert_method $method]
+
+ puts "Test045: Random tester on $method for $nops operations"
+
+ # Set initial parameters
+ set adds [expr $nops * 10]
+ set cursors 5
+ set dataavg 40
+ set delete $nops
+ set dups 0
+ set errpct 0
+ set init 0
+ if { [is_record_based $method] == 1 } {
+ set keyavg 10
+ } else {
+ set keyavg 25
+ }
+
+ # Process arguments
+ set oargs ""
+ for { set i 0 } { $i < [llength $args] } {incr i} {
+ switch -regexp -- [lindex $args $i] {
+ -adds { incr i; set adds [lindex $args $i] }
+ -cursors { incr i; set cursors [lindex $args $i] }
+ -dataavg { incr i; set dataavg [lindex $args $i] }
+ -delete { incr i; set delete [lindex $args $i] }
+ -dups { incr i; set dups [lindex $args $i] }
+ -errpct { incr i; set errpct [lindex $args $i] }
+ -init { incr i; set init [lindex $args $i] }
+ -keyavg { incr i; set keyavg [lindex $args $i] }
+ -extent { incr i;
+ lappend oargs "-extent" "100" }
+ default { lappend oargs [lindex $args $i] }
+ }
+ }
+
+	# Create the database and initialize it.
+ set root $testdir/test045
+ set f $root.db
+ env_cleanup $testdir
+
+ # Run the script with 3 times the number of initial elements to
+ # set it up.
+ set db [eval {berkdb_open \
+ -create -mode 0644 $omethod} $oargs {$f}]
+ error_check_good dbopen:$f [is_valid_db $db] TRUE
+
+ set r [$db close]
+ error_check_good dbclose:$f $r 0
+
+ # We redirect standard out, but leave standard error here so we
+ # can see errors.
+
+ puts "\tTest045.a: Initializing database"
+ if { $init != 0 } {
+ set n [expr 3 * $init]
+ exec $tclsh_path \
+ $test_path/dbscript.tcl $method $f $n \
+ 1 $init $n $keyavg $dataavg $dups 0 -1 \
+ > $testdir/test045.init
+ }
+ # Check for test failure
+ set e [findfail $testdir/test045.init]
+ error_check_good "FAIL: error message(s) in init file" $e 0
+
+	puts "\tTest045.b: Now firing off random dbscript, running:"
+ # Now the database is initialized, run a test
+ puts "$tclsh_path\
+ $test_path/dbscript.tcl $method $f $nops $cursors $delete $adds \
+ $keyavg $dataavg $dups $errpct > $testdir/test045.log"
+
+ exec $tclsh_path \
+ $test_path/dbscript.tcl $method $f \
+ $nops $cursors $delete $adds $keyavg \
+ $dataavg $dups $errpct \
+ > $testdir/test045.log
+
+ # Check for test failure
+ set e [findfail $testdir/test045.log]
+ error_check_good "FAIL: error message(s) in log file" $e 0
+
+}
diff --git a/storage/bdb/test/test046.tcl b/storage/bdb/test/test046.tcl
new file mode 100644
index 00000000000..4136f30aaa7
--- /dev/null
+++ b/storage/bdb/test/test046.tcl
@@ -0,0 +1,821 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test046.tcl,v 11.33 2002/05/24 15:24:55 sue Exp $
+#
+# TEST test046
+# TEST Overwrite test of small/big key/data with cursor checks.
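+#
+# For illustration, a minimal sketch of the cursor-overwrite idiom
+# exercised throughout; the data value is an assumption:
+#
+#	set dbc [$db cursor]
+#	$dbc get -first
+#	# Overwrite the data portion of the pair under the cursor:
+#	$dbc put -current NEWDATA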
+proc test046 { method args } {
+ global alphabet
+ global errorInfo
+ global errorCode
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "\tTest046: Overwrite test with cursor and small/big key/data."
+ puts "\tTest046:\t$method $args"
+
+ if { [is_rrecno $method] == 1} {
+ puts "\tTest046: skipping for method $method."
+ return
+ }
+
+ set key "key"
+ set data "data"
+ set txn ""
+ set flags ""
+
+ if { [is_record_based $method] == 1} {
+ set key ""
+ }
+
+ puts "\tTest046: Create $method database."
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test046.db
+ set env NULL
+ } else {
+ set testfile test046.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ set t1 $testdir/t1
+ cleanup $testdir $env
+
+ set oflags "-create -mode 0644 $args $omethod"
+ set db [eval {berkdb_open} $oflags $testfile.a]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # keep nkeys even
+ set nkeys 20
+
+ # Fill page w/ small key/data pairs
+ puts "\tTest046: Fill page with $nkeys small key/data pairs."
+ for { set i 1 } { $i <= $nkeys } { incr i } {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ if { [is_record_based $method] == 1} {
+ set ret [eval {$db put} $txn {$i $data$i}]
+ } elseif { $i < 10 } {
+ set ret [eval {$db put} $txn [set key]00$i \
+ [set data]00$i]
+ } elseif { $i < 100 } {
+ set ret [eval {$db put} $txn [set key]0$i \
+ [set data]0$i]
+ } else {
+ set ret [eval {$db put} $txn {$key$i $data$i}]
+ }
+ error_check_good dbput $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ # open curs to db
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_substr $dbc $db] 1
+
+ # get db order of keys
+ for {set i 1; set ret [$dbc get -first]} { [llength $ret] != 0} { \
+ set ret [$dbc get -next]} {
+ set key_set($i) [lindex [lindex $ret 0] 0]
+ set data_set($i) [lindex [lindex $ret 0] 1]
+ incr i
+ }
+
+ puts "\tTest046.a: Deletes by key."
+ puts "\t\tTest046.a.1: Get data with SET, then delete before cursor."
+ # get key in middle of page, call this the nth set curr to it
+ set i [expr $nkeys/2]
+ set ret [$dbc get -set $key_set($i)]
+ error_check_bad dbc_get:set [llength $ret] 0
+ set curr $ret
+
+ # delete before cursor(n-1), make sure it is gone
+ set i [expr $i - 1]
+ error_check_good db_del [eval {$db del} $txn {$key_set($i)}] 0
+
+ # use set_range to get first key starting at n-1, should
+ # give us nth--but only works for btree
+ if { [is_btree $method] == 1 } {
+ set ret [$dbc get -set_range $key_set($i)]
+ } else {
+ if { [is_record_based $method] == 1 } {
+ set ret [$dbc get -set $key_set($i)]
+ error_check_good \
+ dbc_get:deleted(recno) [llength [lindex $ret 1]] 0
+ #error_check_good \
+ # catch:get [catch {$dbc get -set $key_set($i)} ret] 1
+ #error_check_good \
+ # dbc_get:deleted(recno) [is_substr $ret "KEYEMPTY"] 1
+ } else {
+ set ret [$dbc get -set $key_set($i)]
+ error_check_good dbc_get:deleted [llength $ret] 0
+ }
+ set ret [$dbc get -set $key_set([incr i])]
+ incr i -1
+ }
+ error_check_bad dbc_get:set(R)(post-delete) [llength $ret] 0
+ error_check_good dbc_get(match):set $ret $curr
+
+ puts "\t\tTest046.a.2: Delete cursor item by key."
+ # nth key, which cursor should be on now
+ set i [incr i]
+ set ret [eval {$db del} $txn {$key_set($i)}]
+ error_check_good db_del $ret 0
+
+ # this should return n+1 key/data, curr has nth key/data
+ if { [string compare $omethod "-btree"] == 0 } {
+ set ret [$dbc get -set_range $key_set($i)]
+ } else {
+ if { [is_record_based $method] == 1 } {
+ set ret [$dbc get -set $key_set($i)]
+ error_check_good \
+ dbc_get:deleted(recno) [llength [lindex $ret 1]] 0
+ #error_check_good \
+ # catch:get [catch {$dbc get -set $key_set($i)} ret] 1
+ #error_check_good \
+ # dbc_get:deleted(recno) [is_substr $ret "KEYEMPTY"] 1
+ } else {
+ set ret [$dbc get -set $key_set($i)]
+ error_check_good dbc_get:deleted [llength $ret] 0
+ }
+ set ret [$dbc get -set $key_set([expr $i+1])]
+ }
+ error_check_bad dbc_get(post-delete):set_range [llength $ret] 0
+ error_check_bad dbc_get(no-match):set_range $ret $curr
+
+ puts "\t\tTest046.a.3: Delete item after cursor."
+ # we'll delete n+2, since we have deleted n-1 and n
+ # i still equal to nth, cursor on n+1
+ set i [incr i]
+ set ret [$dbc get -set $key_set($i)]
+ error_check_bad dbc_get:set [llength $ret] 0
+ set curr [$dbc get -next]
+ error_check_bad dbc_get:next [llength $curr] 0
+ set ret [$dbc get -prev]
+	error_check_bad dbc_get:prev [llength $ret] 0
+ # delete *after* cursor pos.
+ error_check_good db:del [eval {$db del} $txn {$key_set([incr i])}] 0
+
+ # make sure item is gone, try to get it
+ if { [string compare $omethod "-btree"] == 0} {
+ set ret [$dbc get -set_range $key_set($i)]
+ } else {
+ if { [is_record_based $method] == 1 } {
+ set ret [$dbc get -set $key_set($i)]
+ error_check_good \
+ dbc_get:deleted(recno) [llength [lindex $ret 1]] 0
+ #error_check_good \
+ # catch:get [catch {$dbc get -set $key_set($i)} ret] 1
+ #error_check_good \
+ # dbc_get:deleted(recno) [is_substr $ret "KEYEMPTY"] 1
+ } else {
+ set ret [$dbc get -set $key_set($i)]
+ error_check_good dbc_get:deleted [llength $ret] 0
+ }
+ set ret [$dbc get -set $key_set([expr $i +1])]
+ }
+ error_check_bad dbc_get:set(_range) [llength $ret] 0
+ error_check_bad dbc_get:set(_range) $ret $curr
+ error_check_good dbc_get:set [lindex [lindex $ret 0] 0] \
+ $key_set([expr $i+1])
+
+ puts "\tTest046.b: Deletes by cursor."
+ puts "\t\tTest046.b.1: Delete, do DB_NEXT."
+ error_check_good dbc:del [$dbc del] 0
+ set ret [$dbc get -next]
+ error_check_bad dbc_get:next [llength $ret] 0
+ set i [expr $i+2]
+ # i = n+4
+ error_check_good dbc_get:next(match) \
+ [lindex [lindex $ret 0] 0] $key_set($i)
+
+ puts "\t\tTest046.b.2: Delete, do DB_PREV."
+ error_check_good dbc:del [$dbc del] 0
+ set ret [$dbc get -prev]
+ error_check_bad dbc_get:prev [llength $ret] 0
+ set i [expr $i-3]
+ # i = n+1 (deleted all in between)
+ error_check_good dbc_get:prev(match) \
+ [lindex [lindex $ret 0] 0] $key_set($i)
+
+ puts "\t\tTest046.b.3: Delete, do DB_CURRENT."
+ error_check_good dbc:del [$dbc del] 0
+	# we just deleted, so the current item should be DB_KEYEMPTY
+ set ret [$dbc get -current]
+ error_check_good dbc_get:curr:deleted [llength [lindex $ret 1]] 0
+ #error_check_good catch:get:current [catch {$dbc get -current} ret] 1
+ #error_check_good dbc_get:curr:deleted [is_substr $ret "DB_KEYEMPTY"] 1
+
+ puts "\tTest046.c: Inserts (before/after), by key then cursor."
+ puts "\t\tTest046.c.1: Insert by key before the cursor."
+ # i is at curs pos, i=n+1, we want to go BEFORE
+ set i [incr i -1]
+ set ret [eval {$db put} $txn {$key_set($i) $data_set($i)}]
+ error_check_good db_put:before $ret 0
+
+ puts "\t\tTest046.c.2: Insert by key after the cursor."
+ set i [incr i +2]
+ set ret [eval {$db put} $txn {$key_set($i) $data_set($i)}]
+ error_check_good db_put:after $ret 0
+
+ puts "\t\tTest046.c.3: Insert by curs with deleted curs (should fail)."
+ # cursor is on n+1, we'll change i to match
+ set i [incr i -1]
+
+ error_check_good dbc:close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db:close [$db close] 0
+ if { [is_record_based $method] == 1} {
+ puts "\t\tSkipping the rest of test for method $method."
+ puts "\tTest046 ($method) complete."
+ return
+ } else {
+ # Reopen without printing __db_errs.
+ set db [eval {berkdb_open_noerr} $oflags $testfile.a]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good cursor [is_valid_cursor $dbc $db] TRUE
+
+ # should fail with EINVAL (deleted cursor)
+ set errorCode NONE
+ error_check_good catch:put:before 1 \
+ [catch {$dbc put -before $data_set($i)} ret]
+ error_check_good dbc_put:deleted:before \
+ [is_substr $errorCode "EINVAL"] 1
+
+ # should fail with EINVAL
+ set errorCode NONE
+ error_check_good catch:put:after 1 \
+ [catch {$dbc put -after $data_set($i)} ret]
+ error_check_good dbc_put:deleted:after \
+ [is_substr $errorCode "EINVAL"] 1
+
+ puts "\t\tTest046.c.4:\
+ Insert by cursor before/after existent cursor."
+ # can't use before after w/o dup except renumber in recno
+ # first, restore an item so they don't fail
+ #set ret [eval {$db put} $txn {$key_set($i) $data_set($i)}]
+ #error_check_good db_put $ret 0
+
+ #set ret [$dbc get -set $key_set($i)]
+ #error_check_bad dbc_get:set [llength $ret] 0
+ #set i [incr i -2]
+ # i = n - 1
+ #set ret [$dbc get -prev]
+ #set ret [$dbc put -before $key_set($i) $data_set($i)]
+ #error_check_good dbc_put:before $ret 0
+ # cursor pos is adjusted to match prev, recently inserted
+ #incr i
+ # i = n
+ #set ret [$dbc put -after $key_set($i) $data_set($i)]
+ #error_check_good dbc_put:after $ret 0
+ }
+
+ # For the next part of the test, we need a db with no dups to test
+ # overwrites
+ puts "\tTest046.d.0: Cleanup, close db, open new db with no dups."
+ error_check_good dbc:close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db:close [$db close] 0
+
+ set db [eval {berkdb_open} $oflags $testfile.d]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ # Fill page w/ small key/data pairs
+ puts "\tTest046.d.0: Fill page with $nkeys small key/data pairs."
+ for { set i 1 } { $i < $nkeys } { incr i } {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$key$i $data$i}]
+ error_check_good dbput $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+ set nkeys 20
+
+ # Prepare cursor on item
+ set ret [$dbc get -first]
+ error_check_bad dbc_get:first [llength $ret] 0
+
+ # Prepare unique big/small values for an initial
+ # and an overwrite set of key/data
+ foreach ptype {init over} {
+ foreach size {big small} {
+ if { [string compare $size big] == 0 } {
+ set key_$ptype$size \
+ KEY_$size[repeat alphabet 250]
+ set data_$ptype$size \
+ DATA_$size[repeat alphabet 250]
+ } else {
+ set key_$ptype$size \
+ KEY_$size[repeat alphabet 10]
+ set data_$ptype$size \
+ DATA_$size[repeat alphabet 10]
+ }
+ }
+ }
+
+ set i 0
+ # Do all overwrites for key and cursor
+ foreach type {key_over curs_over} {
+ # Overwrite (i=initial) four different kinds of pairs
+ incr i
+ puts "\tTest046.d: Overwrites $type."
+ foreach i_pair {\
+ {small small} {big small} {small big} {big big} } {
+ # Overwrite (w=write) with four different kinds of data
+ foreach w_pair {\
+ {small small} {big small} {small big} {big big} } {
+
+ # we can only overwrite if key size matches
+ if { [string compare [lindex \
+ $i_pair 0] [lindex $w_pair 0]] != 0} {
+ continue
+ }
+
+ # first write the initial key/data
+				set ret [$dbc put -keyfirst \
+				    [set key_init[lindex $i_pair 0]] \
+				    [set data_init[lindex $i_pair 1]]]
+ error_check_good \
+ dbc_put:curr:init:$i_pair $ret 0
+ set ret [$dbc get -current]
+ error_check_bad dbc_get:curr [llength $ret] 0
+				error_check_good dbc_get:curr:data \
+				    [lindex [lindex $ret 0] 1] \
+				    [set data_init[lindex $i_pair 1]]
+
+ # Now, try to overwrite: dups not supported in
+ # this db
+ if { [string compare $type key_over] == 0 } {
+ puts "\t\tTest046.d.$i: Key\
+ Overwrite:($i_pair) by ($w_pair)."
+					set ret [eval {$db put} $txn [list \
+					    [set key_init[lindex $i_pair 0]] \
+					    [set data_over[lindex $w_pair 1]]]]
+ error_check_good \
+ dbput:over:i($i_pair):o($w_pair) $ret 0
+ # check value
+					set ret [eval {$db get} $txn [list \
+					    [set key_init[lindex $i_pair 0]]]]
+ error_check_bad \
+ db:get:check [llength $ret] 0
+					error_check_good db:get:compare_data \
+					    [lindex [lindex $ret 0] 1] \
+					    [set data_over[lindex $w_pair 1]]
+ } else {
+ # This is a cursor overwrite
+ puts \
+	"\t\tTest046.d.$i: Curs Overwrite:($i_pair) by ($w_pair)."
+					set ret [$dbc put -current \
+					    [set data_over[lindex $w_pair 1]]]
+ error_check_good \
+ dbcput:over:i($i_pair):o($w_pair) $ret 0
+ # check value
+ set ret [$dbc get -current]
+ error_check_bad \
+ dbc_get:curr [llength $ret] 0
+					error_check_good dbc_get:curr:data \
+					    [lindex [lindex $ret 0] 1] \
+					    [set data_over[lindex $w_pair 1]]
+ }
+ } ;# foreach write pair
+ } ;# foreach initial pair
+	} ;# foreach overwrite type (key/curs)
+
+ puts "\tTest046.d.3: Cleanup for next part of test."
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ if { [is_rbtree $method] == 1} {
+ puts "\tSkipping the rest of Test046 for method $method."
+ puts "\tTest046 complete."
+ return
+ }
+
+ puts "\tTest046.e.1: Open db with sorted dups."
+ set db [eval {berkdb_open_noerr} $oflags -dup -dupsort $testfile.e]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # keep nkeys even
+ set nkeys 20
+ set ndups 20
+
+ # Fill page w/ small key/data pairs
+ puts "\tTest046.e.2:\
+ Put $nkeys small key/data pairs and $ndups sorted dups."
+ for { set i 0 } { $i < $nkeys } { incr i } {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ if { $i < 10 } {
+ set ret [eval {$db put} $txn [set key]0$i [set data]0$i]
+ } else {
+ set ret [eval {$db put} $txn {$key$i $data$i}]
+ }
+ error_check_good dbput $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ # open curs to db
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_substr $dbc $db] 1
+
+ # get db order of keys
+ for {set i 0; set ret [$dbc get -first]} { [llength $ret] != 0} { \
+ set ret [$dbc get -next]} {
+ set key_set($i) [lindex [lindex $ret 0] 0]
+ set data_set($i) [lindex [lindex $ret 0] 1]
+ incr i
+ }
+
+ # put 20 sorted duplicates on key in middle of page
+ set i [expr $nkeys/2]
+ set ret [$dbc get -set $key_set($i)]
+ error_check_bad dbc_get:set [llength $ret] 0
+
+ set keym $key_set($i)
+
+ for { set i 0 } { $i < $ndups } { incr i } {
+ if { $i < 10 } {
+ set ret [eval {$db put} $txn {$keym DUPLICATE_0$i}]
+ } else {
+ set ret [eval {$db put} $txn {$keym DUPLICATE_$i}]
+ }
+ error_check_good db_put:DUP($i) $ret 0
+ }
+
+ puts "\tTest046.e.3: Check duplicate duplicates"
+ set ret [eval {$db put} $txn {$keym DUPLICATE_00}]
+ error_check_good dbput:dupdup [is_substr $ret "DB_KEYEXIST"] 1
+
+ # get dup ordering
+ for {set i 0; set ret [$dbc get -set $keym]} { [llength $ret] != 0} {\
+ set ret [$dbc get -nextdup] } {
+ set dup_set($i) [lindex [lindex $ret 0] 1]
+ incr i
+ }
+
+ # put cursor on item in middle of dups
+ set i [expr $ndups/2]
+ set ret [$dbc get -get_both $keym $dup_set($i)]
+ error_check_bad dbc_get:get_both [llength $ret] 0
+
+ puts "\tTest046.f: Deletes by cursor."
+ puts "\t\tTest046.f.1: Delete by cursor, do a DB_NEXT, check cursor."
+ set ret [$dbc get -current]
+ error_check_bad dbc_get:current [llength $ret] 0
+ error_check_good dbc:del [$dbc del] 0
+ set ret [$dbc get -next]
+ error_check_bad dbc_get:next [llength $ret] 0
+ error_check_good \
+ dbc_get:nextdup [lindex [lindex $ret 0] 1] $dup_set([incr i])
+
+ puts "\t\tTest046.f.2: Delete by cursor, do DB_PREV, check cursor."
+ error_check_good dbc:del [$dbc del] 0
+ set ret [$dbc get -prev]
+ error_check_bad dbc_get:prev [llength $ret] 0
+ set i [incr i -2]
+ error_check_good dbc_get:prev [lindex [lindex $ret 0] 1] $dup_set($i)
+
+ puts "\t\tTest046.f.3: Delete by cursor, do DB_CURRENT, check cursor."
+ error_check_good dbc:del [$dbc del] 0
+ set ret [$dbc get -current]
+ error_check_good dbc_get:current:deleted [llength [lindex $ret 1]] 0
+ #error_check_good catch:dbc_get:curr [catch {$dbc get -current} ret] 1
+ #error_check_good \
+ # dbc_get:current:deleted [is_substr $ret "DB_KEYEMPTY"] 1
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ # restore deleted keys
+ error_check_good db_put:1 [eval {$db put} $txn {$keym $dup_set($i)}] 0
+ error_check_good db_put:2 [eval {$db put} $txn \
+ {$keym $dup_set([incr i])}] 0
+ error_check_good db_put:3 [eval {$db put} $txn \
+ {$keym $dup_set([incr i])}] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ # tested above
+
+ # Reopen database without __db_err, reset cursor
+ error_check_good dbclose [$db close] 0
+ set db [eval {berkdb_open_noerr} $oflags -dup -dupsort $testfile.e]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+
+ set ret [$dbc get -set $keym]
+ error_check_bad dbc_get:set [llength $ret] 0
+ set ret2 [$dbc get -current]
+ error_check_bad dbc_get:current [llength $ret2] 0
+ # match
+ error_check_good dbc_get:current/set(match) $ret $ret2
+ # right one?
+ error_check_good \
+ dbc_get:curr/set(matchdup) [lindex [lindex $ret 0] 1] $dup_set(0)
+
+ # cursor is on first dup
+ set ret [$dbc get -next]
+ error_check_bad dbc_get:next [llength $ret] 0
+ # now on second dup
+ error_check_good dbc_get:next [lindex [lindex $ret 0] 1] $dup_set(1)
+ # check cursor
+ set ret [$dbc get -current]
+ error_check_bad dbc_get:curr [llength $ret] 0
+ error_check_good \
+ dbcget:curr(compare) [lindex [lindex $ret 0] 1] $dup_set(1)
+
+ puts "\tTest046.g: Inserts."
+ puts "\t\tTest046.g.1: Insert by key before cursor."
+ set i 0
+
+ # use "spam" to prevent a duplicate duplicate.
+ set ret [eval {$db put} $txn {$keym $dup_set($i)spam}]
+ error_check_good db_put:before $ret 0
+ # make sure cursor was maintained
+ set ret [$dbc get -current]
+ error_check_bad dbc_get:curr [llength $ret] 0
+ error_check_good \
+ dbc_get:current(post-put) [lindex [lindex $ret 0] 1] $dup_set(1)
+
+ puts "\t\tTest046.g.2: Insert by key after cursor."
+ set i [expr $i + 2]
+ # use "eggs" to prevent a duplicate duplicate
+ set ret [eval {$db put} $txn {$keym $dup_set($i)eggs}]
+ error_check_good db_put:after $ret 0
+ # make sure cursor was maintained
+ set ret [$dbc get -current]
+ error_check_bad dbc_get:curr [llength $ret] 0
+ error_check_good \
+ dbc_get:curr(post-put,after) [lindex [lindex $ret 0] 1] $dup_set(1)
+
+ puts "\t\tTest046.g.3: Insert by curs before/after curs (should fail)."
+ # should return EINVAL (dupsort specified)
+ error_check_good dbc_put:before:catch \
+ [catch {$dbc put -before $dup_set([expr $i -1])} ret] 1
+ error_check_good \
+ dbc_put:before:deleted [is_substr $errorCode "EINVAL"] 1
+ error_check_good dbc_put:after:catch \
+ [catch {$dbc put -after $dup_set([expr $i +2])} ret] 1
+ error_check_good \
+ dbc_put:after:deleted [is_substr $errorCode "EINVAL"] 1
+
+ puts "\tTest046.h: Cursor overwrites."
+ puts "\t\tTest046.h.1: Test that dupsort disallows current overwrite."
+ set ret [$dbc get -set $keym]
+ error_check_bad dbc_get:set [llength $ret] 0
+ error_check_good \
+ catch:dbc_put:curr [catch {$dbc put -current DATA_OVERWRITE} ret] 1
+ error_check_good dbc_put:curr:dupsort [is_substr $errorCode EINVAL] 1
+
+ puts "\t\tTest046.h.2: New db (no dupsort)."
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ set db [eval {berkdb_open} \
+ $oflags -dup $testfile.h]
+ error_check_good db_open [is_valid_db $db] TRUE
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+
+ for {set i 0} {$i < $nkeys} {incr i} {
+ if { $i < 10 } {
+ set ret [eval {$db put} $txn {key0$i datum0$i}]
+ error_check_good db_put $ret 0
+ } else {
+ set ret [eval {$db put} $txn {key$i datum$i}]
+ error_check_good db_put $ret 0
+ }
+ if { $i == 0 } {
+ for {set j 0} {$j < $ndups} {incr j} {
+ if { $i < 10 } {
+ set keyput key0$i
+ } else {
+ set keyput key$i
+ }
+ if { $j < 10 } {
+ set ret [eval {$db put} $txn \
+ {$keyput DUP_datum0$j}]
+ } else {
+ set ret [eval {$db put} $txn \
+ {$keyput DUP_datum$j}]
+ }
+ error_check_good dbput:dup $ret 0
+ }
+ }
+ }
+
+ for {set i 0; set ret [$dbc get -first]} { [llength $ret] != 0} { \
+ set ret [$dbc get -next]} {
+ set key_set($i) [lindex [lindex $ret 0] 0]
+ set data_set($i) [lindex [lindex $ret 0] 1]
+ incr i
+ }
+
+ for {set i 0; set ret [$dbc get -set key00]} {\
+ [llength $ret] != 0} {set ret [$dbc get -nextdup]} {
+ set dup_set($i) [lindex [lindex $ret 0] 1]
+ incr i
+ }
+ set i 0
+ set keym key0$i
+ set ret [$dbc get -set $keym]
+ error_check_bad dbc_get:set [llength $ret] 0
+ error_check_good \
+ dbc_get:set(match) [lindex [lindex $ret 0] 1] $dup_set($i)
+
+ set ret [$dbc get -nextdup]
+ error_check_bad dbc_get:nextdup [llength $ret] 0
+ error_check_good dbc_get:nextdup(match) \
+ [lindex [lindex $ret 0] 1] $dup_set([expr $i + 1])
+
+ puts "\t\tTest046.h.3: Insert by cursor before cursor (DB_BEFORE)."
+ set ret [$dbc put -before BEFOREPUT]
+ error_check_good dbc_put:before $ret 0
+ set ret [$dbc get -current]
+ error_check_bad dbc_get:curr [llength $ret] 0
+ error_check_good \
+ dbc_get:curr:match [lindex [lindex $ret 0] 1] BEFOREPUT
+ # make sure that this is actually a dup w/ dup before
+ set ret [$dbc get -prev]
+ error_check_bad dbc_get:prev [llength $ret] 0
+ error_check_good dbc_get:prev:match \
+ [lindex [lindex $ret 0] 1] $dup_set($i)
+ set ret [$dbc get -prev]
+ # should not be a dup
+ error_check_bad dbc_get:prev(no_dup) \
+ [lindex [lindex $ret 0] 0] $keym
+
+ puts "\t\tTest046.h.4: Insert by cursor after cursor (DB_AFTER)."
+ set ret [$dbc get -set $keym]
+
+ # delete next 3 when fix
+ #puts "[$dbc get -current]\
+ # [$dbc get -next] [$dbc get -next] [$dbc get -next] [$dbc get -next]"
+ #set ret [$dbc get -set $keym]
+
+ error_check_bad dbc_get:set [llength $ret] 0
+ set ret [$dbc put -after AFTERPUT]
+ error_check_good dbc_put:after $ret 0
+ #puts [$dbc get -current]
+
+ # delete next 3 when fix
+ #set ret [$dbc get -set $keym]
+ #puts "[$dbc get -current] next: [$dbc get -next] [$dbc get -next]"
+ #set ret [$dbc get -set AFTERPUT]
+ #set ret [$dbc get -set $keym]
+ #set ret [$dbc get -next]
+ #puts $ret
+
+ set ret [$dbc get -current]
+ error_check_bad dbc_get:curr [llength $ret] 0
+ error_check_good dbc_get:curr:match [lindex [lindex $ret 0] 1] AFTERPUT
+ set ret [$dbc get -prev]
+ # now should be on first item (non-dup) of keym
+ error_check_bad dbc_get:prev1 [llength $ret] 0
+ error_check_good \
+ dbc_get:match [lindex [lindex $ret 0] 1] $dup_set($i)
+ set ret [$dbc get -next]
+ error_check_bad dbc_get:next [llength $ret] 0
+ error_check_good \
+ dbc_get:match2 [lindex [lindex $ret 0] 1] AFTERPUT
+ set ret [$dbc get -next]
+ error_check_bad dbc_get:next [llength $ret] 0
+ # this is the dup we added previously
+ error_check_good \
+ dbc_get:match3 [lindex [lindex $ret 0] 1] BEFOREPUT
+
+ # now get rid of the dups we added
+ error_check_good dbc_del [$dbc del] 0
+ set ret [$dbc get -prev]
+ error_check_bad dbc_get:prev2 [llength $ret] 0
+ error_check_good dbc_del2 [$dbc del] 0
+	# Put the cursor on the first dup item for the rest of the test.
+ set ret [$dbc get -set $keym]
+ error_check_bad dbc_get:first [llength $ret] 0
+ error_check_good \
+ dbc_get:first:check [lindex [lindex $ret 0] 1] $dup_set($i)
+
+ puts "\t\tTest046.h.5: Overwrite small by small."
+ set ret [$dbc put -current DATA_OVERWRITE]
+ error_check_good dbc_put:current:overwrite $ret 0
+ set ret [$dbc get -current]
+ error_check_good dbc_get:current(put,small/small) \
+ [lindex [lindex $ret 0] 1] DATA_OVERWRITE
+
+ puts "\t\tTest046.h.6: Overwrite small with big."
+ set ret [$dbc put -current DATA_BIG_OVERWRITE[repeat $alphabet 200]]
+ error_check_good dbc_put:current:overwrite:big $ret 0
+ set ret [$dbc get -current]
+ error_check_good dbc_get:current(put,small/big) \
+ [is_substr [lindex [lindex $ret 0] 1] DATA_BIG_OVERWRITE] 1
+
+ puts "\t\tTest046.h.7: Overwrite big with big."
+ set ret [$dbc put -current DATA_BIG_OVERWRITE2[repeat $alphabet 200]]
+ error_check_good dbc_put:current:overwrite(2):big $ret 0
+ set ret [$dbc get -current]
+ error_check_good dbc_get:current(put,big/big) \
+ [is_substr [lindex [lindex $ret 0] 1] DATA_BIG_OVERWRITE2] 1
+
+ puts "\t\tTest046.h.8: Overwrite big with small."
+ set ret [$dbc put -current DATA_OVERWRITE2]
+ error_check_good dbc_put:current:overwrite:small $ret 0
+ set ret [$dbc get -current]
+ error_check_good dbc_get:current(put,big/small) \
+ [is_substr [lindex [lindex $ret 0] 1] DATA_OVERWRITE2] 1
+
+ puts "\tTest046.i: Cleaning up from test."
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ puts "\tTest046 complete."
+}
diff --git a/storage/bdb/test/test047.tcl b/storage/bdb/test/test047.tcl
new file mode 100644
index 00000000000..61c1d0864c5
--- /dev/null
+++ b/storage/bdb/test/test047.tcl
@@ -0,0 +1,258 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test047.tcl,v 11.19 2002/08/05 19:23:51 sandstro Exp $
+#
+# TEST test047
+# TEST	DBcursor->c_get test with SET_RANGE option.
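+#
+# A minimal sketch (illustrative only, not part of the test) of the
+# SET_RANGE semantics exercised here: the cursor is positioned on the
+# smallest key greater than or equal to the one supplied, so with keys
+# {a c} in a btree:
+#	set ret [$dbc get -set_range b]
+#	# ret is {{c data_c}} -- the first key at or above "b"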
+proc test047 { method args } {
+ source ./include.tcl
+
+ set tstn 047
+ set args [convert_args $method $args]
+
+ if { [is_btree $method] != 1 } {
+ puts "Test$tstn skipping for method $method"
+ return
+ }
+
+ set method "-btree"
+
+ puts "\tTest$tstn: Test of SET_RANGE interface to DB->c_get ($method)."
+
+ set key "key"
+ set data "data"
+ set txn ""
+ set flags ""
+
+ puts "\tTest$tstn.a: Create $method database."
+ set eindex [lsearch -exact $args "-env"]
+ set txnenv 0
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tstn.db
+ set testfile1 $testdir/test0$tstn.a.db
+ set testfile2 $testdir/test0$tstn.b.db
+ set env NULL
+ } else {
+ set testfile test0$tstn.db
+ set testfile1 test0$tstn.a.db
+ set testfile2 test0$tstn.b.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ set t1 $testdir/t1
+ cleanup $testdir $env
+
+ set oflags "-create -mode 0644 -dup $args $method"
+ set db [eval {berkdb_open} $oflags $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set nkeys 20
+ # Fill page w/ small key/data pairs
+ #
+ puts "\tTest$tstn.b: Fill page with $nkeys small key/data pairs."
+ for { set i 0 } { $i < $nkeys } { incr i } {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$key$i $data$i}]
+ error_check_good dbput $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ # open curs to db
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+
+ puts "\tTest$tstn.c: Get data with SET_RANGE, then delete by cursor."
+ set i 0
+ set ret [$dbc get -set_range $key$i]
+ error_check_bad dbc_get:set_range [llength $ret] 0
+ set curr $ret
+
+ # delete by cursor, make sure it is gone
+ error_check_good dbc_del [$dbc del] 0
+
+ set ret [$dbc get -set_range $key$i]
+ error_check_bad dbc_get(post-delete):set_range [llength $ret] 0
+ error_check_bad dbc_get(no-match):set_range $ret $curr
+
+ puts "\tTest$tstn.d: \
+ Use another cursor to fix item on page, delete by db."
+ set dbcurs2 [eval {$db cursor} $txn]
+ error_check_good db:cursor2 [is_valid_cursor $dbcurs2 $db] TRUE
+
+ set ret [$dbcurs2 get -set [lindex [lindex $ret 0] 0]]
+ error_check_bad dbc_get(2):set [llength $ret] 0
+ set curr $ret
+ error_check_good db:del [eval {$db del} $txn \
+ {[lindex [lindex $ret 0] 0]}] 0
+
+ # make sure item is gone
+ set ret [$dbcurs2 get -set_range [lindex [lindex $curr 0] 0]]
+ error_check_bad dbc2_get:set_range [llength $ret] 0
+ error_check_bad dbc2_get:set_range $ret $curr
+
+ puts "\tTest$tstn.e: Close for second part of test, close db/cursors."
+ error_check_good dbc:close [$dbc close] 0
+ error_check_good dbc2:close [$dbcurs2 close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good dbclose [$db close] 0
+
+ # open db
+ set db [eval {berkdb_open} $oflags $testfile1]
+ error_check_good dbopen2 [is_valid_db $db] TRUE
+
+ set nkeys 10
+ puts "\tTest$tstn.f: Fill page with $nkeys pairs, one set of dups."
+ for {set i 0} { $i < $nkeys } {incr i} {
+ # a pair
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$key$i $data$i}]
+ error_check_good dbput($i) $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ set j 0
+ for {set i 0} { $i < $nkeys } {incr i} {
+		# add a duplicate for each key
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$key$i DUP_$data$i}]
+ error_check_good dbput($i):dup $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ puts "\tTest$tstn.g: \
+	    Get dup's key w/ SET_RANGE, pin on page with another cursor."
+ set i 0
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+ set ret [$dbc get -set_range $key$i]
+ error_check_bad dbc_get:set_range [llength $ret] 0
+
+ set dbc2 [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc2 $db] TRUE
+ set ret2 [$dbc2 get -set_range $key$i]
+	error_check_bad dbc2_get:set_range [llength $ret2] 0
+
+ error_check_good dbc_compare $ret $ret2
+ puts "\tTest$tstn.h: \
+ Delete duplicates' key, use SET_RANGE to get next dup."
+ set ret [$dbc2 del]
+ error_check_good dbc2_del $ret 0
+ set ret [$dbc get -set_range $key$i]
+ error_check_bad dbc_get:set_range [llength $ret] 0
+ error_check_bad dbc_get:set_range $ret $ret2
+
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good dbc2_close [$dbc2 close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ set db [eval {berkdb_open} $oflags $testfile2]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set nkeys 10
+ set ndups 1000
+
+ puts "\tTest$tstn.i: Fill page with $nkeys pairs and $ndups dups."
+ for {set i 0} { $i < $nkeys } { incr i} {
+ # a pair
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$key$i $data$i}]
+ error_check_good dbput $ret 0
+
+ # dups for single pair
+ if { $i == 0} {
+ for {set j 0} { $j < $ndups } { incr j } {
+ set ret [eval {$db put} $txn \
+ {$key$i DUP_$data$i:$j}]
+ error_check_good dbput:dup $ret 0
+ }
+ }
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+ set i 0
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+ set dbc2 [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc2 $db] TRUE
+ puts "\tTest$tstn.j: \
+ Get key of first dup with SET_RANGE, fix with 2 curs."
+ set ret [$dbc get -set_range $key$i]
+ error_check_bad dbc_get:set_range [llength $ret] 0
+
+ set ret2 [$dbc2 get -set_range $key$i]
+	error_check_bad dbc2_get:set_range [llength $ret2] 0
+ set curr $ret2
+
+ error_check_good dbc_compare $ret $ret2
+
+ puts "\tTest$tstn.k: Delete item by cursor, use SET_RANGE to verify."
+ set ret [$dbc2 del]
+ error_check_good dbc2_del $ret 0
+ set ret [$dbc get -set_range $key$i]
+ error_check_bad dbc_get:set_range [llength $ret] 0
+ error_check_bad dbc_get:set_range $ret $curr
+
+ puts "\tTest$tstn.l: Cleanup."
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good dbc2_close [$dbc2 close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ puts "\tTest$tstn complete."
+}
diff --git a/storage/bdb/test/test048.tcl b/storage/bdb/test/test048.tcl
new file mode 100644
index 00000000000..2131f6f553c
--- /dev/null
+++ b/storage/bdb/test/test048.tcl
@@ -0,0 +1,170 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test048.tcl,v 11.18 2002/07/29 20:27:49 sandstro Exp $
+#
+# TEST test048
+# TEST Cursor stability across Btree splits.
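+#
+# A rough sketch (assumed, not executed here) of the property under test:
+# a cursor positioned on a pair keeps returning that pair even after
+# enough inserts to split the leaf page beneath it:
+#	set before [$dbc get -set $key]
+#	# ...insert many keys to force a page split...
+#	# [$dbc get -current] should still equal $before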
+proc test048 { method args } {
+ global errorCode
+ source ./include.tcl
+
+ set tstn 048
+ set args [convert_args $method $args]
+
+ if { [is_btree $method] != 1 } {
+ puts "Test$tstn skipping for method $method."
+ return
+ }
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ incr pgindex
+ if { [lindex $args $pgindex] > 8192 } {
+ puts "Test048: Skipping for large pagesizes"
+ return
+ }
+ }
+
+ set method "-btree"
+
+ puts "\tTest$tstn: Test of cursor stability across btree splits."
+
+ set key "key"
+ set data "data"
+ set txn ""
+ set flags ""
+
+ puts "\tTest$tstn.a: Create $method database."
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tstn.db
+ set env NULL
+ } else {
+ set testfile test0$tstn.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ set t1 $testdir/t1
+ cleanup $testdir $env
+
+ set oflags "-create -mode 0644 $args $method"
+ set db [eval {berkdb_open} $oflags $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set nkeys 5
+ # Fill page w/ small key/data pairs, keep at leaf
+ #
+ puts "\tTest$tstn.b: Fill page with $nkeys small key/data pairs."
+ for { set i 0 } { $i < $nkeys } { incr i } {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {key000$i $data$i}]
+ error_check_good dbput $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ # get db ordering, set cursors
+ puts "\tTest$tstn.c: Set cursors on each of $nkeys pairs."
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ for {set i 0; set ret [$db get key000$i]} {\
+ $i < $nkeys && [llength $ret] != 0} {\
+ incr i; set ret [$db get key000$i]} {
+ set key_set($i) [lindex [lindex $ret 0] 0]
+ set data_set($i) [lindex [lindex $ret 0] 1]
+ set dbc [eval {$db cursor} $txn]
+ set dbc_set($i) $dbc
+ error_check_good db_cursor:$i \
+ [is_valid_cursor $dbc_set($i) $db] TRUE
+ set ret [$dbc_set($i) get -set $key_set($i)]
+ error_check_bad dbc_set($i)_get:set [llength $ret] 0
+ }
+
+ # if mkeys is above 1000, need to adjust below for lexical order
+ set mkeys 1000
+ puts "\tTest$tstn.d: Add $mkeys pairs to force split."
+ for {set i $nkeys} { $i < $mkeys } { incr i } {
+ if { $i >= 100 } {
+ set ret [eval {$db put} $txn {key0$i $data$i}]
+ } elseif { $i >= 10 } {
+ set ret [eval {$db put} $txn {key00$i $data$i}]
+ } else {
+ set ret [eval {$db put} $txn {key000$i $data$i}]
+ }
+ error_check_good dbput:more $ret 0
+ }
+
+ puts "\tTest$tstn.e: Make sure split happened."
+ # XXX We cannot call stat with active txns or we deadlock.
+ if { $txnenv != 1 } {
+ error_check_bad stat:check-split [is_substr [$db stat] \
+ "{{Internal pages} 0}"] 1
+ }
+
+ puts "\tTest$tstn.f: Check to see that cursors maintained reference."
+ for {set i 0} { $i < $nkeys } {incr i} {
+ set ret [$dbc_set($i) get -current]
+ error_check_bad dbc$i:get:current [llength $ret] 0
+ set ret2 [$dbc_set($i) get -set $key_set($i)]
+ error_check_bad dbc$i:get:set [llength $ret2] 0
+ error_check_good dbc$i:get(match) $ret $ret2
+ }
+
+ puts "\tTest$tstn.g: Delete added keys to force reverse split."
+ for {set i $nkeys} { $i < $mkeys } { incr i } {
+ if { $i >= 100 } {
+ error_check_good db_del:$i \
+ [eval {$db del} $txn {key0$i}] 0
+ } elseif { $i >= 10 } {
+ error_check_good db_del:$i \
+ [eval {$db del} $txn {key00$i}] 0
+ } else {
+ error_check_good db_del:$i \
+ [eval {$db del} $txn {key000$i}] 0
+ }
+ }
+
+ puts "\tTest$tstn.h: Verify cursor reference."
+ for {set i 0} { $i < $nkeys } {incr i} {
+ set ret [$dbc_set($i) get -current]
+ error_check_bad dbc$i:get:current [llength $ret] 0
+ set ret2 [$dbc_set($i) get -set $key_set($i)]
+ error_check_bad dbc$i:get:set [llength $ret2] 0
+ error_check_good dbc$i:get(match) $ret $ret2
+ }
+
+ puts "\tTest$tstn.i: Cleanup."
+ # close cursors
+ for {set i 0} { $i < $nkeys } {incr i} {
+ error_check_good dbc_close:$i [$dbc_set($i) close] 0
+ }
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ puts "\tTest$tstn.j: Verify reverse split."
+ error_check_good stat:check-reverse_split [is_substr [$db stat] \
+ "{{Internal pages} 0}"] 1
+
+ error_check_good dbclose [$db close] 0
+
+ puts "\tTest$tstn complete."
+}
diff --git a/storage/bdb/test/test049.tcl b/storage/bdb/test/test049.tcl
new file mode 100644
index 00000000000..3040727c469
--- /dev/null
+++ b/storage/bdb/test/test049.tcl
@@ -0,0 +1,184 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test049.tcl,v 11.21 2002/05/22 15:42:53 sue Exp $
+#
+# TEST test049
+# TEST Cursor operations on uninitialized cursors.
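+#
+# Illustrative sketch (not run by the test): a newly opened cursor has no
+# position, so positional operations are expected to fail with EINVAL:
+#	set dbc [$db cursor]
+#	catch {$dbc get -current} ret	;# errorCode should contain EINVAL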
+proc test049 { method args } {
+ global errorInfo
+ global errorCode
+ source ./include.tcl
+
+ set tstn 049
+ set renum [is_rrecno $method]
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "\tTest$tstn: Test of cursor routines with uninitialized cursors."
+
+ set key "key"
+ set data "data"
+ set txn ""
+ set flags ""
+ set rflags ""
+
+ if { [is_record_based $method] == 1 } {
+ set key ""
+ }
+
+ puts "\tTest$tstn.a: Create $method database."
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tstn.db
+ set env NULL
+ } else {
+ set testfile test0$tstn.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ set t1 $testdir/t1
+ cleanup $testdir $env
+
+ set oflags "-create -mode 0644 $rflags $omethod $args"
+ if { [is_record_based $method] == 0 && [is_rbtree $method] != 1 } {
+ append oflags " -dup"
+ }
+ set db [eval {berkdb_open_noerr} $oflags $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set nkeys 10
+ puts "\tTest$tstn.b: Fill page with $nkeys small key/data pairs."
+ for { set i 1 } { $i <= $nkeys } { incr i } {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$key$i $data$i}]
+ error_check_good dbput:$i $ret 0
+ if { $i == 1 } {
+ for {set j 0} { $j < [expr $nkeys / 2]} {incr j} {
+ set ret [eval {$db put} $txn \
+ {$key$i DUPLICATE$j}]
+ error_check_good dbput:dup:$j $ret 0
+ }
+ }
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ # DBC GET
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc_u [eval {$db cursor} $txn]
+ error_check_good db:cursor [is_valid_cursor $dbc_u $db] TRUE
+
+ puts "\tTest$tstn.c: Test dbc->get interfaces..."
+ set i 0
+ foreach flag { current first last next prev nextdup} {
+ puts "\t\t...dbc->get($flag)"
+ catch {$dbc_u get -$flag} ret
+ error_check_good dbc:get:$flag [is_substr $errorCode EINVAL] 1
+ }
+
+ foreach flag { set set_range get_both} {
+ puts "\t\t...dbc->get($flag)"
+ if { [string compare $flag get_both] == 0} {
+ catch {$dbc_u get -$flag $key$i data0} ret
+ } else {
+ catch {$dbc_u get -$flag $key$i} ret
+ }
+ error_check_good dbc:get:$flag [is_substr $errorCode EINVAL] 1
+ }
+
+ puts "\t\t...dbc->get(current, partial)"
+ catch {$dbc_u get -current -partial {0 0}} ret
+ error_check_good dbc:get:partial [is_substr $errorCode EINVAL] 1
+
+ puts "\t\t...dbc->get(current, rmw)"
+ catch {$dbc_u get -rmw -current } ret
+ error_check_good dbc_get:rmw [is_substr $errorCode EINVAL] 1
+
+ puts "\tTest$tstn.d: Test dbc->put interface..."
+ # partial...depends on another
+ foreach flag { after before current keyfirst keylast } {
+ puts "\t\t...dbc->put($flag)"
+ if { [string match key* $flag] == 1 } {
+ if { [is_record_based $method] == 1 } {
+ # keyfirst/keylast not allowed in recno
+ puts "\t\t...Skipping dbc->put($flag) for $method."
+ continue
+ } else {
+ # keyfirst/last should succeed
+ puts "\t\t...dbc->put($flag)...should succeed for $method"
+ error_check_good dbcput:$flag \
+ [$dbc_u put -$flag $key$i data0] 0
+
+ # now uninitialize cursor
+ error_check_good dbc_close [$dbc_u close] 0
+ set dbc_u [eval {$db cursor} $txn]
+				error_check_good db_cursor \
+				    [is_valid_cursor $dbc_u $db] TRUE
+ }
+ } elseif { [string compare $flag before ] == 0 ||
+ [string compare $flag after ] == 0 } {
+ if { [is_record_based $method] == 0 &&
+ [is_rbtree $method] == 0} {
+ set ret [$dbc_u put -$flag data0]
+ error_check_good "$dbc_u:put:-$flag" $ret 0
+ } elseif { $renum == 1 } {
+ # Renumbering recno will return a record number
+ set currecno \
+ [lindex [lindex [$dbc_u get -current] 0] 0]
+ set ret [$dbc_u put -$flag data0]
+ if { [string compare $flag after] == 0 } {
+ error_check_good "$dbc_u put $flag" \
+ $ret [expr $currecno + 1]
+ } else {
+ error_check_good "$dbc_u put $flag" \
+ $ret $currecno
+ }
+ } else {
+ puts "\t\tSkipping $flag for $method"
+ }
+ } else {
+ set ret [$dbc_u put -$flag data0]
+ error_check_good "$dbc_u:put:-$flag" $ret 0
+ }
+ }
+ # and partial
+ puts "\t\t...dbc->put(partial)"
+ catch {$dbc_u put -partial {0 0} $key$i $data$i} ret
+ error_check_good dbc_put:partial [is_substr $errorCode EINVAL] 1
+
+ # XXX dbc->dup, db->join (dbc->get join_item)
+ # dbc del
+ puts "\tTest$tstn.e: Test dbc->del interface."
+ catch {$dbc_u del} ret
+ error_check_good dbc_del [is_substr $errorCode EINVAL] 1
+
+ error_check_good dbc_close [$dbc_u close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ puts "\tTest$tstn complete."
+}
diff --git a/storage/bdb/test/test050.tcl b/storage/bdb/test/test050.tcl
new file mode 100644
index 00000000000..dfaeddd035c
--- /dev/null
+++ b/storage/bdb/test/test050.tcl
@@ -0,0 +1,221 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test050.tcl,v 11.21 2002/05/24 14:15:13 bostic Exp $
+#
+# TEST test050
+# TEST Overwrite test of small/big key/data with cursor checks for Recno.
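+#
+# Sketch of the renumbering-recno cursor-put semantics this test relies on
+# (assumed, mirrored by the checks below): -before and -after return the
+# record number assigned to the new record rather than 0:
+#	set recno [$dbc put -before $data]	;# the new record's number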
+proc test050 { method args } {
+ global alphabet
+ global errorInfo
+ global errorCode
+ source ./include.tcl
+
+ set tstn 050
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_rrecno $method] != 1 } {
+ puts "Test$tstn skipping for method $method."
+ return
+ }
+
+ puts "\tTest$tstn:\
+ Overwrite test with cursor and small/big key/data ($method)."
+
+ set data "data"
+ set txn ""
+ set flags ""
+
+ puts "\tTest$tstn: Create $method database."
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tstn.db
+ set env NULL
+ } else {
+ set testfile test0$tstn.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ set t1 $testdir/t1
+ cleanup $testdir $env
+
+ set oflags "-create -mode 0644 $args $omethod"
+ set db [eval {berkdb_open_noerr} $oflags $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # keep nkeys even
+ set nkeys 20
+
+ # Fill page w/ small key/data pairs
+ #
+ puts "\tTest$tstn: Fill page with $nkeys small key/data pairs."
+ for { set i 1 } { $i <= $nkeys } { incr i } {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$i [chop_data $method $data$i]}]
+ error_check_good dbput $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ # open curs to db
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+
+ # get db order of keys
+ for {set i 0; set ret [$dbc get -first]} { [llength $ret] != 0} { \
+ set ret [$dbc get -next]} {
+ set key_set($i) [lindex [lindex $ret 0] 0]
+ set data_set($i) [lindex [lindex $ret 0] 1]
+ incr i
+ }
+
+ # verify ordering: should be unnecessary, but hey, why take chances?
+ # key_set is zero indexed but keys start at 1
+ for {set i 0} { $i < $nkeys } {incr i} {
+ error_check_good \
+ verify_order:$i $key_set($i) [pad_data $method [expr $i+1]]
+ }
+
+ puts "\tTest$tstn.a: Inserts before/after by cursor."
+ puts "\t\tTest$tstn.a.1:\
+ Insert with uninitialized cursor (should fail)."
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+ catch {$dbc put -before DATA1} ret
+ error_check_good dbc_put:before:uninit [is_substr $errorCode EINVAL] 1
+
+ catch {$dbc put -after DATA2} ret
+ error_check_good dbc_put:after:uninit [is_substr $errorCode EINVAL] 1
+
+ puts "\t\tTest$tstn.a.2: Insert with deleted cursor (should succeed)."
+ set ret [$dbc get -first]
+ error_check_bad dbc_get:first [llength $ret] 0
+ error_check_good dbc_del [$dbc del] 0
+ set ret [$dbc put -current DATAOVER1]
+ error_check_good dbc_put:current:deleted $ret 0
+
+ puts "\t\tTest$tstn.a.3: Insert by cursor before cursor (DB_BEFORE)."
+ set currecno [lindex [lindex [$dbc get -current] 0] 0]
+ set ret [$dbc put -before DATAPUTBEFORE]
+ error_check_good dbc_put:before $ret $currecno
+ set old1 [$dbc get -next]
+ error_check_bad dbc_get:next [llength $old1] 0
+ error_check_good \
+ dbc_get:next(compare) [lindex [lindex $old1 0] 1] DATAOVER1
+
+ puts "\t\tTest$tstn.a.4: Insert by cursor after cursor (DB_AFTER)."
+ set ret [$dbc get -first]
+ error_check_bad dbc_get:first [llength $ret] 0
+ error_check_good dbc_get:first [lindex [lindex $ret 0] 1] DATAPUTBEFORE
+ set currecno [lindex [lindex [$dbc get -current] 0] 0]
+ set ret [$dbc put -after DATAPUTAFTER]
+ error_check_good dbc_put:after $ret [expr $currecno + 1]
+ set ret [$dbc get -prev]
+ error_check_bad dbc_get:prev [llength $ret] 0
+ error_check_good \
+ dbc_get:prev [lindex [lindex $ret 0] 1] DATAPUTBEFORE
+
+ puts "\t\tTest$tstn.a.5: Verify that all keys have been renumbered."
+ # should be $nkeys + 2 keys, starting at 1
+ for {set i 1; set ret [$dbc get -first]} { \
+ $i <= $nkeys && [llength $ret] != 0 } {\
+ incr i; set ret [$dbc get -next]} {
+ error_check_good check_renumber $i [lindex [lindex $ret 0] 0]
+ }
+
+	# (Renumbering was already verified above.)
+
+ puts "\tTest$tstn.b: Overwrite tests (cursor and key)."
+ # For the next part of the test, we need a db with no dups to test
+ # overwrites
+ #
+ # we should have ($nkeys + 2) keys, ordered:
+ # DATAPUTBEFORE, DATAPUTAFTER, DATAOVER1, data1, ..., data$nkeys
+ #
+ # Prepare cursor on item
+ #
+ set ret [$dbc get -first]
+ error_check_bad dbc_get:first [llength $ret] 0
+
+ # Prepare unique big/small values for an initial
+ # and an overwrite set of data
+	set databig DATA_BIG_[repeat $alphabet 250]
+ set datasmall DATA_SMALL
+
+ # Now, we want to overwrite data:
+ # by key and by cursor
+ # 1. small by small
+ # 2. small by big
+ # 3. big by small
+ # 4. big by big
+ #
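+	# For example (sketch, not run): a cursor overwrite replaces only
+	# the data portion of the record under the cursor, in place:
+	#	$dbc put -current $datasmall	;# record keeps its number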
+ set i 0
+ # Do all overwrites for key and cursor
+ foreach type { by_key by_cursor } {
+ incr i
+ puts "\tTest$tstn.b.$i: Overwrites $type."
+ foreach pair { {small small} \
+ {small big} {big small} {big big} } {
+ # put in initial type
+			set data [set data[lindex $pair 0]]
+ set ret [$dbc put -current $data]
+ error_check_good dbc_put:curr:init:($pair) $ret 0
+
+ # Now, try to overwrite: dups not supported in this db
+ if { [string compare $type by_key] == 0 } {
+ puts "\t\tTest$tstn.b.$i:\
+ Overwrite:($pair):$type"
+ set ret [eval {$db put} $txn \
+				    1 {OVER$pair[set data[lindex $pair 1]]}]
+ error_check_good dbput:over:($pair) $ret 0
+ } else {
+ # This is a cursor overwrite
+ puts "\t\tTest$tstn.b.$i:\
+ Overwrite:($pair) by cursor."
+ set ret [$dbc put \
+				    -current OVER$pair[set data[lindex $pair 1]]]
+ error_check_good dbcput:over:($pair) $ret 0
+ }
+ } ;# foreach pair
+ } ;# foreach type key/cursor
+
+ puts "\tTest$tstn.c: Cleanup and close cursor."
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+}
diff --git a/storage/bdb/test/test051.tcl b/storage/bdb/test/test051.tcl
new file mode 100644
index 00000000000..830b7630788
--- /dev/null
+++ b/storage/bdb/test/test051.tcl
@@ -0,0 +1,219 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test051.tcl,v 11.21 2002/05/24 13:43:24 sue Exp $
+#
+# TEST test051
+# TEST Fixed-length record Recno test.
+# TEST 0. Test various flags (legal and illegal) to open
+# TEST 1. Test partial puts where dlen != size (should fail)
+# TEST	2. Partial puts for an existing record -- replaces at beg, mid, and
+# TEST end of record, as well as full replace
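+#
+# A minimal sketch (not executed here) of case 1: for fixed-length records
+# a partial put must have dlen equal to the size of the replacement data,
+# so the following is expected to fail with EINVAL:
+#	catch {$db put -partial {0 4} 1 "abcde"} ret	;# dlen 4 != size 5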
+proc test051 { method { args "" } } {
+ global fixed_len
+ global errorInfo
+ global errorCode
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Test051: Test of the fixed length records."
+ if { [is_fixed_length $method] != 1 } {
+ puts "Test051: skipping for method $method"
+ return
+ }
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test051.db
+ set testfile1 $testdir/test051a.db
+ set env NULL
+ } else {
+ set testfile test051.db
+ set testfile1 test051a.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+ set oflags "-create -mode 0644 $args"
+
+ # Test various flags (legal and illegal) to open
+ puts "\tTest051.a: Test correct flag behavior on open."
+ set errorCode NONE
+ foreach f { "-dup" "-dup -dupsort" "-recnum" } {
+ puts "\t\tTest051.a: Test flag $f"
+ set stat [catch {eval {berkdb_open_noerr} $oflags $f $omethod \
+ $testfile} ret]
+ error_check_good dbopen:flagtest:catch $stat 1
+ error_check_good \
+ dbopen:flagtest:$f [is_substr $errorCode EINVAL] 1
+ set errorCode NONE
+ }
+ set f "-renumber"
+ puts "\t\tTest051.a: Test $f"
+ if { [is_frecno $method] == 1 } {
+ set db [eval {berkdb_open} $oflags $f $omethod $testfile]
+ error_check_good dbopen:flagtest:$f [is_valid_db $db] TRUE
+ $db close
+ } else {
+ error_check_good \
+ dbopen:flagtest:catch [catch {eval {berkdb_open_noerr}\
+ $oflags $f $omethod $testfile} ret] 1
+ error_check_good \
+ dbopen:flagtest:$f [is_substr $errorCode EINVAL] 1
+ }
+
+ # Test partial puts where dlen != size (should fail)
+ # it is an error to specify a partial put w/ different
+ # dlen and size in fixed length recno/queue
+ set key 1
+ set data ""
+ set txn ""
+ set test_char "a"
+
+ set db [eval {berkdb_open_noerr} $oflags $omethod $testfile1]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ puts "\tTest051.b: Partial puts with dlen != size."
+ foreach dlen { 1 16 20 32 } {
+ foreach doff { 0 10 20 32 } {
+ # dlen < size
+			puts "\t\tTest051.b: dlen: $dlen, doff: $doff, \
+ size: [expr $dlen+1]"
+ set data [repeat $test_char [expr $dlen + 1]]
+ error_check_good catch:put 1 [catch {eval {$db put -partial \
+ [list $doff $dlen]} $txn {$key $data}} ret]
+ #
+ # We don't get back the server error string just
+ # the result.
+ #
+ if { $eindex == -1 } {
+ error_check_good "dbput:partial: dlen < size" \
+ [is_substr $errorInfo "Length improper"] 1
+ } else {
+ error_check_good "dbput:partial: dlen < size" \
+ [is_substr $errorCode "EINVAL"] 1
+ }
+
+ # dlen > size
+			puts "\t\tTest051.b: dlen: $dlen, doff: $doff, \
+ size: [expr $dlen-1]"
+ set data [repeat $test_char [expr $dlen - 1]]
+ error_check_good catch:put 1 [catch {eval {$db put -partial \
+ [list $doff $dlen]} $txn {$key $data}} ret]
+ if { $eindex == -1 } {
+ error_check_good "dbput:partial: dlen > size" \
+ [is_substr $errorInfo "Length improper"] 1
+ } else {
+				error_check_good "dbput:partial: dlen > size" \
+ [is_substr $errorCode "EINVAL"] 1
+ }
+ }
+ }
+
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ $db close
+
+	# Partial puts for an existing record -- replaces at beg, mid, and
+ # end of record, as well as full replace
+	puts "\tTest051.f: Partial puts within an existing record."
+ set db [eval {berkdb_open} $oflags $omethod $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ puts "\t\tTest051.f: First try a put and then a full replace."
+ set data [repeat "a" $fixed_len]
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {1 $data}]
+ error_check_good dbput $ret 0
+ set ret [eval {$db get} $txn {-recno 1}]
+ error_check_good dbget $data [lindex [lindex $ret 0] 1]
+
+ set data [repeat "b" $fixed_len]
+ set ret [eval {$db put -partial [list 0 $fixed_len]} $txn {1 $data}]
+ error_check_good dbput $ret 0
+ set ret [eval {$db get} $txn {-recno 1}]
+ error_check_good dbget $data [lindex [lindex $ret 0] 1]
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ set data "InitialData"
+ set pdata "PUT"
+ set dlen [string length $pdata]
+ set ilen [string length $data]
+ set mid [expr $ilen/2]
+
+ # put initial data
+ set key 0
+
+ set offlist [list 0 $mid [expr $ilen -1] [expr $fixed_len - $dlen]]
+ puts "\t\tTest051.g: Now replace at different offsets ($offlist)."
+ foreach doff $offlist {
+ incr key
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$key $data}]
+ error_check_good dbput:init $ret 0
+
+ puts "\t\t Test051.g: Replace at offset $doff."
+ set ret [eval {$db put -partial [list $doff $dlen]} $txn \
+ {$key $pdata}]
+ error_check_good dbput:partial $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ if { $doff == 0} {
+ set beg ""
+ set end [string range $data $dlen $ilen]
+ } else {
+ set beg [string range $data 0 [expr $doff - 1]]
+ set end [string range $data [expr $doff + $dlen] $ilen]
+ }
+ if { $doff > $ilen } {
+ # have to put padding between record and inserted
+ # string
+ set newdata [format %s%s $beg $end]
+ set diff [expr $doff - $ilen]
+ set nlen [string length $newdata]
+ set newdata [binary \
+ format a[set nlen]x[set diff]a$dlen $newdata $pdata]
+ } else {
+ set newdata [make_fixed_length \
+ frecno [format %s%s%s $beg $pdata $end]]
+ }
+ set ret [$db get -recno $key]
+ error_check_good compare($newdata,$ret) \
+ [binary_compare [lindex [lindex $ret 0] 1] $newdata] 0
+ }
+
+ $db close
+}
diff --git a/storage/bdb/test/test052.tcl b/storage/bdb/test/test052.tcl
new file mode 100644
index 00000000000..1f386449630
--- /dev/null
+++ b/storage/bdb/test/test052.tcl
@@ -0,0 +1,276 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test052.tcl,v 11.16 2002/07/08 20:48:58 sandstro Exp $
+#
+# TEST test052
+# TEST Renumbering record Recno test.
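+#
+# Sketch of the renumbering behavior exercised below (assumed, not run):
+# with records 1..5 in the database, deleting record 3 shifts records 4
+# and 5 down to 3 and 4:
+#	$db del 3
+#	set ret [$db get 3]	;# returns what was record 4's data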
+proc test052 { method args } {
+ global alphabet
+ global errorInfo
+ global errorCode
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Test052: Test of renumbering recno."
+ if { [is_rrecno $method] != 1} {
+ puts "Test052: skipping for method $method."
+ return
+ }
+
+ set data "data"
+ set txn ""
+ set flags ""
+
+ puts "\tTest052: Create $method database."
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test052.db
+ set env NULL
+ } else {
+ set testfile test052.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ set t1 $testdir/t1
+ cleanup $testdir $env
+
+ set oflags "-create -mode 0644 $args $omethod"
+ set db [eval {berkdb_open} $oflags $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # keep nkeys even
+ set nkeys 20
+
+ # Fill page w/ small key/data pairs
+ puts "\tTest052: Fill page with $nkeys small key/data pairs."
+ for { set i 1 } { $i <= $nkeys } { incr i } {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$i $data$i}]
+ error_check_good dbput $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ # open curs to db
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+
+ # get db order of keys
+ for {set i 1; set ret [$dbc get -first]} { [llength $ret] != 0} { \
+ set ret [$dbc get -next]} {
+ set keys($i) [lindex [lindex $ret 0] 0]
+ set darray($i) [lindex [lindex $ret 0] 1]
+ incr i
+ }
+
+ puts "\tTest052: Deletes by key."
+ puts "\t Test052.a: Get data with SET, then delete before cursor."
+	# Get a key in the middle of the page, call it the nth, and set the
+	# cursor to it.
+ set i [expr $nkeys/2]
+ set k $keys($i)
+ set ret [$dbc get -set $k]
+ error_check_bad dbc_get:set [llength $ret] 0
+ error_check_good dbc_get:set [lindex [lindex $ret 0] 1] $darray($i)
+
+ # delete by key before current
+ set i [incr i -1]
+ error_check_good db_del:before [eval {$db del} $txn {$keys($i)}] 0
+	# With renumbering, the current record's data stays the same,
+	# but its key is decremented by one.
+ set i [incr i +1]
+ error_check_good dbc:data \
+ [lindex [lindex [$dbc get -current] 0] 1] $darray($i)
+ error_check_good dbc:keys \
+ [lindex [lindex [$dbc get -current] 0] 0] $keys([expr $nkeys/2 - 1])
+
+ puts "\t Test052.b: Delete cursor item by key."
+ set i [expr $nkeys/2 ]
+
+ set ret [$dbc get -set $keys($i)]
+ error_check_bad dbc:get [llength $ret] 0
+ error_check_good dbc:get:curs [lindex [lindex $ret 0] 1] \
+ $darray([expr $i + 1])
+ error_check_good db_del:curr [eval {$db del} $txn {$keys($i)}] 0
+ set ret [$dbc get -current]
+
+ # After a delete, cursor should return DB_NOTFOUND.
+ error_check_good dbc:get:key [llength [lindex [lindex $ret 0] 0]] 0
+ error_check_good dbc:get:data [llength [lindex [lindex $ret 0] 1]] 0
+
+ # And the item after the cursor should now be
+ # key: $nkeys/2, data: $nkeys/2 + 2
+ set ret [$dbc get -next]
+ error_check_bad dbc:getnext [llength $ret] 0
+ error_check_good dbc:getnext:data \
+ [lindex [lindex $ret 0] 1] $darray([expr $i + 2])
+ error_check_good dbc:getnext:keys \
+ [lindex [lindex $ret 0] 0] $keys($i)
+
+ puts "\t Test052.c: Delete item after cursor."
+ # should be { keys($nkeys/2), darray($nkeys/2 + 2) }
+ set i [expr $nkeys/2]
+ # deleting data for key after current (key $nkeys/2 + 1)
+ error_check_good db_del [eval {$db del} $txn {$keys([expr $i + 1])}] 0
+
+ # current should be constant
+ set ret [$dbc get -current]
+ error_check_bad dbc:get:current [llength $ret] 0
+ error_check_good dbc:get:keys [lindex [lindex $ret 0] 0] \
+ $keys($i)
+ error_check_good dbc:get:data [lindex [lindex $ret 0] 1] \
+ $darray([expr $i + 2])
+
+ puts "\tTest052: Deletes by cursor."
+ puts "\t Test052.d: Delete, do DB_NEXT."
+ set i 1
+ set ret [$dbc get -first]
+ error_check_bad dbc_get:first [llength $ret] 0
+ error_check_good dbc_get:first [lindex [lindex $ret 0] 1] $darray($i)
+ error_check_good dbc_del [$dbc del] 0
+ set ret [$dbc get -current]
+ error_check_bad dbc_get:current [llength $ret] 0
+ error_check_good dbc:getcurrent:key \
+ [llength [lindex [lindex $ret 0] 0]] 0
+ error_check_good dbc:getcurrent:data \
+ [llength [lindex [lindex $ret 0] 1]] 0
+
+ set ret [$dbc get -next]
+ error_check_bad dbc_get:next [llength $ret] 0
+ error_check_good dbc:get:curs \
+ [lindex [lindex $ret 0] 1] $darray([expr $i + 1])
+ error_check_good dbc:get:keys \
+ [lindex [lindex $ret 0] 0] $keys($i)
+
+ # Move one more forward, so we're not on the first item.
+ error_check_bad dbc:getnext [llength [$dbc get -next]] 0
+
+ puts "\t Test052.e: Delete, do DB_PREV."
+ error_check_good dbc:del [$dbc del] 0
+ set ret [$dbc get -current]
+ error_check_bad dbc:get:curr [llength $ret] 0
+ error_check_good dbc:getcurrent:key \
+ [llength [lindex [lindex $ret 0] 0]] 0
+ error_check_good dbc:getcurrent:data \
+ [llength [lindex [lindex $ret 0] 1]] 0
+
+ # next should now reference the record that was previously after
+ # old current
+ set ret [$dbc get -next]
+ error_check_bad get:next [llength $ret] 0
+ error_check_good dbc:get:next:data \
+ [lindex [lindex $ret 0] 1] $darray([expr $i + 3])
+ error_check_good dbc:get:next:keys \
+ [lindex [lindex $ret 0] 0] $keys([expr $i + 1])
+
+ set ret [$dbc get -prev]
+ error_check_bad dbc:get:curr [llength $ret] 0
+ error_check_good dbc:get:curr:compare \
+ [lindex [lindex $ret 0] 1] $darray([expr $i + 1])
+ error_check_good dbc:get:curr:keys \
+ [lindex [lindex $ret 0] 0] $keys($i)
+
+ # The rest of the test was written with the old rrecno semantics,
+ # which required a separate c_del(CURRENT) test; to leave
+ # the database in the expected state, we now delete the first item.
+ set ret [$dbc get -first]
+ error_check_bad getfirst [llength $ret] 0
+ error_check_good delfirst [$dbc del] 0
+
+ puts "\tTest052: Inserts."
+ puts "\t Test052.g: Insert before (DB_BEFORE)."
+ set i 1
+ set ret [$dbc get -first]
+ error_check_bad dbc:get:first [llength $ret] 0
+ error_check_good dbc_get:first \
+ [lindex [lindex $ret 0] 0] $keys($i)
+ error_check_good dbc_get:first:data \
+ [lindex [lindex $ret 0] 1] $darray([expr $i + 3])
+
+ set ret [$dbc put -before $darray($i)]
+ # should return new key, which should be $keys($i)
+ error_check_good dbc_put:before $ret $keys($i)
+ # cursor should adjust to point to new item
+ set ret [$dbc get -current]
+ error_check_bad dbc_get:curr [llength $ret] 0
+ error_check_good dbc_put:before:keys \
+ [lindex [lindex $ret 0] 0] $keys($i)
+ error_check_good dbc_put:before:data \
+ [lindex [lindex $ret 0] 1] $darray($i)
+
+ set ret [$dbc get -next]
+ error_check_bad dbc_get:next [llength $ret] 0
+ error_check_good dbc_get:next:compare \
+ $ret [list [list $keys([expr $i + 1]) $darray([expr $i + 3])]]
+ set ret [$dbc get -prev]
+ error_check_bad dbc_get:prev [llength $ret] 0
+
+ puts "\t Test052.h: Insert by cursor after (DB_AFTER)."
+ set i [incr i]
+ set ret [$dbc put -after $darray($i)]
+ # should return new key, which should be $keys($i)
+ error_check_good dbcput:after $ret $keys($i)
+ # cursor should reference new item
+ set ret [$dbc get -current]
+ error_check_good dbc:get:current:keys \
+ [lindex [lindex $ret 0] 0] $keys($i)
+ error_check_good dbc:get:current:data \
+ [lindex [lindex $ret 0] 1] $darray($i)
+
+ # items after curs should be adjusted
+ set ret [$dbc get -next]
+ error_check_bad dbc:get:next [llength $ret] 0
+ error_check_good dbc:get:next:compare \
+ $ret [list [list $keys([expr $i + 1]) $darray([expr $i + 2])]]
+
+ puts "\t Test052.i: Insert (overwrite) current item (DB_CURRENT)."
+ set i 1
+ set ret [$dbc get -first]
+ error_check_bad dbc_get:first [llength $ret] 0
+ # choose a datum that is not currently in db
+ set ret [$dbc put -current $darray([expr $i + 2])]
+ error_check_good dbc_put:curr $ret 0
+ # curs should be on new item
+ set ret [$dbc get -current]
+ error_check_bad dbc_get:current [llength $ret] 0
+ error_check_good dbc_get:curr:keys \
+ [lindex [lindex $ret 0] 0] $keys($i)
+ error_check_good dbc_get:curr:data \
+ [lindex [lindex $ret 0] 1] $darray([expr $i + 2])
+
+ set ret [$dbc get -next]
+ error_check_bad dbc_get:next [llength $ret] 0
+ set i [incr i]
+ error_check_good dbc_get:next \
+ $ret [list [list $keys($i) $darray($i)]]
+
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ puts "\tTest052 complete."
+}
diff --git a/storage/bdb/test/test053.tcl b/storage/bdb/test/test053.tcl
new file mode 100644
index 00000000000..3e217a2b55f
--- /dev/null
+++ b/storage/bdb/test/test053.tcl
@@ -0,0 +1,225 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test053.tcl,v 11.18 2002/05/24 15:24:55 sue Exp $
+#
+# TEST test053
+# TEST Test of the DB_REVSPLITOFF flag in the Btree and Btree-w-recnum
+# TEST methods.
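+#
+# Sketch of the flag under test (assumed usage, matching the open call
+# below): -revsplitoff asks btree to skip reverse splits, so nearly empty
+# leaf pages are not coalesced back into their parents:
+#	set db [berkdb_open -create -revsplitoff -btree $testfile]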
+proc test053 { method args } {
+ global alphabet
+ global errorCode
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+	puts "\tTest053: Test of the DB_REVSPLITOFF flag ($omethod)."
+ if { [is_btree $method] != 1 && [is_rbtree $method] != 1 } {
+ puts "Test053: skipping for method $method."
+ return
+ }
+
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Test053: skipping for specific pagesizes"
+ return
+ }
+
+ set txn ""
+ set flags ""
+
+ puts "\tTest053.a: Create $omethod $args database."
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test053.db
+ set env NULL
+ } else {
+ set testfile test053.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ set t1 $testdir/t1
+ cleanup $testdir $env
+
+ set oflags \
+ "-create -revsplitoff -pagesize 1024 $args $omethod"
+ set db [eval {berkdb_open} $oflags $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set nkeys 8
+ set npages 15
+
+ # We want to create a db with npages leaf pages, and have each page
+ # be near full with keys that we can predict. We set pagesize above
+	# to 1024 bytes; it should break down as follows (per page):
+ #
+ # ~20 bytes overhead
+ # key: ~4 bytes overhead, XXX0N where X is a letter, N is 0-9
+ # data: ~4 bytes overhead, + 100 bytes
+ #
+ # then, with 8 keys/page we should be just under 1024 bytes
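+	#
+	# Rough arithmetic behind that estimate (approximate figures):
+	#	per pair: (4 + 5) key bytes + (4 + 100) data bytes = ~113
+	#	per page: 20 + 8 * 113 = ~924 bytes, just under 1024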
+ puts "\tTest053.b: Create $npages pages with $nkeys pairs on each."
+ set keystring [string range $alphabet 0 [expr $npages -1]]
+ set data [repeat DATA 22]
+ for { set i 0 } { $i < $npages } {incr i } {
+ set key ""
+ set keyroot \
+ [repeat [string toupper [string range $keystring $i $i]] 3]
+ set key_set($i) $keyroot
+ for {set j 0} { $j < $nkeys} {incr j} {
+ if { $j < 10 } {
+ set key [set keyroot]0$j
+ } else {
+ set key $keyroot$j
+ }
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$key $data}]
+ error_check_good dbput $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+ }
+
+ puts "\tTest053.c: Check page count."
+ error_check_good page_count:check \
+ [is_substr [$db stat] "{Leaf pages} $npages"] 1
+
+ puts "\tTest053.d: Delete all but one key per page."
+ for {set i 0} { $i < $npages } {incr i } {
+ for {set j 1} { $j < $nkeys } {incr j } {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db del} $txn {$key_set($i)0$j}]
+ error_check_good dbdel $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+ }
+ puts "\tTest053.e: Check to make sure all pages are still there."
+ error_check_good page_count:check \
+ [is_substr [$db stat] "{Leaf pages} $npages"] 1
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db:cursor [is_valid_cursor $dbc $db] TRUE
+
+ # walk cursor through tree forward, backward.
+ # delete one key, repeat
+ for {set i 0} { $i < $npages} {incr i} {
+ puts -nonewline \
+ "\tTest053.f.$i: Walk curs through tree: forward..."
+ for { set j $i; set curr [$dbc get -first]} { $j < $npages} { \
+ incr j; set curr [$dbc get -next]} {
+ error_check_bad dbc:get:next [llength $curr] 0
+ error_check_good dbc:get:keys \
+ [lindex [lindex $curr 0] 0] $key_set($j)00
+ }
+ puts -nonewline "backward..."
+ for { set j [expr $npages - 1]; set curr [$dbc get -last]} { \
+ $j >= $i } { \
+ set j [incr j -1]; set curr [$dbc get -prev]} {
+ error_check_bad dbc:get:prev [llength $curr] 0
+ error_check_good dbc:get:keys \
+ [lindex [lindex $curr 0] 0] $key_set($j)00
+ }
+ puts "complete."
+
+ if { [is_rbtree $method] == 1} {
+ puts "\t\tTest053.f.$i:\
+ Walk through tree with record numbers."
+ for {set j 1} {$j <= [expr $npages - $i]} {incr j} {
+ set curr [eval {$db get} $txn {-recno $j}]
+ error_check_bad \
+ db_get:recno:$j [llength $curr] 0
+ error_check_good db_get:recno:keys:$j \
+ [lindex [lindex $curr 0] 0] \
+ $key_set([expr $j + $i - 1])00
+ }
+ }
+ puts "\tTest053.g.$i:\
+ Delete single key ([expr $npages - $i] keys left)."
+ set ret [eval {$db del} $txn {$key_set($i)00}]
+ error_check_good dbdel $ret 0
+ error_check_good del:check \
+ [llength [eval {$db get} $txn {$key_set($i)00}]] 0
+ }
+
+ # end for loop, verify db_notfound
+ set ret [$dbc get -first]
+ error_check_good dbc:get:verify [llength $ret] 0
+
+ # loop: until single key restored on each page
+ for {set i 0} { $i < $npages} {incr i} {
+ puts "\tTest053.i.$i:\
+ Restore single key ([expr $i + 1] keys in tree)."
+ set ret [eval {$db put} $txn {$key_set($i)00 $data}]
+ error_check_good dbput $ret 0
+
+ puts -nonewline \
+ "\tTest053.j: Walk cursor through tree: forward..."
+ for { set j 0; set curr [$dbc get -first]} { $j <= $i} {\
+ incr j; set curr [$dbc get -next]} {
+ error_check_bad dbc:get:next [llength $curr] 0
+ error_check_good dbc:get:keys \
+ [lindex [lindex $curr 0] 0] $key_set($j)00
+ }
+ error_check_good dbc:get:next [llength $curr] 0
+
+ puts -nonewline "backward..."
+ for { set j $i; set curr [$dbc get -last]} { \
+ $j >= 0 } { \
+ set j [incr j -1]; set curr [$dbc get -prev]} {
+ error_check_bad dbc:get:prev [llength $curr] 0
+ error_check_good dbc:get:keys \
+ [lindex [lindex $curr 0] 0] $key_set($j)00
+ }
+ puts "complete."
+ error_check_good dbc:get:prev [llength $curr] 0
+
+ if { [is_rbtree $method] == 1} {
+ puts "\t\tTest053.k.$i:\
+ Walk through tree with record numbers."
+ for {set j 1} {$j <= [expr $i + 1]} {incr j} {
+ set curr [eval {$db get} $txn {-recno $j}]
+ error_check_bad \
+ db_get:recno:$j [llength $curr] 0
+ error_check_good db_get:recno:keys:$j \
+ [lindex [lindex $curr 0] 0] \
+ $key_set([expr $j - 1])00
+ }
+ }
+ }
+
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ puts "Test053 complete."
+}
diff --git a/storage/bdb/test/test054.tcl b/storage/bdb/test/test054.tcl
new file mode 100644
index 00000000000..f53f5a658bf
--- /dev/null
+++ b/storage/bdb/test/test054.tcl
@@ -0,0 +1,461 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test054.tcl,v 11.23 2002/06/17 18:41:29 sue Exp $
+#
+# TEST test054
+# TEST Cursor maintenance during key/data deletion.
+# TEST
+# TEST This test checks for cursor maintenance in the presence of deletes.
+# TEST	There are 10 different scenarios to test:
+# TEST 1. No duplicates. Cursor A deletes a key, do a GET for the key.
+# TEST 2. No duplicates. Cursor is positioned right before key K, Delete K,
+# TEST do a next on the cursor.
+# TEST 3. No duplicates. Cursor is positioned on key K, do a regular delete
+# TEST of K, do a current get on K.
+# TEST 4. Repeat 3 but do a next instead of current.
+# TEST 5. Duplicates. Cursor A is on the first item of a duplicate set, A
+# TEST does a delete. Then we do a non-cursor get.
+# TEST 6. Duplicates. Cursor A is in a duplicate set and deletes the item.
+# TEST	do a delete of the entire key. Test cursor current.
+# TEST 7. Continue last test and try cursor next.
+# TEST 8. Duplicates. Cursor A is in a duplicate set and deletes the item.
+# TEST Cursor B is in the same duplicate set and deletes a different item.
+# TEST Verify that the cursor is in the right place.
+# TEST	9. Cursors A and B are in the same place in the same duplicate set. A
+# TEST deletes its item. Do current on B.
+# TEST 10. Continue 8 and do a next on B.
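+#
+# A sketch of the cursor-after-delete convention these scenarios rely on
+# (assumed, matching the checks below): once a cursor's item is deleted,
+# get -current returns an empty key/data pair:
+#	$curs del
+#	set ret [$curs get -current]	;# expected: {{} {}}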
+proc test054 { method args } {
+ global errorInfo
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ append args " -create -mode 0644"
+ puts "Test054 ($method $args):\
+ interspersed cursor and normal operations"
+ if { [is_record_based $method] == 1 } {
+ puts "Test054 skipping for method $method"
+ return
+ }
+
+ # Find the environment in the argument list, we'll need it
+ # later.
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ incr eindex
+ }
+
+ # Create the database and open the dictionary
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test054-nodup.db
+ set env NULL
+ } else {
+ set testfile test054-nodup.db
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ set flags ""
+ set txn ""
+
+ puts "\tTest054.a: No Duplicate Tests"
+ set db [eval {berkdb_open} $args {$omethod $testfile}]
+ error_check_good db_open:nodup [is_valid_db $db] TRUE
+
+ # Put three keys in the database
+ for { set key 1 } { $key <= 3 } {incr key} {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set r [eval {$db put} $txn $flags {$key datum$key}]
+ error_check_good put $r 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set curs [eval {$db cursor} $txn]
+ error_check_good curs_open:nodup [is_valid_cursor $curs $db] TRUE
+
+ # Retrieve keys sequentially so we can figure out their order
+ set i 1
+ for {set d [$curs get -first] } \
+ {[llength $d] != 0 } \
+ {set d [$curs get -next] } {
+ set key_set($i) [lindex [lindex $d 0] 0]
+ incr i
+ }
+
+ # Test case #1.
+ puts "\tTest054.a1: Delete w/cursor, regular get"
+
+	# Now set the cursor on the middle one.
+ set r [$curs get -set $key_set(2)]
+ error_check_bad cursor_get:DB_SET [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_SET:key $k $key_set(2)
+ error_check_good curs_get:DB_SET:data $d datum$key_set(2)
+
+ # Now do the delete
+ set r [$curs del]
+ error_check_good curs_del $r 0
+
+ # Now do the get
+ set r [eval {$db get} $txn {$key_set(2)}]
+ error_check_good get_after_del [llength $r] 0
+
+ # Free up the cursor.
+ error_check_good cursor_close [eval {$curs close}] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ # Test case #2.
+ puts "\tTest054.a2: Cursor before K, delete K, cursor next"
+
+ # Replace key 2
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set r [eval {$db put} $txn {$key_set(2) datum$key_set(2)}]
+ error_check_good put $r 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ # Open and position cursor on first item.
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set curs [eval {$db cursor} $txn]
+ error_check_good curs_open:nodup [is_valid_cursor $curs $db] TRUE
+
+ # Retrieve keys sequentially so we can figure out their order
+ set i 1
+ for {set d [eval {$curs get} -first] } \
+ {[llength $d] != 0 } \
+	    {set d [$curs get -next] } {
+ set key_set($i) [lindex [lindex $d 0] 0]
+ incr i
+ }
+
+ set r [eval {$curs get} -set {$key_set(1)} ]
+ error_check_bad cursor_get:DB_SET [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_SET:key $k $key_set(1)
+ error_check_good curs_get:DB_SET:data $d datum$key_set(1)
+
+ # Now delete (next item) $key_set(2)
+ error_check_good \
+ db_del:$key_set(2) [eval {$db del} $txn {$key_set(2)}] 0
+
+ # Now do next on cursor
+ set r [$curs get -next]
+ error_check_bad cursor_get:DB_NEXT [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_NEXT:key $k $key_set(3)
+ error_check_good curs_get:DB_NEXT:data $d datum$key_set(3)
+
+ # Test case #3.
+ puts "\tTest054.a3: Cursor on K, delete K, cursor current"
+
+ # delete item 3
+ error_check_good \
+ db_del:$key_set(3) [eval {$db del} $txn {$key_set(3)}] 0
+ # NEEDS TO COME BACK IN, BUG CHECK
+ set ret [$curs get -current]
+ error_check_good current_after_del $ret [list [list [] []]]
+ error_check_good cursor_close [$curs close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ puts "\tTest054.a4: Cursor on K, delete K, cursor next"
+
+ # Restore keys 2 and 3
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set r [eval {$db put} $txn {$key_set(2) datum$key_set(2)}]
+ error_check_good put $r 0
+ set r [eval {$db put} $txn {$key_set(3) datum$key_set(3)}]
+ error_check_good put $r 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ # Create the new cursor and put it on 1
+ set curs [eval {$db cursor} $txn]
+ error_check_good curs_open:nodup [is_valid_cursor $curs $db] TRUE
+ set r [$curs get -set $key_set(1)]
+ error_check_bad cursor_get:DB_SET [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_SET:key $k $key_set(1)
+ error_check_good curs_get:DB_SET:data $d datum$key_set(1)
+
+ # Delete 2
+ error_check_good \
+ db_del:$key_set(2) [eval {$db del} $txn {$key_set(2)}] 0
+
+ # Now do next on cursor
+ set r [$curs get -next]
+ error_check_bad cursor_get:DB_NEXT [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_NEXT:key $k $key_set(3)
+ error_check_good curs_get:DB_NEXT:data $d datum$key_set(3)
+
+ # Close cursor
+ error_check_good curs_close [$curs close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ # Now get ready for duplicate tests
+
+ if { [is_rbtree $method] == 1 } {
+ puts "Test054: skipping remainder of test for method $method."
+ return
+ }
+
+ puts "\tTest054.b: Duplicate Tests"
+ append args " -dup"
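+	# (-dup allows multiple data items per key: putting an existing
+	# key adds another datum rather than overwriting it, which is
+	# what the cases below exercise.)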
+
+ # Open a new database for the dup tests so -truncate is not needed.
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test054-dup.db
+ set env NULL
+ } else {
+ set testfile test054-dup.db
+ set env [lindex $args $eindex]
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ set flags ""
+ set txn ""
+
+ set db [eval {berkdb_open} $args {$omethod $testfile}]
+ error_check_good db_open:dup [is_valid_db $db] TRUE
+
+ # Put three keys in the database
+ for { set key 1 } { $key <= 3 } {incr key} {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set r [eval {$db put} $txn $flags {$key datum$key}]
+ error_check_good put $r 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ # Retrieve keys sequentially so we can figure out their order
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set curs [eval {$db cursor} $txn]
+ error_check_good curs_open:dup [is_valid_cursor $curs $db] TRUE
+
+ set i 1
+	for {set d [$curs get -first] } \
+	    {[llength $d] != 0 } \
+	    {set d [$curs get -next] } {
+ set key_set($i) [lindex [lindex $d 0] 0]
+ incr i
+ }
+
+ # Now put in a bunch of duplicates for key 2
+ for { set d 1 } { $d <= 5 } {incr d} {
+ set r [eval {$db put} $txn $flags {$key_set(2) dup_$d}]
+ error_check_good dup:put $r 0
+ }
+
+ # Test case #5.
+ puts "\tTest054.b1: Delete dup w/cursor on first item. Get on key."
+
+ # Now set the cursor on the first of the duplicate set.
+ set r [eval {$curs get} -set {$key_set(2)}]
+ error_check_bad cursor_get:DB_SET [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_SET:key $k $key_set(2)
+ error_check_good curs_get:DB_SET:data $d datum$key_set(2)
+
+ # Now do the delete
+ set r [$curs del]
+ error_check_good curs_del $r 0
+
+ # Now do the get
+ set r [eval {$db get} $txn {$key_set(2)}]
+ error_check_good get_after_del [lindex [lindex $r 0] 1] dup_1
+
+ # Test case #6.
+ puts "\tTest054.b2: Now get the next duplicate from the cursor."
+
+ # Now do next on cursor
+ set r [$curs get -nextdup]
+ error_check_bad cursor_get:DB_NEXT [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_NEXT:key $k $key_set(2)
+ error_check_good curs_get:DB_NEXT:data $d dup_1
+
+	# Test case #7.
+ puts "\tTest054.b3: Two cursors in set; each delete different items"
+
+ # Open a new cursor.
+ set curs2 [eval {$db cursor} $txn]
+ error_check_good curs_open [is_valid_cursor $curs2 $db] TRUE
+
+ # Set on last of duplicate set.
+ set r [$curs2 get -set $key_set(3)]
+ error_check_bad cursor_get:DB_SET [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_SET:key $k $key_set(3)
+ error_check_good curs_get:DB_SET:data $d datum$key_set(3)
+
+ set r [$curs2 get -prev]
+ error_check_bad cursor_get:DB_PREV [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_PREV:key $k $key_set(2)
+ error_check_good curs_get:DB_PREV:data $d dup_5
+
+ # Delete the item at cursor 1 (dup_1)
+ error_check_good curs1_del [$curs del] 0
+
+ # Verify curs1 and curs2
+ # current should fail
+ set ret [$curs get -current]
+ error_check_good \
+ curs1_get_after_del $ret [list [list [] []]]
+
+ set r [$curs2 get -current]
+ error_check_bad curs2_get [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_CURRENT:key $k $key_set(2)
+ error_check_good curs_get:DB_CURRENT:data $d dup_5
+
+ # Now delete the item at cursor 2 (dup_5)
+ error_check_good curs2_del [$curs2 del] 0
+
+ # Verify curs1 and curs2
+ set ret [$curs get -current]
+ error_check_good curs1_get:del2 $ret [list [list [] []]]
+
+ set ret [$curs2 get -current]
+ error_check_good curs2_get:del2 $ret [list [list [] []]]
+
+ # Now verify that next and prev work.
+
+ set r [$curs2 get -prev]
+ error_check_bad cursor_get:DB_PREV [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_PREV:key $k $key_set(2)
+ error_check_good curs_get:DB_PREV:data $d dup_4
+
+ set r [$curs get -next]
+ error_check_bad cursor_get:DB_NEXT [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_NEXT:key $k $key_set(2)
+ error_check_good curs_get:DB_NEXT:data $d dup_2
+
+ puts "\tTest054.b4: Two cursors same item, one delete, one get"
+
+ # Move curs2 onto dup_2
+ set r [$curs2 get -prev]
+ error_check_bad cursor_get:DB_PREV [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_PREV:key $k $key_set(2)
+ error_check_good curs_get:DB_PREV:data $d dup_3
+
+ set r [$curs2 get -prev]
+ error_check_bad cursor_get:DB_PREV [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_PREV:key $k $key_set(2)
+ error_check_good curs_get:DB_PREV:data $d dup_2
+
+ # delete on curs 1
+ error_check_good curs1_del [$curs del] 0
+
+ # Verify gets on both 1 and 2
+ set ret [$curs get -current]
+ error_check_good \
+ curs1_get:deleted $ret [list [list [] []]]
+ set ret [$curs2 get -current]
+ error_check_good \
+ curs2_get:deleted $ret [list [list [] []]]
+
+ puts "\tTest054.b5: Now do a next on both cursors"
+
+ set r [$curs get -next]
+ error_check_bad cursor_get:DB_NEXT [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_NEXT:key $k $key_set(2)
+ error_check_good curs_get:DB_NEXT:data $d dup_3
+
+ set r [$curs2 get -next]
+ error_check_bad cursor_get:DB_NEXT [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_NEXT:key $k $key_set(2)
+ error_check_good curs_get:DB_NEXT:data $d dup_3
+
+ # Close cursor
+ error_check_good curs_close [$curs close] 0
+ error_check_good curs2_close [$curs2 close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+}
diff --git a/storage/bdb/test/test055.tcl b/storage/bdb/test/test055.tcl
new file mode 100644
index 00000000000..25134dca4be
--- /dev/null
+++ b/storage/bdb/test/test055.tcl
@@ -0,0 +1,141 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test055.tcl,v 11.16 2002/05/22 15:42:55 sue Exp $
+#
+# TEST test055
+# TEST Basic cursor operations.
+# TEST This test checks basic cursor operations.
+# TEST There are three different scenarios to test:
+# TEST 1. (no dups) Set cursor, retrieve current.
+# TEST 2. (no dups) Set cursor, retrieve next.
+# TEST 3. (no dups) Set cursor, retrieve prev.
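+#
+# A minimal sketch (not run by this test) of the cursor idiom these
+# scenarios exercise, using the berkdb Tcl commands from this suite:
+#	set curs [$db cursor]
+#	set r [$curs get -set $key]	;# position cursor; returns {{key data}}
+#	set r [$curs get -current]	;# re-read the current item
+#	set r [$curs get -prev]		;# step backward
+#	set r [$curs get -next]		;# step forward
+#	error_check_good curs_close [$curs close] 0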
+proc test055 { method args } {
+ global errorInfo
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Test055: $method interspersed cursor and normal operations"
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test055.db
+ set env NULL
+ } else {
+ set testfile test055.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ set flags ""
+ set txn ""
+
+ puts "\tTest055.a: No duplicates"
+ set db [eval {berkdb_open -create -mode 0644 $omethod } \
+ $args {$testfile}]
+ error_check_good db_open:nodup [is_valid_db $db] TRUE
+
+ # Put three keys in the database
+ for { set key 1 } { $key <= 3 } {incr key} {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set r [eval {$db put} $txn $flags {$key datum$key}]
+ error_check_good put $r 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ # Retrieve keys sequentially so we can figure out their order
+ set i 1
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set curs [eval {$db cursor} $txn]
+ error_check_good curs_open:nodup [is_valid_cursor $curs $db] TRUE
+
+ for {set d [$curs get -first] } { [llength $d] != 0 } {\
+ set d [$curs get -next] } {
+ set key_set($i) [lindex [lindex $d 0] 0]
+ incr i
+ }
+
+ # Test case #1.
+ puts "\tTest055.a1: Set cursor, retrieve current"
+
+	# Now set the cursor on the middle one.
+ set r [$curs get -set $key_set(2)]
+ error_check_bad cursor_get:DB_SET [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_SET:key $k $key_set(2)
+ error_check_good \
+ curs_get:DB_SET:data $d [pad_data $method datum$key_set(2)]
+
+ # Now retrieve current
+ set r [$curs get -current]
+ error_check_bad cursor_get:DB_CURRENT [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_CURRENT:key $k $key_set(2)
+ error_check_good \
+ curs_get:DB_CURRENT:data $d [pad_data $method datum$key_set(2)]
+
+ # Test case #2.
+ puts "\tTest055.a2: Set cursor, retrieve previous"
+ set r [$curs get -prev]
+ error_check_bad cursor_get:DB_PREV [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_PREV:key $k $key_set(1)
+ error_check_good \
+ curs_get:DB_PREV:data $d [pad_data $method datum$key_set(1)]
+
+ # Test case #3.
+	puts "\tTest055.a3: Set cursor, retrieve next"
+
+ # Now set the cursor on the middle one.
+ set r [$curs get -set $key_set(2)]
+ error_check_bad cursor_get:DB_SET [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_SET:key $k $key_set(2)
+ error_check_good \
+ curs_get:DB_SET:data $d [pad_data $method datum$key_set(2)]
+
+ # Now retrieve next
+ set r [$curs get -next]
+ error_check_bad cursor_get:DB_NEXT [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_NEXT:key $k $key_set(3)
+ error_check_good \
+ curs_get:DB_NEXT:data $d [pad_data $method datum$key_set(3)]
+
+ # Close cursor and database.
+ error_check_good curs_close [$curs close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+}
diff --git a/storage/bdb/test/test056.tcl b/storage/bdb/test/test056.tcl
new file mode 100644
index 00000000000..ef310332ed1
--- /dev/null
+++ b/storage/bdb/test/test056.tcl
@@ -0,0 +1,169 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test056.tcl,v 11.18 2002/05/22 15:42:55 sue Exp $
+#
+# TEST test056
+# TEST Cursor maintenance during deletes.
+# TEST Check if deleting a key when a cursor is on a duplicate of that
+# TEST key works.
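+#
+# In outline (a sketch, not run by this test): with a cursor parked on
+# a duplicate of a key, a db-level delete of that key should leave
+# "get -current" returning the empty pair while the rest of the
+# database stays intact:
+#	set r [$curs get -set $key]		;# cursor on a dup of $key
+#	set r [eval {$db del} $txn {$key}]	;# delete the whole key
+#	set ret [$curs get -current]		;# expect [list [list [] []]]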
+proc test056 { method args } {
+ global errorInfo
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ append args " -create -mode 0644 -dup "
+ if { [is_record_based $method] == 1 || [is_rbtree $method] } {
+ puts "Test056: skipping for method $method"
+ return
+ }
+ puts "Test056: $method delete of key in presence of cursor"
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test056.db
+ set env NULL
+ } else {
+ set testfile test056.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ set flags ""
+ set txn ""
+
+ set db [eval {berkdb_open} $args {$omethod $testfile}]
+ error_check_good db_open:dup [is_valid_db $db] TRUE
+
+ puts "\tTest056.a: Key delete with cursor on duplicate."
+ # Put three keys in the database
+ for { set key 1 } { $key <= 3 } {incr key} {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set r [eval {$db put} $txn $flags {$key datum$key}]
+ error_check_good put $r 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ # Retrieve keys sequentially so we can figure out their order
+ set i 1
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set curs [eval {$db cursor} $txn]
+ error_check_good curs_open:dup [is_valid_cursor $curs $db] TRUE
+
+ for {set d [$curs get -first] } { [llength $d] != 0 } {
+ set d [$curs get -next] } {
+ set key_set($i) [lindex [lindex $d 0] 0]
+ incr i
+ }
+
+ # Now put in a bunch of duplicates for key 2
+ for { set d 1 } { $d <= 5 } {incr d} {
+ set r [eval {$db put} $txn $flags {$key_set(2) dup_$d}]
+ error_check_good dup:put $r 0
+ }
+
+	# Now set the cursor on the first item of the duplicate set for
+	# key 2.
+ set r [$curs get -set $key_set(2)]
+ error_check_bad cursor_get:DB_SET [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_SET:key $k $key_set(2)
+ error_check_good curs_get:DB_SET:data $d datum$key_set(2)
+
+ # Now do two nexts
+ set r [$curs get -next]
+ error_check_bad cursor_get:DB_NEXT [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_NEXT:key $k $key_set(2)
+ error_check_good curs_get:DB_NEXT:data $d dup_1
+
+ set r [$curs get -next]
+ error_check_bad cursor_get:DB_NEXT [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_NEXT:key $k $key_set(2)
+ error_check_good curs_get:DB_NEXT:data $d dup_2
+
+ # Now do the delete
+ set r [eval {$db del} $txn $flags {$key_set(2)}]
+ error_check_good delete $r 0
+
+ # Now check the get current on the cursor.
+ set ret [$curs get -current]
+ error_check_good curs_after_del $ret [list [list [] []]]
+
+ # Now check that the rest of the database looks intact. There
+ # should be only two keys, 1 and 3.
+
+ set r [$curs get -first]
+ error_check_bad cursor_get:DB_FIRST [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_FIRST:key $k $key_set(1)
+ error_check_good curs_get:DB_FIRST:data $d datum$key_set(1)
+
+ set r [$curs get -next]
+ error_check_bad cursor_get:DB_NEXT [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_NEXT:key $k $key_set(3)
+ error_check_good curs_get:DB_NEXT:data $d datum$key_set(3)
+
+ set r [$curs get -next]
+ error_check_good cursor_get:DB_NEXT [llength $r] 0
+
+ puts "\tTest056.b:\
+ Cursor delete of first item, followed by cursor FIRST"
+ # Set to beginning
+ set r [$curs get -first]
+ error_check_bad cursor_get:DB_FIRST [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_FIRST:key $k $key_set(1)
+ error_check_good curs_get:DB_FIRST:data $d datum$key_set(1)
+
+ # Now do delete
+ error_check_good curs_del [$curs del] 0
+
+ # Now do DB_FIRST
+ set r [$curs get -first]
+ error_check_bad cursor_get:DB_FIRST [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_FIRST:key $k $key_set(3)
+ error_check_good curs_get:DB_FIRST:data $d datum$key_set(3)
+
+ error_check_good curs_close [$curs close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+}
diff --git a/storage/bdb/test/test057.tcl b/storage/bdb/test/test057.tcl
new file mode 100644
index 00000000000..04fb09ef260
--- /dev/null
+++ b/storage/bdb/test/test057.tcl
@@ -0,0 +1,248 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test057.tcl,v 11.22 2002/05/22 15:42:56 sue Exp $
+#
+# TEST test057
+# TEST Cursor maintenance during key deletes.
+# TEST Check if we handle the case where we delete a key with the cursor on
+# TEST it and then add the same key. The cursor should not get the new item
+# TEST returned, but the item shouldn't disappear.
+# TEST Run two tests, one where the overwriting put is done with a put and
+# TEST one where it's done with a cursor put.
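+#
+# The expected behavior, in outline (a sketch, not run by this test):
+#	set r [$curs get -set $key]	;# position cursor on the item
+#	$curs del			;# delete through the cursor
+#	$db put $key new_datum		;# re-add the same key
+#	$curs get -current		;# still [list [list [] []]]: the
+#					;# cursor does not adopt the new item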
+proc test057 { method args } {
+ global errorInfo
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ append args " -create -mode 0644 -dup "
+ if { [is_record_based $method] == 1 || [is_rbtree $method] == 1 } {
+ puts "Test057: skipping for method $method"
+ return
+ }
+ puts "Test057: $method delete and replace in presence of cursor."
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test057.db
+ set env NULL
+ } else {
+ set testfile test057.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ set flags ""
+ set txn ""
+
+ set db [eval {berkdb_open} $args {$omethod $testfile}]
+ error_check_good dbopen:dup [is_valid_db $db] TRUE
+
+ puts "\tTest057.a: Set cursor, delete cursor, put with key."
+ # Put three keys in the database
+ for { set key 1 } { $key <= 3 } {incr key} {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set r [eval {$db put} $txn $flags {$key datum$key}]
+ error_check_good put $r 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ # Retrieve keys sequentially so we can figure out their order
+ set i 1
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set curs [eval {$db cursor} $txn]
+ error_check_good curs_open:dup [is_valid_cursor $curs $db] TRUE
+
+ for {set d [$curs get -first] } {[llength $d] != 0 } \
+ {set d [$curs get -next] } {
+ set key_set($i) [lindex [lindex $d 0] 0]
+ incr i
+ }
+
+ # Now put in a bunch of duplicates for key 2
+ for { set d 1 } { $d <= 5 } {incr d} {
+ set r [eval {$db put} $txn $flags {$key_set(2) dup_$d}]
+ error_check_good dup:put $r 0
+ }
+
+	# Now put the cursor on key 1.
+ set r [$curs get -set $key_set(1)]
+ error_check_bad cursor_get:DB_SET [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_SET:key $k $key_set(1)
+ error_check_good curs_get:DB_SET:data $d datum$key_set(1)
+
+ # Now do the delete
+ set r [$curs del]
+ error_check_good delete $r 0
+
+ # Now check the get current on the cursor.
+ error_check_good curs_get:del [$curs get -current] [list [list [] []]]
+
+ # Now do a put on the key
+ set r [eval {$db put} $txn $flags {$key_set(1) new_datum$key_set(1)}]
+ error_check_good put $r 0
+
+ # Do a get
+ set r [eval {$db get} $txn {$key_set(1)}]
+ error_check_good get [lindex [lindex $r 0] 1] new_datum$key_set(1)
+
+ # Recheck cursor
+ error_check_good curs_get:deleted [$curs get -current] [list [list [] []]]
+
+ # Move cursor and see if we get the key.
+ set r [$curs get -first]
+ error_check_bad cursor_get:DB_FIRST [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_FIRST:key $k $key_set(1)
+ error_check_good curs_get:DB_FIRST:data $d new_datum$key_set(1)
+
+	puts "\tTest057.b: Set two cursors on a key, delete one, overwrite other"
+ set curs2 [eval {$db cursor} $txn]
+ error_check_good curs2_open [is_valid_cursor $curs2 $db] TRUE
+
+	# Set both cursors on the third key.
+ set r [$curs get -set $key_set(3)]
+ error_check_bad cursor_get:DB_SET [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_SET:key $k $key_set(3)
+ error_check_good curs_get:DB_SET:data $d datum$key_set(3)
+
+ set r [$curs2 get -set $key_set(3)]
+ error_check_bad cursor2_get:DB_SET [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs2_get:DB_SET:key $k $key_set(3)
+ error_check_good curs2_get:DB_SET:data $d datum$key_set(3)
+
+ # Now delete through cursor 1
+ error_check_good curs1_del [$curs del] 0
+
+ # Verify gets on both 1 and 2
+ error_check_good curs_get:deleted [$curs get -current] \
+ [list [list [] []]]
+ error_check_good curs_get:deleted [$curs2 get -current] \
+ [list [list [] []]]
+
+ # Now do a replace through cursor 2
+ set pflags "-current"
+ if {[is_hash $method] == 1} {
+		error_check_good curs2_put_after_del [is_substr \
+ [$curs2 put $pflags new_datum$key_set(3)] "DB_NOTFOUND"] 1
+
+ # Gets fail
+ error_check_good curs1_get:deleted \
+ [$curs get -current] \
+ [list [list [] []]]
+ error_check_good curs2_get:deleted \
+ [$curs get -current] \
+		    [$curs2 get -current] \
+ } else {
+		# btree only; recno skips this test
+ set ret [$curs2 put $pflags new_datum$key_set(3)]
+ error_check_good curs_replace $ret 0
+ }
+
+ # Gets fail
+ #error_check_good curs1_get:deleted [catch {$curs get -current} r] 1
+ #error_check_good curs1_get_after_del \
+ [is_substr $errorInfo "DB_KEYEMPTY"] 1
+ #error_check_good curs2_get:deleted [catch {$curs2 get -current} r] 1
+ #error_check_good curs2_get_after_del \
+ [is_substr $errorInfo "DB_KEYEMPTY"] 1
+
+ puts "\tTest057.c:\
+ Set two cursors on a dup, delete one, overwrite other"
+
+ # Set both cursors on the 2nd duplicate of key 2
+ set r [$curs get -set $key_set(2)]
+ error_check_bad cursor_get:DB_SET [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_SET:key $k $key_set(2)
+ error_check_good curs_get:DB_SET:data $d datum$key_set(2)
+
+ set r [$curs get -next]
+ error_check_bad cursor_get:DB_NEXT [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs_get:DB_NEXT:key $k $key_set(2)
+ error_check_good curs_get:DB_NEXT:data $d dup_1
+
+ set r [$curs2 get -set $key_set(2)]
+ error_check_bad cursor2_get:DB_SET [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs2_get:DB_SET:key $k $key_set(2)
+ error_check_good curs2_get:DB_SET:data $d datum$key_set(2)
+
+ set r [$curs2 get -next]
+ error_check_bad cursor2_get:DB_NEXT [llength $r] 0
+ set k [lindex [lindex $r 0] 0]
+ set d [lindex [lindex $r 0] 1]
+ error_check_good curs2_get:DB_NEXT:key $k $key_set(2)
+ error_check_good curs2_get:DB_NEXT:data $d dup_1
+
+ # Now delete through cursor 1
+ error_check_good curs1_del [$curs del] 0
+
+ # Verify gets on both 1 and 2
+ error_check_good curs_get:deleted [$curs get -current] \
+ [list [list [] []]]
+ error_check_good curs_get:deleted [$curs2 get -current] \
+ [list [list [] []]]
+
+ # Now do a replace through cursor 2 -- this will work on btree but
+ # not on hash
+ if {[is_hash $method] == 1} {
+ error_check_good hash_replace \
+ [is_substr [$curs2 put -current new_dup_1] "DB_NOTFOUND"] 1
+ } else {
+ error_check_good curs_replace [$curs2 put -current new_dup_1] 0
+ }
+
+ # Both gets should fail
+ #error_check_good curs1_get:deleted [catch {$curs get -current} r] 1
+ #error_check_good curs1_get_after_del \
+ [is_substr $errorInfo "DB_KEYEMPTY"] 1
+ #error_check_good curs2_get:deleted [catch {$curs2 get -current} r] 1
+ #error_check_good curs2_get_after_del \
+ [is_substr $errorInfo "DB_KEYEMPTY"] 1
+
+ error_check_good curs2_close [$curs2 close] 0
+ error_check_good curs_close [$curs close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+}
diff --git a/storage/bdb/test/test058.tcl b/storage/bdb/test/test058.tcl
new file mode 100644
index 00000000000..daf164fd6e2
--- /dev/null
+++ b/storage/bdb/test/test058.tcl
@@ -0,0 +1,103 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test058.tcl,v 11.20 2002/02/22 15:26:27 sandstro Exp $
+#
+# TEST test058
+# TEST Verify that deleting and reading duplicates results in correct ordering.
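+#
+# In outline (a sketch, not run by this test): with unsorted dups kept
+# in insertion order, walking the re-put set via
+#	set ret [$dbc get -set doghouse]
+#	set ret [$dbc get -nextdup]
+# should return the duplicates in exactly the order they were put.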
+proc test058 { method args } {
+ source ./include.tcl
+
+ #
+ # If we are using an env, then skip this test. It needs its own.
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ incr eindex
+ set env [lindex $args $eindex]
+ puts "Test058 skipping for env $env"
+ return
+ }
+ set args [convert_args $method $args]
+ set encargs ""
+ set args [split_encargs $args encargs]
+ set omethod [convert_method $method]
+
+ if { [is_record_based $method] == 1 || [is_rbtree $method] == 1 } {
+ puts "Test058: skipping for method $method"
+ return
+ }
+	puts "Test058: $method delete dups after inserting a key after the dup set."
+
+ # environment
+ env_cleanup $testdir
+ set eflags "-create -txn $encargs -home $testdir"
+ set env [eval {berkdb_env} $eflags]
+ error_check_good env [is_valid_env $env] TRUE
+
+ # db open
+ set flags "-auto_commit -create -mode 0644 -dup -env $env $args"
+ set db [eval {berkdb_open} $flags $omethod "test058.db"]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set tn ""
+ set tid ""
+ set tn [$env txn]
+ set tflags "-txn $tn"
+
+ puts "\tTest058.a: Adding 10 duplicates"
+ # Add a bunch of dups
+ for { set i 0 } { $i < 10 } {incr i} {
+ set ret \
+ [eval {$db put} $tflags {doghouse $i"DUPLICATE_DATA_VALUE"}]
+ error_check_good db_put $ret 0
+ }
+
+ puts "\tTest058.b: Adding key after duplicates"
+ # Now add one more key/data AFTER the dup set.
+ set ret [eval {$db put} $tflags {zebrahouse NOT_A_DUP}]
+ error_check_good db_put $ret 0
+
+ error_check_good txn_commit [$tn commit] 0
+
+ set tn [$env txn]
+ error_check_good txnbegin [is_substr $tn $env] 1
+ set tflags "-txn $tn"
+
+ # Now delete everything
+ puts "\tTest058.c: Deleting duplicated key"
+ set ret [eval {$db del} $tflags {doghouse}]
+ error_check_good del $ret 0
+
+ # Now reput everything
+ set pad \
+ abcdefghijklmnopqrtsuvabcdefghijklmnopqrtsuvabcdefghijklmnopqrtsuvabcdefghijklmnopqrtsuvabcdefghijklmnopqrtsuvabcdefghijklmnopqrtsuvabcdefghijklmnopqrtsuvabcdefghijklmnopqrtsuvabcdefghijklmnopqrtsuvabcdefghijklmnopqrtsuv
+
+ puts "\tTest058.d: Reputting duplicates with big data vals"
+ for { set i 0 } { $i < 10 } {incr i} {
+ set ret [eval {$db put} \
+ $tflags {doghouse $i"DUPLICATE_DATA_VALUE"$pad}]
+ error_check_good db_put $ret 0
+ }
+ error_check_good txn_commit [$tn commit] 0
+
+ # Check duplicates for order
+ set dbc [$db cursor]
+ error_check_good db_cursor [is_substr $dbc $db] 1
+
+ puts "\tTest058.e: Verifying that duplicates are in order."
+ set i 0
+ for { set ret [$dbc get -set doghouse] } \
+ {$i < 10 && [llength $ret] != 0} \
+ { set ret [$dbc get -nextdup] } {
+ set data [lindex [lindex $ret 0] 1]
+ error_check_good \
+ duplicate_value $data $i"DUPLICATE_DATA_VALUE"$pad
+ incr i
+ }
+
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good db_close [$db close] 0
+ reset_env $env
+}
diff --git a/storage/bdb/test/test059.tcl b/storage/bdb/test/test059.tcl
new file mode 100644
index 00000000000..596ea7a3c94
--- /dev/null
+++ b/storage/bdb/test/test059.tcl
@@ -0,0 +1,150 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test059.tcl,v 11.18 2002/06/11 15:10:16 sue Exp $
+#
+# TEST test059
+# TEST Cursor ops work with a partial length of 0.
+# TEST Make sure that we handle retrieves of zero-length data items correctly.
+# TEST The following ops should allow a partial data retrieval of length 0.
+# TEST db_get
+# TEST db_cget FIRST, NEXT, LAST, PREV, CURRENT, SET, SET_RANGE
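+#
+# The -partial flag takes {doff dlen}: return dlen bytes of the data
+# item starting at byte doff. With {0 0} the key is located normally
+# but the returned data is zero-length, e.g. (a sketch):
+#	set ret [$db get -partial {0 0} $key]	;# -> {{$key {}}}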
+proc test059 { method args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Test059: $method 0-length partial data retrieval"
+
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test059.db
+ set env NULL
+ } else {
+ set testfile test059.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ if { [is_record_based $method] == 1 } {
+ append gflags " -recno"
+ }
+
+ puts "\tTest059.a: Populate a database"
+ set oflags "-create -mode 0644 $omethod $args $testfile"
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_create [is_substr $db db] 1
+
+ # Put ten keys in the database
+ for { set key 1 } { $key <= 10 } {incr key} {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set r [eval {$db put} $txn $pflags {$key datum$key}]
+ error_check_good put $r 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ # Retrieve keys sequentially so we can figure out their order
+ set i 1
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set curs [eval {$db cursor} $txn]
+ error_check_good db_curs [is_valid_cursor $curs $db] TRUE
+
+ for {set d [$curs get -first] } { [llength $d] != 0 } {
+ set d [$curs get -next] } {
+ set key_set($i) [lindex [lindex $d 0] 0]
+ incr i
+ }
+
+	puts "\tTest059.b: db get with 0 partial length retrieve"
+
+	# Do a get of the middle key with a zero-length partial retrieve.
+ set ret [eval {$db get -partial {0 0}} $txn $gflags {$key_set(5)}]
+ error_check_bad db_get_0 [llength $ret] 0
+
+	puts "\tTest059.c: db cget FIRST with 0 partial length retrieve"
+ set ret [$curs get -first -partial {0 0}]
+ set data [lindex [lindex $ret 0] 1]
+ set key [lindex [lindex $ret 0] 0]
+ error_check_good key_check_first $key $key_set(1)
+ error_check_good db_cget_first [string length $data] 0
+
+	puts "\tTest059.d: db cget NEXT with 0 partial length retrieve"
+ set ret [$curs get -next -partial {0 0}]
+ set data [lindex [lindex $ret 0] 1]
+ set key [lindex [lindex $ret 0] 0]
+ error_check_good key_check_next $key $key_set(2)
+ error_check_good db_cget_next [string length $data] 0
+
+	puts "\tTest059.e: db cget LAST with 0 partial length retrieve"
+ set ret [$curs get -last -partial {0 0}]
+ set data [lindex [lindex $ret 0] 1]
+ set key [lindex [lindex $ret 0] 0]
+ error_check_good key_check_last $key $key_set(10)
+ error_check_good db_cget_last [string length $data] 0
+
+	puts "\tTest059.f: db cget PREV with 0 partial length retrieve"
+ set ret [$curs get -prev -partial {0 0}]
+ set data [lindex [lindex $ret 0] 1]
+ set key [lindex [lindex $ret 0] 0]
+ error_check_good key_check_prev $key $key_set(9)
+ error_check_good db_cget_prev [string length $data] 0
+
+	puts "\tTest059.g: db cget CURRENT with 0 partial length retrieve"
+ set ret [$curs get -current -partial {0 0}]
+ set data [lindex [lindex $ret 0] 1]
+ set key [lindex [lindex $ret 0] 0]
+ error_check_good key_check_current $key $key_set(9)
+ error_check_good db_cget_current [string length $data] 0
+
+	puts "\tTest059.h: db cget SET with 0 partial length retrieve"
+ set ret [$curs get -set -partial {0 0} $key_set(7)]
+ set data [lindex [lindex $ret 0] 1]
+ set key [lindex [lindex $ret 0] 0]
+ error_check_good key_check_set $key $key_set(7)
+ error_check_good db_cget_set [string length $data] 0
+
+ if {[is_btree $method] == 1} {
+		puts "\tTest059.i:\
+		    db cget SET_RANGE with 0 partial length retrieve"
+ set ret [$curs get -set_range -partial {0 0} $key_set(5)]
+ set data [lindex [lindex $ret 0] 1]
+ set key [lindex [lindex $ret 0] 0]
+ error_check_good key_check_set $key $key_set(5)
+ error_check_good db_cget_set [string length $data] 0
+ }
+
+ error_check_good curs_close [$curs close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+}
diff --git a/storage/bdb/test/test060.tcl b/storage/bdb/test/test060.tcl
new file mode 100644
index 00000000000..4a18c97f42f
--- /dev/null
+++ b/storage/bdb/test/test060.tcl
@@ -0,0 +1,60 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test060.tcl,v 11.10 2002/05/22 15:42:56 sue Exp $
+#
+# TEST test060
+# TEST Test of the DB_EXCL flag to DB->open().
+# TEST 1) Attempt to open and create a nonexistent database; verify success.
+# TEST 2) Attempt to reopen it; verify failure.
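+#
+# In outline (a sketch, not run by this test):
+#	berkdb_open -create -excl ... $testfile	;# succeeds: file is new
+#	berkdb_open -create -excl ... $testfile	;# fails with EEXIST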
+proc test060 { method args } {
+ global errorCode
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Test060: $method ($args) Test of the DB_EXCL flag to DB->open"
+
+ # Set the database location and make sure the db doesn't exist yet
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test060.db
+ set env NULL
+ } else {
+ set testfile test060.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ # Create the database and check success
+ puts "\tTest060.a: open and close non-existent file with DB_EXCL"
+ set db [eval {berkdb_open \
+ -create -excl -mode 0644} $args {$omethod $testfile}]
+ error_check_good dbopen:excl [is_valid_db $db] TRUE
+
+ # Close it and check success
+ error_check_good db_close [$db close] 0
+
+ # Try to open it again, and make sure the open fails
+ puts "\tTest060.b: open it again with DB_EXCL and make sure it fails"
+ set errorCode NONE
+ error_check_good open:excl:catch [catch { \
+ set db [eval {berkdb_open_noerr \
+ -create -excl -mode 0644} $args {$omethod $testfile}]
+ } ret ] 1
+
+ error_check_good dbopen:excl [is_substr $errorCode EEXIST] 1
+}
diff --git a/storage/bdb/test/test061.tcl b/storage/bdb/test/test061.tcl
new file mode 100644
index 00000000000..65544e88deb
--- /dev/null
+++ b/storage/bdb/test/test061.tcl
@@ -0,0 +1,226 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test061.tcl,v 11.18 2002/02/22 15:26:27 sandstro Exp $
+#
+# TEST test061
+# TEST Test of txn abort and commit for in-memory databases.
+# TEST a) Put + abort: verify absence of data
+# TEST b) Put + commit: verify presence of data
+# TEST c) Overwrite + abort: verify that data is unchanged
+# TEST d) Overwrite + commit: verify that data has changed
+# TEST e) Delete + abort: verify that data is still present
+# TEST f) Delete + commit: verify that data has been deleted
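+#
+# Each case follows the same shape (a sketch, not run by this test):
+#	set txn [$dbenv txn]
+#	$db put -txn $txn $key $data	;# or del / overwrite
+#	$txn abort			;# or $txn commit
+#	$db get $key			;# verify the expected outcome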
+proc test061 { method args } {
+ global alphabet
+ global encrypt
+ global errorCode
+ global passwd
+ source ./include.tcl
+
+ #
+ # If we are using an env, then skip this test. It needs its own.
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ incr eindex
+ set env [lindex $args $eindex]
+ puts "Test061 skipping for env $env"
+ return
+ }
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+ if { [is_queueext $method] == 1} {
+ puts "Test061 skipping for method $method"
+ return
+ }
+ set encargs ""
+ set args [split_encargs $args encargs]
+
+ puts "Test061: Transaction abort and commit test for in-memory data."
+ puts "Test061: $method $args"
+
+ set key "key"
+ set data "data"
+ set otherdata "otherdata"
+ set txn ""
+ set flags ""
+ set gflags ""
+
+ if { [is_record_based $method] == 1} {
+ set key 1
+ set gflags " -recno"
+ }
+
+ puts "\tTest061: Create environment and $method database."
+ env_cleanup $testdir
+
+ # create environment
+ set eflags "-create -txn $encargs -home $testdir"
+ set dbenv [eval {berkdb_env} $eflags]
+ error_check_good dbenv [is_valid_env $dbenv] TRUE
+
+ # db open -- no file specified, in-memory database
+ set flags "-auto_commit -create $args $omethod"
+ set db [eval {berkdb_open -env} $dbenv $flags]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Here we go with the six test cases. Since we need to verify
+ # a different thing each time, and since we can't just reuse
+ # the same data if we're to test overwrite, we just
+ # plow through rather than writing some impenetrable loop code;
+ # each of the cases is only a few lines long, anyway.
+
+ puts "\tTest061.a: put/abort"
+
+ # txn_begin
+ set txn [$dbenv txn]
+ error_check_good txn_begin [is_valid_txn $txn $dbenv] TRUE
+
+ # put a key
+ set ret [eval {$db put} -txn $txn {$key [chop_data $method $data]}]
+ error_check_good db_put $ret 0
+
+ # check for existence
+ set ret [eval {$db get} -txn $txn $gflags {$key}]
+ error_check_good get $ret [list [list $key [pad_data $method $data]]]
+
+ # abort
+ error_check_good txn_abort [$txn abort] 0
+
+ # check for *non*-existence
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good get $ret {}
+
+ puts "\tTest061.b: put/commit"
+
+ # txn_begin
+ set txn [$dbenv txn]
+ error_check_good txn_begin [is_valid_txn $txn $dbenv] TRUE
+
+ # put a key
+ set ret [eval {$db put} -txn $txn {$key [chop_data $method $data]}]
+ error_check_good db_put $ret 0
+
+ # check for existence
+ set ret [eval {$db get} -txn $txn $gflags {$key}]
+ error_check_good get $ret [list [list $key [pad_data $method $data]]]
+
+ # commit
+ error_check_good txn_commit [$txn commit] 0
+
+ # check again for existence
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good get $ret [list [list $key [pad_data $method $data]]]
+
+ puts "\tTest061.c: overwrite/abort"
+
+ # txn_begin
+ set txn [$dbenv txn]
+ error_check_good txn_begin [is_valid_txn $txn $dbenv] TRUE
+
+ # overwrite {key,data} with {key,otherdata}
+ set ret [eval {$db put} -txn $txn {$key [chop_data $method $otherdata]}]
+ error_check_good db_put $ret 0
+
+ # check for existence
+ set ret [eval {$db get} -txn $txn $gflags {$key}]
+ error_check_good get $ret \
+ [list [list $key [pad_data $method $otherdata]]]
+
+ # abort
+ error_check_good txn_abort [$txn abort] 0
+
+ # check that data is unchanged ($data not $otherdata)
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good get $ret [list [list $key [pad_data $method $data]]]
+
+ puts "\tTest061.d: overwrite/commit"
+
+ # txn_begin
+ set txn [$dbenv txn]
+ error_check_good txn_begin [is_valid_txn $txn $dbenv] TRUE
+
+ # overwrite {key,data} with {key,otherdata}
+ set ret [eval {$db put} -txn $txn {$key [chop_data $method $otherdata]}]
+ error_check_good db_put $ret 0
+
+ # check for existence
+ set ret [eval {$db get} -txn $txn $gflags {$key}]
+ error_check_good get $ret \
+ [list [list $key [pad_data $method $otherdata]]]
+
+ # commit
+ error_check_good txn_commit [$txn commit] 0
+
+ # check that data has changed ($otherdata not $data)
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good get $ret \
+ [list [list $key [pad_data $method $otherdata]]]
+
+ puts "\tTest061.e: delete/abort"
+
+ # txn_begin
+ set txn [$dbenv txn]
+ error_check_good txn_begin [is_valid_txn $txn $dbenv] TRUE
+
+ # delete
+ set ret [eval {$db del} -txn $txn {$key}]
+	error_check_good db_del $ret 0
+
+ # check for nonexistence
+ set ret [eval {$db get} -txn $txn $gflags {$key}]
+ error_check_good get $ret {}
+
+ # abort
+ error_check_good txn_abort [$txn abort] 0
+
+ # check for existence
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good get $ret \
+ [list [list $key [pad_data $method $otherdata]]]
+
+ puts "\tTest061.f: delete/commit"
+
+ # txn_begin
+ set txn [$dbenv txn]
+ error_check_good txn_begin [is_valid_txn $txn $dbenv] TRUE
+
+	# delete the key
+	set ret [eval {$db del} -txn $txn {$key}]
+	error_check_good db_del $ret 0
+
+ # check for nonexistence
+ set ret [eval {$db get} -txn $txn $gflags {$key}]
+ error_check_good get $ret {}
+
+ # commit
+ error_check_good txn_commit [$txn commit] 0
+
+ # check for continued nonexistence
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good get $ret {}
+
+ # We're done; clean up.
+ error_check_good db_close [eval {$db close}] 0
+ error_check_good env_close [eval {$dbenv close}] 0
+
+ # Now run db_recover and ensure that it runs cleanly.
+ set utilflag ""
+ if { $encrypt != 0 } {
+ set utilflag "-P $passwd"
+ }
+ puts "\tTest061.g: Running db_recover -h"
+ set ret [catch {eval {exec} $util_path/db_recover -h $testdir \
+ $utilflag} res]
+ if { $ret != 0 } {
+		puts "FAIL: db_recover output $res"
+ }
+ error_check_good db_recover $ret 0
+
+ puts "\tTest061.h: Running db_recover -c -h"
+ set ret [catch {eval {exec} $util_path/db_recover -c -h $testdir \
+ $utilflag} res]
+ error_check_good db_recover-c $ret 0
+}
diff --git a/storage/bdb/test/test062.tcl b/storage/bdb/test/test062.tcl
new file mode 100644
index 00000000000..5cacd98a2c0
--- /dev/null
+++ b/storage/bdb/test/test062.tcl
@@ -0,0 +1,153 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test062.tcl,v 11.20 2002/06/11 14:09:57 sue Exp $
+#
+# TEST test062
+# TEST Test of partial puts (using DB_CURRENT) onto duplicate pages.
+# TEST Insert the first 200 words into the dictionary 200 times each with
+# TEST self as key and <random letter>:self as data. Use partial puts to
+# TEST append self again to data; verify correctness.
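+#
+# A partial put with -partial {doff dlen} replaces dlen bytes of the
+# existing datum, starting at byte doff, with the new data; with dlen 0
+# and doff equal to the current length it appends. Sketch (not run
+# here):
+#	$dbc put -current -partial [list $doff 0] $d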
+proc test062 { method {nentries 200} {ndups 200} {tnum 62} args } {
+ global alphabet
+ global rand_init
+ source ./include.tcl
+
+ berkdb srand $rand_init
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_record_based $method] == 1 || [is_rbtree $method] == 1 } {
+ puts "Test0$tnum skipping for method $omethod"
+ return
+ }
+ # Create the database and open the dictionary
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum.db
+ set env NULL
+ } else {
+ set testfile test0$tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 200 } {
+ set nentries 100
+ }
+ reduce_dups nentries ndups
+ }
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ puts "Test0$tnum:\
+ $method ($args) $nentries Partial puts and $ndups duplicates."
+ set db [eval {berkdb_open -create -mode 0644 \
+ $omethod -dup} $args {$testfile} ]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set did [open $dict]
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ # Here is the loop where we put each key/data pair
+ puts "\tTest0$tnum.a: Put loop (initialize database)"
+ while { [gets $did str] != -1 && $count < $nentries } {
+ for { set i 1 } { $i <= $ndups } { incr i } {
+ set pref \
+ [string index $alphabet [berkdb random_int 0 25]]
+ set datastr $pref:$str
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} \
+ $txn $pflags {$str [chop_data $method $datastr]}]
+ error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+ set keys($count) $str
+
+ incr count
+ }
+ close $did
+
+ puts "\tTest0$tnum.b: Partial puts."
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good cursor_open [is_substr $dbc $db] 1
+
+ # Do a partial write to extend each datum in
+ # the regular db by the corresponding dictionary word.
+ # We have to go through each key's dup set using -set
+ # because cursors are not stable in the hash AM and we
+ # want to make sure we hit all the keys.
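+	# Worked example (an illustration, not executed): for the word
+	# "abc" stored as "q:abc", $d is "abc", $doff is
+	# [string length "abc"] + 2 = 5 (the length of "q:abc"), and
+	# $dlen is 0, so the partial put appends "abc" to give "q:abcabc".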
+ for { set i 0 } { $i < $count } { incr i } {
+ set key $keys($i)
+ for {set ret [$dbc get -set $key]} \
+ {[llength $ret] != 0} \
+ {set ret [$dbc get -nextdup]} {
+
+ set k [lindex [lindex $ret 0] 0]
+ set orig_d [lindex [lindex $ret 0] 1]
+ set d [string range $orig_d 2 end]
+ set doff [expr [string length $d] + 2]
+ set dlen 0
+ error_check_good data_and_key_sanity $d $k
+
+ set ret [$dbc get -current]
+ error_check_good before_sanity \
+ [lindex [lindex $ret 0] 0] \
+ [string range [lindex [lindex $ret 0] 1] 2 end]
+
+ error_check_good partial_put [eval {$dbc put -current \
+ -partial [list $doff $dlen] $d}] 0
+
+ set ret [$dbc get -current]
+ error_check_good partial_put_correct \
+ [lindex [lindex $ret 0] 1] $orig_d$d
+ }
+ }
+
+ puts "\tTest0$tnum.c: Double-checking get loop."
+ # Double-check that each datum in the regular db has
+ # been appropriately modified.
+
+ for {set ret [$dbc get -first]} \
+ {[llength $ret] != 0} \
+ {set ret [$dbc get -next]} {
+
+ set k [lindex [lindex $ret 0] 0]
+ set d [lindex [lindex $ret 0] 1]
+ error_check_good modification_correct \
+ [string range $d 2 end] [repeat $k 2]
+ }
+
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+}
diff --git a/storage/bdb/test/test063.tcl b/storage/bdb/test/test063.tcl
new file mode 100644
index 00000000000..2e8726c8f96
--- /dev/null
+++ b/storage/bdb/test/test063.tcl
@@ -0,0 +1,174 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test063.tcl,v 11.17 2002/05/24 15:24:55 sue Exp $
+#
+# TEST test063
+# TEST Test of the DB_RDONLY flag to DB->open
+# TEST Attempt to both DB->put and DBC->c_put into a database
+# TEST that has been opened DB_RDONLY, and check for failure.
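+#
+# In outline (a sketch, not run by this test): after the database is
+# created and closed, reopen it with -rdonly; gets succeed, while
+#	$db put ...   /   $dbc put -current ...   /   $db del ...
+# should all fail with EACCES and leave the data untouched.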
+proc test063 { method args } {
+ global errorCode
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+ set tnum 63
+
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum.db
+ set env NULL
+ } else {
+ set testfile test0$tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ set key "key"
+ set data "data"
+ set key2 "another_key"
+ set data2 "more_data"
+
+ set gflags ""
+ set txn ""
+
+ if { [is_record_based $method] == 1 } {
+ set key "1"
+ set key2 "2"
+ append gflags " -recno"
+ }
+
+ puts "Test0$tnum: $method ($args) DB_RDONLY test."
+
+ # Create a test database.
+ puts "\tTest0$tnum.a: Creating test database."
+ set db [eval {berkdb_open_noerr -create -mode 0644} \
+ $omethod $args $testfile]
+ error_check_good db_create [is_valid_db $db] TRUE
+
+ # Put and get an item so it's nonempty.
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$key [chop_data $method $data]}]
+ error_check_good initial_put $ret 0
+
+ set dbt [eval {$db get} $txn $gflags {$key}]
+ error_check_good initial_get $dbt \
+ [list [list $key [pad_data $method $data]]]
+
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ if { $eindex == -1 } {
+ # Confirm that database is writable. If we are
+ # using an env (that may be remote on a server)
+ # we cannot do this check.
+ error_check_good writable [file writable $testfile] 1
+ }
+
+ puts "\tTest0$tnum.b: Re-opening DB_RDONLY and attempting to put."
+
+ # Now open it read-only and make sure we can get but not put.
+ set db [eval {berkdb_open_noerr -rdonly} $args {$testfile}]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbt [eval {$db get} $txn $gflags {$key}]
+ error_check_good db_get $dbt \
+ [list [list $key [pad_data $method $data]]]
+
+ set ret [catch {eval {$db put} $txn \
+ {$key2 [chop_data $method $data]}} res]
+ error_check_good put_failed $ret 1
+ error_check_good db_put_rdonly [is_substr $errorCode "EACCES"] 1
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ set errorCode "NONE"
+
+ puts "\tTest0$tnum.c: Attempting cursor put."
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good cursor_create [is_valid_cursor $dbc $db] TRUE
+
+ error_check_good cursor_set [$dbc get -first] $dbt
+ set ret [catch {eval {$dbc put} -current $data} res]
+ error_check_good c_put_failed $ret 1
+ error_check_good dbc_put_rdonly [is_substr $errorCode "EACCES"] 1
+
+ set dbt [eval {$db get} $gflags {$key2}]
+ error_check_good db_get_key2 $dbt ""
+
+ puts "\tTest0$tnum.d: Attempting ordinary delete."
+
+ set errorCode "NONE"
+	set ret [catch {eval {$db del} $txn {$key}} res]
+ error_check_good del_failed $ret 1
+ error_check_good db_del_rdonly [is_substr $errorCode "EACCES"] 1
+
+ set dbt [eval {$db get} $txn $gflags {$key}]
+ error_check_good db_get_key $dbt \
+ [list [list $key [pad_data $method $data]]]
+
+ puts "\tTest0$tnum.e: Attempting cursor delete."
+ # Just set the cursor to the beginning; we don't care what's there...
+ # yet.
+ set dbt2 [$dbc get -first]
+ error_check_good db_get_first_key $dbt2 $dbt
+ set errorCode "NONE"
+ set ret [catch {$dbc del} res]
+ error_check_good c_del_failed $ret 1
+ error_check_good dbc_del_rdonly [is_substr $errorCode "EACCES"] 1
+
+ set dbt2 [$dbc get -current]
+ error_check_good db_get_key $dbt2 $dbt
+
+ puts "\tTest0$tnum.f: Close, reopen db; verify unchanged."
+
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ set db [eval {berkdb_open} $omethod $args $testfile]
+ error_check_good db_reopen [is_valid_db $db] TRUE
+
+ set dbc [$db cursor]
+ error_check_good cursor_create [is_valid_cursor $dbc $db] TRUE
+
+ error_check_good first_there [$dbc get -first] \
+ [list [list $key [pad_data $method $data]]]
+ error_check_good nomore_there [$dbc get -next] ""
+
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good db_close [$db close] 0
+}
diff --git a/storage/bdb/test/test064.tcl b/storage/bdb/test/test064.tcl
new file mode 100644
index 00000000000..c306b0d9d46
--- /dev/null
+++ b/storage/bdb/test/test064.tcl
@@ -0,0 +1,69 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test064.tcl,v 11.13 2002/05/22 15:42:57 sue Exp $
+#
+# TEST test064
+# TEST Test of DB->get_type
+# TEST Create a database of type specified by method.
+# TEST Make sure DB->get_type returns the right thing with both a normal
+# TEST and DB_UNKNOWN open.
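+#
+# Sketch (not run by this test): after opening with no method
+# specifier (DB_UNKNOWN), get_type should still report the type the
+# database was created with:
+#	set db [eval {berkdb_open} $testfile]
+#	$db get_type		;# e.g. "btree" for a -btree database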
+proc test064 { method args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+ set tnum 64
+
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum.db
+ set env NULL
+ } else {
+ set testfile test0$tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ puts "Test0$tnum: $method ($args) DB->get_type test."
+
+ # Create a test database.
+ puts "\tTest0$tnum.a: Creating test database of type $method."
+ set db [eval {berkdb_open -create -mode 0644} \
+ $omethod $args $testfile]
+ error_check_good db_create [is_valid_db $db] TRUE
+
+ error_check_good db_close [$db close] 0
+
+ puts "\tTest0$tnum.b: get_type after method specifier."
+
+ set db [eval {berkdb_open} $omethod $args {$testfile}]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ set type [$db get_type]
+ error_check_good get_type $type [string range $omethod 1 end]
+
+ error_check_good db_close [$db close] 0
+
+ puts "\tTest0$tnum.c: get_type after DB_UNKNOWN."
+
+ set db [eval {berkdb_open} $args $testfile]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ set type [$db get_type]
+ error_check_good get_type $type [string range $omethod 1 end]
+
+ error_check_good db_close [$db close] 0
+}
diff --git a/storage/bdb/test/test065.tcl b/storage/bdb/test/test065.tcl
new file mode 100644
index 00000000000..ea29b4d2db7
--- /dev/null
+++ b/storage/bdb/test/test065.tcl
@@ -0,0 +1,199 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test065.tcl,v 11.16 2002/08/22 18:18:50 sandstro Exp $
+#
+# TEST test065
+# TEST Test of DB->stat(DB_FAST_STAT)
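+#
+# Sketch (not run by this test): -faststat returns cached counts
+# without walking the database, so after deletes the reported key
+# count can be stale for methods that do not renumber:
+#	set ret [$db stat -faststat]
+#	is_substr $ret "{{Number of keys} $nentries}"	;# may still match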
+proc test065 { method args } {
+ source ./include.tcl
+ global errorCode
+ global alphabet
+
+ set nentries 10000
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+ set tnum 65
+
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum.db
+ set env NULL
+ } else {
+ set testfile test0$tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ puts "Test0$tnum: $method ($args) DB->stat(DB_FAST_STAT) test."
+
+ puts "\tTest0$tnum.a: Create database and check it while empty."
+
+ set db [eval {berkdb_open_noerr -create -mode 0644} \
+ $omethod $args $testfile]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ set ret [catch {eval $db stat -faststat} res]
+
+ error_check_good db_close [$db close] 0
+
+ if { ([is_record_based $method] && ![is_queue $method]) \
+ || [is_rbtree $method] } {
+ error_check_good recordcount_ok [is_substr $res \
+ "{{Number of keys} 0}"] 1
+ } else {
+ puts "\tTest0$tnum: Test complete for method $method."
+ return
+ }
+
+ # If we've got this far, we're on an access method for
+	# which record counts make sense. Thus, we no longer
+ # catch EINVALs, and no longer care about __db_errs.
+ set db [eval {berkdb_open -create -mode 0644} $omethod $args $testfile]
+
+ puts "\tTest0$tnum.b: put $nentries keys."
+
+ if { [is_record_based $method] } {
+ set gflags " -recno "
+ set keypfx ""
+ } else {
+ set gflags ""
+ set keypfx "key"
+ }
+
+ set txn ""
+ set data [pad_data $method $alphabet]
+
+ for { set ndx 1 } { $ndx <= $nentries } { incr ndx } {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$keypfx$ndx $data}]
+ error_check_good db_put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ set ret [$db stat -faststat]
+ error_check_good recordcount_after_puts \
+ [is_substr $ret "{{Number of keys} $nentries}"] 1
+
+ puts "\tTest0$tnum.c: delete 90% of keys."
+ set end [expr {$nentries / 10 * 9}]
+ for { set ndx 1 } { $ndx <= $end } { incr ndx } {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ if { [is_rrecno $method] == 1 } {
+ # If we're renumbering, each delete shifts the remaining
+ # records down, so partway through the loop the high record
+ # numbers we'd ask for would no longer exist and the delete
+ # would croak. So delete key 1, repeatedly.
+ set ret [eval {$db del} $txn {[concat $keypfx 1]}]
+ } else {
+ set ret [eval {$db del} $txn {$keypfx$ndx}]
+ }
+ error_check_good db_del $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
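+ # A tiny renumbering example: with records {a b c} stored as 1-3,
+ # deleting record 1 leaves {b c} renumbered as records 1 and 2, so
+ # repeating "del 1" $end times strips the first $end logical records.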
+
+ set ret [$db stat -faststat]
+ if { [is_rrecno $method] == 1 || [is_rbtree $method] == 1 } {
+ # We allow renumbering--thus the stat should return 10%
+ # of nentries.
+ error_check_good recordcount_after_dels [is_substr $ret \
+ "{{Number of keys} [expr {$nentries / 10}]}"] 1
+ } else {
+ # No renumbering--no change in RECORDCOUNT!
+ error_check_good recordcount_after_dels \
+ [is_substr $ret "{{Number of keys} $nentries}"] 1
+ }
+
+ puts "\tTest0$tnum.d: put new keys at the beginning."
+ set end [expr {$nentries / 10 * 8}]
+ for { set ndx 1 } { $ndx <= $end } {incr ndx } {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$keypfx$ndx $data}]
+ error_check_good db_put_beginning $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ set ret [$db stat -faststat]
+ if { [is_rrecno $method] == 1 } {
+ # With renumbering we're back up to 80% of $nentries
+ error_check_good recordcount_after_dels [is_substr $ret \
+ "{{Number of keys} [expr {$nentries / 10 * 8}]}"] 1
+ } elseif { [is_rbtree $method] == 1 } {
+ # Total records in a btree is now 90% of $nentries
+ error_check_good recordcount_after_dels [is_substr $ret \
+ "{{Number of keys} [expr {$nentries / 10 * 9}]}"] 1
+ } else {
+ # No renumbering--still no change in RECORDCOUNT.
+ error_check_good recordcount_after_dels [is_substr $ret \
+ "{{Number of keys} $nentries}"] 1
+ }
+
+ puts "\tTest0$tnum.e: put new keys at the end."
+ set start [expr {1 + $nentries / 10 * 9}]
+ set end [expr {($nentries / 10 * 9) + ($nentries / 10 * 8)}]
+ for { set ndx $start } { $ndx <= $end } { incr ndx } {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$keypfx$ndx $data}]
+ error_check_good db_put_end $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ set ret [$db stat -faststat]
+ if { [is_rbtree $method] != 1 } {
+ # If this is a recno database, the record count should be up
+ # to (1.7 x nentries), the largest number we've seen, with
+ # or without renumbering.
+ error_check_good recordcount_after_puts2 [is_substr $ret \
+ "{{Number of keys} [expr {$start - 1 + $nentries / 10 * 8}]}"] 1
+ } else {
+ # In an rbtree, (.1 x nentries) of those keys were overwrites,
+ # so there are (.7 x nentries) new keys and (.9 x nentries)
+ # old keys for a total of (1.6 x nentries).
+ error_check_good recordcount_after_puts2 [is_substr $ret \
+ "{{Number of keys} [expr {$start - 1 + $nentries / 10 * 7}]}"] 1
+ }
+
+ error_check_good db_close [$db close] 0
+}
diff --git a/storage/bdb/test/test066.tcl b/storage/bdb/test/test066.tcl
new file mode 100644
index 00000000000..13d0894dcae
--- /dev/null
+++ b/storage/bdb/test/test066.tcl
@@ -0,0 +1,99 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test066.tcl,v 11.12 2002/05/24 15:24:56 sue Exp $
+#
+# TEST test066
+# TEST Test of cursor overwrites of DB_CURRENT w/ duplicates.
+# TEST
+# TEST Make sure a cursor put to DB_CURRENT acts as an overwrite in a
+# TEST database with duplicates.
+proc test066 { method args } {
+ set omethod [convert_method $method]
+ set args [convert_args $method $args]
+
+ set tnum 66
+
+ if { [is_record_based $method] || [is_rbtree $method] } {
+ puts "Test0$tnum: Skipping for method $method."
+ return
+ }
+
+ puts "Test0$tnum: Test of cursor put to DB_CURRENT with duplicates."
+
+ source ./include.tcl
+
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test066.db
+ set env NULL
+ } else {
+ set testfile test066.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ set txn ""
+ set key "test"
+ set data "olddata"
+
+ set db [eval {berkdb_open -create -mode 0644 -dup} $omethod $args \
+ $testfile]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$key [chop_data $method $data]}]
+ error_check_good db_put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+
+ set ret [$dbc get -first]
+ error_check_good db_get $ret [list [list $key [pad_data $method $data]]]
+
+ set newdata "newdata"
+ set ret [$dbc put -current [chop_data $method $newdata]]
+ error_check_good dbc_put $ret 0
+
+ # There should be only one (key,data) pair in the database, and this
+ # is it.
+ set ret [$dbc get -first]
+ error_check_good db_get_first $ret \
+ [list [list $key [pad_data $method $newdata]]]
+
+ # and this one should come up empty.
+ set ret [$dbc get -next]
+ error_check_good db_get_next $ret ""
+
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ puts "\tTest0$tnum: Test completed successfully."
+}
diff --git a/storage/bdb/test/test067.tcl b/storage/bdb/test/test067.tcl
new file mode 100644
index 00000000000..5f5a88c4be1
--- /dev/null
+++ b/storage/bdb/test/test067.tcl
@@ -0,0 +1,155 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test067.tcl,v 11.19 2002/06/11 15:19:16 sue Exp $
+#
+# TEST test067
+# TEST Test of DB_CURRENT partial puts onto almost empty duplicate
+# TEST pages, with and without DB_DUP_SORT.
+# TEST
+# TEST Test of DB_CURRENT partial puts on almost-empty duplicate pages.
+# TEST This test was written to address the following issue, #2 in the
+# TEST list of issues relating to bug #0820:
+# TEST
+# TEST 2. DBcursor->put, DB_CURRENT flag, off-page duplicates, hash and btree:
+# TEST In Btree, the DB_CURRENT overwrite of off-page duplicate records
+# TEST first deletes the record and then puts the new one -- this could
+# TEST be a problem if the removal of the record causes a reverse split.
+# TEST Suggested solution is to acquire a cursor to lock down the current
+# TEST record, put a new record after that record, and then delete using
+# TEST the held cursor.
+# TEST
+# TEST It also tests the following, #5 in the same list of issues:
+# TEST 5. DBcursor->put, DB_AFTER/DB_BEFORE/DB_CURRENT flags, DB_DBT_PARTIAL
+# TEST set, duplicate comparison routine specified.
+# TEST The partial change does not change how data items sort, but the
+# TEST record to be put isn't built yet, and the record supplied is the
+# TEST one that's checked for ordering compatibility.
+proc test067 { method {ndups 1000} {tnum 67} args } {
+ source ./include.tcl
+ global alphabet
+ global errorCode
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_record_based $method] == 1 || [is_rbtree $method] == 1 } {
+ puts "\tTest0$tnum: skipping for method $method."
+ return
+ }
+ set txn ""
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum.db
+ set env NULL
+ } else {
+ set testfile test0$tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ if { $ndups == 1000 } {
+ set ndups 100
+ }
+ }
+ set testdir [get_home $env]
+ }
+
+ puts "Test0$tnum:\
+ $method ($args) Partial puts on near-empty duplicate pages."
+
+ foreach dupopt { "-dup" "-dup -dupsort" } {
+ #
+ # Testdir might get reset from the env's home dir back
+ # to the default if this calls something that sources
+ # include.tcl, since testdir is a global. Set it correctly
+ # here each time through the loop.
+ #
+ if { $env != "NULL" } {
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+ set db [eval {berkdb_open -create -mode 0644 \
+ $omethod} $args $dupopt {$testfile}]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ puts "\tTest0$tnum.a ($dupopt): Put $ndups duplicates."
+
+ set key "key_test$tnum"
+
+ for { set ndx 0 } { $ndx < $ndups } { incr ndx } {
+ set data $alphabet$ndx
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ # No need for pad_data since we're skipping recno.
+ set ret [eval {$db put} $txn {$key $data}]
+ error_check_good put($key,$data) $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ # Sync so we can inspect database if the next section bombs.
+ error_check_good db_sync [$db sync] 0
+ puts "\tTest0$tnum.b ($dupopt):\
+ Deleting dups (last first), overwriting each."
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good cursor_create [is_valid_cursor $dbc $db] TRUE
+
+ set count 0
+ while { $count < $ndups - 1 } {
+ # set cursor to last item in db
+ set ret [$dbc get -last]
+ error_check_good \
+ verify_key [lindex [lindex $ret 0] 0] $key
+
+ # for error reporting
+ set currdatum [lindex [lindex $ret 0] 1]
+
+ # partial-overwrite it
+ # (overwrite offsets 1-4 with "bcde"--which they
+ # already are)
+
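+ # (Each datum is $alphabet$ndx, so bytes 1 through 4 are
+ # already "bcde"; rewriting them leaves the datum's content,
+ # and therefore its -dupsort ordering, unchanged -- precisely
+ # the situation described in issue #5 above.)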
+ # Even though we expect success, we catch this
+ # since it might return EINVAL, and we want that
+ # to FAIL.
+ set errorCode NONE
+ set ret [catch {eval $dbc put -current \
+ {-partial [list 1 4]} "bcde"} \
+ res]
+ error_check_good \
+ partial_put_valid($currdatum) $errorCode NONE
+ error_check_good partial_put($currdatum) $res 0
+
+ # delete it
+ error_check_good dbc_del [$dbc del] 0
+
+ #puts $currdatum
+
+ incr count
+ }
+
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+ }
+}
diff --git a/storage/bdb/test/test068.tcl b/storage/bdb/test/test068.tcl
new file mode 100644
index 00000000000..31f4272ba55
--- /dev/null
+++ b/storage/bdb/test/test068.tcl
@@ -0,0 +1,226 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test068.tcl,v 11.17 2002/06/11 15:34:47 sue Exp $
+#
+# TEST test068
+# TEST Test of DB_BEFORE and DB_AFTER with partial puts.
+# TEST Make sure DB_BEFORE and DB_AFTER work properly with partial puts, and
+# TEST check that they return EINVAL if DB_DUPSORT is set or if DB_DUP is not.
+proc test068 { method args } {
+ source ./include.tcl
+ global alphabet
+ global errorCode
+
+ set tnum 68
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ set nkeys 1000
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum.db
+ set env NULL
+ } else {
+ set testfile test0$tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ set nkeys 100
+ }
+ set testdir [get_home $env]
+ }
+
+ puts "Test0$tnum:\
+ $method ($args) Test of DB_BEFORE/DB_AFTER and partial puts."
+ if { [is_record_based $method] == 1 } {
+ puts "\tTest0$tnum: skipping for method $method."
+ return
+ }
+
+ # Create a list of $nkeys words to insert into db.
+ puts "\tTest0$tnum.a: Initialize word list."
+ set txn ""
+ set wordlist {}
+ set count 0
+ set did [open $dict]
+ while { [gets $did str] != -1 && $count < $nkeys } {
+ lappend wordlist $str
+ incr count
+ }
+ close $did
+
+ # Sanity check: did we get $nkeys words?
+ error_check_good enough_keys [llength $wordlist] $nkeys
+
+ # rbtree can't handle dups, so just test the non-dup case
+ # if it's the current method.
+ if { [is_rbtree $method] == 1 } {
+ set dupoptlist { "" }
+ } else {
+ set dupoptlist { "" "-dup" "-dup -dupsort" }
+ }
+
+ foreach dupopt $dupoptlist {
+ #
+ # Testdir might be reset in the loop by some proc sourcing
+ # include.tcl. Reset it to the env's home here, before
+ # cleanup.
+ if { $env != "NULL" } {
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+ set db [eval {berkdb_open_noerr -create -mode 0644 \
+ $omethod} $args $dupopt {$testfile}]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ puts "\tTest0$tnum.b ($dupopt): DB initialization: put loop."
+ foreach word $wordlist {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$word $word}]
+ error_check_good db_put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ puts "\tTest0$tnum.c ($dupopt): get loop."
+ foreach word $wordlist {
+ # Make sure that each word was correctly inserted and
+ # is retrievable with a straight get.
+
+ set dbt [$db get $word]
+ error_check_good get_key [list [list $word $word]] $dbt
+ }
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good cursor_open [is_valid_cursor $dbc $db] TRUE
+
+ puts "\tTest0$tnum.d ($dupopt): DBC->put w/ DB_AFTER."
+
+ # Set cursor to the first key; make sure it succeeds.
+ # With an unsorted wordlist, we can't be sure that the
+ # first item returned will equal the first item in the
+ # wordlist, so we just make sure it got something back.
+ set dbt [eval {$dbc get -first}]
+ error_check_good \
+ dbc_get_first [llength $dbt] 1
+
+ # If -dup is not set, or if -dupsort is set too, we
+ # need to verify that DB_BEFORE and DB_AFTER fail
+ # and then move on to the next $dupopt.
+ if { $dupopt != "-dup" } {
+ set errorCode "NONE"
+ set ret [catch {eval $dbc put -after \
+ {-partial [list 6 0]} "after"} res]
+ error_check_good dbc_put_after_fail $ret 1
+ error_check_good dbc_put_after_einval \
+ [is_substr $errorCode EINVAL] 1
+ puts "\tTest0$tnum ($dupopt): DB_AFTER returns EINVAL."
+ set errorCode "NONE"
+ set ret [catch {eval $dbc put -before \
+ {-partial [list 6 0]} "before"} res]
+ error_check_good dbc_put_before_fail $ret 1
+ error_check_good dbc_put_before_einval \
+ [is_substr $errorCode EINVAL] 1
+ puts "\tTest0$tnum ($dupopt): DB_BEFORE returns EINVAL."
+ puts "\tTest0$tnum ($dupopt): Correct error returns,\
+ skipping further test."
+ # Clean up, then continue with the outer foreach.
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+ continue
+ }
+
+ puts "\tTest0$tnum.e ($dupopt): DBC->put(DB_AFTER) loop."
+ foreach word $wordlist {
+ # set cursor to $word
+ set dbt [$dbc get -set $word]
+ error_check_good \
+ dbc_get_set $dbt [list [list $word $word]]
+ # put after it
+ set ret [$dbc put -after -partial {4 0} after]
+ error_check_good dbc_put_after $ret 0
+ }
+
+ puts "\tTest0$tnum.f ($dupopt): DBC->put(DB_BEFORE) loop."
+ foreach word $wordlist {
+ # set cursor to $word
+ set dbt [$dbc get -set $word]
+ error_check_good \
+ dbc_get_set $dbt [list [list $word $word]]
+ # put before it
+ set ret [$dbc put -before -partial {6 0} before]
+ error_check_good dbc_put_before $ret 0
+ }
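+ # Note on the stored data: a put with -partial {doff 0} against
+ # a brand-new record writes its data at offset doff and
+ # zero-fills the gap, so "after" is stored as "\0\0\0\0after"
+ # and "before" as "\0\0\0\0\0\0before". The verification pass
+ # below relies on exactly these values.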
+
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ eval $db sync
+ puts "\tTest0$tnum.g ($dupopt): Verify correctness."
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+
+ # loop through the whole db beginning to end,
+ # make sure we have, in order, {$word "\0\0\0\0\0\0before"},
+ # {$word $word}, {$word "\0\0\0\0after"} for each word.
+ set count 0
+ while { $count < $nkeys } {
+ # Get the first item of each set of three.
+ # We don't know what the word is, but set $word to
+ # the key and check that the data is
+ # "\0\0\0\0\0\0before".
+ set dbt [$dbc get -next]
+ set word [lindex [lindex $dbt 0] 0]
+
+ error_check_good dbc_get_one $dbt \
+ [list [list $word "\0\0\0\0\0\0before"]]
+
+ set dbt [$dbc get -next]
+ error_check_good \
+ dbc_get_two $dbt [list [list $word $word]]
+
+ set dbt [$dbc get -next]
+ error_check_good dbc_get_three $dbt \
+ [list [list $word "\0\0\0\0after"]]
+
+ incr count
+ }
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+ }
+}
diff --git a/storage/bdb/test/test069.tcl b/storage/bdb/test/test069.tcl
new file mode 100644
index 00000000000..d986c861358
--- /dev/null
+++ b/storage/bdb/test/test069.tcl
@@ -0,0 +1,14 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test069.tcl,v 11.7 2002/01/11 15:53:52 bostic Exp $
+#
+# TEST test069
+# TEST Test of DB_CURRENT partial puts without duplicates-- test067 w/
+# TEST small ndups to ensure that partial puts to DB_CURRENT work
+# TEST correctly in the absence of duplicate pages.
+proc test069 { method {ndups 50} {tnum 69} args } {
+ eval test067 $method $ndups $tnum $args
+}
diff --git a/storage/bdb/test/test070.tcl b/storage/bdb/test/test070.tcl
new file mode 100644
index 00000000000..986fd079589
--- /dev/null
+++ b/storage/bdb/test/test070.tcl
@@ -0,0 +1,142 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test070.tcl,v 11.27 2002/09/05 17:23:07 sandstro Exp $
+#
+# TEST test070
+# TEST Test of DB_CONSUME (Four consumers, 1000 items.)
+# TEST
+# TEST Fork off six processes, four consumers and two producers.
+# TEST The producers will each put 20000 records into a queue;
+# TEST the consumers will each get 10000.
+# TEST Then, verify that no record was lost or retrieved twice.
+proc test070 { method {nconsumers 4} {nproducers 2} \
+ {nitems 1000} {mode CONSUME } {start 0} {txn -txn} {tnum 70} args } {
+ source ./include.tcl
+ global alphabet
+ global encrypt
+
+ #
+ # If we are using an env, then skip this test. It needs its own.
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ incr eindex
+ set env [lindex $args $eindex]
+ puts "Test0$tnum skipping for env $env"
+ return
+ }
+ set omethod [convert_method $method]
+ set args [convert_args $method $args]
+ if { $encrypt != 0 } {
+ puts "Test0$tnum skipping for security"
+ return
+ }
+
+ puts "Test0$tnum: $method ($args) Test of DB_$mode flag to DB->get."
+ puts "\tUsing $txn environment."
+
+ error_check_good enough_consumers [expr $nconsumers > 0] 1
+ error_check_good enough_producers [expr $nproducers > 0] 1
+
+ if { [is_queue $method] != 1 } {
+ puts "\tSkipping Test0$tnum for method $method."
+ return
+ }
+
+ env_cleanup $testdir
+ set testfile test0$tnum.db
+
+ # Create environment
+ set dbenv [eval {berkdb_env -create $txn -home } $testdir]
+ error_check_good dbenv_create [is_valid_env $dbenv] TRUE
+
+ # Create database
+ set db [eval {berkdb_open -create -mode 0644 -queue}\
+ -env $dbenv $args $testfile]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ if { $start != 0 } {
+ error_check_good set_seed [$db put $start "consumer data"] 0
+ puts "\tStarting at $start."
+ } else {
+ incr start
+ }
+
+ set pidlist {}
+
+ # Divvy up the total number of records amongst the consumers and
+ # producers.
+ error_check_good cons_div_evenly [expr $nitems % $nconsumers] 0
+ error_check_good prod_div_evenly [expr $nitems % $nproducers] 0
+ set nperconsumer [expr $nitems / $nconsumers]
+ set nperproducer [expr $nitems / $nproducers]
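+ # With the defaults (nitems 1000, four consumers, two producers),
+ # that is 250 records per consumer and 500 per producer.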
+
+ set consumerlog $testdir/CONSUMERLOG.
+
+ # Fork consumer processes (we want them to be hungry)
+ for { set ndx 0 } { $ndx < $nconsumers } { incr ndx } {
+ set output $consumerlog$ndx
+ set p [exec $tclsh_path $test_path/wrap.tcl \
+ conscript.tcl $testdir/conscript.log.consumer$ndx \
+ $testdir $testfile $mode $nperconsumer $output $tnum \
+ $args &]
+ lappend pidlist $p
+ }
+ for { set ndx 0 } { $ndx < $nproducers } { incr ndx } {
+ set p [exec $tclsh_path $test_path/wrap.tcl \
+ conscript.tcl $testdir/conscript.log.producer$ndx \
+ $testdir $testfile PRODUCE $nperproducer "" $tnum \
+ $args &]
+ lappend pidlist $p
+ }
+
+ # Wait for all children.
+ watch_procs $pidlist 10
+
+ # Verify: slurp all record numbers into list, sort, and make
+ # sure each appears exactly once.
+ puts "\tTest0$tnum: Verifying results."
+ set reclist {}
+ for { set ndx 0 } { $ndx < $nconsumers } { incr ndx } {
+ set input $consumerlog$ndx
+ set iid [open $input r]
+ while { [gets $iid str] != -1 } {
+ # Convert high ints to negative ints, to
+ # simulate Tcl's behavior on a 32-bit machine
+ # even if we're on a 64-bit one.
+ if { $str > 0x7fffffff } {
+ set str [expr $str - 1 - 0xffffffff]
+ }
+ lappend reclist $str
+ }
+ close $iid
+ }
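+ # Worked example of the wraparound math: 0x80000000 becomes
+ # [expr 0x80000000 - 1 - 0xffffffff] = -2147483648, which is what
+ # a 32-bit Tcl would have written to the log in the first place.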
+ set sortreclist [lsort -integer $reclist]
+
+ set nitems [expr $start + $nitems]
+ for { set ndx $start } { $ndx < $nitems } { incr ndx } {
+ # Convert high ints to negative ints, to simulate
+ # 32-bit behavior on 64-bit platforms.
+ if { $ndx > 0x7fffffff } {
+ set cmp [expr $ndx - 1 - 0xffffffff]
+ } else {
+ set cmp [expr $ndx + 0]
+ }
+ # Skip 0 if we are wrapping around
+ if { $cmp == 0 } {
+ incr ndx
+ incr nitems
+ incr cmp
+ }
+ # Be sure to convert ndx to a number before comparing.
+ error_check_good pop_num [lindex $sortreclist 0] $cmp
+ set sortreclist [lreplace $sortreclist 0 0]
+ }
+ error_check_good list_ends_empty $sortreclist {}
+ error_check_good db_close [$db close] 0
+ error_check_good dbenv_close [$dbenv close] 0
+
+ puts "\tTest0$tnum completed successfully."
+}
diff --git a/storage/bdb/test/test071.tcl b/storage/bdb/test/test071.tcl
new file mode 100644
index 00000000000..3f2604022f1
--- /dev/null
+++ b/storage/bdb/test/test071.tcl
@@ -0,0 +1,16 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test071.tcl,v 11.9 2002/01/11 15:53:53 bostic Exp $
+#
+# TEST test071
+# TEST Test of DB_CONSUME (One consumer, 10000 items.)
+# TEST This is DB Test 70, with one consumer, one producer, and 10000 items.
+proc test071 { method {nconsumers 1} {nproducers 1}\
+ {nitems 10000} {mode CONSUME} {start 0 } {txn -txn} {tnum 71} args } {
+
+ eval test070 $method \
+ $nconsumers $nproducers $nitems $mode $start $txn $tnum $args
+}
diff --git a/storage/bdb/test/test072.tcl b/storage/bdb/test/test072.tcl
new file mode 100644
index 00000000000..3c08f93975d
--- /dev/null
+++ b/storage/bdb/test/test072.tcl
@@ -0,0 +1,252 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test072.tcl,v 11.27 2002/07/01 15:40:48 krinsky Exp $
+#
+# TEST test072
+# TEST Test of cursor stability when duplicates are moved off-page.
+proc test072 { method {pagesize 512} {ndups 20} {tnum 72} args } {
+ source ./include.tcl
+ global alphabet
+
+ set omethod [convert_method $method]
+ set args [convert_args $method $args]
+
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum.db
+ set env NULL
+ } else {
+ set testfile test0$tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ # Keys must sort $prekey < $key < $postkey.
+ set prekey "a key"
+ set key "the key"
+ set postkey "z key"
+
+ # Make these distinguishable from each other and from the
+ # alphabets used for the $key's data.
+ set predatum "1234567890"
+ set postdatum "0987654321"
+
+ puts -nonewline "Test0$tnum $omethod ($args): "
+ if { [is_record_based $method] || [is_rbtree $method] } {
+ puts "Skipping for method $method."
+ return
+ } else {
+ puts "\n Test of cursor stability when\
+ duplicates are moved off-page."
+ }
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Test0$tnum: skipping for specific pagesizes"
+ return
+ }
+
+ append args " -pagesize $pagesize "
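+ # The small default pagesize (512 bytes) matters here: it forces
+ # the duplicate set off-page after only a handful of puts, which
+ # is the event whose cursor stability this test exercises.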
+ set txn ""
+
+ set dlist [list "-dup" "-dup -dupsort"]
+ set testid 0
+ foreach dupopt $dlist {
+ incr testid
+ set duptestfile $testfile$testid
+ set db [eval {berkdb_open -create -mode 0644} \
+ $omethod $args $dupopt {$duptestfile}]
+ error_check_good "db open" [is_valid_db $db] TRUE
+
+ puts \
+"\tTest0$tnum.a: ($dupopt) Set up surrounding keys and cursors."
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$prekey $predatum}]
+ error_check_good pre_put $ret 0
+ set ret [eval {$db put} $txn {$postkey $postdatum}]
+ error_check_good post_put $ret 0
+
+ set precursor [eval {$db cursor} $txn]
+ error_check_good precursor [is_valid_cursor $precursor \
+ $db] TRUE
+ set postcursor [eval {$db cursor} $txn]
+ error_check_good postcursor [is_valid_cursor $postcursor \
+ $db] TRUE
+ error_check_good preset [$precursor get -set $prekey] \
+ [list [list $prekey $predatum]]
+ error_check_good postset [$postcursor get -set $postkey] \
+ [list [list $postkey $postdatum]]
+
+ puts "\tTest0$tnum.b: Put/create cursor/verify all cursor loop."
+
+ for { set i 0 } { $i < $ndups } { incr i } {
+ set datum [format "%4d$alphabet" [expr $i + 1000]]
+ set data($i) $datum
+
+ # Uncomment these lines to see intermediate steps.
+ # error_check_good db_sync($i) [$db sync] 0
+ # error_check_good db_dump($i) \
+ # [catch {exec $util_path/db_dump \
+ # -da $duptestfile > $testdir/out.$i}] 0
+
+ set ret [eval {$db put} $txn {$key $datum}]
+ error_check_good "db put ($i)" $ret 0
+
+ set dbc($i) [eval {$db cursor} $txn]
+ error_check_good "db cursor ($i)"\
+ [is_valid_cursor $dbc($i) $db] TRUE
+
+ error_check_good "dbc get -get_both ($i)"\
+ [$dbc($i) get -get_both $key $datum]\
+ [list [list $key $datum]]
+
+ for { set j 0 } { $j < $i } { incr j } {
+ set dbt [$dbc($j) get -current]
+ set k [lindex [lindex $dbt 0] 0]
+ set d [lindex [lindex $dbt 0] 1]
+
+ #puts "cursor $j after $i: $d"
+
+ eval {$db sync}
+
+ error_check_good\
+ "cursor $j key correctness after $i puts" \
+ $k $key
+ error_check_good\
+ "cursor $j data correctness after $i puts" \
+ $d $data($j)
+ }
+
+ # Check correctness of pre- and post- cursors. Do an
+ # error_check_good on the lengths first so that we don't
+ # spew garbage as the "got" field and screw up our
+ # terminal. (It's happened here.)
+ set pre_dbt [$precursor get -current]
+ set post_dbt [$postcursor get -current]
+ error_check_good \
+ "key earlier cursor correctness after $i puts" \
+ [string length [lindex [lindex $pre_dbt 0] 0]] \
+ [string length $prekey]
+ error_check_good \
+ "data earlier cursor correctness after $i puts" \
+ [string length [lindex [lindex $pre_dbt 0] 1]] \
+ [string length $predatum]
+ error_check_good \
+ "key later cursor correctness after $i puts" \
+ [string length [lindex [lindex $post_dbt 0] 0]] \
+ [string length $postkey]
+ error_check_good \
+ "data later cursor correctness after $i puts" \
+ [string length [lindex [lindex $post_dbt 0] 1]]\
+ [string length $postdatum]
+
+ error_check_good \
+ "earlier cursor correctness after $i puts" \
+ $pre_dbt [list [list $prekey $predatum]]
+ error_check_good \
+ "later cursor correctness after $i puts" \
+ $post_dbt [list [list $postkey $postdatum]]
+ }
+
+ puts "\tTest0$tnum.c: Reverse Put/create cursor/verify all cursor loop."
+ set end [expr $ndups * 2 - 1]
+ for { set i $end } { $i >= $ndups } { set i [expr $i - 1] } {
+ set datum [format "%4d$alphabet" [expr $i + 1000]]
+ set data($i) $datum
+
+ # Uncomment these lines to see intermediate steps.
+ # error_check_good db_sync($i) [$db sync] 0
+ # error_check_good db_dump($i) \
+ # [catch {exec $util_path/db_dump \
+ # -da $duptestfile > $testdir/out.$i}] 0
+
+ set ret [eval {$db put} $txn {$key $datum}]
+ error_check_good "db put ($i)" $ret 0
+
+ error_check_bad dbc($i)_stomped [info exists dbc($i)] 1
+ set dbc($i) [eval {$db cursor} $txn]
+ error_check_good "db cursor ($i)"\
+ [is_valid_cursor $dbc($i) $db] TRUE
+
+ error_check_good "dbc get -get_both ($i)"\
+ [$dbc($i) get -get_both $key $datum]\
+ [list [list $key $datum]]
+
+ for { set j $i } { $j < $end } { incr j } {
+ set dbt [$dbc($j) get -current]
+ set k [lindex [lindex $dbt 0] 0]
+ set d [lindex [lindex $dbt 0] 1]
+
+ #puts "cursor $j after $i: $d"
+
+ eval {$db sync}
+
+ error_check_good\
+ "cursor $j key correctness after $i puts" \
+ $k $key
+ error_check_good\
+ "cursor $j data correctness after $i puts" \
+ $d $data($j)
+ }
+
+ # Check correctness of pre- and post- cursors. Do an
+ # error_check_good on the lengths first so that we don't
+ # spew garbage as the "got" field and screw up our
+ # terminal. (It's happened here.)
+ set pre_dbt [$precursor get -current]
+ set post_dbt [$postcursor get -current]
+ error_check_good \
+ "key earlier cursor correctness after $i puts" \
+ [string length [lindex [lindex $pre_dbt 0] 0]] \
+ [string length $prekey]
+ error_check_good \
+ "data earlier cursor correctness after $i puts" \
+ [string length [lindex [lindex $pre_dbt 0] 1]] \
+ [string length $predatum]
+ error_check_good \
+ "key later cursor correctness after $i puts" \
+ [string length [lindex [lindex $post_dbt 0] 0]] \
+ [string length $postkey]
+ error_check_good \
+ "data later cursor correctness after $i puts" \
+ [string length [lindex [lindex $post_dbt 0] 1]]\
+ [string length $postdatum]
+
+ error_check_good \
+ "earlier cursor correctness after $i puts" \
+ $pre_dbt [list [list $prekey $predatum]]
+ error_check_good \
+ "later cursor correctness after $i puts" \
+ $post_dbt [list [list $postkey $postdatum]]
+ }
+
+ # Close cursors.
+ puts "\tTest0$tnum.d: Closing cursors."
+ for { set i 0 } { $i <= $end } { incr i } {
+ error_check_good "dbc close ($i)" [$dbc($i) close] 0
+ }
+ unset dbc
+ error_check_good precursor_close [$precursor close] 0
+ error_check_good postcursor_close [$postcursor close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good "db close" [$db close] 0
+ }
+}
diff --git a/storage/bdb/test/test073.tcl b/storage/bdb/test/test073.tcl
new file mode 100644
index 00000000000..02a0f3b0d19
--- /dev/null
+++ b/storage/bdb/test/test073.tcl
@@ -0,0 +1,290 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test073.tcl,v 11.23 2002/05/22 15:42:59 sue Exp $
+#
+# TEST test073
+# TEST Test of cursor stability on duplicate pages.
+# TEST
+# TEST Does the following:
+# TEST a. Initialize things by DB->putting ndups dups and
+# TEST setting a reference cursor to point to each.
+# TEST b. c_put ndups dups (and correspondingly expanding
+# TEST the set of reference cursors) after the last one, making sure
+# TEST after each step that all the reference cursors still point to
+# TEST the right item.
+# TEST c. Ditto, but before the first one.
+# TEST d. Ditto, but after each one in sequence first to last.
+# TEST e. Ditto, but before each one in sequence from last to first
+# TEST (checking at each step that the puts occur relative to the
+# TEST new datum).
+# TEST f. Ditto for the two sequence tests, only doing a
+# TEST DBC->c_put(DB_CURRENT) of a larger datum instead of adding a
+# TEST new one.
+proc test073 { method {pagesize 512} {ndups 50} {tnum 73} args } {
+ source ./include.tcl
+ global alphabet
+
+ set omethod [convert_method $method]
+ set args [convert_args $method $args]
+
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum.db
+ set env NULL
+ } else {
+ set testfile test0$tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ set key "the key"
+ set txn ""
+
+ puts -nonewline "Test0$tnum $omethod ($args): "
+ if { [is_record_based $method] || [is_rbtree $method] } {
+ puts "Skipping for method $method."
+ return
+ } else {
+ puts "cursor stability on duplicate pages."
+ }
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Test073: skipping for specific pagesizes"
+ return
+ }
+
+ append args " -pagesize $pagesize -dup"
+
+ set db [eval {berkdb_open \
+ -create -mode 0644} $omethod $args $testfile]
+ error_check_good "db open" [is_valid_db $db] TRUE
+
+ # Number of outstanding keys.
+ set keys 0
+
+ puts "\tTest0$tnum.a.1: Initializing put loop; $ndups dups, short data."
+
+ for { set i 0 } { $i < $ndups } { incr i } {
+ set datum [makedatum_t73 $i 0]
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$key $datum}]
+ error_check_good "db put ($i)" $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ set is_long($i) 0
+ incr keys
+ }
+
+ puts "\tTest0$tnum.a.2: Initializing cursor get loop; $keys dups."
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ for { set i 0 } { $i < $keys } { incr i } {
+ set datum [makedatum_t73 $i 0]
+
+ set dbc($i) [eval {$db cursor} $txn]
+ error_check_good "db cursor ($i)"\
+ [is_valid_cursor $dbc($i) $db] TRUE
+ error_check_good "dbc get -get_both ($i)"\
+ [$dbc($i) get -get_both $key $datum]\
+ [list [list $key $datum]]
+ }
+
+ puts "\tTest0$tnum.b: Cursor put (DB_KEYLAST); $ndups new dups,\
+ short data."
+
+ for { set i 0 } { $i < $ndups } { incr i } {
+ # !!! keys contains the number of the next dup
+ # to be added (since they start from zero)
+
+ set datum [makedatum_t73 $keys 0]
+ set curs [eval {$db cursor} $txn]
+ error_check_good "db cursor create" [is_valid_cursor $curs $db]\
+ TRUE
+ error_check_good "c_put(DB_KEYLAST, $keys)"\
+ [$curs put -keylast $key $datum] 0
+
+ set dbc($keys) $curs
+ set is_long($keys) 0
+ incr keys
+
+ verify_t73 is_long dbc $keys $key
+ }
+
+ puts "\tTest0$tnum.c: Cursor put (DB_KEYFIRST); $ndups new dups,\
+ short data."
+
+ for { set i 0 } { $i < $ndups } { incr i } {
+ # !!! keys contains the number of the next dup
+ # to be added (since they start from zero)
+
+ set datum [makedatum_t73 $keys 0]
+ set curs [eval {$db cursor} $txn]
+ error_check_good "db cursor create" [is_valid_cursor $curs $db]\
+ TRUE
+ error_check_good "c_put(DB_KEYFIRST, $keys)"\
+ [$curs put -keyfirst $key $datum] 0
+
+ set dbc($keys) $curs
+ set is_long($keys) 0
+ incr keys
+
+ verify_t73 is_long dbc $keys $key
+ }
+
+ puts "\tTest0$tnum.d: Cursor put (DB_AFTER) first to last;\
+ $keys new dups, short data"
+ # We want to add a datum after each key from 0 to the current
+ # value of $keys, which we thus need to save.
+ set keysnow $keys
+ for { set i 0 } { $i < $keysnow } { incr i } {
+ set datum [makedatum_t73 $keys 0]
+ set curs [eval {$db cursor} $txn]
+ error_check_good "db cursor create" [is_valid_cursor $curs $db]\
+ TRUE
+
+ # Which datum to insert this guy after.
+ set curdatum [makedatum_t73 $i 0]
+ error_check_good "c_get(DB_GET_BOTH, $i)"\
+ [$curs get -get_both $key $curdatum]\
+ [list [list $key $curdatum]]
+ error_check_good "c_put(DB_AFTER, $i)"\
+ [$curs put -after $datum] 0
+
+ set dbc($keys) $curs
+ set is_long($keys) 0
+ incr keys
+
+ verify_t73 is_long dbc $keys $key
+ }
+
+ puts "\tTest0$tnum.e: Cursor put (DB_BEFORE) last to first;\
+ $keys new dups, short data"
+
+ for { set i [expr $keys - 1] } { $i >= 0 } { incr i -1 } {
+ set datum [makedatum_t73 $keys 0]
+ set curs [eval {$db cursor} $txn]
+ error_check_good "db cursor create" [is_valid_cursor $curs $db]\
+ TRUE
+
+ # Which datum to insert this guy before.
+ set curdatum [makedatum_t73 $i 0]
+ error_check_good "c_get(DB_GET_BOTH, $i)"\
+ [$curs get -get_both $key $curdatum]\
+ [list [list $key $curdatum]]
+ error_check_good "c_put(DB_BEFORE, $i)"\
+ [$curs put -before $datum] 0
+
+ set dbc($keys) $curs
+ set is_long($keys) 0
+ incr keys
+
+ if { $i % 10 == 1 } {
+ verify_t73 is_long dbc $keys $key
+ }
+ }
+ verify_t73 is_long dbc $keys $key
+
+ puts "\tTest0$tnum.f: Cursor put (DB_CURRENT), first to last,\
+ growing $keys data."
+ set keysnow $keys
+ for { set i 0 } { $i < $keysnow } { incr i } {
+ set olddatum [makedatum_t73 $i 0]
+ set newdatum [makedatum_t73 $i 1]
+ set curs [eval {$db cursor} $txn]
+ error_check_good "db cursor create" [is_valid_cursor $curs $db]\
+ TRUE
+
+ error_check_good "c_get(DB_GET_BOTH, $i)"\
+ [$curs get -get_both $key $olddatum]\
+ [list [list $key $olddatum]]
+ error_check_good "c_put(DB_CURRENT, $i)"\
+ [$curs put -current $newdatum] 0
+
+ error_check_good "cursor close" [$curs close] 0
+
+ set is_long($i) 1
+
+ if { $i % 10 == 1 } {
+ verify_t73 is_long dbc $keys $key
+ }
+ }
+ verify_t73 is_long dbc $keys $key
+
+ # Close cursors.
+ puts "\tTest0$tnum.g: Closing cursors."
+ for { set i 0 } { $i < $keys } { incr i } {
+ error_check_good "dbc close ($i)" [$dbc($i) close] 0
+ }
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good "db close" [$db close] 0
+}
+
+# !!!: This procedure is also used by test087.
+proc makedatum_t73 { num is_long } {
+ global alphabet
+ if { $is_long == 1 } {
+ set a $alphabet$alphabet$alphabet
+ } else {
+ set a abcdefghijklm
+ }
+
+ # Zero-pad the number to four digits by hand (equivalent
+ # to [format %04d $num]).
+ if { $num / 1000 > 0 } {
+ set i $num
+ } elseif { $num / 100 > 0 } {
+ set i 0$num
+ } elseif { $num / 10 > 0 } {
+ set i 00$num
+ } else {
+ set i 000$num
+ }
+
+ return $i$a
+}
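+
+# For example, [makedatum_t73 7 0] returns "0007abcdefghijklm", and
+# [makedatum_t73 42 1] returns "0042" followed by three copies of the
+# alphabet; the zero padding keeps -dupsort (lexicographic) order
+# aligned with numeric order.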
+
+# !!!: This procedure is also used by test087.
+proc verify_t73 { is_long_array curs_array numkeys key } {
+ upvar $is_long_array is_long
+ upvar $curs_array dbc
+ upvar db db
+
+ # Useful for debugging, perhaps.
+ eval $db sync
+
+ for { set j 0 } { $j < $numkeys } { incr j } {
+ set dbt [$dbc($j) get -current]
+ set k [lindex [lindex $dbt 0] 0]
+ set d [lindex [lindex $dbt 0] 1]
+
+ error_check_good\
+ "cursor $j key correctness (with $numkeys total items)"\
+ $k $key
+ error_check_good\
+ "cursor $j data correctness (with $numkeys total items)"\
+ $d [makedatum_t73 $j $is_long($j)]
+ }
+}
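+
+# Note that verify_t73 takes its arrays by name and picks up the
+# database handle implicitly via "upvar db db", so a caller must hold
+# the handle in a variable literally named "db", e.g.:
+#	verify_t73 is_long dbc $keys $key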
diff --git a/storage/bdb/test/test074.tcl b/storage/bdb/test/test074.tcl
new file mode 100644
index 00000000000..7f620db2d97
--- /dev/null
+++ b/storage/bdb/test/test074.tcl
@@ -0,0 +1,271 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test074.tcl,v 11.17 2002/05/24 15:24:56 sue Exp $
+#
+# TEST test074
+# TEST Test of DB_NEXT_NODUP.
+proc test074 { method {dir -nextnodup} {nitems 100} {tnum 74} args } {
+ source ./include.tcl
+ global alphabet
+ global rand_init
+
+ set omethod [convert_method $method]
+ set args [convert_args $method $args]
+
+ berkdb srand $rand_init
+
+ # Data prefix--big enough that we get a mix of on-page, off-page,
+ # and multi-off-page dups with the default nitems
+ if { [is_fixed_length $method] == 1 } {
+ set globaldata "somedata"
+ } else {
+ set globaldata [repeat $alphabet 4]
+ }
+
+ puts "Test0$tnum $omethod ($args): Test of $dir"
+
+ # First, test non-dup (and not-very-interesting) case with
+ # all db types.
+
+ puts "\tTest0$tnum.a: No duplicates."
+
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum-nodup.db
+ set env NULL
+ } else {
+ set testfile test0$tnum-nodup.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+ set db [eval {berkdb_open -create -mode 0644} $omethod\
+ $args {$testfile}]
+ error_check_good db_open [is_valid_db $db] TRUE
+ set txn ""
+
+ # Insert nitems items.
+ puts "\t\tTest0$tnum.a.1: Put loop."
+ for {set i 1} {$i <= $nitems} {incr i} {
+ #
+ # If record based, set key to $i * 2 to leave
+ # holes/unused entries for further testing.
+ #
+ if {[is_record_based $method] == 1} {
+ set key [expr $i * 2]
+ } else {
+ set key "key$i"
+ }
+ set data "$globaldata$i"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$key \
+ [chop_data $method $data]}]
+ error_check_good put($i) $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ puts "\t\tTest0$tnum.a.2: Get($dir)"
+
+ # foundarray($i) is set when key number i is found in the database
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+
+ # Initialize foundarray($i) to zero for all $i
+ for {set i 1} {$i <= $nitems} {incr i} {
+ set foundarray($i) 0
+ }
+
+ # Walk database using $dir and record each key gotten.
+ for {set i 1} {$i <= $nitems} {incr i} {
+ set dbt [$dbc get $dir]
+ set key [lindex [lindex $dbt 0] 0]
+ if {[is_record_based $method] == 1} {
+ set num [expr $key / 2]
+ set desired_key $key
+ error_check_good $method:num $key [expr $num * 2]
+ } else {
+ set num [string range $key 3 end]
+ set desired_key key$num
+ }
+
+ error_check_good dbt_correct($i) $dbt\
+ [list [list $desired_key\
+ [pad_data $method $globaldata$num]]]
+
+ set foundarray($num) 1
+ }
+
+ puts "\t\tTest0$tnum.a.3: Final key."
+ error_check_good last_db_get [$dbc get $dir] [list]
+
+ puts "\t\tTest0$tnum.a.4: Verify loop."
+ for { set i 1 } { $i <= $nitems } { incr i } {
+ error_check_good found_key($i) $foundarray($i) 1
+ }
+
+ error_check_good dbc_close(nodup) [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ # If we are a method that doesn't allow dups, verify that
+ # we get an empty list if we try to use DB_NEXT_DUP
+ if { [is_record_based $method] == 1 || [is_rbtree $method] == 1 } {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ puts "\t\tTest0$tnum.a.5: Check DB_NEXT_DUP for $method."
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+
+ set dbt [$dbc get $dir]
+ error_check_good $method:nextdup [$dbc get -nextdup] [list]
+ error_check_good dbc_close(nextdup) [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+ error_check_good db_close(nodup) [$db close] 0
+
+ # Quit here if we're a method that won't allow dups.
+ if { [is_record_based $method] == 1 || [is_rbtree $method] == 1 } {
+ puts "\tTest0$tnum: Skipping remainder for method $method."
+ return
+ }
+
+ foreach opt { "-dup" "-dupsort" } {
+
+ #
+ # If we are using an env, then testfile should just be the
+ # db name. Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum$opt.db
+ } else {
+ set testfile test0$tnum$opt.db
+ }
+
+ if { [string compare $opt "-dupsort"] == 0 } {
+ set opt "-dup -dupsort"
+ }
+
+ puts "\tTest0$tnum.b: Duplicates ($opt)."
+
+ puts "\t\tTest0$tnum.b.1 ($opt): Put loop."
+ set db [eval {berkdb_open -create -mode 0644}\
+ $opt $omethod $args {$testfile}]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ # Insert nitems different keys such that key i has i dups.
+ for {set i 1} {$i <= $nitems} {incr i} {
+ set key key$i
+
+ for {set j 1} {$j <= $i} {incr j} {
+ if { $j < 10 } {
+ set data "${globaldata}00$j"
+ } elseif { $j < 100 } {
+ set data "${globaldata}0$j"
+ } else {
+ set data "$globaldata$j"
+ }
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn \
+ [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$key $data}]
+ error_check_good put($i,$j) $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+ }
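+ # The 00$j/0$j padding above keeps lexicographic (-dupsort)
+ # order in line with numeric order: "...001" sorts before
+ # "...010", which sorts before "...100".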
+
+ # Initialize foundarray($i) to 0 for all i.
+ unset foundarray
+ for { set i 1 } { $i <= $nitems } { incr i } {
+ set foundarray($i) 0
+ }
+
+ # Get loop--after each get, move forward a random increment
+ # within the duplicate set.
+ puts "\t\tTest0$tnum.b.2 ($opt): Get loop."
+ set one "001"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good dbc($opt) [is_valid_cursor $dbc $db] TRUE
+ for { set i 1 } { $i <= $nitems } { incr i } {
+ set dbt [$dbc get $dir]
+ set key [lindex [lindex $dbt 0] 0]
+ set num [string range $key 3 end]
+
+ set desired_key key$num
+ if { [string compare $dir "-prevnodup"] == 0 } {
+ if { $num < 10 } {
+ set one "00$num"
+ } elseif { $num < 100 } {
+ set one "0$num"
+ } else {
+ set one $num
+ }
+ }
+
+ error_check_good dbt_correct($i) $dbt\
+ [list [list $desired_key\
+ "$globaldata$one"]]
+
+ set foundarray($num) 1
+
+ # Go forward by some number w/i dup set.
+ set inc [berkdb random_int 0 [expr $num - 1]]
+ for { set j 0 } { $j < $inc } { incr j } {
+ eval {$dbc get -nextdup}
+ }
+ }
+
+ puts "\t\tTest0$tnum.b.3 ($opt): Final key."
+ error_check_good last_db_get($opt) [$dbc get $dir] [list]
+
+ # Verify
+ puts "\t\tTest0$tnum.b.4 ($opt): Verify loop."
+ for { set i 1 } { $i <= $nitems } { incr i } {
+ error_check_good found_key($i) $foundarray($i) 1
+ }
+
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+ }
+}
diff --git a/storage/bdb/test/test075.tcl b/storage/bdb/test/test075.tcl
new file mode 100644
index 00000000000..540d8f0ed73
--- /dev/null
+++ b/storage/bdb/test/test075.tcl
@@ -0,0 +1,205 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test075.tcl,v 11.21 2002/08/08 15:38:11 bostic Exp $
+#
+# TEST test075
+# TEST Test of DB->rename().
+# TEST (formerly test of DB_TRUNCATE cached page invalidation [#1487])
+proc test075 { method { tnum 75 } args } {
+ global encrypt
+ global errorCode
+ global errorInfo
+
+ source ./include.tcl
+ set omethod [convert_method $method]
+ set args [convert_args $method $args]
+
+ puts "Test0$tnum: $method ($args): Test of DB->rename()"
+ # If we are using an env, then testfile should just be the
+ # db name. Otherwise it is the test directory and the name.
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ # If we are using an env, then skip this test.
+ # It needs its own.
+ incr eindex
+ set env [lindex $args $eindex]
+ puts "Skipping test075 for env $env"
+ return
+ }
+ if { $encrypt != 0 } {
+ puts "Skipping test075 for security"
+ return
+ }
+
+ # Define absolute pathnames
+ set curdir [pwd]
+ cd $testdir
+ set fulldir [pwd]
+ cd $curdir
+ set reldir $testdir
+
+ # Set up absolute and relative pathnames for test
+ set paths [list $fulldir $reldir]
+ foreach path $paths {
+ puts "\tTest0$tnum: starting test of $path path"
+ set oldfile $path/test0$tnum-old.db
+ set newfile $path/test0$tnum.db
+ set env NULL
+ set envargs ""
+
+ # Loop through test using the following rename options
+ # 1. no environment, not in transaction
+ # 2. with environment, not in transaction
+ # 3. rename with auto-commit
+ # 4. rename in committed transaction
+ # 5. rename in aborted transaction
+
+ foreach op "noenv env auto commit abort" {
+
+ puts "\tTest0$tnum.a: Create/rename file with $op"
+
+ # Make sure we're starting with a clean slate.
+
+ if { $op == "noenv" } {
+ cleanup $path $env
+ if { $env == "NULL" } {
+ error_check_bad "$oldfile exists" \
+ [file exists $oldfile] 1
+ error_check_bad "$newfile exists" \
+ [file exists $newfile] 1
+ }
+ }
+
+ if { $op == "env" } {
+ env_cleanup $path
+ set env [berkdb_env -create -home $path]
+ set envargs "-env $env"
+ error_check_good env_open [is_valid_env $env] TRUE
+ }
+
+ if { $op == "auto" || $op == "commit" || $op == "abort" } {
+ env_cleanup $path
+ set env [berkdb_env -create -home $path -txn]
+ set envargs "-env $env"
+ error_check_good env_open [is_valid_env $env] TRUE
+ }
+
+ puts "\t\tTest0$tnum.a.1: create"
+ set db [eval {berkdb_open -create -mode 0644} \
+ $omethod $envargs $args $oldfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ if { $env == "NULL" } {
+ error_check_bad \
+ "$oldfile exists" [file exists $oldfile] 0
+ error_check_bad \
+ "$newfile exists" [file exists $newfile] 1
+ }
+
+ # The nature of the key and data is unimportant; use a
+ # numeric key so record-based methods don't need special
+ # treatment.
+ set key 1
+ set data [pad_data $method data]
+
+ error_check_good dbput [$db put $key $data] 0
+ error_check_good dbclose [$db close] 0
+
+ puts "\t\tTest0$tnum.a.2: rename"
+ if { $env == "NULL" } {
+ error_check_bad \
+ "$oldfile exists" [file exists $oldfile] 0
+ error_check_bad \
+ "$newfile exists" [file exists $newfile] 1
+ }
+
+ # Regular renames use berkdb dbrename but transaction
+ # protected renames must use $env dbrename.
+ if { $op == "noenv" || $op == "env" } {
+ error_check_good rename_file [eval {berkdb dbrename} \
+ $envargs $oldfile $newfile] 0
+ } elseif { $op == "auto" } {
+ error_check_good rename_file [eval {$env dbrename} \
+ -auto_commit $oldfile $newfile] 0
+ } else {
+ # $op is "abort" or "commit"
+ set txn [$env txn]
+ error_check_good rename_file [eval {$env dbrename} \
+ -txn $txn $oldfile $newfile] 0
+ error_check_good txn_$op [$txn $op] 0
+ }
+
+ if { $env == "NULL" } {
+ error_check_bad \
+ "$oldfile exists" [file exists $oldfile] 1
+ error_check_bad \
+ "$newfile exists" [file exists $newfile] 0
+ }
+
+ puts "\t\tTest0$tnum.a.3: check"
+ # Open again with create to make sure we're not caching or
+ # anything silly. In the normal case (no env), we already
+ # know the file doesn't exist.
+ set odb [eval {berkdb_open -create -mode 0644} \
+ $envargs $omethod $args $oldfile]
+ set ndb [eval {berkdb_open -create -mode 0644} \
+ $envargs $omethod $args $newfile]
+ error_check_good odb_open [is_valid_db $odb] TRUE
+ error_check_good ndb_open [is_valid_db $ndb] TRUE
+
+ # A get from the "old" database should return no data, and
+ # a get from the "new" one should return the datum -- except
+ # in the case of an abort, when the old file is still intact.
+ set odbt [$odb get $key]
+ if { $op == "abort" } {
+ error_check_good odbt_has_data [llength $odbt] 1
+ } else {
+ set ndbt [$ndb get $key]
+ error_check_good odbt_empty [llength $odbt] 0
+ error_check_bad ndbt_empty [llength $ndbt] 0
+ error_check_good ndbt [lindex \
+ [lindex $ndbt 0] 1] $data
+ }
+ error_check_good odb_close [$odb close] 0
+ error_check_good ndb_close [$ndb close] 0
+
+ # Now there's both an old and a new. Rename the
+ # "new" to the "old" and make sure that fails.
+ #
+ # XXX Ideally we'd do this test even when there's
+ # an external environment, but that env has
+ # errpfx/errfile set now. :-(
+ puts "\tTest0$tnum.b: Make sure rename fails\
+ instead of overwriting"
+ if { $env != "NULL" } {
+ error_check_good env_close [$env close] 0
+ set env [berkdb_env_noerr -home $path]
+ error_check_good env_open2 \
+ [is_valid_env $env] TRUE
+ set ret [catch {eval {berkdb dbrename} \
+ -env $env $newfile $oldfile} res]
+ error_check_bad rename_overwrite $ret 0
+ error_check_good rename_overwrite_ret \
+ [is_substr $errorCode EEXIST] 1
+ }
+
+ # Verify and then start over from a clean slate.
+ verify_dir $path "\tTest0$tnum.c: "
+ cleanup $path $env
+ if { $env != "NULL" } {
+ error_check_good env_close [$env close] 0
+ }
+ if { $env == "NULL" } {
+ error_check_bad "$oldfile exists" \
+ [file exists $oldfile] 1
+ error_check_bad "$newfile exists" \
+ [file exists $newfile] 1
+
+ set oldfile test0$tnum-old.db
+ set newfile test0$tnum.db
+ }
+ }
+ }
+}
diff --git a/storage/bdb/test/test076.tcl b/storage/bdb/test/test076.tcl
new file mode 100644
index 00000000000..9f7b1ed2972
--- /dev/null
+++ b/storage/bdb/test/test076.tcl
@@ -0,0 +1,80 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test076.tcl,v 1.18 2002/07/08 20:16:31 sue Exp $
+#
+# TEST test076
+# TEST Test creation of many small databases in a single environment. [#1528].
+proc test076 { method { ndbs 1000 } { tnum 76 } args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set encargs ""
+ set args [split_encargs $args encargs]
+ set omethod [convert_method $method]
+
+ if { [is_record_based $method] == 1 } {
+ set key ""
+ } else {
+ set key "key"
+ }
+ set data "datamoredatamoredata"
+
+ # Create an env if we weren't passed one.
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex == -1 } {
+ set deleteenv 1
+ env_cleanup $testdir
+ set env [eval {berkdb_env -create -home} $testdir $encargs]
+ error_check_good env [is_valid_env $env] TRUE
+ set args "$args -env $env"
+ } else {
+ set deleteenv 0
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ if { $ndbs == 1000 } {
+ set ndbs 100
+ }
+ }
+ set testdir [get_home $env]
+ }
+ puts -nonewline "Test0$tnum $method ($args): "
+ puts -nonewline "Create $ndbs"
+ puts " small databases in one env."
+
+ cleanup $testdir $env
+ set txn ""
+
+ for { set i 1 } { $i <= $ndbs } { incr i } {
+ set testfile test0$tnum.$i.db
+
+ set db [eval {berkdb_open -create -mode 0644}\
+ $args $omethod $testfile]
+ error_check_good db_open($i) [is_valid_db $db] TRUE
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$key$i \
+ [chop_data $method $data$i]}]
+ error_check_good db_put($i) $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close($i) [$db close] 0
+ }
+
+ if { $deleteenv == 1 } {
+ error_check_good env_close [$env close] 0
+ }
+
+ puts "\tTest0$tnum passed."
+}
diff --git a/storage/bdb/test/test077.tcl b/storage/bdb/test/test077.tcl
new file mode 100644
index 00000000000..99cf432af20
--- /dev/null
+++ b/storage/bdb/test/test077.tcl
@@ -0,0 +1,93 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test077.tcl,v 1.10 2002/05/24 15:24:57 sue Exp $
+#
+# TEST test077
+# TEST Test of DB_GET_RECNO [#1206].
+proc test077 { method { nkeys 1000 } { pagesize 512 } { tnum 77 } args } {
+ source ./include.tcl
+ global alphabet
+
+ set omethod [convert_method $method]
+ set args [convert_args $method $args]
+
+ puts "Test0$tnum: Test of DB_GET_RECNO."
+
+ if { [is_rbtree $method] != 1 } {
+ puts "\tTest0$tnum: Skipping for method $method."
+ return
+ }
+
+ set data $alphabet
+
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum.db
+ set env NULL
+ } else {
+ set testfile test0$tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ set db [eval {berkdb_open -create -mode 0644\
+ -pagesize $pagesize} $omethod $args {$testfile}]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ puts "\tTest0$tnum.a: Populating database."
+ set txn ""
+
+ for { set i 1 } { $i <= $nkeys } { incr i } {
+ set key [format %5d $i]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$key $data}]
+ error_check_good db_put($key) $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ puts "\tTest0$tnum.b: Verifying record numbers."
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good dbc_open [is_valid_cursor $dbc $db] TRUE
+
+ set i 1
+ for { set dbt [$dbc get -first] } \
+ { [string length $dbt] != 0 } \
+ { set dbt [$dbc get -next] } {
+ set recno [$dbc get -get_recno]
+ set keynum [expr [lindex [lindex $dbt 0] 0]]
+
+ # Verify that i, the number that is the key, and recno
+ # are all equal.
+ error_check_good key($i) $keynum $i
+ error_check_good recno($i) $recno $i
+ incr i
+ }
+
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+}
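+
+# The interface under test, reduced to a sketch (hypothetical handle
+# and file names; -recnum is what makes a btree an "rbtree" here):
+#
+#	set db [berkdb_open -create -btree -recnum recno.db]
+#	set dbc [$db cursor]
+#	$dbc get -first
+#	set recno [$dbc get -get_recno]	;# logical record number, 1-based
+#
+# Record numbers are only maintained when the btree is opened with
+# record numbering, which is why every other method is skipped above.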
diff --git a/storage/bdb/test/test078.tcl b/storage/bdb/test/test078.tcl
new file mode 100644
index 00000000000..45a1d46466e
--- /dev/null
+++ b/storage/bdb/test/test078.tcl
@@ -0,0 +1,130 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test078.tcl,v 1.18 2002/06/20 19:01:02 sue Exp $
+#
+# TEST test078
+# TEST Test of DBC->c_count(). [#303]
+proc test078 { method { nkeys 100 } { pagesize 512 } { tnum 78 } args } {
+ source ./include.tcl
+ global alphabet rand_init
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Test0$tnum: Test of key counts."
+
+ berkdb srand $rand_init
+
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ incr eindex
+ }
+
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum-a.db
+ set env NULL
+ } else {
+ set testfile test0$tnum-a.db
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+	set pgindex [lsearch -exact $args "-pagesize"]
+	if { $pgindex != -1 } {
+		puts "Test078: skipping for specific pagesizes"
+		return
+	}
+
+	puts "\tTest0$tnum.a: No duplicates, trivial answer."
+
+ set db [eval {berkdb_open -create -mode 0644\
+ -pagesize $pagesize} $omethod $args {$testfile}]
+ error_check_good db_open [is_valid_db $db] TRUE
+ set txn ""
+
+ for { set i 1 } { $i <= $nkeys } { incr i } {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$i\
+ [pad_data $method $alphabet$i]}]
+ error_check_good put.a($i) $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good count.a [$db count $i] 1
+ }
+ error_check_good db_close.a [$db close] 0
+
+ if { [is_record_based $method] == 1 || [is_rbtree $method] == 1 } {
+ puts \
+ "\tTest0$tnum.b: Duplicates not supported in $method, skipping."
+ return
+ }
+
+ foreach tuple {{b sorted "-dup -dupsort"} {c unsorted "-dup"}} {
+ set letter [lindex $tuple 0]
+ set dupopt [lindex $tuple 2]
+
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum-b.db
+ set env NULL
+ } else {
+ set testfile test0$tnum-b.db
+ set env [lindex $args $eindex]
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ puts "\tTest0$tnum.$letter: Duplicates ([lindex $tuple 1])."
+
+ puts "\t\tTest0$tnum.$letter.1: Populating database."
+
+ set db [eval {berkdb_open -create -mode 0644\
+ -pagesize $pagesize} $dupopt $omethod $args {$testfile}]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ for { set i 1 } { $i <= $nkeys } { incr i } {
+ for { set j 0 } { $j < $i } { incr j } {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn \
+ [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {$i\
+ [pad_data $method $j$alphabet]}]
+ error_check_good put.$letter,$i $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+ }
+
+ puts -nonewline "\t\tTest0$tnum.$letter.2: "
+ puts "Verifying dup counts on first dup."
+ for { set i 1 } { $i < $nkeys } { incr i } {
+ error_check_good count.$letter,$i \
+ [$db count $i] $i
+ }
+
+ puts -nonewline "\t\tTest0$tnum.$letter.3: "
+ puts "Verifying dup counts on random dup."
+ for { set i 1 } { $i < $nkeys } { incr i } {
+ set key [berkdb random_int 1 $nkeys]
+		# Count the randomly chosen key, not the loop index.
+		error_check_good count.$letter,$i \
+		    [$db count $key] $key
+ }
+ error_check_good db_close.$letter [$db close] 0
+ }
+}
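+
+# The count interface in isolation (a sketch with hypothetical names,
+# not harness code):
+#
+#	set db [berkdb_open -create -btree -dup dups.db]
+#	$db put k a
+#	$db put k b
+#	$db put k c
+#	puts [$db count k]	;# 3: data items stored under key k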
diff --git a/storage/bdb/test/test079.tcl b/storage/bdb/test/test079.tcl
new file mode 100644
index 00000000000..70fd4e05090
--- /dev/null
+++ b/storage/bdb/test/test079.tcl
@@ -0,0 +1,20 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test079.tcl,v 11.8 2002/01/11 15:53:54 bostic Exp $
+#
+# TEST test079
+# TEST Test of deletes in large trees. (test006 w/ sm. pagesize).
+# TEST
+# TEST Check that delete operations work in large btrees. 10000 entries
+# TEST and a pagesize of 512 push this out to a four-level btree, with a
+# TEST small fraction of the entries going on overflow pages.
+proc test079 { method {nentries 10000} {pagesize 512} {tnum 79} args} {
+ if { [ is_queueext $method ] == 1 } {
+ set method "queue";
+ lappend args "-extent" "20"
+ }
+ eval {test006 $method $nentries 1 $tnum -pagesize $pagesize} $args
+}
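+
+# Rough sizing behind the TEST comment (an estimate, not a measured
+# figure): a 512-byte page holds on the order of a dozen small
+# key/data pairs after per-page overhead, so 10000 entries need close
+# to a thousand leaf pages; with similar fan-out on internal pages
+# that is three internal levels above the leaves, i.e. a four-level
+# btree.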
diff --git a/storage/bdb/test/test080.tcl b/storage/bdb/test/test080.tcl
new file mode 100644
index 00000000000..9f649496f68
--- /dev/null
+++ b/storage/bdb/test/test080.tcl
@@ -0,0 +1,126 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test080.tcl,v 11.16 2002/08/08 15:38:12 bostic Exp $
+#
+# TEST test080
+# TEST Test of DB->remove()
+proc test080 { method {tnum 80} args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Test0$tnum: Test of DB->remove()"
+
+ # Determine full path
+ set curdir [pwd]
+ cd $testdir
+ set fulldir [pwd]
+ cd $curdir
+
+ # Test both relative and absolute path
+ set paths [list $fulldir $testdir]
+
+ # If we are using an env, then skip this test.
+ # It needs its own.
+ set eindex [lsearch -exact $args "-env"]
+ set encargs ""
+ set args [split_encargs $args encargs]
+ if { $encargs != ""} {
+ puts "Skipping test080 for security"
+ return
+ }
+ if { $eindex != -1 } {
+ incr eindex
+ set e [lindex $args $eindex]
+ puts "Skipping test080 for env $e"
+ return
+ }
+
+ foreach path $paths {
+
+ set dbfile test0$tnum.db
+ set testfile $path/$dbfile
+
+ # Loop through test using the following remove options
+ # 1. no environment, not in transaction
+ # 2. with environment, not in transaction
+	# 3. remove with auto-commit
+	# 4. remove in a committed transaction
+	# 5. remove in an aborted transaction
+
+ foreach op "noenv env auto commit abort" {
+
+ # Make sure we're starting with a clean slate.
+ env_cleanup $testdir
+ if { $op == "noenv" } {
+ set dbfile $testfile
+ set e NULL
+ set envargs ""
+ } else {
+ if { $op == "env" } {
+ set largs ""
+ } else {
+ set largs " -txn"
+ }
+ set e [eval {berkdb_env -create -home $path} $largs]
+ set envargs "-env $e"
+ error_check_good env_open [is_valid_env $e] TRUE
+ }
+
+ puts "\tTest0$tnum: dbremove with $op in $path"
+ puts "\tTest0$tnum.a.1: Create file"
+ set db [eval {berkdb_open -create -mode 0644} $omethod \
+ $envargs $args {$dbfile}]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+	# The nature of the key and data is unimportant;
+	# use a numeric key so record-based methods don't need
+	# special treatment.
+ set key 1
+ set data [pad_data $method data]
+
+ error_check_good dbput [$db put $key $data] 0
+ error_check_good dbclose [$db close] 0
+ error_check_good file_exists_before \
+ [file exists $testfile] 1
+
+ # Use berkdb dbremove for non-transactional tests
+ # and $env dbremove for transactional tests
+ puts "\tTest0$tnum.a.2: Remove file"
+ if { $op == "noenv" || $op == "env" } {
+ error_check_good remove_$op \
+ [eval {berkdb dbremove} $envargs $dbfile] 0
+ } elseif { $op == "auto" } {
+ error_check_good remove_$op \
+ [eval {$e dbremove} -auto_commit $dbfile] 0
+ } else {
+ # $op is "abort" or "commit"
+ set txn [$e txn]
+ error_check_good remove_$op \
+ [eval {$e dbremove} -txn $txn $dbfile] 0
+ error_check_good txn_$op [$txn $op] 0
+ }
+
+ puts "\tTest0$tnum.a.3: Check that file is gone"
+ # File should now be gone, except in the case of an abort.
+ if { $op != "abort" } {
+ error_check_good exists_after \
+ [file exists $testfile] 0
+ } else {
+ error_check_good exists_after \
+ [file exists $testfile] 1
+ }
+
+ if { $e != "NULL" } {
+ error_check_good env_close [$e close] 0
+ }
+
+ set dbfile test0$tnum-old.db
+ set testfile $path/$dbfile
+ }
+ }
+}
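+
+# The three removal flavors exercised above, as a standalone sketch
+# (hypothetical handle and file names):
+#
+#	berkdb dbremove -env $e file.db		;# no transaction
+#	$e dbremove -auto_commit file.db	;# implicit transaction
+#	set t [$e txn]
+#	$e dbremove -txn $t file.db
+#	$t commit	;# with $t abort instead, file.db survives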
diff --git a/storage/bdb/test/test081.tcl b/storage/bdb/test/test081.tcl
new file mode 100644
index 00000000000..37c2b44ac33
--- /dev/null
+++ b/storage/bdb/test/test081.tcl
@@ -0,0 +1,15 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test081.tcl,v 11.6 2002/01/11 15:53:55 bostic Exp $
+#
+# TEST test081
+# TEST Test off-page duplicates and overflow pages together with
+# TEST very large keys (key/data as file contents).
+proc test081 { method {ndups 13} {tnum 81} args} {
+ source ./include.tcl
+
+ eval {test017 $method 1 $ndups $tnum} $args
+}
diff --git a/storage/bdb/test/test082.tcl b/storage/bdb/test/test082.tcl
new file mode 100644
index 00000000000..e8c1fa45a92
--- /dev/null
+++ b/storage/bdb/test/test082.tcl
@@ -0,0 +1,14 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test082.tcl,v 11.5 2002/01/11 15:53:55 bostic Exp $
+#
+# TEST test082
+# TEST Test of DB_PREV_NODUP (uses test074).
+proc test082 { method {dir -prevnodup} {nitems 100} {tnum 82} args} {
+ source ./include.tcl
+
+ eval {test074 $method $dir $nitems $tnum} $args
+}
diff --git a/storage/bdb/test/test083.tcl b/storage/bdb/test/test083.tcl
new file mode 100644
index 00000000000..e4168ee1c43
--- /dev/null
+++ b/storage/bdb/test/test083.tcl
@@ -0,0 +1,162 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test083.tcl,v 11.13 2002/06/24 14:06:38 sue Exp $
+#
+# TEST test083
+# TEST Test of DB->key_range.
+proc test083 { method {pgsz 512} {maxitems 5000} {step 2} args} {
+ source ./include.tcl
+ set omethod [convert_method $method]
+ set args [convert_args $method $args]
+
+ puts "Test083 $method ($args): Test of DB->key_range"
+ if { [is_btree $method] != 1 } {
+ puts "\tTest083: Skipping for method $method."
+ return
+ }
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Test083: skipping for specific pagesizes"
+ return
+ }
+
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex == -1 } {
+ set testfile $testdir/test083.db
+ set env NULL
+ } else {
+ set testfile test083.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+
+ # We assume that numbers will be at most six digits wide
+ error_check_bad maxitems_range [expr $maxitems > 999999] 1
+
+ # We want to test key_range on a variety of sizes of btree.
+ # Start at ten keys and work up to $maxitems keys, at each step
+ # multiplying the number of keys by $step.
+ for { set nitems 10 } { $nitems <= $maxitems }\
+ { set nitems [expr $nitems * $step] } {
+
+ puts "\tTest083.a: Opening new database"
+ if { $env != "NULL"} {
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+ set db [eval {berkdb_open -create -mode 0644} \
+ -pagesize $pgsz $omethod $args $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ t83_build $db $nitems $env $txnenv
+ t83_test $db $nitems $env $txnenv
+
+ error_check_good db_close [$db close] 0
+ }
+}
+
+proc t83_build { db nitems env txnenv } {
+ source ./include.tcl
+
+ puts "\tTest083.b: Populating database with $nitems keys"
+
+ set keylist {}
+ puts "\t\tTest083.b.1: Generating key list"
+ for { set i 0 } { $i < $nitems } { incr i } {
+ lappend keylist $i
+ }
+
+ # With randomly ordered insertions, the range of errors we
+ # get from key_range can be unpredictably high [#2134]. For now,
+ # just skip the randomization step.
+ #puts "\t\tTest083.b.2: Randomizing key list"
+ #set keylist [randomize_list $keylist]
+ #puts "\t\tTest083.b.3: Populating database with randomized keys"
+
+ puts "\t\tTest083.b.2: Populating database"
+ set data [repeat . 50]
+ set txn ""
+ foreach keynum $keylist {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {key[format %6d $keynum] $data}]
+ error_check_good db_put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+}
+
+proc t83_test { db nitems env txnenv } {
+ # Look at the first key, then at keys about 1/4, 1/2, 3/4, and
+ # all the way through the database. Make sure the key_ranges
+ # aren't off by more than 10%.
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ } else {
+ set txn ""
+ }
+ set dbc [eval {$db cursor} $txn]
+ error_check_good dbc [is_valid_cursor $dbc $db] TRUE
+
+ puts "\tTest083.c: Verifying ranges..."
+
+ for { set i 0 } { $i < $nitems } \
+ { incr i [expr $nitems / [berkdb random_int 3 16]] } {
+ puts "\t\t...key $i"
+ error_check_bad key0 [llength [set dbt [$dbc get -first]]] 0
+
+ for { set j 0 } { $j < $i } { incr j } {
+ error_check_bad key$j \
+ [llength [set dbt [$dbc get -next]]] 0
+ }
+
+ set ranges [$db keyrange [lindex [lindex $dbt 0] 0]]
+
+ #puts $ranges
+ error_check_good howmanyranges [llength $ranges] 3
+
+ set lessthan [lindex $ranges 0]
+ set morethan [lindex $ranges 2]
+
+ set rangesum [expr $lessthan + [lindex $ranges 1] + $morethan]
+
+ roughly_equal $rangesum 1 0.05
+
+		# Wild guess at a tolerance; the else branch keeps
+		# $tol set even when $nitems is exactly 500.
+		if { $nitems < 500 } {
+			set tol 0.3
+		} else {
+			set tol 0.15
+		}
+
+ roughly_equal $lessthan [expr $i * 1.0 / $nitems] $tol
+
+ }
+
+ error_check_good dbc_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+}
+
+proc roughly_equal { a b tol } {
+	# Compare the absolute difference; a signed difference would
+	# let any value of a below b pass regardless of the tolerance.
+	error_check_good "$a =~ $b" [expr abs($a - $b) < $tol] 1
+}
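+
+# What keyrange returns, as a sketch ($db and $somekey are
+# placeholders): three proportions that should sum to roughly 1.0.
+#
+#	set r [$db keyrange $somekey]
+#	set less  [lindex $r 0]	;# fraction of keys strictly less
+#	set equal [lindex $r 1]	;# fraction equal to $somekey
+#	set more  [lindex $r 2]	;# fraction strictly greater
+#
+# The values are estimated from tree geometry rather than counted,
+# hence the loose tolerances t83_test allows.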
diff --git a/storage/bdb/test/test084.tcl b/storage/bdb/test/test084.tcl
new file mode 100644
index 00000000000..89bc13978b0
--- /dev/null
+++ b/storage/bdb/test/test084.tcl
@@ -0,0 +1,53 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test084.tcl,v 11.11 2002/07/13 18:09:14 margo Exp $
+#
+# TEST test084
+# TEST Basic sanity test (test001) with large (64K) pages.
+proc test084 { method {nentries 10000} {tnum 84} {pagesize 65536} args} {
+ source ./include.tcl
+
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum-empty.db
+ set env NULL
+ } else {
+ set testfile test0$tnum-empty.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Test084: skipping for specific pagesizes"
+ return
+ }
+
+ cleanup $testdir $env
+
+ set args "-pagesize $pagesize $args"
+
+ eval {test001 $method $nentries 0 $tnum 0} $args
+
+ set omethod [convert_method $method]
+ set args [convert_args $method $args]
+
+ # For good measure, create a second database that's empty
+ # with the large page size. (There was a verifier bug that
+ # choked on empty 64K pages. [#2408])
+ set db [eval {berkdb_open -create -mode 0644} $args $omethod $testfile]
+ error_check_good empty_db [is_valid_db $db] TRUE
+ error_check_good empty_db_close [$db close] 0
+}
diff --git a/storage/bdb/test/test085.tcl b/storage/bdb/test/test085.tcl
new file mode 100644
index 00000000000..b0412d6fe68
--- /dev/null
+++ b/storage/bdb/test/test085.tcl
@@ -0,0 +1,332 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test085.tcl,v 1.13 2002/08/08 17:23:46 sandstro Exp $
+#
+# TEST test085
+# TEST Test of cursor behavior when a cursor is pointing to a deleted
+# TEST btree key which then has duplicates added. [#2473]
+proc test085 { method {pagesize 512} {onp 3} {offp 10} {tnum 85} args } {
+ source ./include.tcl
+ global alphabet
+
+ set omethod [convert_method $method]
+ set args [convert_args $method $args]
+ set encargs ""
+ set args [split_encargs $args encargs]
+
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum.db
+ set env NULL
+ } else {
+ set testfile test0$tnum.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Test085: skipping for specific pagesizes"
+ return
+ }
+ cleanup $testdir $env
+
+ # Keys must sort $prekey < $key < $postkey.
+ set prekey "AA"
+ set key "BBB"
+ set postkey "CCCC"
+
+ # Make these distinguishable from each other and from the
+ # alphabets used for the $key's data.
+ set predatum "1234567890"
+ set datum $alphabet
+ set postdatum "0987654321"
+ set txn ""
+
+ append args " -pagesize $pagesize -dup"
+
+ puts -nonewline "Test0$tnum $omethod ($args): "
+
+ # Skip for all non-btrees. (Rbtrees don't count as btrees, for
+ # now, since they don't support dups.)
+ if { [is_btree $method] != 1 } {
+ puts "Skipping for method $method."
+ return
+ } else {
+ puts "Duplicates w/ deleted item cursor."
+ }
+
+ # Repeat the test with both on-page and off-page numbers of dups.
+ foreach ndups "$onp $offp" {
+ # Put operations we want to test on a cursor set to the
+ # deleted item, the key to use with them, and what should
+ # come before and after them given a placement of
+ # the deleted item at the beginning or end of the dupset.
+ set final [expr $ndups - 1]
+ set putops {
+ {{-before} "" $predatum {[test085_ddatum 0]} beginning}
+ {{-before} "" {[test085_ddatum $final]} $postdatum end}
+ {{-current} "" $predatum {[test085_ddatum 0]} beginning}
+ {{-current} "" {[test085_ddatum $final]} $postdatum end}
+ {{-keyfirst} $key $predatum {[test085_ddatum 0]} beginning}
+ {{-keyfirst} $key $predatum {[test085_ddatum 0]} end}
+ {{-keylast} $key {[test085_ddatum $final]} $postdatum beginning}
+ {{-keylast} $key {[test085_ddatum $final]} $postdatum end}
+ {{-after} "" $predatum {[test085_ddatum 0]} beginning}
+ {{-after} "" {[test085_ddatum $final]} $postdatum end}
+ }
+
+ # Get operations we want to test on a cursor set to the
+ # deleted item, any args to get, and the expected key/data pair.
+ set getops {
+ {{-current} "" "" "" beginning}
+ {{-current} "" "" "" end}
+ {{-next} "" $key {[test085_ddatum 0]} beginning}
+ {{-next} "" $postkey $postdatum end}
+ {{-prev} "" $prekey $predatum beginning}
+ {{-prev} "" $key {[test085_ddatum $final]} end}
+ {{-first} "" $prekey $predatum beginning}
+ {{-first} "" $prekey $predatum end}
+ {{-last} "" $postkey $postdatum beginning}
+ {{-last} "" $postkey $postdatum end}
+ {{-nextdup} "" $key {[test085_ddatum 0]} beginning}
+ {{-nextdup} "" EMPTYLIST "" end}
+ {{-nextnodup} "" $postkey $postdatum beginning}
+ {{-nextnodup} "" $postkey $postdatum end}
+ {{-prevnodup} "" $prekey $predatum beginning}
+ {{-prevnodup} "" $prekey $predatum end}
+ }
+
+ set txn ""
+ foreach pair $getops {
+ set op [lindex $pair 0]
+ puts "\tTest0$tnum: Get ($op) with $ndups duplicates,\
+ cursor at the [lindex $pair 4]."
+ set db [eval {berkdb_open -create \
+ -mode 0644} $omethod $encargs $args $testfile]
+ error_check_good "db open" [is_valid_db $db] TRUE
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn \
+ [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [test085_setup $db $txn]
+
+ set beginning [expr [string compare \
+ [lindex $pair 4] "beginning"] == 0]
+
+ for { set i 0 } { $i < $ndups } { incr i } {
+ if { $beginning } {
+ error_check_good db_put($i) \
+ [eval {$db put} $txn \
+ {$key [test085_ddatum $i]}] 0
+ } else {
+ set c [eval {$db cursor} $txn]
+ set j [expr $ndups - $i - 1]
+ error_check_good db_cursor($j) \
+ [is_valid_cursor $c $db] TRUE
+ set d [test085_ddatum $j]
+ error_check_good dbc_put($j) \
+ [$c put -keyfirst $key $d] 0
+ error_check_good c_close [$c close] 0
+ }
+ }
+
+ set gargs [lindex $pair 1]
+ set ekey ""
+ set edata ""
+ eval set ekey [lindex $pair 2]
+ eval set edata [lindex $pair 3]
+
+ set dbt [eval $dbc get $op $gargs]
+ if { [string compare $ekey EMPTYLIST] == 0 } {
+ error_check_good dbt($op,$ndups) \
+ [llength $dbt] 0
+ } else {
+ error_check_good dbt($op,$ndups) $dbt \
+ [list [list $ekey $edata]]
+ }
+ error_check_good "dbc close" [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good "db close" [$db close] 0
+ verify_dir $testdir "\t\t"
+
+ # Remove testfile so we can do without truncate flag.
+ # This is okay because we've already done verify and
+ # dump/load.
+ if { $env == "NULL" } {
+ set ret [eval {berkdb dbremove} \
+ $encargs $testfile]
+ } elseif { $txnenv == 1 } {
+ set ret [eval "$env dbremove" \
+ -auto_commit $encargs $testfile]
+ } else {
+ set ret [eval {berkdb dbremove} \
+ -env $env $encargs $testfile]
+ }
+ error_check_good dbremove $ret 0
+
+ }
+
+ foreach pair $putops {
+ # Open and set up database.
+ set op [lindex $pair 0]
+ puts "\tTest0$tnum: Put ($op) with $ndups duplicates,\
+ cursor at the [lindex $pair 4]."
+ set db [eval {berkdb_open -create \
+ -mode 0644} $omethod $args $encargs $testfile]
+ error_check_good "db open" [is_valid_db $db] TRUE
+
+ set beginning [expr [string compare \
+ [lindex $pair 4] "beginning"] == 0]
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [test085_setup $db $txn]
+
+ # Put duplicates.
+ for { set i 0 } { $i < $ndups } { incr i } {
+ if { $beginning } {
+ error_check_good db_put($i) \
+ [eval {$db put} $txn \
+ {$key [test085_ddatum $i]}] 0
+ } else {
+ set c [eval {$db cursor} $txn]
+ set j [expr $ndups - $i - 1]
+ error_check_good db_cursor($j) \
+ [is_valid_cursor $c $db] TRUE
+ set d [test085_ddatum $j]
+ error_check_good dbc_put($j) \
+ [$c put -keyfirst $key $d] 0
+ error_check_good c_close [$c close] 0
+ }
+ }
+
+ # Set up cursors for stability test.
+ set pre_dbc [eval {$db cursor} $txn]
+ error_check_good pre_set [$pre_dbc get -set $prekey] \
+ [list [list $prekey $predatum]]
+ set post_dbc [eval {$db cursor} $txn]
+ error_check_good post_set [$post_dbc get -set $postkey]\
+ [list [list $postkey $postdatum]]
+ set first_dbc [eval {$db cursor} $txn]
+ error_check_good first_set \
+ [$first_dbc get -get_both $key [test085_ddatum 0]] \
+ [list [list $key [test085_ddatum 0]]]
+ set last_dbc [eval {$db cursor} $txn]
+ error_check_good last_set \
+ [$last_dbc get -get_both $key [test085_ddatum \
+ [expr $ndups - 1]]] \
+ [list [list $key [test085_ddatum [expr $ndups -1]]]]
+
+ set k [lindex $pair 1]
+ set d_before ""
+ set d_after ""
+ eval set d_before [lindex $pair 2]
+ eval set d_after [lindex $pair 3]
+ set newdatum "NewDatum"
+ error_check_good dbc_put($op,$ndups) \
+ [eval $dbc put $op $k $newdatum] 0
+ error_check_good dbc_prev($op,$ndups) \
+ [lindex [lindex [$dbc get -prev] 0] 1] \
+ $d_before
+ error_check_good dbc_current($op,$ndups) \
+ [lindex [lindex [$dbc get -next] 0] 1] \
+ $newdatum
+
+ error_check_good dbc_next($op,$ndups) \
+ [lindex [lindex [$dbc get -next] 0] 1] \
+ $d_after
+
+ # Verify stability of pre- and post- cursors.
+ error_check_good pre_stable [$pre_dbc get -current] \
+ [list [list $prekey $predatum]]
+ error_check_good post_stable [$post_dbc get -current] \
+ [list [list $postkey $postdatum]]
+ error_check_good first_stable \
+ [$first_dbc get -current] \
+ [list [list $key [test085_ddatum 0]]]
+ error_check_good last_stable \
+ [$last_dbc get -current] \
+ [list [list $key [test085_ddatum [expr $ndups -1]]]]
+
+ foreach c "$pre_dbc $post_dbc $first_dbc $last_dbc" {
+ error_check_good ${c}_close [$c close] 0
+ }
+
+ error_check_good "dbc close" [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good "db close" [$db close] 0
+ verify_dir $testdir "\t\t"
+
+ # Remove testfile so we can do without truncate flag.
+ # This is okay because we've already done verify and
+ # dump/load.
+ if { $env == "NULL" } {
+ set ret [eval {berkdb dbremove} \
+ $encargs $testfile]
+ } elseif { $txnenv == 1 } {
+ set ret [eval "$env dbremove" \
+ -auto_commit $encargs $testfile]
+ } else {
+ set ret [eval {berkdb dbremove} \
+ -env $env $encargs $testfile]
+ }
+ error_check_good dbremove $ret 0
+ }
+ }
+}
+
+# Set up the test database; put $prekey, $key, and $postkey with their
+# respective data, and then delete $key with a new cursor. Return that
+# cursor, still pointing to the deleted item.
+proc test085_setup { db txn } {
+ upvar key key
+ upvar prekey prekey
+ upvar postkey postkey
+ upvar predatum predatum
+ upvar postdatum postdatum
+
+ # no one else should ever see this one!
+ set datum "bbbbbbbb"
+
+ error_check_good pre_put [eval {$db put} $txn {$prekey $predatum}] 0
+ error_check_good main_put [eval {$db put} $txn {$key $datum}] 0
+ error_check_good post_put [eval {$db put} $txn {$postkey $postdatum}] 0
+
+ set dbc [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+
+ error_check_good dbc_getset [$dbc get -get_both $key $datum] \
+ [list [list $key $datum]]
+
+ error_check_good dbc_del [$dbc del] 0
+
+ return $dbc
+}
+
+proc test085_ddatum { a } {
+ global alphabet
+ return $a$alphabet
+}
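+
+# The phantom-cursor scenario test085_setup leaves behind, spelled out
+# (placeholder names; this mirrors the proc above):
+#
+#	set dbc [$db cursor]
+#	$dbc get -get_both $key $datum	;# position on the item
+#	$dbc del			;# delete it under the cursor
+#	# $dbc now references a deleted slot; the loops above probe
+#	# every get/put flag against exactly that position.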
diff --git a/storage/bdb/test/test086.tcl b/storage/bdb/test/test086.tcl
new file mode 100644
index 00000000000..e15aa1d8bb9
--- /dev/null
+++ b/storage/bdb/test/test086.tcl
@@ -0,0 +1,166 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test086.tcl,v 11.9 2002/08/06 17:58:00 sandstro Exp $
+#
+# TEST test086
+# TEST Test of cursor stability across btree splits/rsplits with
+# TEST subtransaction aborts (a variant of test048). [#2373]
+proc test086 { method args } {
+ global errorCode
+ source ./include.tcl
+
+ set tstn 086
+ set args [convert_args $method $args]
+ set encargs ""
+ set args [split_encargs $args encargs]
+
+ if { [is_btree $method] != 1 } {
+ puts "Test$tstn skipping for method $method."
+ return
+ }
+
+ set method "-btree"
+
+ puts "\tTest$tstn: Test of cursor stability across aborted\
+ btree splits."
+
+ set key "key"
+ set data "data"
+ set txn ""
+ set flags ""
+
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then this test won't work.
+ if { $eindex == -1 } {
+ # But we will be using our own env...
+ set testfile test0$tstn.db
+ } else {
+ puts "\tTest$tstn: Environment provided; skipping test."
+ return
+ }
+ set t1 $testdir/t1
+ env_cleanup $testdir
+
+ set env [eval {berkdb_env -create -home $testdir -txn} $encargs]
+ error_check_good berkdb_env [is_valid_env $env] TRUE
+
+ puts "\tTest$tstn.a: Create $method database."
+ set oflags "-auto_commit -create -env $env -mode 0644 $args $method"
+ set db [eval {berkdb_open} $oflags $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set nkeys 5
+ # Fill page w/ small key/data pairs, keep at leaf
+ #
+ puts "\tTest$tstn.b: Fill page with $nkeys small key/data pairs."
+ set txn [$env txn]
+ error_check_good txn [is_valid_txn $txn $env] TRUE
+ for { set i 0 } { $i < $nkeys } { incr i } {
+ set ret [$db put -txn $txn key000$i $data$i]
+ error_check_good dbput $ret 0
+ }
+ error_check_good commit [$txn commit] 0
+
+ # get db ordering, set cursors
+ puts "\tTest$tstn.c: Set cursors on each of $nkeys pairs."
+ set txn [$env txn]
+ error_check_good txn [is_valid_txn $txn $env] TRUE
+ for {set i 0; set ret [$db get -txn $txn key000$i]} {\
+ $i < $nkeys && [llength $ret] != 0} {\
+ incr i; set ret [$db get -txn $txn key000$i]} {
+ set key_set($i) [lindex [lindex $ret 0] 0]
+ set data_set($i) [lindex [lindex $ret 0] 1]
+ set dbc [$db cursor -txn $txn]
+ set dbc_set($i) $dbc
+ error_check_good db_cursor:$i [is_substr $dbc_set($i) $db] 1
+ set ret [$dbc_set($i) get -set $key_set($i)]
+ error_check_bad dbc_set($i)_get:set [llength $ret] 0
+ }
+
+ # Create child txn.
+ set ctxn [$env txn -parent $txn]
+	error_check_good ctxn [is_valid_txn $ctxn $env] TRUE
+
+ # if mkeys is above 1000, need to adjust below for lexical order
+ set mkeys 1000
+ puts "\tTest$tstn.d: Add $mkeys pairs to force split."
+ for {set i $nkeys} { $i < $mkeys } { incr i } {
+ if { $i >= 100 } {
+ set ret [$db put -txn $ctxn key0$i $data$i]
+ } elseif { $i >= 10 } {
+ set ret [$db put -txn $ctxn key00$i $data$i]
+ } else {
+ set ret [$db put -txn $ctxn key000$i $data$i]
+ }
+ error_check_good dbput:more $ret 0
+ }
+
+ puts "\tTest$tstn.e: Abort."
+ error_check_good ctxn_abort [$ctxn abort] 0
+
+ puts "\tTest$tstn.f: Check and see that cursors maintained reference."
+ for {set i 0} { $i < $nkeys } {incr i} {
+ set ret [$dbc_set($i) get -current]
+ error_check_bad dbc$i:get:current [llength $ret] 0
+ set ret2 [$dbc_set($i) get -set $key_set($i)]
+ error_check_bad dbc$i:get:set [llength $ret2] 0
+ error_check_good dbc$i:get(match) $ret $ret2
+ }
+
+ # Put (and this time keep) the keys that caused the split.
+ # We'll delete them to test reverse splits.
+ puts "\tTest$tstn.g: Put back added keys."
+ for {set i $nkeys} { $i < $mkeys } { incr i } {
+ if { $i >= 100 } {
+ set ret [$db put -txn $txn key0$i $data$i]
+ } elseif { $i >= 10 } {
+ set ret [$db put -txn $txn key00$i $data$i]
+ } else {
+ set ret [$db put -txn $txn key000$i $data$i]
+ }
+ error_check_good dbput:more $ret 0
+ }
+
+ puts "\tTest$tstn.h: Delete added keys to force reverse split."
+ set ctxn [$env txn -parent $txn]
+	error_check_good ctxn [is_valid_txn $ctxn $env] TRUE
+ for {set i $nkeys} { $i < $mkeys } { incr i } {
+ if { $i >= 100 } {
+ error_check_good db_del:$i [$db del -txn $ctxn key0$i] 0
+ } elseif { $i >= 10 } {
+ error_check_good db_del:$i \
+ [$db del -txn $ctxn key00$i] 0
+ } else {
+ error_check_good db_del:$i \
+ [$db del -txn $ctxn key000$i] 0
+ }
+ }
+
+ puts "\tTest$tstn.i: Abort."
+ error_check_good ctxn_abort [$ctxn abort] 0
+
+ puts "\tTest$tstn.j: Verify cursor reference."
+ for {set i 0} { $i < $nkeys } {incr i} {
+ set ret [$dbc_set($i) get -current]
+ error_check_bad dbc$i:get:current [llength $ret] 0
+ set ret2 [$dbc_set($i) get -set $key_set($i)]
+ error_check_bad dbc$i:get:set [llength $ret2] 0
+ error_check_good dbc$i:get(match) $ret $ret2
+ }
+
+	puts "\tTest$tstn.k: Cleanup."
+ # close cursors
+ for {set i 0} { $i < $nkeys } {incr i} {
+ error_check_good dbc_close:$i [$dbc_set($i) close] 0
+ }
+
+ error_check_good commit [$txn commit] 0
+ error_check_good dbclose [$db close] 0
+ error_check_good envclose [$env close] 0
+
+ puts "\tTest$tstn complete."
+}
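+
+# The subtransaction pattern the test depends on, in isolation (a
+# sketch; $env stands for an open transactional environment):
+#
+#	set txn [$env txn]			;# parent
+#	set ctxn [$env txn -parent $txn]	;# child
+#	# ...puts under $ctxn force page splits...
+#	$ctxn abort	;# splits undone; cursors opened under $txn
+#			;# must still reference their original items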
diff --git a/storage/bdb/test/test087.tcl b/storage/bdb/test/test087.tcl
new file mode 100644
index 00000000000..089664a0002
--- /dev/null
+++ b/storage/bdb/test/test087.tcl
@@ -0,0 +1,290 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test087.tcl,v 11.14 2002/07/08 20:16:31 sue Exp $
+#
+# TEST test087
+# TEST Test of cursor stability when converting to and modifying
+# TEST off-page duplicate pages with subtransaction aborts. [#2373]
+# TEST
+# TEST Does the following:
+# TEST a. Initialize things by DB->putting ndups dups and
+# TEST setting a reference cursor to point to each. Do each put twice,
+# TEST first aborting, then committing, so we're sure to abort the move
+# TEST to off-page dups at some point.
+# TEST b. c_put ndups dups (and correspondingly expanding
+# TEST the set of reference cursors) after the last one, making sure
+# TEST after each step that all the reference cursors still point to
+# TEST the right item.
+# TEST c. Ditto, but before the first one.
+# TEST d. Ditto, but after each one in sequence first to last.
+# TEST	e. Ditto, but after each one in sequence from last to first
+# TEST	(so the puts occur relative to the new datum).
+# TEST f. Ditto for the two sequence tests, only doing a
+# TEST DBC->c_put(DB_CURRENT) of a larger datum instead of adding a
+# TEST new one.
+proc test087 { method {pagesize 512} {ndups 50} {tnum 87} args } {
+ source ./include.tcl
+ global alphabet
+
+ set args [convert_args $method $args]
+ set encargs ""
+ set args [split_encargs $args encargs]
+ set omethod [convert_method $method]
+
+ puts "Test0$tnum $omethod ($args): "
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then return
+ if { $eindex != -1 } {
+ puts "Environment specified; skipping."
+ return
+ }
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Test087: skipping for specific pagesizes"
+ return
+ }
+ env_cleanup $testdir
+ set testfile test0$tnum.db
+ set key "the key"
+ append args " -pagesize $pagesize -dup"
+
+ if { [is_record_based $method] || [is_rbtree $method] } {
+ puts "Skipping for method $method."
+ return
+ } else {
+ puts "Cursor stability on dup. pages w/ aborts."
+ }
+
+ set env [eval {berkdb_env -create -home $testdir -txn} $encargs]
+ error_check_good env_create [is_valid_env $env] TRUE
+
+ set db [eval {berkdb_open -auto_commit \
+ -create -env $env -mode 0644} $omethod $args $testfile]
+ error_check_good "db open" [is_valid_db $db] TRUE
+
+ # Number of outstanding keys.
+ set keys $ndups
+
+ puts "\tTest0$tnum.a: put/abort/put/commit loop;\
+ $ndups dups, short data."
+ set txn [$env txn]
+ error_check_good txn [is_valid_txn $txn $env] TRUE
+ for { set i 0 } { $i < $ndups } { incr i } {
+ set datum [makedatum_t73 $i 0]
+
+ set ctxn [$env txn -parent $txn]
+ error_check_good ctxn(abort,$i) [is_valid_txn $ctxn $env] TRUE
+ error_check_good "db put/abort ($i)" \
+ [$db put -txn $ctxn $key $datum] 0
+ error_check_good ctxn_abort($i) [$ctxn abort] 0
+
+ verify_t73 is_long dbc [expr $i - 1] $key
+
+ set ctxn [$env txn -parent $txn]
+ error_check_good ctxn(commit,$i) [is_valid_txn $ctxn $env] TRUE
+ error_check_good "db put/commit ($i)" \
+ [$db put -txn $ctxn $key $datum] 0
+ error_check_good ctxn_commit($i) [$ctxn commit] 0
+
+ set is_long($i) 0
+
+ set dbc($i) [$db cursor -txn $txn]
+ error_check_good "db cursor ($i)"\
+ [is_valid_cursor $dbc($i) $db] TRUE
+ error_check_good "dbc get -get_both ($i)"\
+ [$dbc($i) get -get_both $key $datum]\
+ [list [list $key $datum]]
+
+ verify_t73 is_long dbc $i $key
+ }
+
+ puts "\tTest0$tnum.b: Cursor put (DB_KEYLAST); $ndups new dups,\
+ short data."
+
+ set ctxn [$env txn -parent $txn]
+ error_check_good ctxn($i) [is_valid_txn $ctxn $env] TRUE
+ for { set i 0 } { $i < $ndups } { incr i } {
+ # !!! keys contains the number of the next dup
+ # to be added (since they start from zero)
+ set datum [makedatum_t73 $keys 0]
+ set curs [$db cursor -txn $ctxn]
+ error_check_good "db cursor create" [is_valid_cursor $curs $db]\
+ TRUE
+ error_check_good "c_put(DB_KEYLAST, $keys)"\
+ [$curs put -keylast $key $datum] 0
+
+ # We can't do a verification while a child txn is active,
+ # or we'll run into trouble when DEBUG_ROP is enabled.
+ # If this test has trouble, though, uncommenting this
+ # might be illuminating--it makes things a bit more rigorous
+ # and works fine when DEBUG_ROP is not enabled.
+ # verify_t73 is_long dbc $keys $key
+ error_check_good curs_close [$curs close] 0
+ }
+ error_check_good ctxn_abort [$ctxn abort] 0
+ verify_t73 is_long dbc $keys $key
+
+ puts "\tTest0$tnum.c: Cursor put (DB_KEYFIRST); $ndups new dups,\
+ short data."
+
+ set ctxn [$env txn -parent $txn]
+ error_check_good ctxn($i) [is_valid_txn $ctxn $env] TRUE
+ for { set i 0 } { $i < $ndups } { incr i } {
+ # !!! keys contains the number of the next dup
+ # to be added (since they start from zero)
+
+ set datum [makedatum_t73 $keys 0]
+ set curs [$db cursor -txn $ctxn]
+ error_check_good "db cursor create" [is_valid_cursor $curs $db]\
+ TRUE
+ error_check_good "c_put(DB_KEYFIRST, $keys)"\
+ [$curs put -keyfirst $key $datum] 0
+
+ # verify_t73 is_long dbc $keys $key
+ error_check_good curs_close [$curs close] 0
+ }
+	# verify_t73 is_long dbc $keys $key
+ error_check_good ctxn_abort [$ctxn abort] 0
+ verify_t73 is_long dbc $keys $key
+
+ puts "\tTest0$tnum.d: Cursor put (DB_AFTER) first to last;\
+ $keys new dups, short data"
+ # We want to add a datum after each key from 0 to the current
+ # value of $keys, which we thus need to save.
+ set ctxn [$env txn -parent $txn]
+ error_check_good ctxn($i) [is_valid_txn $ctxn $env] TRUE
+ set keysnow $keys
+ for { set i 0 } { $i < $keysnow } { incr i } {
+ set datum [makedatum_t73 $keys 0]
+ set curs [$db cursor -txn $ctxn]
+ error_check_good "db cursor create" [is_valid_cursor $curs $db]\
+ TRUE
+
+ # Which datum to insert this guy after.
+ set curdatum [makedatum_t73 $i 0]
+ error_check_good "c_get(DB_GET_BOTH, $i)"\
+ [$curs get -get_both $key $curdatum]\
+ [list [list $key $curdatum]]
+ error_check_good "c_put(DB_AFTER, $i)"\
+ [$curs put -after $datum] 0
+
+ # verify_t73 is_long dbc $keys $key
+ error_check_good curs_close [$curs close] 0
+ }
+ error_check_good ctxn_abort [$ctxn abort] 0
+ verify_t73 is_long dbc $keys $key
+
+ puts "\tTest0$tnum.e: Cursor put (DB_BEFORE) last to first;\
+ $keys new dups, short data"
+ set ctxn [$env txn -parent $txn]
+ error_check_good ctxn($i) [is_valid_txn $ctxn $env] TRUE
+ for { set i [expr $keys - 1] } { $i >= 0 } { incr i -1 } {
+ set datum [makedatum_t73 $keys 0]
+ set curs [$db cursor -txn $ctxn]
+ error_check_good "db cursor create" [is_valid_cursor $curs $db]\
+ TRUE
+
+ # Which datum to insert this guy before.
+ set curdatum [makedatum_t73 $i 0]
+ error_check_good "c_get(DB_GET_BOTH, $i)"\
+ [$curs get -get_both $key $curdatum]\
+ [list [list $key $curdatum]]
+ error_check_good "c_put(DB_BEFORE, $i)"\
+ [$curs put -before $datum] 0
+
+ # verify_t73 is_long dbc $keys $key
+ error_check_good curs_close [$curs close] 0
+ }
+ error_check_good ctxn_abort [$ctxn abort] 0
+ verify_t73 is_long dbc $keys $key
+
+ puts "\tTest0$tnum.f: Cursor put (DB_CURRENT), first to last,\
+ growing $keys data."
+ set ctxn [$env txn -parent $txn]
+ error_check_good ctxn($i) [is_valid_txn $ctxn $env] TRUE
+ for { set i 0 } { $i < $keysnow } { incr i } {
+ set olddatum [makedatum_t73 $i 0]
+ set newdatum [makedatum_t73 $i 1]
+ set curs [$db cursor -txn $ctxn]
+ error_check_good "db cursor create" [is_valid_cursor $curs $db]\
+ TRUE
+
+ error_check_good "c_get(DB_GET_BOTH, $i)"\
+ [$curs get -get_both $key $olddatum]\
+ [list [list $key $olddatum]]
+ error_check_good "c_put(DB_CURRENT, $i)"\
+ [$curs put -current $newdatum] 0
+
+ set is_long($i) 1
+
+ # verify_t73 is_long dbc $keys $key
+ error_check_good curs_close [$curs close] 0
+ }
+ error_check_good ctxn_abort [$ctxn abort] 0
+ for { set i 0 } { $i < $keysnow } { incr i } {
+ set is_long($i) 0
+ }
+ verify_t73 is_long dbc $keys $key
+
+ # Now delete the first item, abort the deletion, and make sure
+ # we're still sane.
+ puts "\tTest0$tnum.g: Cursor delete first item, then abort delete."
+ set ctxn [$env txn -parent $txn]
+ error_check_good ctxn($i) [is_valid_txn $ctxn $env] TRUE
+ set curs [$db cursor -txn $ctxn]
+ error_check_good "db cursor create" [is_valid_cursor $curs $db] TRUE
+ set datum [makedatum_t73 0 0]
+ error_check_good "c_get(DB_GET_BOTH, 0)"\
+ [$curs get -get_both $key $datum] [list [list $key $datum]]
+ error_check_good "c_del(0)" [$curs del] 0
+ error_check_good curs_close [$curs close] 0
+ error_check_good ctxn_abort [$ctxn abort] 0
+ verify_t73 is_long dbc $keys $key
+
+ # Ditto, for the last item.
+ puts "\tTest0$tnum.h: Cursor delete last item, then abort delete."
+ set ctxn [$env txn -parent $txn]
+ error_check_good ctxn($i) [is_valid_txn $ctxn $env] TRUE
+ set curs [$db cursor -txn $ctxn]
+ error_check_good "db cursor create" [is_valid_cursor $curs $db] TRUE
+ set datum [makedatum_t73 [expr $keys - 1] 0]
+ error_check_good "c_get(DB_GET_BOTH, [expr $keys - 1])"\
+ [$curs get -get_both $key $datum] [list [list $key $datum]]
+	error_check_good "c_del(last)" [$curs del] 0
+ error_check_good curs_close [$curs close] 0
+ error_check_good ctxn_abort [$ctxn abort] 0
+ verify_t73 is_long dbc $keys $key
+
+ # Ditto, for all the items.
+ puts "\tTest0$tnum.i: Cursor delete all items, then abort delete."
+ set ctxn [$env txn -parent $txn]
+ error_check_good ctxn($i) [is_valid_txn $ctxn $env] TRUE
+ set curs [$db cursor -txn $ctxn]
+ error_check_good "db cursor create" [is_valid_cursor $curs $db] TRUE
+ set datum [makedatum_t73 0 0]
+ error_check_good "c_get(DB_GET_BOTH, 0)"\
+ [$curs get -get_both $key $datum] [list [list $key $datum]]
+ error_check_good "c_del(0)" [$curs del] 0
+ for { set i 1 } { $i < $keys } { incr i } {
+ error_check_good "c_get(DB_NEXT, $i)"\
+ [$curs get -next] [list [list $key [makedatum_t73 $i 0]]]
+ error_check_good "c_del($i)" [$curs del] 0
+ }
+ error_check_good curs_close [$curs close] 0
+ error_check_good ctxn_abort [$ctxn abort] 0
+ verify_t73 is_long dbc $keys $key
+
+ # Close cursors.
+ puts "\tTest0$tnum.j: Closing cursors."
+ for { set i 0 } { $i < $keys } { incr i } {
+ error_check_good "dbc close ($i)" [$dbc($i) close] 0
+ }
+ error_check_good "db close" [$db close] 0
+ error_check_good txn_commit [$txn commit] 0
+ error_check_good "env close" [$env close] 0
+}
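+
+# Note: makedatum_t73 and verify_t73 are not defined in this file.
+# Judging by the _t73 suffix they are the shared helpers from
+# test073.tcl (this test is a dup-page variant of that one), so the
+# harness must load test073.tcl before test087 can run.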
diff --git a/storage/bdb/test/test088.tcl b/storage/bdb/test/test088.tcl
new file mode 100644
index 00000000000..7065b4cd642
--- /dev/null
+++ b/storage/bdb/test/test088.tcl
@@ -0,0 +1,172 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test088.tcl,v 11.12 2002/08/05 19:23:51 sandstro Exp $
+#
+# TEST test088
+# TEST Test of cursor stability across btree splits with very
+# TEST deep trees (a variant of test048). [#2514]
+proc test088 { method args } {
+ global errorCode alphabet
+ source ./include.tcl
+
+ set tstn 088
+ set args [convert_args $method $args]
+
+ if { [is_btree $method] != 1 } {
+ puts "Test$tstn skipping for method $method."
+ return
+ }
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Test088: skipping for specific pagesizes"
+ return
+ }
+
+ set method "-btree"
+
+ puts "\tTest$tstn: Test of cursor stability across btree splits."
+
+ set key "key$alphabet$alphabet$alphabet"
+ set data "data$alphabet$alphabet$alphabet"
+ set txn ""
+ set flags ""
+
+ puts "\tTest$tstn.a: Create $method database."
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test$tstn.db
+ set env NULL
+ } else {
+ set testfile test$tstn.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ set t1 $testdir/t1
+ cleanup $testdir $env
+
+ set ps 512
+ set txn ""
+ set oflags "-create -pagesize $ps -mode 0644 $args $method"
+ set db [eval {berkdb_open} $oflags $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set nkeys 5
+ # Fill page w/ key/data pairs.
+ #
+ puts "\tTest$tstn.b: Fill page with $nkeys small key/data pairs."
+ for { set i 0 } { $i < $nkeys } { incr i } {
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn {${key}00000$i $data$i}]
+ error_check_good dbput $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ # get db ordering, set cursors
+ puts "\tTest$tstn.c: Set cursors on each of $nkeys pairs."
+ # if mkeys is above 1000, need to adjust below for lexical order
+ set mkeys 30000
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ set mkeys 300
+ }
+ for {set i 0; set ret [$db get ${key}00000$i]} {\
+ $i < $nkeys && [llength $ret] != 0} {\
+ incr i; set ret [$db get ${key}00000$i]} {
+ set key_set($i) [lindex [lindex $ret 0] 0]
+ set data_set($i) [lindex [lindex $ret 0] 1]
+ set dbc [eval {$db cursor} $txn]
+ set dbc_set($i) $dbc
+ error_check_good db_cursor:$i [is_substr $dbc_set($i) $db] 1
+ set ret [$dbc_set($i) get -set $key_set($i)]
+ error_check_bad dbc_set($i)_get:set [llength $ret] 0
+ }
+
+ puts "\tTest$tstn.d: Add $mkeys pairs to force splits."
+ for {set i $nkeys} { $i < $mkeys } { incr i } {
+ if { $i >= 10000 } {
+ set ret [eval {$db put} $txn {${key}0$i $data$i}]
+ } elseif { $i >= 1000 } {
+ set ret [eval {$db put} $txn {${key}00$i $data$i}]
+ } elseif { $i >= 100 } {
+ set ret [eval {$db put} $txn {${key}000$i $data$i}]
+ } elseif { $i >= 10 } {
+ set ret [eval {$db put} $txn {${key}0000$i $data$i}]
+ } else {
+ set ret [eval {$db put} $txn {${key}00000$i $data$i}]
+ }
+ error_check_good dbput:more $ret 0
+ }
+
+ puts "\tTest$tstn.e: Make sure splits happened."
+ # XXX cannot execute stat in presence of txns and cursors.
+ if { $txnenv == 0 } {
+ error_check_bad stat:check-split [is_substr [$db stat] \
+ "{{Internal pages} 0}"] 1
+ }
+
+ puts "\tTest$tstn.f: Check to see that cursors maintained reference."
+ for {set i 0} { $i < $nkeys } {incr i} {
+ set ret [$dbc_set($i) get -current]
+ error_check_bad dbc$i:get:current [llength $ret] 0
+ set ret2 [$dbc_set($i) get -set $key_set($i)]
+ error_check_bad dbc$i:get:set [llength $ret2] 0
+ error_check_good dbc$i:get(match) $ret $ret2
+ }
+
+ puts "\tTest$tstn.g: Delete added keys to force reverse splits."
+ for {set i $nkeys} { $i < $mkeys } { incr i } {
+ if { $i >= 10000 } {
+ set ret [eval {$db del} $txn {${key}0$i}]
+ } elseif { $i >= 1000 } {
+ set ret [eval {$db del} $txn {${key}00$i}]
+ } elseif { $i >= 100 } {
+ set ret [eval {$db del} $txn {${key}000$i}]
+ } elseif { $i >= 10 } {
+ set ret [eval {$db del} $txn {${key}0000$i}]
+ } else {
+ set ret [eval {$db del} $txn {${key}00000$i}]
+ }
+		error_check_good dbdel:more $ret 0
+ }
+
+ puts "\tTest$tstn.h: Verify cursor reference."
+ for {set i 0} { $i < $nkeys } {incr i} {
+ set ret [$dbc_set($i) get -current]
+ error_check_bad dbc$i:get:current [llength $ret] 0
+ set ret2 [$dbc_set($i) get -set $key_set($i)]
+ error_check_bad dbc$i:get:set [llength $ret2] 0
+ error_check_good dbc$i:get(match) $ret $ret2
+ }
+
+ puts "\tTest$tstn.i: Cleanup."
+ # close cursors
+ for {set i 0} { $i < $nkeys } {incr i} {
+ error_check_good dbc_close:$i [$dbc_set($i) close] 0
+ }
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good dbclose [$db close] 0
+
+ puts "\tTest$tstn complete."
+}
diff --git a/storage/bdb/test/test089.tcl b/storage/bdb/test/test089.tcl
new file mode 100644
index 00000000000..d378152f203
--- /dev/null
+++ b/storage/bdb/test/test089.tcl
@@ -0,0 +1,180 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test089.tcl,v 11.2 2002/08/08 15:38:12 bostic Exp $
+#
+# TEST test089
+# TEST Concurrent Data Store test (CDB)
+# TEST
+# TEST Enhanced CDB testing to test off-page dups, cursor dups and
+# TEST cursor operations like c_del then c_get.
+proc test089 { method {nentries 1000} args } {
+ global datastr
+ global encrypt
+ source ./include.tcl
+
+ #
+ # If we are using an env, then skip this test. It needs its own.
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ incr eindex
+ set env [lindex $args $eindex]
+ puts "Test089 skipping for env $env"
+ return
+ }
+ set encargs ""
+ set args [convert_args $method $args]
+ set oargs [split_encargs $args encargs]
+ set omethod [convert_method $method]
+
+ puts "Test089: ($oargs) $method CDB Test cursor/dup operations"
+
+ # Process arguments
+ # Create the database and open the dictionary
+ set testfile test089.db
+ set testfile1 test089a.db
+
+ env_cleanup $testdir
+
+ set env [eval {berkdb_env -create -cdb} $encargs -home $testdir]
+ error_check_good dbenv [is_valid_env $env] TRUE
+
+ set db [eval {berkdb_open -env $env -create \
+ -mode 0644 $omethod} $oargs {$testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set db1 [eval {berkdb_open -env $env -create \
+ -mode 0644 $omethod} $oargs {$testfile1}]
+ error_check_good dbopen [is_valid_db $db1] TRUE
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ # Here is the loop where we put each key/data pair
+ puts "\tTest089.a: put loop"
+ set did [open $dict]
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ } else {
+ set key $str
+ }
+ set ret [eval {$db put} \
+ $txn $pflags {$key [chop_data $method $datastr]}]
+ error_check_good put:$db $ret 0
+ set ret [eval {$db1 put} \
+ $txn $pflags {$key [chop_data $method $datastr]}]
+ error_check_good put:$db1 $ret 0
+ incr count
+ }
+ close $did
+ error_check_good close:$db [$db close] 0
+ error_check_good close:$db1 [$db1 close] 0
+
+ # Database is created, now set up environment
+
+ # Remove old mpools and Open/create the lock and mpool regions
+ error_check_good env:close:$env [$env close] 0
+ set ret [eval {berkdb envremove} $encargs -home $testdir]
+ error_check_good env_remove $ret 0
+
+ set env [eval {berkdb_env_noerr -create -cdb} $encargs -home $testdir]
+ error_check_good dbenv [is_valid_widget $env env] TRUE
+
+ # This tests the failure found in #1923
+ puts "\tTest089.b: test delete then get"
+
+ set db1 [eval {berkdb_open_noerr -env $env -create \
+ -mode 0644 $omethod} $oargs {$testfile1}]
+ error_check_good dbopen [is_valid_db $db1] TRUE
+
+ set dbc [$db1 cursor -update]
+ error_check_good dbcursor [is_valid_cursor $dbc $db1] TRUE
+
+ for {set kd [$dbc get -first] } { [llength $kd] != 0 } \
+ {set kd [$dbc get -next] } {
+ error_check_good dbcdel [$dbc del] 0
+ }
+ error_check_good dbc_close [$dbc close] 0
+
+ puts "\tTest089.c: CDB cursor dups"
+ set dbc [$db1 cursor -update]
+ error_check_good dbcursor [is_valid_cursor $dbc $db1] TRUE
+ set stat [catch {$dbc dup} ret]
+ error_check_bad wr_cdup_stat $stat 0
+ error_check_good wr_cdup [is_substr $ret \
+ "Cannot duplicate writeable cursor"] 1
+
+ set dbc_ro [$db1 cursor]
+ error_check_good dbcursor [is_valid_cursor $dbc_ro $db1] TRUE
+ set dup_dbc [$dbc_ro dup]
+ error_check_good rd_cdup [is_valid_cursor $dup_dbc $db1] TRUE
+
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good dbc_close [$dbc_ro close] 0
+ error_check_good dbc_close [$dup_dbc close] 0
+ error_check_good db_close [$db1 close] 0
+ error_check_good env_close [$env close] 0
+
+ if { [is_btree $method] != 1 } {
+ puts "Skipping rest of test089 for $method method."
+ return
+ }
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Skipping rest of test089 for specific pagesizes"
+ return
+ }
+ append oargs " -dup "
+ test089_dup $testdir $encargs $oargs $omethod $nentries
+ append oargs " -dupsort "
+ test089_dup $testdir $encargs $oargs $omethod $nentries
+}
+
+proc test089_dup { testdir encargs oargs method nentries } {
+
+ env_cleanup $testdir
+ set env [eval {berkdb_env -create -cdb} $encargs -home $testdir]
+ error_check_good dbenv [is_valid_env $env] TRUE
+
+ #
+ # Set pagesize small to generate lots of off-page dups
+ #
+ set page 512
+ set nkeys 5
+ set data "data"
+ set key "test089_key"
+ set testfile test089.db
+ puts "\tTest089.d: CDB ($oargs) off-page dups"
+ set oflags "-env $env -create -mode 0644 $oargs $method"
+ set db [eval {berkdb_open} -pagesize $page $oflags $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ puts "\tTest089.e: Fill page with $nkeys keys, with $nentries dups"
+ for { set k 0 } { $k < $nkeys } { incr k } {
+ for { set i 0 } { $i < $nentries } { incr i } {
+ set ret [$db put $key $i$data$k]
+ error_check_good dbput $ret 0
+ }
+ }
+
+ # Verify we have off-page duplicates
+ set stat [$db stat]
+ error_check_bad stat:offpage [is_substr $stat "{{Internal pages} 0}"] 1
+
+ set dbc [$db cursor -update]
+ error_check_good dbcursor [is_valid_cursor $dbc $db] TRUE
+
+ puts "\tTest089.f: test delete then get of off-page dups"
+ for {set kd [$dbc get -first] } { [llength $kd] != 0 } \
+ {set kd [$dbc get -next] } {
+ error_check_good dbcdel [$dbc del] 0
+ }
+ error_check_good dbc_close [$dbc close] 0
+ error_check_good db_close [$db close] 0
+ error_check_good env_close [$env close] 0
+}
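+
+# The CDB cursor rule exercised in .c above, as a sketch (placeholder
+# names):
+#
+#	set env [berkdb_env -create -cdb -home $dir]
+#	set db [berkdb_open -env $env -create -btree file.db]
+#	set wc [$db cursor -update]	;# write cursor: one writer at a time
+#	# [$wc dup] fails; CDB refuses to duplicate a writeable cursor
+#	set rc [$db cursor]		;# read cursors dup freely
+#	set rc2 [$rc dup]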
diff --git a/storage/bdb/test/test090.tcl b/storage/bdb/test/test090.tcl
new file mode 100644
index 00000000000..da90688ffc5
--- /dev/null
+++ b/storage/bdb/test/test090.tcl
@@ -0,0 +1,16 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test090.tcl,v 11.10 2002/08/15 20:55:21 sandstro Exp $
+#
+# TEST test090
+# TEST Test for functionality near the end of the queue using test001.
+proc test090 { method {nentries 10000} {txn -txn} {tnum "90"} args} {
+ if { [is_queueext $method ] == 0 } {
+ puts "Skipping test0$tnum for $method."
+ return;
+ }
+ eval {test001 $method $nentries 4294967000 $tnum 0} $args
+}
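+
+# The start value 4294967000 sits just below the 32-bit record-number
+# limit (2^32 - 1 = 4294967295), so inserting 10000 records runs the
+# queue across the wrap point: the "end of the queue" behavior the
+# TEST comment refers to.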
diff --git a/storage/bdb/test/test091.tcl b/storage/bdb/test/test091.tcl
new file mode 100644
index 00000000000..cfd2a60ebb5
--- /dev/null
+++ b/storage/bdb/test/test091.tcl
@@ -0,0 +1,20 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test091.tcl,v 11.7 2002/01/11 15:53:56 bostic Exp $
+#
+# TEST test091
+# TEST Test of DB_CONSUME_WAIT.
+proc test091 { method {nconsumers 4} \
+ {nproducers 2} {nitems 1000} {start 0 } {tnum "91"} args} {
+ if { [is_queue $method ] == 0 } {
+ puts "Skipping test0$tnum for $method."
+ return;
+ }
+ eval {test070 $method \
+ $nconsumers $nproducers $nitems WAIT $start -txn $tnum } $args
+ eval {test070 $method \
+ $nconsumers $nproducers $nitems WAIT $start -cdb $tnum } $args
+}
diff --git a/storage/bdb/test/test092.tcl b/storage/bdb/test/test092.tcl
new file mode 100644
index 00000000000..29c1c55a9a9
--- /dev/null
+++ b/storage/bdb/test/test092.tcl
@@ -0,0 +1,241 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test092.tcl,v 11.13 2002/02/22 15:26:28 sandstro Exp $
+#
+# TEST test092
+# TEST Test of DB_DIRTY_READ [#3395]
+# TEST
+# TEST We set up a database with nentries in it. We then open the
+# TEST database read-only twice. One with dirty read and one without.
+# TEST We open the database for writing and update some entries in it.
+# TEST Then read those new entries via db->get (clean and dirty), and
+# TEST via cursors (clean and dirty).
+proc test092 { method {nentries 1000} args } {
+ source ./include.tcl
+ #
+ # If we are using an env, then skip this test. It needs its own.
+ set eindex [lsearch -exact $args "-env"]
+ if { $eindex != -1 } {
+ incr eindex
+ set env [lindex $args $eindex]
+ puts "Test092 skipping for env $env"
+ return
+ }
+ set args [convert_args $method $args]
+ set encargs ""
+ set args [split_encargs $args encargs]
+ set omethod [convert_method $method]
+
+ puts "Test092: Dirty Read Test $method $nentries"
+
+ # Create the database and open the dictionary
+ set testfile test092.db
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+
+ env_cleanup $testdir
+
+ set lmax [expr $nentries * 2]
+ set lomax [expr $nentries * 2]
+ set env [eval {berkdb_env -create -txn} $encargs -home $testdir \
+ -lock_max_locks $lmax -lock_max_objects $lomax]
+ error_check_good dbenv [is_valid_env $env] TRUE
+
+ set db [eval {berkdb_open -env $env -create \
+ -mode 0644 $omethod} $args {$testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Here is the loop where we put each key/data pair.
+ # Key is entry, data is entry also.
+ puts "\tTest092.a: put loop"
+ set count 0
+ set did [open $dict]
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ global kvals
+
+ set key [expr $count + 1]
+ set kvals($key) [pad_data $method $str]
+ } else {
+ set key $str
+ }
+ set ret [eval {$db put} {$key [chop_data $method $str]}]
+ error_check_good put:$db $ret 0
+ incr count
+ }
+ close $did
+ error_check_good close:$db [$db close] 0
+
+ puts "\tTest092.b: Opening all the handles"
+ #
+ # Open all of our handles.
+ # We need:
+ # 1. Our main txn (t).
+ # 2. A txn that can read dirty data (tdr).
+ # 3. A db handle for writing via txn (dbtxn).
+ # 4. A db handle for clean data (dbcl).
+ # 5. A db handle for dirty data (dbdr).
+ # 6. A cursor handle for dirty txn data (clean db handle using
+	#    the dirty txn handle on the cursor call) (dbccl).
+ # 7. A cursor handle for dirty data (dirty on get call) (dbcdr0).
+ # 8. A cursor handle for dirty data (dirty on cursor call) (dbcdr1).
+ set t [$env txn]
+ error_check_good txnbegin [is_valid_txn $t $env] TRUE
+
+ set tdr [$env txn -dirty]
+ error_check_good txnbegin:dr [is_valid_txn $tdr $env] TRUE
+ set dbtxn [eval {berkdb_open -auto_commit -env $env -dirty \
+ -mode 0644 $omethod} {$testfile}]
+ error_check_good dbopen:dbtxn [is_valid_db $dbtxn] TRUE
+
+ set dbcl [eval {berkdb_open -auto_commit -env $env \
+ -rdonly -mode 0644 $omethod} {$testfile}]
+ error_check_good dbopen:dbcl [is_valid_db $dbcl] TRUE
+
+ set dbdr [eval {berkdb_open -auto_commit -env $env -dirty \
+ -rdonly -mode 0644 $omethod} {$testfile}]
+ error_check_good dbopen:dbdr [is_valid_db $dbdr] TRUE
+
+ set dbccl [$dbcl cursor -txn $tdr]
+ error_check_good dbcurs:dbcl [is_valid_cursor $dbccl $dbcl] TRUE
+
+ set dbcdr0 [$dbdr cursor]
+ error_check_good dbcurs:dbdr0 [is_valid_cursor $dbcdr0 $dbdr] TRUE
+
+ set dbcdr1 [$dbdr cursor -dirty]
+ error_check_good dbcurs:dbdr1 [is_valid_cursor $dbcdr1 $dbdr] TRUE
+
+ #
+ # Now that we have all of our handles, change all the data in there
+ # to be the key and data the same, but data is capitalized.
+ puts "\tTest092.c: put/get data within a txn"
+ set gflags ""
+ if { [is_record_based $method] == 1 } {
+ set checkfunc test092dr_recno.check
+ append gflags " -recno"
+ } else {
+ set checkfunc test092dr.check
+ }
+ set count 0
+ set did [open $dict]
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ } else {
+ set key $str
+ }
+ set ustr [string toupper $str]
+ set clret [list [list $key [pad_data $method $str]]]
+ set drret [list [list $key [pad_data $method $ustr]]]
+ #
+ # Put the data in the txn.
+ #
+ set ret [eval {$dbtxn put} -txn $t \
+ {$key [chop_data $method $ustr]}]
+ error_check_good put:$dbtxn $ret 0
+
+ #
+ # Now get the data using the different db handles and
+ # make sure it is dirty or clean data.
+ #
+ # Using the dirty txn should show us dirty data
+ set ret [eval {$dbcl get -txn $tdr} $gflags {$key}]
+		error_check_good dbcl:get $ret $drret
+
+ set ret [eval {$dbdr get -dirty} $gflags {$key}]
+ error_check_good dbdr1:get $ret $drret
+
+ set ret [eval {$dbdr get -txn $tdr} $gflags {$key}]
+ error_check_good dbdr2:get $ret $drret
+
+ incr count
+ }
+ close $did
+
+ puts "\tTest092.d: Check dirty data using dirty txn and clean db/cursor"
+ dump_file_walk $dbccl $t1 $checkfunc "-first" "-next"
+
+ puts "\tTest092.e: Check dirty data using -dirty cget flag"
+ dump_file_walk $dbcdr0 $t2 $checkfunc "-first" "-next" "-dirty"
+
+ puts "\tTest092.f: Check dirty data using -dirty cursor"
+ dump_file_walk $dbcdr1 $t3 $checkfunc "-first" "-next"
+
+ #
+ # We must close these before aborting the real txn
+ # because they all hold read locks on the pages.
+ #
+ error_check_good dbccl:close [$dbccl close] 0
+ error_check_good dbcdr0:close [$dbcdr0 close] 0
+ error_check_good dbcdr1:close [$dbcdr1 close] 0
+
+ #
+ # Now abort the modifying transaction and rerun the data checks.
+ #
+ puts "\tTest092.g: Aborting the write-txn"
+ error_check_good txnabort [$t abort] 0
+
+ set dbccl [$dbcl cursor -txn $tdr]
+ error_check_good dbcurs:dbcl [is_valid_cursor $dbccl $dbcl] TRUE
+
+ set dbcdr0 [$dbdr cursor]
+ error_check_good dbcurs:dbdr0 [is_valid_cursor $dbcdr0 $dbdr] TRUE
+
+ set dbcdr1 [$dbdr cursor -dirty]
+ error_check_good dbcurs:dbdr1 [is_valid_cursor $dbcdr1 $dbdr] TRUE
+
+ if { [is_record_based $method] == 1 } {
+ set checkfunc test092cl_recno.check
+ } else {
+ set checkfunc test092cl.check
+ }
+ puts "\tTest092.h: Check clean data using -dirty cget flag"
+ dump_file_walk $dbccl $t1 $checkfunc "-first" "-next"
+
+ puts "\tTest092.i: Check clean data using -dirty cget flag"
+ dump_file_walk $dbcdr0 $t2 $checkfunc "-first" "-next" "-dirty"
+
+ puts "\tTest092.j: Check clean data using -dirty cursor"
+ dump_file_walk $dbcdr1 $t3 $checkfunc "-first" "-next"
+
+ # Clean up our handles
+ error_check_good dbccl:close [$dbccl close] 0
+ error_check_good tdrcommit [$tdr commit] 0
+ error_check_good dbcdr0:close [$dbcdr0 close] 0
+ error_check_good dbcdr1:close [$dbcdr1 close] 0
+ error_check_good dbclose [$dbcl close] 0
+ error_check_good dbclose [$dbdr close] 0
+ error_check_good dbclose [$dbtxn close] 0
+ error_check_good envclose [$env close] 0
+}
+
+# Check functions for test092.
+# Clean checks mean keys and data are identical.
+# Dirty checks mean data are uppercase versions of keys.
+proc test092cl.check { key data } {
+ error_check_good "key/data mismatch" $key $data
+}
+
+proc test092cl_recno.check { key data } {
+ global kvals
+
+ error_check_good key"$key"_exists [info exists kvals($key)] 1
+ error_check_good "key/data mismatch, key $key" $data $kvals($key)
+}
+
+proc test092dr.check { key data } {
+ error_check_good "key/data mismatch" $key [string tolower $data]
+}
+
+proc test092dr_recno.check { key data } {
+ global kvals
+
+ error_check_good key"$key"_exists [info exists kvals($key)] 1
+ error_check_good "key/data mismatch, key $key" $data \
+ [string toupper $kvals($key)]
+}
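+
+# A sketch of the behavior exercised above (hypothetical values): while
+# the writer txn holds an uncommitted put of "APPLE" for key "apple",
+# every dirty read -- the dirty txn, the -dirty get flag, or a -dirty
+# cursor -- returns "APPLE"; once the writer aborts, the same handles
+# all return the committed "apple" again.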
+
diff --git a/storage/bdb/test/test093.tcl b/storage/bdb/test/test093.tcl
new file mode 100644
index 00000000000..e3f8f0103c6
--- /dev/null
+++ b/storage/bdb/test/test093.tcl
@@ -0,0 +1,393 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test093.tcl,v 11.20 2002/06/20 19:01:02 sue Exp $
+#
+# TEST test093
+# TEST Test using set_bt_compare.
+# TEST
+# TEST Use the first 10,000 entries from the dictionary.
+# TEST Insert each with self as key and data; retrieve each.
+# TEST After all are entered, retrieve all; compare output to original.
+# TEST Close file, reopen, do retrieve and re-verify.
+proc test093 { method {nentries 10000} {tnum "93"} args} {
+ source ./include.tcl
+ global btvals
+ global btvalsck
+ global errorInfo
+
+ set dbargs [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_btree $method] != 1 } {
+ puts "Test0$tnum: skipping for method $method."
+ return
+ }
+ set txnenv 0
+ set eindex [lsearch -exact $dbargs "-env"]
+ if { $eindex != -1 } {
+ set testfile test0$tnum.db
+ incr eindex
+ set env [lindex $dbargs $eindex]
+ set rpcenv [is_rpcenv $env]
+ if { $rpcenv == 1 } {
+ puts "Test0$tnum: skipping for RPC"
+ return
+ }
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append dbargs " -auto_commit "
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
+ cleanup $testdir $env
+ }
+ puts "Test0$tnum: $method ($args) $nentries using btcompare"
+
+ test093_run $omethod $dbargs $nentries $tnum test093_cmp1 test093_sort1
+ test093_runbig $omethod $dbargs $nentries $tnum \
+ test093_cmp1 test093_sort1
+ test093_run $omethod $dbargs $nentries $tnum test093_cmp2 test093_sort2
+ #
+ # Don't bother running the second, really slow, comparison
+ # function on test093_runbig (file contents).
+
+ # Clean up so verification doesn't fail. (There's currently
+ # no way to specify a comparison function to berkdb dbverify.)
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ set eindex [lsearch -exact $dbargs "-env"]
+ if { $eindex == -1 } {
+ set env NULL
+ } else {
+ incr eindex
+ set env [lindex $dbargs $eindex]
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+}
+
+proc test093_run { method dbargs nentries tnum cmpfunc sortfunc } {
+ source ./include.tcl
+ global btvals
+ global btvalsck
+
+ # Create the database and open the dictionary
+ set eindex [lsearch -exact $dbargs "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ set txnenv 0
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum.db
+ set env NULL
+ } else {
+ set testfile test0$tnum.db
+ incr eindex
+ set env [lindex $dbargs $eindex]
+ set txnenv [is_txnenv $env]
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ set db [eval {berkdb_open -btcompare $cmpfunc \
+ -create -mode 0644} $method $dbargs $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set did [open $dict]
+
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set btvals {}
+ set btvalsck {}
+ set checkfunc test093_check
+ puts "\tTest0$tnum.a: put/get loop"
+ # Here is the loop where we put and get each key/data pair
+ set count 0
+ while { [gets $did str] != -1 && $count < $nentries } {
+ set key $str
+ set str [reverse $str]
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval \
+ {$db put} $txn $pflags {$key [chop_data $method $str]}]
+ error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ lappend btvals $key
+
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good \
+ get $ret [list [list $key [pad_data $method $str]]]
+
+ incr count
+ }
+ close $did
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tTest0$tnum.b: dump file"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_file $db $txn $t1 $checkfunc
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ # Now compare the keys to see if they match the dictionary (or ints)
+ set q q
+ filehead $nentries $dict $t2
+ filesort $t2 $t3
+ file rename -force $t3 $t2
+ filesort $t1 $t3
+
+ error_check_good Test0$tnum:diff($t3,$t2) \
+ [filecmp $t3 $t2] 0
+
+ puts "\tTest0$tnum.c: dump file in order"
+ # Now, reopen the file and run the last test again.
+ # We open it here, ourselves, because all uses of the db
+ # need to have the correct comparison func set. Then
+ # call dump_file_direction directly.
+ set btvalsck {}
+ set db [eval {berkdb_open -btcompare $cmpfunc -rdonly} \
+ $dbargs $method $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_file_direction $db $txn $t1 $checkfunc "-first" "-next"
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ #
+ # We need to sort btvals according to the comparison function.
+ # Once that is done, btvalsck and btvals should be the same.
+ puts "\tTest0$tnum.d: check file order"
+
+ $sortfunc
+
+ error_check_good btvals:len [llength $btvals] [llength $btvalsck]
+ for {set i 0} {$i < $nentries} {incr i} {
+ error_check_good vals:$i [lindex $btvals $i] \
+ [lindex $btvalsck $i]
+ }
+}
+
+proc test093_runbig { method dbargs nentries tnum cmpfunc sortfunc } {
+ source ./include.tcl
+ global btvals
+ global btvalsck
+
+ # Create the database and open the dictionary
+ set eindex [lsearch -exact $dbargs "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ set txnenv 0
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum.db
+ set env NULL
+ } else {
+ set testfile test0$tnum.db
+ incr eindex
+ set env [lindex $dbargs $eindex]
+ set txnenv [is_txnenv $env]
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ set db [eval {berkdb_open -btcompare $cmpfunc \
+ -create -mode 0644} $method $dbargs $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ set t4 $testdir/t4
+ set t5 $testdir/t5
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set btvals {}
+ set btvalsck {}
+ set checkfunc test093_checkbig
+ puts "\tTest0$tnum.e:\
+ big key put/get loop key=filecontents data=filename"
+
+ # Here is the loop where we put and get each key/data pair
+ set file_list [get_file_list 1]
+
+ set count 0
+ foreach f $file_list {
+ set fid [open $f r]
+ fconfigure $fid -translation binary
+ set key [read $fid]
+ close $fid
+
+ set key $f$key
+
+ set fcopy [open $t5 w]
+ fconfigure $fcopy -translation binary
+ puts -nonewline $fcopy $key
+ close $fcopy
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} $txn $pflags {$key \
+ [chop_data $method $f]}]
+ error_check_good put_file $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ lappend btvals $key
+
+ # Should really catch errors
+ set fid [open $t4 w]
+ fconfigure $fid -translation binary
+ if [catch {eval {$db get} $gflags {$key}} data] {
+ puts -nonewline $fid $data
+ } else {
+ # Data looks like {{key data}}
+ set key [lindex [lindex $data 0] 0]
+ puts -nonewline $fid $key
+ }
+ close $fid
+ error_check_good \
+ Test093:diff($t5,$t4) [filecmp $t5 $t4] 0
+
+ incr count
+ }
+
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tTest0$tnum.f: big dump file"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_file $db $txn $t1 $checkfunc
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ puts "\tTest0$tnum.g: dump file in order"
+ # Now, reopen the file and run the last test again.
+ # We open it here, ourselves, because all uses of the db
+ # need to have the correct comparison func set. Then
+ # call dump_file_direction directly.
+
+ set btvalsck {}
+ set db [eval {berkdb_open -btcompare $cmpfunc -rdonly} \
+ $dbargs $method $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_file_direction $db $txn $t1 $checkfunc "-first" "-next"
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ #
+ # We need to sort btvals according to the comparison function.
+ # Once that is done, btvalsck and btvals should be the same.
+ puts "\tTest0$tnum.h: check file order"
+
+ $sortfunc
+ error_check_good btvals:len [llength $btvals] [llength $btvalsck]
+
+ set end [llength $btvals]
+ for {set i 0} {$i < $end} {incr i} {
+ error_check_good vals:$i [lindex $btvals $i] \
+ [lindex $btvalsck $i]
+ }
+}
+
+# Simple bt comparison.
+proc test093_cmp1 { a b } {
+ return [string compare $b $a]
+}
+
+# Simple bt sorting.
+proc test093_sort1 {} {
+ global btvals
+ #
+ # This one is easy, just sort in reverse.
+ #
+ set btvals [lsort -decreasing $btvals]
+}
+
+proc test093_cmp2 { a b } {
+ set arev [reverse $a]
+ set brev [reverse $b]
+ return [string compare $arev $brev]
+}
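+
+# For illustration (hypothetical words): under test093_cmp2, "ward"
+# sorts before "chair", because their reversals "draw" and "riahc"
+# compare in that order.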
+
+proc test093_sort2 {} {
+ global btvals
+
+	# We have to reverse them, then sort them,
+	# then reverse them back to real words.
+ set rbtvals {}
+ foreach i $btvals {
+ lappend rbtvals [reverse $i]
+ }
+ set rbtvals [lsort -increasing $rbtvals]
+ set newbtvals {}
+ foreach i $rbtvals {
+ lappend newbtvals [reverse $i]
+ }
+ set btvals $newbtvals
+}
+
+# Check function for test093; keys and data are identical
+proc test093_check { key data } {
+ global btvalsck
+
+ error_check_good "key/data mismatch" $data [reverse $key]
+ lappend btvalsck $key
+}
+
+# Check function for test093 big keys;
+proc test093_checkbig { key data } {
+ source ./include.tcl
+ global btvalsck
+
+ set fid [open $data r]
+ fconfigure $fid -translation binary
+ set cont [read $fid]
+ close $fid
+ error_check_good "key/data mismatch" $key $data$cont
+ lappend btvalsck $key
+}
+
diff --git a/storage/bdb/test/test094.tcl b/storage/bdb/test/test094.tcl
new file mode 100644
index 00000000000..781052913f4
--- /dev/null
+++ b/storage/bdb/test/test094.tcl
@@ -0,0 +1,251 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test094.tcl,v 11.16 2002/06/20 19:01:02 sue Exp $
+#
+# TEST test094
+# TEST Test using set_dup_compare.
+# TEST
+# TEST Use the first 10,000 entries from the dictionary.
+# TEST Insert each with self as key and data; retrieve each.
+# TEST After all are entered, retrieve all; compare output to original.
+# TEST Close file, reopen, do retrieve and re-verify.
+proc test094 { method {nentries 10000} {ndups 10} {tnum "94"} args} {
+ source ./include.tcl
+ global errorInfo
+
+ set dbargs [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ if { [is_btree $method] != 1 && [is_hash $method] != 1 } {
+ puts "Test0$tnum: skipping for method $method."
+ return
+ }
+
+ set txnenv 0
+ set eindex [lsearch -exact $dbargs "-env"]
+ # Create the database and open the dictionary
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum-a.db
+ set env NULL
+ } else {
+ set testfile test0$tnum-a.db
+ incr eindex
+ set env [lindex $dbargs $eindex]
+ set rpcenv [is_rpcenv $env]
+ if { $rpcenv == 1 } {
+ puts "Test0$tnum: skipping for RPC"
+ return
+ }
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append dbargs " -auto_commit "
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ reduce_dups nentries ndups
+ }
+ set testdir [get_home $env]
+ }
+ puts "Test0$tnum: $method ($args) $nentries \
+ with $ndups dups using dupcompare"
+
+ cleanup $testdir $env
+
+ set db [eval {berkdb_open_noerr -dupcompare test094_cmp \
+ -dup -dupsort -create -mode 0644} $omethod $dbargs {$testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set did [open $dict]
+ set t1 $testdir/t1
+ set pflags ""
+ set gflags ""
+ set txn ""
+ puts "\tTest0$tnum.a: $nentries put/get duplicates loop"
+ # Here is the loop where we put and get each key/data pair
+ set count 0
+ set dlist {}
+ for {set i 0} {$i < $ndups} {incr i} {
+ set dlist [linsert $dlist 0 $i]
+ }
+ while { [gets $did str] != -1 && $count < $nentries } {
+ set key $str
+ for {set i 0} {$i < $ndups} {incr i} {
+ set data $i:$str
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} \
+ $txn $pflags {$key [chop_data $omethod $data]}]
+ error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good get [llength $ret] $ndups
+ incr count
+ }
+ close $did
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tTest0$tnum.b: traverse checking duplicates before close"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dup_check $db $txn $t1 $dlist
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ # Set up second testfile so truncate flag is not needed.
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test0$tnum-b.db
+ set env NULL
+ } else {
+ set testfile test0$tnum-b.db
+ set env [lindex $dbargs $eindex]
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ #
+ # Test dupcompare with data items big enough to force offpage dups.
+ #
+ puts "\tTest0$tnum.c: big key put/get dup loop key=filename data=filecontents"
+ set db [eval {berkdb_open -dupcompare test094_cmp -dup -dupsort \
+ -create -mode 0644} $omethod $dbargs $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Here is the loop where we put and get each key/data pair
+ set file_list [get_file_list 1]
+ if { [llength $file_list] > $nentries } {
+ set file_list [lrange $file_list 1 $nentries]
+ }
+
+ set count 0
+ foreach f $file_list {
+ set fid [open $f r]
+ fconfigure $fid -translation binary
+ set cont [read $fid]
+ close $fid
+
+ set key $f
+ for {set i 0} {$i < $ndups} {incr i} {
+ set data $i:$cont
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$db put} \
+ $txn $pflags {$key [chop_data $omethod $data]}]
+ error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ }
+
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good get [llength $ret] $ndups
+ incr count
+ }
+
+ puts "\tTest0$tnum.d: traverse checking duplicates before close"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dup_file_check $db $txn $t1 $dlist
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ set testdir [get_home $env]
+ }
+ error_check_good db_close [$db close] 0
+
+ # Clean up the test directory, since there's currently
+ # no way to specify a dup_compare function to berkdb dbverify
+ # and without one it will fail.
+ cleanup $testdir $env
+}
+
+# Simple dup comparison.
+proc test094_cmp { a b } {
+ return [string compare $b $a]
+}
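+
+# For illustration (hypothetical dups): because the arguments to
+# string compare are swapped, test094_cmp sorts "1:apple" before
+# "0:apple" -- the duplicates end up in descending lexical order.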
+
+# Check if each key appears exactly [llength dlist] times in the file with
+# the duplicate tags matching those that appear in dlist.
+proc test094_dup_big { db txn tmpfile dlist {extra 0}} {
+ source ./include.tcl
+
+ set outf [open $tmpfile w]
+ # Now we will get each key from the DB and dump to outfile
+ set c [eval {$db cursor} $txn]
+ set lastkey ""
+ set done 0
+ while { $done != 1} {
+ foreach did $dlist {
+ set rec [$c get "-next"]
+ if { [string length $rec] == 0 } {
+ set done 1
+ break
+ }
+ set key [lindex [lindex $rec 0] 0]
+ set fulldata [lindex [lindex $rec 0] 1]
+ set id [id_of $fulldata]
+ set d [data_of $fulldata]
+ if { [string compare $key $lastkey] != 0 && \
+ $id != [lindex $dlist 0] } {
+ set e [lindex $dlist 0]
+ error "FAIL: \tKey \
+ $key, expected dup id $e, got $id"
+ }
+ error_check_good dupget.data $d $key
+ error_check_good dupget.id $id $did
+ set lastkey $key
+ }
+ #
+ # Some tests add an extra dup (like overflow entries)
+ # Check id if it exists.
+ if { $extra != 0} {
+ set okey $key
+ set rec [$c get "-next"]
+ if { [string length $rec] != 0 } {
+ set key [lindex [lindex $rec 0] 0]
+ #
+ # If this key has no extras, go back for
+ # next iteration.
+ if { [string compare $key $lastkey] != 0 } {
+ set key $okey
+ set rec [$c get "-prev"]
+ } else {
+ set fulldata [lindex [lindex $rec 0] 1]
+ set id [id_of $fulldata]
+ set d [data_of $fulldata]
+ error_check_bad dupget.data1 $d $key
+ error_check_good dupget.id1 $id $extra
+ }
+ }
+ }
+ if { $done != 1 } {
+ puts $outf $key
+ }
+ }
+ close $outf
+ error_check_good curs_close [$c close] 0
+}
diff --git a/storage/bdb/test/test095.tcl b/storage/bdb/test/test095.tcl
new file mode 100644
index 00000000000..5543f346b7e
--- /dev/null
+++ b/storage/bdb/test/test095.tcl
@@ -0,0 +1,296 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test095.tcl,v 11.16 2002/08/08 15:38:12 bostic Exp $
+#
+# TEST test095
+# TEST Bulk get test. [#2934]
+proc test095 { method {nsets 1000} {noverflows 25} {tnum 95} args } {
+ source ./include.tcl
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set basename $testdir/test0$tnum
+ set env NULL
+		# Since we create our own env, use a large cache;
+		# no reason to swap--this isn't an mpool test.
+ set carg { -cachesize {0 25000000 0} }
+ } else {
+ set basename test0$tnum
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ puts "Skipping for environment with txns"
+ return
+ }
+ set testdir [get_home $env]
+ set carg {}
+ }
+ cleanup $testdir $env
+
+ puts "Test0$tnum: $method ($args) Bulk get test"
+
+ if { [is_record_based $method] == 1 || [is_rbtree $method] == 1 } {
+ puts "Test0$tnum skipping for method $method"
+ return
+ }
+
+ # We run the meat of the test twice: once with unsorted dups,
+ # once with sorted dups.
+ for { set dflag "-dup"; set sort "unsorted"; set diter 0 } \
+ { $diter < 2 } \
+ { set dflag "-dup -dupsort"; set sort "sorted"; incr diter } {
+ set testfile $basename-$sort.db
+ set did [open $dict]
+
+ # Open and populate the database with $nsets sets of dups.
+ # Each set contains as many dups as its number
+ puts "\tTest0$tnum.a:\
+ Creating database with $nsets sets of $sort dups."
+ set dargs "$dflag $carg $args"
+ set db [eval {berkdb_open -create} $omethod $dargs $testfile]
+ error_check_good db_open [is_valid_db $db] TRUE
+ t95_populate $db $did $nsets 0
+
+ # Run basic get tests.
+ t95_gettest $db $tnum b [expr 8192] 1
+ t95_gettest $db $tnum c [expr 10 * 8192] 0
+
+ # Run cursor get tests.
+ t95_cgettest $db $tnum d [expr 100] 1
+ t95_cgettest $db $tnum e [expr 10 * 8192] 0
+
+ # Run invalid flag combination tests
+ # Sync and reopen test file so errors won't be sent to stderr
+ error_check_good db_sync [$db sync] 0
+ set noerrdb [eval berkdb_open_noerr $dargs $testfile]
+ t95_flagtest $noerrdb $tnum f [expr 8192]
+ t95_cflagtest $noerrdb $tnum g [expr 100]
+ error_check_good noerrdb_close [$noerrdb close] 0
+
+ # Set up for overflow tests
+ set max [expr 4000 * $noverflows]
+ puts "\tTest0$tnum.h: Growing\
+ database with $noverflows overflow sets (max item size $max)"
+ t95_populate $db $did $noverflows 4000
+
+ # Run overflow get tests.
+ t95_gettest $db $tnum i [expr 10 * 8192] 1
+ t95_gettest $db $tnum j [expr $max * 2] 1
+ t95_gettest $db $tnum k [expr $max * $noverflows * 2] 0
+
+ # Run overflow cursor get tests.
+ t95_cgettest $db $tnum l [expr 10 * 8192] 1
+ t95_cgettest $db $tnum m [expr $max * 2] 0
+
+ error_check_good db_close [$db close] 0
+ close $did
+ }
+}
+
+proc t95_gettest { db tnum letter bufsize expectfail } {
+ t95_gettest_body $db $tnum $letter $bufsize $expectfail 0
+}
+proc t95_cgettest { db tnum letter bufsize expectfail } {
+ t95_gettest_body $db $tnum $letter $bufsize $expectfail 1
+}
+proc t95_flagtest { db tnum letter bufsize } {
+ t95_flagtest_body $db $tnum $letter $bufsize 0
+}
+proc t95_cflagtest { db tnum letter bufsize } {
+ t95_flagtest_body $db $tnum $letter $bufsize 1
+}
+
+# Basic get test
+proc t95_gettest_body { db tnum letter bufsize expectfail usecursor } {
+ global errorCode
+
+ if { $usecursor == 0 } {
+ set action "db get -multi"
+ } else {
+ set action "dbc get -multi -set/-next"
+ }
+ puts "\tTest0$tnum.$letter: $action with bufsize $bufsize"
+
+ set allpassed TRUE
+ set saved_err ""
+
+ # Cursor for $usecursor.
+ if { $usecursor != 0 } {
+ set getcurs [$db cursor]
+ error_check_good getcurs [is_valid_cursor $getcurs $db] TRUE
+ }
+
+ # Traverse DB with cursor; do get/c_get(DB_MULTIPLE) on each item.
+ set dbc [$db cursor]
+ error_check_good is_valid_dbc [is_valid_cursor $dbc $db] TRUE
+ for { set dbt [$dbc get -first] } { [llength $dbt] != 0 } \
+ { set dbt [$dbc get -nextnodup] } {
+ set key [lindex [lindex $dbt 0] 0]
+ set datum [lindex [lindex $dbt 0] 1]
+
+ if { $usecursor == 0 } {
+ set ret [catch {eval $db get -multi $bufsize $key} res]
+ } else {
+ set res {}
+ for { set ret [catch {eval $getcurs get -multi $bufsize\
+ -set $key} tres] } \
+ { $ret == 0 && [llength $tres] != 0 } \
+ { set ret [catch {eval $getcurs get -multi $bufsize\
+ -nextdup} tres]} {
+ eval lappend res $tres
+ }
+ }
+
+ # If we expect a failure, be more tolerant if the above fails;
+ # just make sure it's an ENOMEM, mark it, and move along.
+ if { $expectfail != 0 && $ret != 0 } {
+ error_check_good multi_failure_errcode \
+ [is_substr $errorCode ENOMEM] 1
+ set allpassed FALSE
+ continue
+ }
+ error_check_good get_multi($key) $ret 0
+ t95_verify $res FALSE
+ }
+
+ set ret [catch {eval $db get -multi $bufsize} res]
+
+ if { $expectfail == 1 } {
+ error_check_good allpassed $allpassed FALSE
+ puts "\t\tTest0$tnum.$letter:\
+ returned at least one ENOMEM (as expected)"
+ } else {
+ error_check_good allpassed $allpassed TRUE
+ puts "\t\tTest0$tnum.$letter: succeeded (as expected)"
+ }
+
+ error_check_good dbc_close [$dbc close] 0
+ if { $usecursor != 0 } {
+ error_check_good getcurs_close [$getcurs close] 0
+ }
+}
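+
+# A usage sketch (hypothetical key and size): [$db get -multi 8192 k]
+# returns the duplicates of k that fit in an 8KB buffer; a buffer too
+# small for the data fails with ENOMEM, which is exactly what the
+# expectfail branch above tolerates.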
+
+# Test of invalid flag combinations for -multi
+proc t95_flagtest_body { db tnum letter bufsize usecursor } {
+ global errorCode
+
+ if { $usecursor == 0 } {
+ set action "db get -multi "
+ } else {
+ set action "dbc get -multi "
+ }
+ puts "\tTest0$tnum.$letter: $action with invalid flag combinations"
+
+ # Cursor for $usecursor.
+ if { $usecursor != 0 } {
+ set getcurs [$db cursor]
+ error_check_good getcurs [is_valid_cursor $getcurs $db] TRUE
+ }
+
+ if { $usecursor == 0 } {
+ # Disallowed flags for basic -multi get
+ set badflags [list consume consume_wait {rmw some_key}]
+
+ foreach flag $badflags {
+ catch {eval $db get -multi $bufsize -$flag} ret
+ error_check_good \
+ db:get:multi:$flag [is_substr $errorCode EINVAL] 1
+ }
+ } else {
+ # Disallowed flags for cursor -multi get
+ set cbadflags [list last get_recno join_item \
+ {multi_key 1000} prev prevnodup]
+
+ set dbc [$db cursor]
+ $dbc get -first
+ foreach flag $cbadflags {
+ catch {eval $dbc get -multi $bufsize -$flag} ret
+ error_check_good dbc:get:multi:$flag \
+ [is_substr $errorCode EINVAL] 1
+ }
+ error_check_good dbc_close [$dbc close] 0
+ }
+ if { $usecursor != 0 } {
+ error_check_good getcurs_close [$getcurs close] 0
+ }
+ puts "\t\tTest0$tnum.$letter completed"
+}
+
+# Verify that a passed-in list of key/data pairs all match the predicted
+# structure (e.g. {{thing1 thing.   0}}, {{key2 key.   0} {key2 key.   1}}).
+proc t95_verify { res multiple_keys } {
+ global alphabet
+
+ set i 0
+
+ set orig_key [lindex [lindex $res 0] 0]
+ set nkeys [string trim $orig_key $alphabet']
+ set base_key [string trim $orig_key 0123456789]
+ set datum_count 0
+
+ while { 1 } {
+ set key [lindex [lindex $res $i] 0]
+ set datum [lindex [lindex $res $i] 1]
+
+ if { $datum_count >= $nkeys } {
+ if { [llength $key] != 0 } {
+ # If there are keys beyond $nkeys, we'd
+ # better have multiple_keys set.
+ error_check_bad "keys beyond number $i allowed"\
+ $multiple_keys FALSE
+
+ # If multiple_keys is set, accept the new key.
+ set orig_key $key
+ set nkeys [eval string trim \
+ $orig_key {$alphabet'}]
+ set base_key [eval string trim \
+ $orig_key 0123456789]
+ set datum_count 0
+ } else {
+ # datum_count has hit nkeys. We're done.
+ return
+ }
+ }
+
+ error_check_good returned_key($i) $key $orig_key
+ error_check_good returned_datum($i) \
+ $datum $base_key.[format %4u $datum_count]
+ incr datum_count
+ incr i
+ }
+}
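+
+# For instance (hypothetical buffer): a res of
+# {{abc2 abc.   0} {abc2 abc.   1}} parses as orig_key "abc2",
+# nkeys 2, and base_key "abc", and the loop verifies the data
+# "abc.   0" and "abc.   1" in order.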
+
+# Add nsets dup sets; set i uses dictionary-word key "word$i" and holds
+# i dups ("word.   0" through "word.   i-1"), with "word" padded by
+# (i * pad_bytes) extra bytes.
+proc t95_populate { db did nsets pad_bytes } {
+ set txn ""
+ for { set i 1 } { $i <= $nsets } { incr i } {
+ # basekey is a padded dictionary word
+ gets $did basekey
+
+ append basekey [repeat "a" [expr $pad_bytes * $i]]
+
+ # key is basekey with the number of dups stuck on.
+ set key $basekey$i
+
+ for { set j 0 } { $j < $i } { incr j } {
+ set data $basekey.[format %4u $j]
+ error_check_good db_put($key,$data) \
+ [eval {$db put} $txn {$key $data}] 0
+ }
+ }
+
+ # This will make debugging easier, and since the database is
+ # read-only from here out, it's cheap.
+ error_check_good db_sync [$db sync] 0
+}
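+
+# A sketch of the resulting layout (hypothetical words, pad_bytes 0):
+# set 1 is key "word1" with the single datum "word.   0"; set 3 is
+# key "word3" with data "word.   0" through "word.   2".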
diff --git a/storage/bdb/test/test096.tcl b/storage/bdb/test/test096.tcl
new file mode 100644
index 00000000000..042df19eac7
--- /dev/null
+++ b/storage/bdb/test/test096.tcl
@@ -0,0 +1,202 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test096.tcl,v 11.19 2002/08/19 20:09:29 margo Exp $
+#
+# TEST test096
+# TEST Db->truncate test.
+proc test096 { method {pagesize 512} {nentries 50} {ndups 4} args} {
+ global fixed_len
+ source ./include.tcl
+
+ set orig_fixed_len $fixed_len
+ set args [convert_args $method $args]
+ set encargs ""
+ set args [split_encargs $args encargs]
+ set omethod [convert_method $method]
+
+ puts "Test096: $method db truncate method test"
+ if { [is_record_based $method] == 1 || \
+ [is_rbtree $method] == 1 } {
+ puts "Test096 skipping for method $method"
+ return
+ }
+ set pgindex [lsearch -exact $args "-pagesize"]
+ if { $pgindex != -1 } {
+ puts "Test096: Skipping for specific pagesizes"
+ return
+ }
+
+ # Create the database and open the dictionary
+ set eindex [lsearch -exact $args "-env"]
+ set testfile test096.db
+ if { $eindex != -1 } {
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 0 } {
+ puts "Environment w/o txns specified; skipping."
+ return
+ }
+ if { $nentries == 1000 } {
+ set nentries 100
+ }
+ reduce_dups nentries ndups
+ set testdir [get_home $env]
+ set closeenv 0
+ } else {
+ env_cleanup $testdir
+
+ #
+ # We need an env for exclusive-use testing.
+ set env [eval {berkdb_env -create -home $testdir -txn} $encargs]
+ error_check_good env_create [is_valid_env $env] TRUE
+ set closeenv 1
+ }
+
+ set t1 $testdir/t1
+
+ puts "\tTest096.a: Create $nentries entries"
+ set db [eval {berkdb_open -create -auto_commit \
+ -env $env $omethod -mode 0644} $args $testfile]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ set did [open $dict]
+ set count 0
+ set txn ""
+ set pflags ""
+ set gflags ""
+ while { [gets $did str] != -1 && $count < $nentries } {
+ set key $str
+ set datastr [reverse $str]
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ set ret [eval {$db put} \
+ $txn $pflags {$key [chop_data $method $datastr]}]
+ error_check_good put $ret 0
+ error_check_good txn [$t commit] 0
+
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_good $key:dbget [llength $ret] 1
+
+ incr count
+ }
+ close $did
+
+ puts "\tTest096.b: Truncate database"
+ error_check_good dbclose [$db close] 0
+ set dbtr [eval {berkdb_open -create -auto_commit \
+ -env $env $omethod -mode 0644} $args $testfile]
+ error_check_good db_open [is_valid_db $dbtr] TRUE
+
+ set ret [$dbtr truncate -auto_commit]
+ error_check_good dbtrunc $ret $nentries
+ error_check_good db_close [$dbtr close] 0
+
+ set db [eval {berkdb_open -env $env} $args $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set ret [$db get -glob *]
+ error_check_good dbget [llength $ret] 0
+ error_check_good dbclose [$db close] 0
+ error_check_good dbverify [verify_dir $testdir "\tTest096.c: "] 0
+
+ #
+ # Remove database, and create a new one with dups.
+ #
+ puts "\tTest096.d: Create $nentries entries with $ndups duplicates"
+ set ret [berkdb dbremove -env $env -auto_commit $testfile]
+ set db [eval {berkdb_open -pagesize $pagesize -dup -auto_commit \
+ -create -env $env $omethod -mode 0644} $args $testfile]
+ error_check_good db_open [is_valid_db $db] TRUE
+ set did [open $dict]
+ set count 0
+ set txn ""
+ set pflags ""
+ set gflags ""
+ while { [gets $did str] != -1 && $count < $nentries } {
+ set key $str
+ for { set i 1 } { $i <= $ndups } { incr i } {
+ set datastr $i:$str
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ set ret [eval {$db put} \
+ $txn $pflags {$key [chop_data $method $datastr]}]
+ error_check_good put $ret 0
+ error_check_good txn [$t commit] 0
+ }
+
+ set ret [eval {$db get} $gflags {$key}]
+ error_check_bad $key:dbget_dups [llength $ret] 0
+ error_check_good $key:dbget_dups1 [llength $ret] $ndups
+
+ incr count
+ }
+ close $did
+ set dlist ""
+ for { set i 1 } {$i <= $ndups} {incr i} {
+ lappend dlist $i
+ }
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ dup_check $db $txn $t1 $dlist
+ error_check_good txn [$t commit] 0
+ puts "\tTest096.e: Verify off page duplicates status"
+ set stat [$db stat]
+ error_check_bad stat:offpage [is_substr $stat \
+ "{{Duplicate pages} 0}"] 1
+
+ set recs [expr $ndups * $count]
+ error_check_good dbclose [$db close] 0
+
+ puts "\tTest096.f: Truncate database in a txn then abort"
+ set txn [$env txn]
+
+ set dbtr [eval {berkdb_open -auto_commit -create \
+ -env $env $omethod -mode 0644} $args $testfile]
+ error_check_good db_open [is_valid_db $dbtr] TRUE
+ error_check_good txnbegin [is_valid_txn $txn $env] TRUE
+
+ set ret [$dbtr truncate -txn $txn]
+ error_check_good dbtrunc $ret $recs
+
+ error_check_good txnabort [$txn abort] 0
+ error_check_good db_close [$dbtr close] 0
+
+ set db [eval {berkdb_open -auto_commit -env $env} $args $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set ret [$db get -glob *]
+ error_check_good dbget [llength $ret] $recs
+ error_check_good dbclose [$db close] 0
+
+ puts "\tTest096.g: Truncate database in a txn then commit"
+ set txn [$env txn]
+ error_check_good txnbegin [is_valid_txn $txn $env] TRUE
+
+ set dbtr [eval {berkdb_open -auto_commit -create \
+ -env $env $omethod -mode 0644} $args $testfile]
+ error_check_good db_open [is_valid_db $dbtr] TRUE
+
+ set ret [$dbtr truncate -txn $txn]
+ error_check_good dbtrunc $ret $recs
+
+ error_check_good txncommit [$txn commit] 0
+ error_check_good db_close [$dbtr close] 0
+
+ set db [berkdb_open -auto_commit -env $env $testfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ set ret [$db get -glob *]
+ error_check_good dbget [llength $ret] 0
+ error_check_good dbclose [$db close] 0
+
+ set testdir [get_home $env]
+ error_check_good dbverify [verify_dir $testdir "\tTest096.h: "] 0
+
+ if { $closeenv == 1 } {
+ error_check_good envclose [$env close] 0
+ }
+}
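+
+# A usage sketch (hypothetical handles): [$dbtr truncate -txn $t]
+# discards every record and returns the number removed; aborting $t
+# restores the records, while committing makes the truncate permanent,
+# as Test096.f and Test096.g exercise above.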
diff --git a/storage/bdb/test/test097.tcl b/storage/bdb/test/test097.tcl
new file mode 100644
index 00000000000..6e43b820b2f
--- /dev/null
+++ b/storage/bdb/test/test097.tcl
@@ -0,0 +1,188 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test097.tcl,v 11.8 2002/09/04 18:47:42 sue Exp $
+#
+# TEST test097
+# TEST Open up a large set of database files simultaneously.
+# TEST Adjust for local file descriptor resource limits.
+# TEST Then use the first 1000 entries from the dictionary.
+# TEST	Insert each with self as key and a fixed, medium-length data string;
+# TEST retrieve each. After all are entered, retrieve all; compare output
+# TEST to original.
+
+proc test097 { method {ndbs 500} {nentries 400} args } {
+ global pad_datastr
+ source ./include.tcl
+
+ set largs [convert_args $method $args]
+ set encargs ""
+ set largs [split_encargs $largs encargs]
+
+ # Open an environment, with a 1MB cache.
+ set eindex [lsearch -exact $largs "-env"]
+ if { $eindex != -1 } {
+ incr eindex
+ set env [lindex $largs $eindex]
+ puts "Test097: $method: skipping for env $env"
+ return
+ }
+ env_cleanup $testdir
+ set env [eval {berkdb_env -create \
+ -cachesize { 0 1048576 1 } -txn} -home $testdir $encargs]
+ error_check_good dbenv [is_valid_env $env] TRUE
+
+ # Create the database and open the dictionary
+ set testfile test097.db
+ set t1 $testdir/t1
+ set t2 $testdir/t2
+ set t3 $testdir/t3
+ #
+ # When running with HAVE_MUTEX_SYSTEM_RESOURCES,
+ # we can run out of mutex lock slots due to the nature of this test.
+ # So, for this test, increase the number of pages per extent
+ # to consume fewer resources.
+ #
+ if { [is_queueext $method] } {
+ set numdb [expr $ndbs / 4]
+ set eindex [lsearch -exact $largs "-extent"]
+ error_check_bad extent $eindex -1
+ incr eindex
+ set extval [lindex $largs $eindex]
+ set extval [expr $extval * 4]
+ set largs [lreplace $largs $eindex $eindex $extval]
+ }
+ puts -nonewline "Test097: $method ($largs) "
+ puts "$nentries entries in at most $ndbs simultaneous databases"
+
+ puts "\tTest097.a: Simultaneous open"
+ set numdb [test097_open tdb $ndbs $method $env $testfile $largs]
+ if { $numdb == 0 } {
+ puts "\tTest097: Insufficient resources available -- skipping."
+ error_check_good envclose [$env close] 0
+ return
+ }
+
+ set did [open $dict]
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 0
+
+ # Here is the loop where we put and get each key/data pair
+ if { [is_record_based $method] == 1 } {
+ append gflags "-recno"
+ }
+ puts "\tTest097.b: put/get on $numdb databases"
+ set datastr "abcdefghij"
+ set pad_datastr [pad_data $method $datastr]
+ while { [gets $did str] != -1 && $count < $nentries } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ } else {
+ set key $str
+ }
+ for { set i 1 } { $i <= $numdb } { incr i } {
+ set ret [eval {$tdb($i) put} $txn $pflags \
+ {$key [chop_data $method $datastr]}]
+ error_check_good put $ret 0
+ set ret [eval {$tdb($i) get} $gflags {$key}]
+ error_check_good get $ret [list [list $key \
+ [pad_data $method $datastr]]]
+ }
+ incr count
+ }
+ close $did
+
+ # Now we will get each key from the DB and compare the results
+ # to the original.
+ puts "\tTest097.c: dump and check files"
+ for { set j 1 } { $j <= $numdb } { incr j } {
+ dump_file $tdb($j) $txn $t1 test097.check
+ error_check_good db_close [$tdb($j) close] 0
+
+ # Now compare the keys to see if they match the dictionary
+ if { [is_record_based $method] == 1 } {
+ set oid [open $t2 w]
+			for {set i 1} {$i <= $nentries} {incr i} {
+ puts $oid $i
+ }
+ close $oid
+ filesort $t2 $t3
+ file rename -force $t3 $t2
+ } else {
+ set q q
+ filehead $nentries $dict $t3
+ filesort $t3 $t2
+ }
+ filesort $t1 $t3
+
+ error_check_good Test097:diff($t3,$t2) [filecmp $t3 $t2] 0
+ }
+ error_check_good envclose [$env close] 0
+}
+
+# Check function for test097; every data value should match the fixed
+# pad_datastr.
+proc test097.check { key data } {
+ global pad_datastr
+ error_check_good "data mismatch for key $key" $data $pad_datastr
+}
+
+proc test097_open { tdb ndbs method env testfile largs } {
+ global errorCode
+ upvar $tdb db
+
+ set j 0
+ set numdb $ndbs
+ if { [is_queueext $method] } {
+ set numdb [expr $ndbs / 4]
+ }
+ set omethod [convert_method $method]
+ for { set i 1 } {$i <= $numdb } { incr i } {
+ set stat [catch {eval {berkdb_open -env $env \
+ -pagesize 512 -create -mode 0644} \
+ $largs {$omethod $testfile.$i}} db($i)]
+ #
+ # Check if we've reached our limit
+ #
+ if { $stat == 1 } {
+ set min 20
+ set em [is_substr $errorCode EMFILE]
+ set en [is_substr $errorCode ENFILE]
+ error_check_good open_ret [expr $em || $en] 1
+ puts \
+ "\tTest097.a.1 Encountered resource limits opening $i files, adjusting"
+ if { [is_queueext $method] } {
+ set end [expr $j / 4]
+ set min 10
+ } else {
+ set end [expr $j - 10]
+ }
+ #
+ # If we cannot open even $min files, then this test is
+ # not very useful. Close up shop and go back.
+ #
+ if { $end < $min } {
+ test097_close db 1 $j
+ return 0
+ }
+ test097_close db [expr $end + 1] $j
+ return $end
+ } else {
+ error_check_good dbopen [is_valid_db $db($i)] TRUE
+ set j $i
+ }
+ }
+ return $j
+}
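+
+# For illustration (hypothetical limit): if the process hits EMFILE
+# after opening file 151, then j is 150 and test097_open closes the
+# newest handles back down to 140 (or j/4 for queue extents) and
+# returns that count, so the caller proceeds with the files that did
+# open.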
+
+proc test097_close { tdb start end } {
+ upvar $tdb db
+
+ for { set i $start } { $i <= $end } { incr i } {
+ error_check_good db($i)close [$db($i) close] 0
+ }
+}
diff --git a/storage/bdb/test/test098.tcl b/storage/bdb/test/test098.tcl
new file mode 100644
index 00000000000..320e0258a84
--- /dev/null
+++ b/storage/bdb/test/test098.tcl
@@ -0,0 +1,91 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test098.tcl,v 1.5 2002/07/11 20:38:36 sandstro Exp $
+#
+# TEST test098
+# TEST Test of DB_GET_RECNO and secondary indices. Open a primary and
+# TEST a secondary, and do a normal cursor get followed by a get_recno.
+# TEST (This is a smoke test for "Bug #1" in [#5811].)
+
+proc test098 { method args } {
+ source ./include.tcl
+
+ set omethod [convert_method $method]
+ set args [convert_args $method $args]
+
+ puts "Test098: $omethod ($args): DB_GET_RECNO and secondary indices."
+
+ if { [is_rbtree $method] != 1 } {
+ puts "\tTest098: Skipping for method $method."
+ return
+ }
+
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ set txn ""
+ set auto ""
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set base $testdir/test098
+ set env NULL
+ } else {
+ set base test098
+ incr eindex
+ set env [lindex $args $eindex]
+ set rpcenv [is_rpcenv $env]
+ if { $rpcenv == 1 } {
+ puts "Test098: Skipping for RPC"
+ return
+ }
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ set auto " -auto_commit "
+ }
+ set testdir [get_home $env]
+ }
+ cleanup $testdir $env
+
+ puts "\tTest098.a: Set up databases."
+
+ set adb [eval {berkdb_open} $omethod $args $auto \
+ {-create} $base-primary.db]
+ error_check_good adb_create [is_valid_db $adb] TRUE
+
+ set bdb [eval {berkdb_open} $omethod $args $auto \
+ {-create} $base-secondary.db]
+ error_check_good bdb_create [is_valid_db $bdb] TRUE
+
+ set ret [eval $adb associate $auto [callback_n 0] $bdb]
+ error_check_good associate $ret 0
+
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set ret [eval {$adb put} $txn aaa data1]
+ error_check_good put $ret 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+
+ set bc [$bdb cursor]
+ error_check_good cursor [is_valid_cursor $bc $bdb] TRUE
+
+ puts "\tTest098.b: c_get(DB_FIRST) on the secondary."
+ error_check_good get_first [$bc get -first] \
+ [list [list [[callback_n 0] aaa data1] data1]]
+
+ puts "\tTest098.c: c_get(DB_GET_RECNO) on the secondary."
+ error_check_good get_recno [$bc get -get_recno] 1
+
+ error_check_good c_close [$bc close] 0
+
+ error_check_good bdb_close [$bdb close] 0
+ error_check_good adb_close [$adb close] 0
+}
diff --git a/storage/bdb/test/test099.tcl b/storage/bdb/test/test099.tcl
new file mode 100644
index 00000000000..db177ce5fff
--- /dev/null
+++ b/storage/bdb/test/test099.tcl
@@ -0,0 +1,177 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test099.tcl,v 1.2 2002/08/08 15:38:13 bostic Exp $
+#
+# TEST test099
+# TEST
+# TEST Test of DB->get and DBC->c_get with set_recno and get_recno.
+# TEST
+# TEST Populate a small btree -recnum database.
+# TEST After all are entered, retrieve each using -recno with DB->get.
+# TEST Open a cursor and do the same for DBC->c_get with set_recno.
+# TEST Verify that set_recno sets the record number position properly.
+# TEST Verify that get_recno returns the correct record numbers.
+proc test099 { method {nentries 10000} args } {
+ source ./include.tcl
+
+ set args [convert_args $method $args]
+ set omethod [convert_method $method]
+
+ puts "Test099: Test of set_recno and get_recno in DBC->c_get."
+ if { [is_rbtree $method] != 1 } {
+ puts "Test099: skipping for method $method."
+ return
+ }
+
+ set txnenv 0
+ set eindex [lsearch -exact $args "-env"]
+ #
+ # If we are using an env, then testfile should just be the db name.
+ # Otherwise it is the test directory and the name.
+ if { $eindex == -1 } {
+ set testfile $testdir/test099.db
+ set env NULL
+ } else {
+ set testfile test099.db
+ incr eindex
+ set env [lindex $args $eindex]
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append args " -auto_commit "
+ #
+ # If we are using txns and running with the
+ # default, set the default down a bit.
+ #
+ if { $nentries == 10000 } {
+ set nentries 100
+ }
+ }
+ set testdir [get_home $env]
+ }
+ set t1 $testdir/t1
+ cleanup $testdir $env
+
+ # Create the database and open the dictionary
+ set db [eval {berkdb_open \
+ -create -mode 0644} $args {$omethod $testfile}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ set did [open $dict]
+
+ set pflags ""
+ set gflags ""
+ set txn ""
+ set count 1
+
+ append gflags " -recno"
+
+ puts "\tTest099.a: put loop"
+ # Here is the loop where we put each key/data pair
+ while { [gets $did str] != -1 && $count < $nentries } {
+# global kvals
+# set key [expr $count]
+# set kvals($key) [pad_data $method $str]
+ set key $str
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set r [eval {$db put} \
+ $txn $pflags {$key [chop_data $method $str]}]
+ error_check_good db_put $r 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ incr count
+ }
+ close $did
+
+ puts "\tTest099.b: dump file"
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ dump_file $db $txn $t1 test099.check
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+
+ puts "\tTest099.c: Test set_recno then get_recno"
+ set db [eval {berkdb_open -rdonly} $args $omethod $testfile ]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Open a cursor
+ if { $txnenv == 1 } {
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ set dbc [eval {$db cursor} $txn]
+	error_check_good db_cursor [is_valid_cursor $dbc $db] TRUE
+
+ set did [open $t1]
+ set recno 1
+
+ # Create key(recno) array to use for later comparison
+ while { [gets $did str] != -1 } {
+ set kvals($recno) $str
+ incr recno
+ }
+
+ set recno 1
+ set ret [$dbc get -first]
+ error_check_bad dbc_get_first [llength $ret] 0
+
+ # First walk forward through the database ....
+ while { $recno < $count } {
+ # Test set_recno: verify it sets the record number properly.
+ set current [$dbc get -current]
+ set r [$dbc get -set_recno $recno]
+ error_check_good set_recno $current $r
+ # Test set_recno: verify that we find the expected key
+ # at the current record number position.
+ set k [lindex [lindex $r 0] 0]
+ error_check_good set_recno $kvals($recno) $k
+
+ # Test get_recno: verify that the return from
+ # get_recno matches the record number just set.
+ set g [$dbc get -get_recno]
+ error_check_good get_recno $recno $g
+ set ret [$dbc get -next]
+ incr recno
+ }
+
+ # ... and then backward.
+ set recno [expr $count - 1]
+ while { $recno > 0 } {
+ # Test set_recno: verify that we find the expected key
+ # at the current record number position.
+ set r [$dbc get -set_recno $recno]
+ set k [lindex [lindex $r 0] 0]
+ error_check_good set_recno $kvals($recno) $k
+
+ # Test get_recno: verify that the return from
+ # get_recno matches the record number just set.
+ set g [$dbc get -get_recno]
+ error_check_good get_recno $recno $g
+ set recno [expr $recno - 1]
+ }
+
+ error_check_good cursor_close [$dbc close] 0
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+ close $did
+}
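+
+# A usage sketch (hypothetical recno): [$dbc get -set_recno 5] moves
+# the cursor to the fifth key in sort order and returns that pair;
+# a following [$dbc get -get_recno] then returns 5.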
+
+# Check function for dumped file; keys and data are identical.
+proc test099.check { key data } {
+ error_check_good "data mismatch for key $key" $key $data
+}
diff --git a/storage/bdb/test/test100.tcl b/storage/bdb/test/test100.tcl
new file mode 100644
index 00000000000..f80b2e526dd
--- /dev/null
+++ b/storage/bdb/test/test100.tcl
@@ -0,0 +1,17 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test100.tcl,v 11.1 2002/08/15 20:55:20 sandstro Exp $
+#
+# TEST test100
+# TEST Test for functionality near the end of the queue
+# TEST using test025 (DB_APPEND).
+proc test100 { method {nentries 10000} {txn -txn} {tnum "100"} args} {
+ if { [is_queueext $method ] == 0 } {
+ puts "Skipping test0$tnum for $method."
+ return;
+ }
+ eval {test025 $method $nentries 4294967000 $tnum} $args
+}
diff --git a/storage/bdb/test/test101.tcl b/storage/bdb/test/test101.tcl
new file mode 100644
index 00000000000..7e5c8fc30fc
--- /dev/null
+++ b/storage/bdb/test/test101.tcl
@@ -0,0 +1,17 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: test101.tcl,v 11.1 2002/08/15 20:55:20 sandstro Exp $
+#
+# TEST test101
+# TEST Test for functionality near the end of the queue
+# TEST using test070 (DB_CONSUME).
+proc test101 { method {nentries 10000} {txn -txn} {tnum "101"} args} {
+ if { [is_queueext $method ] == 0 } {
+ puts "Skipping test0$tnum for $method."
+ return;
+ }
+ eval {test070 $method 4 2 1000 WAIT 4294967000 $txn $tnum} $args
+}
diff --git a/storage/bdb/test/testparams.tcl b/storage/bdb/test/testparams.tcl
new file mode 100644
index 00000000000..6628db532d7
--- /dev/null
+++ b/storage/bdb/test/testparams.tcl
@@ -0,0 +1,194 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: testparams.tcl,v 11.117 2002/09/05 02:30:00 margo Exp $
+
+set subs {bigfile dead env lock log memp mutex recd rep rpc rsrc \
+ sdb sdbtest sec si test txn}
+
+set num_test(bigfile) 2
+set num_test(dead) 7
+set num_test(env) 11
+set num_test(lock) 5
+set num_test(log) 5
+set num_test(memp) 3
+set num_test(mutex) 3
+set num_test(recd) 20
+set num_test(rep) 5
+set num_test(rpc) 5
+set num_test(rsrc) 4
+set num_test(sdb) 12
+set num_test(sdbtest) 2
+set num_test(sec) 2
+set num_test(si) 6
+set num_test(test) 101
+set num_test(txn) 9
+
+set parms(recd001) 0
+set parms(recd002) 0
+set parms(recd003) 0
+set parms(recd004) 0
+set parms(recd005) ""
+set parms(recd006) 0
+set parms(recd007) ""
+set parms(recd008) {4 4}
+set parms(recd009) 0
+set parms(recd010) 0
+set parms(recd011) {200 15 1}
+set parms(recd012) {0 49 25 100 5}
+set parms(recd013) 100
+set parms(recd014) ""
+set parms(recd015) ""
+set parms(recd016) ""
+set parms(recd017) 0
+set parms(recd018) 10
+set parms(recd019) 50
+set parms(recd020) ""
+set parms(subdb001) ""
+set parms(subdb002) 10000
+set parms(subdb003) 1000
+set parms(subdb004) ""
+set parms(subdb005) 100
+set parms(subdb006) 100
+set parms(subdb007) ""
+set parms(subdb008) ""
+set parms(subdb009) ""
+set parms(subdb010) ""
+set parms(subdb011) {13 10}
+set parms(subdb012) ""
+set parms(test001) {10000 0 "01" 0}
+set parms(test002) 10000
+set parms(test003) ""
+set parms(test004) {10000 4 0}
+set parms(test005) 10000
+set parms(test006) {10000 0 6}
+set parms(test007) {10000 7}
+set parms(test008) {8 0}
+set parms(test009) ""
+set parms(test010) {10000 5 10}
+set parms(test011) {10000 5 11}
+set parms(test012) ""
+set parms(test013) 10000
+set parms(test014) 10000
+set parms(test015) {7500 0}
+set parms(test016) 10000
+set parms(test017) {0 19 17}
+set parms(test018) 10000
+set parms(test019) 10000
+set parms(test020) 10000
+set parms(test021) 10000
+set parms(test022) ""
+set parms(test023) ""
+set parms(test024) 10000
+set parms(test025) {10000 0 25}
+set parms(test026) {2000 5 26}
+set parms(test027) {100}
+set parms(test028) ""
+set parms(test029) 10000
+set parms(test030) 10000
+set parms(test031) {10000 5 31}
+set parms(test032) {10000 5 32}
+set parms(test033) {10000 5 33}
+set parms(test034) 10000
+set parms(test035) 10000
+set parms(test036) 10000
+set parms(test037) 100
+set parms(test038) {10000 5 38}
+set parms(test039) {10000 5 39}
+set parms(test040) 10000
+set parms(test041) 10000
+set parms(test042) 1000
+set parms(test043) 10000
+set parms(test044) {5 10 0}
+set parms(test045) 1000
+set parms(test046) ""
+set parms(test047) ""
+set parms(test048) ""
+set parms(test049) ""
+set parms(test050) ""
+set parms(test051) ""
+set parms(test052) ""
+set parms(test053) ""
+set parms(test054) ""
+set parms(test055) ""
+set parms(test056) ""
+set parms(test057) ""
+set parms(test058) ""
+set parms(test059) ""
+set parms(test060) ""
+set parms(test061) ""
+set parms(test062) {200 200 62}
+set parms(test063) ""
+set parms(test064) ""
+set parms(test065) ""
+set parms(test066) ""
+set parms(test067) {1000 67}
+set parms(test068) ""
+set parms(test069) {50 69}
+set parms(test070) {4 2 1000 CONSUME 0 -txn 70}
+set parms(test071) {1 1 10000 CONSUME 0 -txn 71}
+set parms(test072) {512 20 72}
+set parms(test073) {512 50 73}
+set parms(test074) {-nextnodup 100 74}
+set parms(test075) {75}
+set parms(test076) {1000 76}
+set parms(test077) {1000 512 77}
+set parms(test078) {100 512 78}
+set parms(test079) {10000 512 79}
+set parms(test080) {80}
+set parms(test081) {13 81}
+set parms(test082) {-prevnodup 100 82}
+set parms(test083) {512 5000 2}
+set parms(test084) {10000 84 65536}
+set parms(test085) {512 3 10 85}
+set parms(test086) ""
+set parms(test087) {512 50 87}
+set parms(test088) ""
+set parms(test089) 1000
+set parms(test090) {10000 -txn 90}
+set parms(test091) {4 2 1000 0 91}
+set parms(test092) {1000}
+set parms(test093) {10000 93}
+set parms(test094) {10000 10 94}
+set parms(test095) {1000 25 95}
+set parms(test096) {512 1000 19}
+set parms(test097) {500 400}
+set parms(test098) ""
+set parms(test099) 10000
+set parms(test100) {10000 -txn 100}
+set parms(test101) {10000 -txn 101}
+
+# RPC server executables. Each of these is tested (if it exists)
+# when running the RPC tests.
+set svc_list { berkeley_db_svc berkeley_db_cxxsvc \
+ berkeley_db_javasvc }
+set rpc_svc berkeley_db_svc
+
+# Shell script tests. Each list entry is a {directory filename} pair,
+# invoked with "/bin/sh filename".
+set shelltest_list {
+ { scr001 chk.code }
+ { scr002 chk.def }
+ { scr003 chk.define }
+ { scr004 chk.javafiles }
+ { scr005 chk.nl }
+ { scr006 chk.offt }
+ { scr007 chk.proto }
+ { scr008 chk.pubdef }
+ { scr009 chk.srcfiles }
+ { scr010 chk.str }
+ { scr011 chk.tags }
+ { scr012 chk.vx_code }
+ { scr013 chk.stats }
+ { scr014 chk.err }
+ { scr015 chk.cxxtests }
+ { scr016 chk.javatests }
+ { scr017 chk.db185 }
+ { scr018 chk.comma }
+ { scr019 chk.include }
+ { scr020 chk.inc }
+ { scr021 chk.flags }
+ { scr022 chk.rr }
+}
diff --git a/storage/bdb/test/testutils.tcl b/storage/bdb/test/testutils.tcl
new file mode 100644
index 00000000000..d1f89dd1e15
--- /dev/null
+++ b/storage/bdb/test/testutils.tcl
@@ -0,0 +1,3209 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: testutils.tcl,v 11.165 2002/09/05 17:54:04 sandstro Exp $
+#
+# Test system utilities
+#
+# Timestamp -- print time along with elapsed time since last invocation
+# of timestamp.
+proc timestamp {{opt ""}} {
+ global __timestamp_start
+
+ set now [clock seconds]
+
+ # -c accurate to the click, instead of the second.
+ # -r seconds since the Epoch
+ # -t current time in the format expected by db_recover -t.
+ # -w wallclock time
+ # else wallclock plus elapsed time.
+ if {[string compare $opt "-r"] == 0} {
+ return $now
+ } elseif {[string compare $opt "-t"] == 0} {
+ return [clock format $now -format "%y%m%d%H%M.%S"]
+ } elseif {[string compare $opt "-w"] == 0} {
+ return [clock format $now -format "%c"]
+ } else {
+ if {[string compare $opt "-c"] == 0} {
+ set printclicks 1
+ } else {
+ set printclicks 0
+ }
+
+ if {[catch {set start $__timestamp_start}] != 0} {
+ set __timestamp_start $now
+ }
+ set start $__timestamp_start
+
+ set elapsed [expr $now - $start]
+ set the_time [clock format $now -format ""]
+ set __timestamp_start $now
+
+ if { $printclicks == 1 } {
+ set pc_print [format ".%08u" [__fix_num [clock clicks]]]
+ } else {
+ set pc_print ""
+ }
+
+ format "%02d:%02d:%02d$pc_print (%02d:%02d:%02d)" \
+ [__fix_num [clock format $now -format "%H"]] \
+ [__fix_num [clock format $now -format "%M"]] \
+ [__fix_num [clock format $now -format "%S"]] \
+ [expr $elapsed / 3600] \
+ [expr ($elapsed % 3600) / 60] \
+ [expr ($elapsed % 3600) % 60]
+ }
+}
+
+proc __fix_num { num } {
+ set num [string trimleft $num "0"]
+ if {[string length $num] == 0} {
+ set num "0"
+ }
+ return $num
+}
+
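+# Usage sketch (illustrative only; the printed values below are
+# hypothetical):
+#
+#	puts [timestamp -r]	;# seconds since the Epoch, e.g. 1031254467
+#	puts [timestamp -t]	;# db_recover -t format, e.g. 0209051412.27
+#	puts [timestamp]	;# wallclock plus elapsed, e.g. 14:12:27 (00:00:05)
+#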
+# Add a {key,data} pair to the specified database where
+# key=filename and data=file contents.
+proc put_file { db txn flags file } {
+ source ./include.tcl
+
+ set fid [open $file r]
+ fconfigure $fid -translation binary
+ set data [read $fid]
+ close $fid
+
+ set ret [eval {$db put} $txn $flags {$file $data}]
+ error_check_good put_file $ret 0
+}
+
+# Get a {key,data} pair from the specified database where
+# key=filename and data=file contents and then write the
+# data to the specified file.
+proc get_file { db txn flags file outfile } {
+ source ./include.tcl
+
+ set fid [open $outfile w]
+ fconfigure $fid -translation binary
+ if [catch {eval {$db get} $txn $flags {$file}} data] {
+ puts -nonewline $fid $data
+ } else {
+ # Data looks like {{key data}}
+ set data [lindex [lindex $data 0] 1]
+ puts -nonewline $fid $data
+ }
+ close $fid
+}
+
+# Add a {key,data} pair to the specified database where
+# key=file contents and data=file name.
+proc put_file_as_key { db txn flags file } {
+ source ./include.tcl
+
+ set fid [open $file r]
+ fconfigure $fid -translation binary
+ set filecont [read $fid]
+ close $fid
+
+	# Use the file name concatenated with the file contents as the
+	# key, rather than the contents alone, to ensure uniqueness.
+ set data $file$filecont
+
+ set ret [eval {$db put} $txn $flags {$data $file}]
+ error_check_good put_file $ret 0
+}
+
+# Get a {key,data} pair from the specified database where
+# key=file contents and data=file name
+proc get_file_as_key { db txn flags file} {
+ source ./include.tcl
+
+ set fid [open $file r]
+ fconfigure $fid -translation binary
+ set filecont [read $fid]
+ close $fid
+
+ set data $file$filecont
+
+ return [eval {$db get} $txn $flags {$data}]
+}
+
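+# Usage sketch (illustrative; "wordlist" is a hypothetical file,
+# $db an open database handle, and txn/flags are left empty):
+#
+#	put_file $db "" "" wordlist		;# key=name, data=contents
+#	get_file $db "" "" wordlist w.out	;# writes the contents to w.out
+#	put_file_as_key $db "" "" wordlist	;# key=name+contents, data=name
+#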
+# Open a database file and call the given dump proc to dump its keys to outfile.
+proc open_and_dump_file {
+ dbname env outfile checkfunc dump_func beg cont } {
+ global encrypt
+ global passwd
+ source ./include.tcl
+
+ set encarg ""
+ if { $encrypt > 0 && $env == "NULL" } {
+ set encarg "-encryptany $passwd"
+ }
+ set envarg ""
+ set txn ""
+ set txnenv 0
+ if { $env != "NULL" } {
+ append envarg " -env $env "
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append envarg " -auto_commit "
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ }
+ set db [eval {berkdb open} $envarg -rdonly -unknown $encarg $dbname]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ $dump_func $db $txn $outfile $checkfunc $beg $cont
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+}
+
+# Open a subdatabase and call the given dump proc to dump its keys to outfile.
+proc open_and_dump_subfile {
+ dbname env outfile checkfunc dump_func beg cont subdb} {
+ global encrypt
+ global passwd
+ source ./include.tcl
+
+ set encarg ""
+ if { $encrypt > 0 && $env == "NULL" } {
+ set encarg "-encryptany $passwd"
+ }
+ set envarg ""
+ set txn ""
+ set txnenv 0
+ if { $env != "NULL" } {
+ append envarg "-env $env"
+ set txnenv [is_txnenv $env]
+ if { $txnenv == 1 } {
+ append envarg " -auto_commit "
+ set t [$env txn]
+ error_check_good txn [is_valid_txn $t $env] TRUE
+ set txn "-txn $t"
+ }
+ }
+ set db [eval {berkdb open -rdonly -unknown} \
+ $envarg $encarg {$dbname $subdb}]
+ error_check_good dbopen [is_valid_db $db] TRUE
+ $dump_func $db $txn $outfile $checkfunc $beg $cont
+ if { $txnenv == 1 } {
+ error_check_good txn [$t commit] 0
+ }
+ error_check_good db_close [$db close] 0
+}
+
+# Sequentially read a file and call checkfunc on each key/data pair.
+# Dump the keys out to the file specified by outfile.
+proc dump_file { db txn outfile checkfunc } {
+ source ./include.tcl
+
+ dump_file_direction $db $txn $outfile $checkfunc "-first" "-next"
+}
+
+proc dump_file_direction { db txn outfile checkfunc start continue } {
+ source ./include.tcl
+
+ # Now we will get each key from the DB and dump to outfile
+ set c [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $c $db] TRUE
+ dump_file_walk $c $outfile $checkfunc $start $continue
+ error_check_good curs_close [$c close] 0
+}
+
+proc dump_file_walk { c outfile checkfunc start continue {flag ""} } {
+ set outf [open $outfile w]
+ for {set d [eval {$c get} $flag $start] } \
+ { [llength $d] != 0 } \
+ {set d [eval {$c get} $flag $continue] } {
+ set kd [lindex $d 0]
+ set k [lindex $kd 0]
+ set d2 [lindex $kd 1]
+ $checkfunc $k $d2
+ puts $outf $k
+ # XXX: Geoff Mainland
+ # puts $outf "$k $d2"
+ }
+ close $outf
+}
+
+proc dump_binkey_file { db txn outfile checkfunc } {
+ source ./include.tcl
+
+ dump_binkey_file_direction $db $txn $outfile $checkfunc \
+ "-first" "-next"
+}
+proc dump_bin_file { db txn outfile checkfunc } {
+ source ./include.tcl
+
+ dump_bin_file_direction $db $txn $outfile $checkfunc "-first" "-next"
+}
+
+# Note: the following procedure assumes that the binary-file-as-keys were
+# inserted into the database by put_file_as_key, and consist of the file
+# name followed by the file contents as key, to ensure uniqueness.
+proc dump_binkey_file_direction { db txn outfile checkfunc begin cont } {
+ source ./include.tcl
+
+ set d1 $testdir/d1
+
+ set outf [open $outfile w]
+
+ # Now we will get each key from the DB and dump to outfile
+ set c [eval {$db cursor} $txn]
+ error_check_good db_cursor [is_valid_cursor $c $db] TRUE
+
+ set inf $d1
+ for {set d [$c get $begin] } { [llength $d] != 0 } \
+ {set d [$c get $cont] } {
+ set kd [lindex $d 0]
+ set keyfile [lindex $kd 0]
+ set data [lindex $kd 1]
+
+ set ofid [open $d1 w]
+ fconfigure $ofid -translation binary
+
+ # Chop off the first few bytes--that's the file name,
+ # added for uniqueness in put_file_as_key, which we don't
+ # want in the regenerated file.
+ set namelen [string length $data]
+ set keyfile [string range $keyfile $namelen end]
+ puts -nonewline $ofid $keyfile
+ close $ofid
+
+ $checkfunc $data $d1
+ puts $outf $data
+ flush $outf
+ }
+ close $outf
+ error_check_good curs_close [$c close] 0
+ fileremove $d1
+}
+
+proc dump_bin_file_direction { db txn outfile checkfunc begin cont } {
+ source ./include.tcl
+
+ set d1 $testdir/d1
+
+ set outf [open $outfile w]
+
+ # Now we will get each key from the DB and dump to outfile
+ set c [eval {$db cursor} $txn]
+
+ for {set d [$c get $begin] } \
+ { [llength $d] != 0 } {set d [$c get $cont] } {
+ set k [lindex [lindex $d 0] 0]
+ set data [lindex [lindex $d 0] 1]
+ set ofid [open $d1 w]
+ fconfigure $ofid -translation binary
+ puts -nonewline $ofid $data
+ close $ofid
+
+ $checkfunc $k $d1
+ puts $outf $k
+ }
+ close $outf
+ error_check_good curs_close [$c close] 0
+ fileremove -f $d1
+}
+
+proc make_data_str { key } {
+ set datastr ""
+ for {set i 0} {$i < 10} {incr i} {
+ append datastr $key
+ }
+ return $datastr
+}
+
+proc error_check_bad { func result bad {txn 0}} {
+ if { [binary_compare $result $bad] == 0 } {
+ if { $txn != 0 } {
+ $txn abort
+ }
+ flush stdout
+ flush stderr
+ error "FAIL:[timestamp] $func returned error value $bad"
+ }
+}
+
+proc error_check_good { func result desired {txn 0} } {
+ if { [binary_compare $desired $result] != 0 } {
+ if { $txn != 0 } {
+ $txn abort
+ }
+ flush stdout
+ flush stderr
+ error "FAIL:[timestamp]\
+ $func: expected $desired, got $result"
+ }
+}
+
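+# Note (illustrative): error_check_good raises a Tcl error unless
+# $result matches $desired, and error_check_bad is its negation; both
+# abort $txn first if one is supplied.
+#
+#	error_check_good db_close [$db close] 0
+#	error_check_bad dbc_get [llength $rec] 0
+#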
+# Return 1 if sub occurs within str, else 0.  (Handles such as locks
+# carry their manager's name as a prefix, which is why substring
+# checks are used to validate them.)
+proc is_substr { str sub } {
+ if { [string first $sub $str] == -1 } {
+ return 0
+ } else {
+ return 1
+ }
+}
+
+proc release_list { l } {
+
+ # Now release all the locks
+ foreach el $l {
+ catch { $el put } ret
+ error_check_good lock_put $ret 0
+ }
+}
+
+proc debug { {stop 0} } {
+ global __debug_on
+ global __debug_print
+ global __debug_test
+
+ set __debug_on 1
+ set __debug_print 1
+ set __debug_test $stop
+}
+
+# Check if each key appears exactly [llength dlist] times in the file with
+# the duplicate tags matching those that appear in dlist.
+proc dup_check { db txn tmpfile dlist {extra 0}} {
+ source ./include.tcl
+
+ set outf [open $tmpfile w]
+ # Now we will get each key from the DB and dump to outfile
+ set c [eval {$db cursor} $txn]
+ set lastkey ""
+ set done 0
+ while { $done != 1} {
+ foreach did $dlist {
+ set rec [$c get "-next"]
+ if { [string length $rec] == 0 } {
+ set done 1
+ break
+ }
+ set key [lindex [lindex $rec 0] 0]
+ set fulldata [lindex [lindex $rec 0] 1]
+ set id [id_of $fulldata]
+ set d [data_of $fulldata]
+ if { [string compare $key $lastkey] != 0 && \
+ $id != [lindex $dlist 0] } {
+ set e [lindex $dlist 0]
+ error "FAIL: \tKey \
+ $key, expected dup id $e, got $id"
+ }
+ error_check_good dupget.data $d $key
+ error_check_good dupget.id $id $did
+ set lastkey $key
+ }
+ #
+		# Some tests add an extra dup (such as overflow entries).
+		# Check its id if it exists.
+ if { $extra != 0} {
+ set okey $key
+ set rec [$c get "-next"]
+ if { [string length $rec] != 0 } {
+ set key [lindex [lindex $rec 0] 0]
+ #
+ # If this key has no extras, go back for
+ # next iteration.
+ if { [string compare $key $lastkey] != 0 } {
+ set key $okey
+ set rec [$c get "-prev"]
+ } else {
+ set fulldata [lindex [lindex $rec 0] 1]
+ set id [id_of $fulldata]
+ set d [data_of $fulldata]
+ error_check_bad dupget.data1 $d $key
+ error_check_good dupget.id1 $id $extra
+ }
+ }
+ }
+ if { $done != 1 } {
+ puts $outf $key
+ }
+ }
+ close $outf
+ error_check_good curs_close [$c close] 0
+}
+
+# Check if each key appears exactly [llength dlist] times in the file with
+# the duplicate tags matching those that appear in dlist.
+proc dup_file_check { db txn tmpfile dlist } {
+ source ./include.tcl
+
+ set outf [open $tmpfile w]
+ # Now we will get each key from the DB and dump to outfile
+ set c [eval {$db cursor} $txn]
+ set lastkey ""
+ set done 0
+ while { $done != 1} {
+ foreach did $dlist {
+ set rec [$c get "-next"]
+ if { [string length $rec] == 0 } {
+ set done 1
+ break
+ }
+ set key [lindex [lindex $rec 0] 0]
+ if { [string compare $key $lastkey] != 0 } {
+ #
+ # If we changed files read in new contents.
+ #
+ set fid [open $key r]
+ fconfigure $fid -translation binary
+ set filecont [read $fid]
+ close $fid
+ }
+ set fulldata [lindex [lindex $rec 0] 1]
+ set id [id_of $fulldata]
+ set d [data_of $fulldata]
+ if { [string compare $key $lastkey] != 0 && \
+ $id != [lindex $dlist 0] } {
+ set e [lindex $dlist 0]
+ error "FAIL: \tKey \
+ $key, expected dup id $e, got $id"
+ }
+ error_check_good dupget.data $d $filecont
+ error_check_good dupget.id $id $did
+ set lastkey $key
+ }
+ if { $done != 1 } {
+ puts $outf $key
+ }
+ }
+ close $outf
+ error_check_good curs_close [$c close] 0
+}
+
+# Parse duplicate data entries of the form N:data. Data_of returns
+# the data part; id_of returns the numerical part
+proc data_of {str} {
+ set ndx [string first ":" $str]
+ if { $ndx == -1 } {
+ return ""
+ }
+ return [ string range $str [expr $ndx + 1] end]
+}
+
+proc id_of {str} {
+ set ndx [string first ":" $str]
+ if { $ndx == -1 } {
+ return ""
+ }
+
+ return [ string range $str 0 [expr $ndx - 1]]
+}
+
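+# Example (illustrative): a dup entry of the form "3:abc" yields 3
+# from id_of and "abc" from data_of; an entry with no colon yields ""
+# from both.
+#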
+proc nop { {args} } {
+ return
+}
+
+# Partial put test procedure.
+# Munges a data val through three different partial puts. Stores
+# the final munged string in the dvals array so that you can check
+# it later (dvals should be global). We take the characters that
+# are being replaced, make them capitals and then replicate them
+# some number of times (n_add). We do this at the beginning of the
+# data, at the middle and at the end. The parameters are:
+# db, txn, key -- as per usual. Data is the original data element
+# from which we are starting. n_replace is the number of characters
+# that we will replace. n_add is the number of times we will add
+# the replaced string back in.
+proc partial_put { method db txn gflags key data n_replace n_add } {
+ global dvals
+ source ./include.tcl
+
+ # Here is the loop where we put and get each key/data pair
+ # We will do the initial put and then three Partial Puts
+ # for the beginning, middle and end of the string.
+
+ eval {$db put} $txn {$key [chop_data $method $data]}
+
+ # Beginning change
+ set s [string range $data 0 [ expr $n_replace - 1 ] ]
+ set repl [ replicate [string toupper $s] $n_add ]
+
+ # This is gross, but necessary: if this is a fixed-length
+ # method, and the chopped length of $repl is zero,
+ # it's because the original string was zero-length and our data item
+ # is all nulls. Set repl to something non-NULL.
+ if { [is_fixed_length $method] && \
+ [string length [chop_data $method $repl]] == 0 } {
+ set repl [replicate "." $n_add]
+ }
+
+ set newstr [chop_data $method $repl[string range $data $n_replace end]]
+ set ret [eval {$db put} $txn {-partial [list 0 $n_replace] \
+ $key [chop_data $method $repl]}]
+ error_check_good put $ret 0
+
+ set ret [eval {$db get} $gflags $txn {$key}]
+ error_check_good get $ret [list [list $key [pad_data $method $newstr]]]
+
+ # End Change
+ set len [string length $newstr]
+ set spl [expr $len - $n_replace]
+ # Handle case where $n_replace > $len
+ if { $spl < 0 } {
+ set spl 0
+ }
+
+ set s [string range $newstr [ expr $len - $n_replace ] end ]
+ # Handle zero-length keys
+ if { [string length $s] == 0 } { set s "A" }
+
+ set repl [ replicate [string toupper $s] $n_add ]
+ set newstr [chop_data $method \
+ [string range $newstr 0 [expr $spl - 1 ] ]$repl]
+
+ set ret [eval {$db put} $txn \
+ {-partial [list $spl $n_replace] $key [chop_data $method $repl]}]
+ error_check_good put $ret 0
+
+ set ret [eval {$db get} $gflags $txn {$key}]
+ error_check_good get $ret [list [list $key [pad_data $method $newstr]]]
+
+ # Middle Change
+ set len [string length $newstr]
+ set mid [expr $len / 2 ]
+ set beg [expr $mid - [expr $n_replace / 2] ]
+ set end [expr $beg + $n_replace - 1]
+ set s [string range $newstr $beg $end]
+ set repl [ replicate [string toupper $s] $n_add ]
+ set newstr [chop_data $method [string range $newstr 0 \
+ [expr $beg - 1 ] ]$repl[string range $newstr [expr $end + 1] end]]
+
+ set ret [eval {$db put} $txn {-partial [list $beg $n_replace] \
+ $key [chop_data $method $repl]}]
+ error_check_good put $ret 0
+
+ set ret [eval {$db get} $gflags $txn {$key}]
+ error_check_good get $ret [list [list $key [pad_data $method $newstr]]]
+
+ set dvals($key) [pad_data $method $newstr]
+}
+
+proc replicate { str times } {
+ set res $str
+ for { set i 1 } { $i < $times } { set i [expr $i * 2] } {
+ append res $res
+ }
+ return $res
+}
+
+proc repeat { str n } {
+ set ret ""
+ while { $n > 0 } {
+ set ret $str$ret
+ incr n -1
+ }
+ return $ret
+}
+
+proc isqrt { l } {
+ set s [expr sqrt($l)]
+ set ndx [expr [string first "." $s] - 1]
+ return [string range $s 0 $ndx]
+}
+
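+# Note (illustrative): repeat yields exactly $n copies, while
+# replicate doubles the string and so yields the next power-of-two
+# number of copies at or above $times:
+#
+#	repeat ab 3		;# => ababab (3 copies)
+#	replicate ab 3		;# => abababab (4 copies)
+#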
+# If we run watch_procs multiple times without an intervening
+# testdir cleanup, it's possible that old sentinel files will confuse
+# us. Make sure they're wiped out before we spawn any other processes.
+proc sentinel_init { } {
+ source ./include.tcl
+
+ set filelist {}
+ set ret [catch {glob $testdir/begin.*} result]
+ if { $ret == 0 } {
+ set filelist $result
+ }
+
+ set ret [catch {glob $testdir/end.*} result]
+ if { $ret == 0 } {
+ set filelist [concat $filelist $result]
+ }
+
+ foreach f $filelist {
+ fileremove $f
+ }
+}
+
+proc watch_procs { pidlist {delay 30} {max 3600} {quiet 0} } {
+ source ./include.tcl
+
+ set elapsed 0
+
+ # Don't start watching the processes until a sentinel
+ # file has been created for each one.
+ foreach pid $pidlist {
+ while { [file exists $testdir/begin.$pid] == 0 } {
+ tclsleep $delay
+ incr elapsed $delay
+ # If pids haven't been created in one-tenth
+ # of the time allowed for the whole test,
+ # there's a problem. Report an error and fail.
+ if { $elapsed > [expr {$max / 10}] } {
+ puts "FAIL: begin.pid not created"
+ break
+ }
+ }
+ }
+
+ while { 1 } {
+
+ tclsleep $delay
+ incr elapsed $delay
+
+ # Find the list of processes with outstanding sentinel
+ # files (i.e. a begin.pid and no end.pid).
+ set beginlist {}
+ set endlist {}
+ set ret [catch {glob $testdir/begin.*} result]
+ if { $ret == 0 } {
+ set beginlist $result
+ }
+ set ret [catch {glob $testdir/end.*} result]
+ if { $ret == 0 } {
+ set endlist $result
+ }
+
+ set bpids {}
+ catch {unset epids}
+ foreach begfile $beginlist {
+ lappend bpids [string range $begfile \
+ [string length $testdir/begin.] end]
+ }
+ foreach endfile $endlist {
+ set epids([string range $endfile \
+ [string length $testdir/end.] end]) 1
+ }
+
+ # The set of processes that we still want to watch, $l,
+ # is the set of pids that have begun but not ended
+ # according to their sentinel files.
+ set l {}
+ foreach p $bpids {
+ if { [info exists epids($p)] == 0 } {
+ lappend l $p
+ }
+ }
+
+ set rlist {}
+ foreach i $l {
+ set r [ catch { exec $KILL -0 $i } result ]
+ if { $r == 0 } {
+ lappend rlist $i
+ }
+ }
+ if { [ llength $rlist] == 0 } {
+ break
+ } else {
+ puts "[timestamp] processes running: $rlist"
+ }
+
+ if { $elapsed > $max } {
+ # We have exceeded the limit; kill processes
+ # and report an error
+ foreach i $l {
+ tclkill $i
+ }
+ }
+ }
+ if { $quiet == 0 } {
+ puts "All processes have exited."
+ }
+}
+
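+# Usage sketch (illustrative; "myscript.tcl" is hypothetical, and
+# wrap.tcl is assumed to be the wrapper that creates the begin./end.
+# sentinel files watch_procs polls for, as in op_recover below):
+#
+#	sentinel_init
+#	set p [exec $tclsh_path $test_path/wrap.tcl myscript.tcl \
+#	    $testdir/myscript.log &]
+#	watch_procs [list $p] 5
+#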
+# These routines are all used from within the dbscript.tcl tester.
+proc db_init { dbp do_data } {
+ global a_keys
+ global l_keys
+ source ./include.tcl
+
+ set txn ""
+ set nk 0
+ set lastkey ""
+
+ set a_keys() BLANK
+ set l_keys ""
+
+ set c [$dbp cursor]
+ for {set d [$c get -first] } { [llength $d] != 0 } {
+ set d [$c get -next] } {
+ set k [lindex [lindex $d 0] 0]
+ set d2 [lindex [lindex $d 0] 1]
+ incr nk
+ if { $do_data == 1 } {
+ if { [info exists a_keys($k)] } {
+				lappend a_keys($k) $d2
+ } else {
+ set a_keys($k) $d2
+ }
+ }
+
+ lappend l_keys $k
+ }
+ error_check_good curs_close [$c close] 0
+
+ return $nk
+}
+
+proc pick_op { min max n } {
+ if { $n == 0 } {
+ return add
+ }
+
+ set x [berkdb random_int 1 12]
+ if {$n < $min} {
+ if { $x <= 4 } {
+ return put
+ } elseif { $x <= 8} {
+ return get
+ } else {
+ return add
+ }
+ } elseif {$n > $max} {
+ if { $x <= 4 } {
+ return put
+ } elseif { $x <= 8 } {
+ return get
+ } else {
+ return del
+ }
+
+ } elseif { $x <= 3 } {
+ return del
+ } elseif { $x <= 6 } {
+ return get
+ } elseif { $x <= 9 } {
+ return put
+ } else {
+ return add
+ }
+}
+
+# random_data: Generate a string of random characters.
+# If recno is 0, use avg to pick a length between 1 and 2 * avg - 1.
+# If recno is non-0, generate an integer between 1 and 2 ^ (2 * avg - 1),
+# capped so that it fits into a 32-bit integer.
+# If the unique flag is 1, then make sure that the string is unique
+# in the array "where".
+proc random_data { avg unique where {recno 0} } {
+ upvar #0 $where arr
+ global debug_on
+ set min 1
+ set max [expr $avg+$avg-1]
+ if { $recno } {
+ #
+ # Tcl seems to have problems with values > 30.
+ #
+ if { $max > 30 } {
+ set max 30
+ }
+ set maxnum [expr int(pow(2, $max))]
+ }
+ while {1} {
+ set len [berkdb random_int $min $max]
+ set s ""
+ if {$recno} {
+ set s [berkdb random_int 1 $maxnum]
+ } else {
+ for {set i 0} {$i < $len} {incr i} {
+ append s [int_to_char [berkdb random_int 0 25]]
+ }
+ }
+
+ if { $unique == 0 || [info exists arr($s)] == 0 } {
+ break
+ }
+ }
+
+ return $s
+}
+
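+# Example (illustrative): [random_data 10 0 junk] returns a random
+# alphabetic string of length 1..19, while [random_data 10 0 junk 1]
+# instead returns an integer in the range 1 .. 2^19.
+#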
+proc random_key { } {
+ global l_keys
+ global nkeys
+ set x [berkdb random_int 0 [expr $nkeys - 1]]
+ return [lindex $l_keys $x]
+}
+
+proc is_err { desired } {
+ set x [berkdb random_int 1 100]
+ if { $x <= $desired } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc pick_cursput { } {
+ set x [berkdb random_int 1 4]
+ switch $x {
+ 1 { return "-keylast" }
+ 2 { return "-keyfirst" }
+ 3 { return "-before" }
+ 4 { return "-after" }
+ }
+}
+
+proc random_cursor { curslist } {
+ global l_keys
+ global nkeys
+
+ set x [berkdb random_int 0 [expr [llength $curslist] - 1]]
+ set dbc [lindex $curslist $x]
+
+ # We want to randomly set the cursor. Pick a key.
+ set k [random_key]
+ set r [$dbc get "-set" $k]
+ error_check_good cursor_get:$k [is_substr Error $r] 0
+
+ # Now move forward or backward some hops to randomly
+ # position the cursor.
+ set dist [berkdb random_int -10 10]
+
+ set dir "-next"
+ set boundary "-first"
+ if { $dist < 0 } {
+ set dir "-prev"
+ set boundary "-last"
+ set dist [expr 0 - $dist]
+ }
+
+ for { set i 0 } { $i < $dist } { incr i } {
+ set r [ record $dbc get $dir $k ]
+		if { [llength $r] == 0 } {
+ set r [ record $dbc get $k $boundary ]
+ }
+ error_check_bad dbcget [llength $r] 0
+ }
+	return [linsert $r 0 $dbc]
+}
+
+proc record { args } {
+# Recording every operation makes tests ridiculously slow on
+# NT, so we are commenting this out; for debugging purposes,
+# it will undoubtedly be useful to uncomment this.
+# puts $args
+# flush stdout
+ return [eval $args]
+}
+
+proc newpair { k data } {
+ global l_keys
+ global a_keys
+ global nkeys
+
+ set a_keys($k) $data
+ lappend l_keys $k
+ incr nkeys
+}
+
+proc rempair { k } {
+ global l_keys
+ global a_keys
+ global nkeys
+
+ unset a_keys($k)
+ set n [lsearch $l_keys $k]
+ error_check_bad rempair:$k $n -1
+ set l_keys [lreplace $l_keys $n $n]
+ incr nkeys -1
+}
+
+proc changepair { k data } {
+ global l_keys
+ global a_keys
+ global nkeys
+
+ set a_keys($k) $data
+}
+
+proc changedup { k olddata newdata } {
+ global l_keys
+ global a_keys
+ global nkeys
+
+ set d $a_keys($k)
+ error_check_bad changedup:$k [llength $d] 0
+
+ set n [lsearch $d $olddata]
+ error_check_bad changedup:$k $n -1
+
+ set a_keys($k) [lreplace $a_keys($k) $n $n $newdata]
+}
+
+# Insert a dup into the a_keys array with DB_KEYFIRST.
+proc adddup { k olddata newdata } {
+ global l_keys
+ global a_keys
+ global nkeys
+
+ set d $a_keys($k)
+ if { [llength $d] == 0 } {
+ lappend l_keys $k
+ incr nkeys
+		set a_keys($k) [list $newdata]
+ }
+
+ set ndx 0
+
+	set d [linsert $d $ndx $newdata]
+ set a_keys($k) $d
+}
+
+proc remdup { k data } {
+ global l_keys
+ global a_keys
+ global nkeys
+
+	set d $a_keys($k)
+	error_check_bad remdup:$k [llength $d] 0
+
+	set n [lsearch $d $data]
+	error_check_bad remdup:$k $n -1
+
+ set a_keys($k) [lreplace $a_keys($k) $n $n]
+}
+
+proc dump_full_file { db txn outfile checkfunc start continue } {
+ source ./include.tcl
+
+ set outf [open $outfile w]
+ # Now we will get each key from the DB and dump to outfile
+ set c [eval {$db cursor} $txn]
+ error_check_good dbcursor [is_valid_cursor $c $db] TRUE
+
+ for {set d [$c get $start] } { [string length $d] != 0 } {
+ set d [$c get $continue] } {
+ set k [lindex [lindex $d 0] 0]
+ set d2 [lindex [lindex $d 0] 1]
+ $checkfunc $k $d2
+ puts $outf "$k\t$d2"
+ }
+ close $outf
+ error_check_good curs_close [$c close] 0
+}
+
+proc int_to_char { i } {
+ global alphabet
+
+ return [string index $alphabet $i]
+}
+
+proc dbcheck { key data } {
+ global l_keys
+ global a_keys
+ global nkeys
+ global check_array
+
+ if { [lsearch $l_keys $key] == -1 } {
+ error "FAIL: Key |$key| not in list of valid keys"
+ }
+
+ set d $a_keys($key)
+
+ if { [info exists check_array($key) ] } {
+ set check $check_array($key)
+ } else {
+ set check {}
+ }
+
+ if { [llength $d] > 1 } {
+ if { [llength $check] != [llength $d] } {
+ # Make the check array the right length
+ for { set i [llength $check] } { $i < [llength $d] } \
+ {incr i} {
+ lappend check 0
+ }
+ set check_array($key) $check
+ }
+
+ # Find this data's index
+ set ndx [lsearch $d $data]
+ if { $ndx == -1 } {
+ error "FAIL: \
+ Data |$data| not found for key $key. Found |$d|"
+ }
+
+ # Set the bit in the check array
+ set check_array($key) [lreplace $check_array($key) $ndx $ndx 1]
+ } elseif { [string compare $d $data] != 0 } {
+ error "FAIL: \
+ Invalid data |$data| for key |$key|. Expected |$d|."
+ } else {
+ set check_array($key) 1
+ }
+}
+
+# Dump out the file and verify it
+proc filecheck { file txn } {
+ global check_array
+ global l_keys
+ global nkeys
+ global a_keys
+ source ./include.tcl
+
+ if { [info exists check_array] == 1 } {
+ unset check_array
+ }
+
+ open_and_dump_file $file NULL $file.dump dbcheck dump_full_file \
+ "-first" "-next"
+
+ # Check that everything we checked had all its data
+ foreach i [array names check_array] {
+ set count 0
+ foreach j $check_array($i) {
+ if { $j != 1 } {
+ puts -nonewline "Key |$i| never found datum"
+ puts " [lindex $a_keys($i) $count]"
+ }
+ incr count
+ }
+ }
+
+ # Check that all keys appeared in the checked array
+ set count 0
+ foreach k $l_keys {
+ if { [info exists check_array($k)] == 0 } {
+ puts "filecheck: key |$k| not found. Data: $a_keys($k)"
+ }
+ incr count
+ }
+
+ if { $count != $nkeys } {
+ puts "filecheck: Got $count keys; expected $nkeys"
+ }
+}
+
+proc cleanup { dir env { quiet 0 } } {
+ global gen_upgrade
+ global is_qnx_test
+ global old_encrypt
+ global passwd
+ global upgrade_dir
+ global upgrade_be
+ global upgrade_method
+ global upgrade_name
+ source ./include.tcl
+
+ if { $gen_upgrade == 1 } {
+ set vers [berkdb version]
+ set maj [lindex $vers 0]
+ set min [lindex $vers 1]
+
+ # Is this machine big or little endian? We want to mark
+ # the test directories appropriately, since testing
+ # little-endian databases generated by a big-endian machine,
+ # and/or vice versa, is interesting.
+ if { [big_endian] } {
+ set myendianness be
+ } else {
+ set myendianness le
+ }
+
+ if { $upgrade_be == 1 } {
+ set version_dir "$myendianness-$maj.${min}be"
+ set en be
+ } else {
+ set version_dir "$myendianness-$maj.${min}le"
+ set en le
+ }
+
+ set dest $upgrade_dir/$version_dir/$upgrade_method
+ exec mkdir -p $dest
+
+ set dbfiles [glob -nocomplain $dir/*.db]
+ foreach dbfile $dbfiles {
+ set basename [string range $dbfile \
+ [expr [string length $dir] + 1] end-3]
+
+ set newbasename $upgrade_name-$basename
+
+ # db_dump file
+ error_check_good db_dump($dbfile) \
+ [catch {exec $util_path/db_dump -k $dbfile > \
+ $dir/$newbasename.dump}] 0
+
+ # tcl_dump file
+ upgrade_dump $dbfile \
+ $dir/$newbasename.tcldump
+
+ # Rename dbfile and any dbq files.
+ file rename $dbfile $dir/$newbasename-$en.db
+ foreach dbq \
+ [glob -nocomplain $dir/__dbq.$basename.db.*] {
+ set s [string length $dir/__dbq.]
+ set newname [string replace $dbq $s \
+ [expr [string length $basename] + $s - 1] \
+ $newbasename-$en]
+ file rename $dbq $newname
+ }
+ set cwd [pwd]
+ cd $dir
+ catch {eval exec tar -cvf $dest/$newbasename.tar \
+ [glob $newbasename* __dbq.$newbasename-$en.db.*]}
+ catch {exec gzip -9v $dest/$newbasename.tar}
+ cd $cwd
+ }
+ }
+
+# check_handles
+ set remfiles {}
+ set ret [catch { glob $dir/* } result]
+ if { $ret == 0 } {
+ foreach fileorig $result {
+ #
+ # We:
+ # - Ignore any env-related files, which are
+ # those that have __db.* or log.* if we are
+ # running in an env. Also ignore files whose
+ # names start with REPDIR_; these are replication
+ # subdirectories.
+ # - Call 'dbremove' on any databases.
+ # Remove any remaining temp files.
+ #
+ switch -glob -- $fileorig {
+ */DIR_* -
+ */__db.* -
+ */log.* {
+ if { $env != "NULL" } {
+ continue
+ } else {
+ if { $is_qnx_test } {
+ catch {berkdb envremove -force \
+ -home $dir} r
+ }
+ lappend remfiles $fileorig
+ }
+ }
+ *.db {
+ set envargs ""
+ set encarg ""
+ #
+ # If in an env, it should be open crypto
+ # or not already.
+ #
+ if { $env != "NULL"} {
+ set file [file tail $fileorig]
+ set envargs " -env $env "
+ if { [is_txnenv $env] } {
+ append envargs " -auto_commit "
+ }
+ } else {
+ if { $old_encrypt != 0 } {
+ set encarg "-encryptany $passwd"
+ }
+ set file $fileorig
+ }
+
+ # If a database is left in a corrupt
+ # state, dbremove might not be able to handle
+ # it (it does an open before the remove).
+ # Be prepared for this, and if necessary,
+ # just forcibly remove the file with a warning
+ # message.
+ set ret [catch \
+ {eval {berkdb dbremove} $envargs $encarg \
+ $file} res]
+ if { $ret != 0 } {
+ # If it failed, there is a chance
+ # that the previous run was using
+ # encryption and we cannot know about
+ # it (different tclsh instantiation).
+ # Try to remove it with crypto.
+ if { $env == "NULL" && \
+ $old_encrypt == 0} {
+ set ret [catch \
+ {eval {berkdb dbremove} \
+ -encryptany $passwd \
+ $envargs $file} res]
+ }
+ if { $ret != 0 } {
+ if { $quiet == 0 } {
+ puts \
+ "FAIL: dbremove in cleanup failed: $res"
+ }
+ set file $fileorig
+ lappend remfiles $file
+ }
+ }
+ }
+ default {
+ lappend remfiles $fileorig
+ }
+ }
+ }
+ if {[llength $remfiles] > 0} {
+ eval fileremove -f $remfiles
+ }
+ }
+}
+
+proc log_cleanup { dir } {
+ source ./include.tcl
+
+ set files [glob -nocomplain $dir/log.*]
+ if { [llength $files] != 0} {
+ foreach f $files {
+ fileremove -f $f
+ }
+ }
+}
+
+proc env_cleanup { dir } {
+ global old_encrypt
+ global passwd
+ source ./include.tcl
+
+ set encarg ""
+ if { $old_encrypt != 0 } {
+ set encarg "-encryptany $passwd"
+ }
+ set stat [catch {eval {berkdb envremove -home} $dir $encarg} ret]
+ #
+ # If something failed and we are left with a region entry
+ # in /dev/shmem that is zero-length, the envremove will
+ # succeed, and the shm_unlink will succeed, but it will not
+ # remove the zero-length entry from /dev/shmem. Remove it
+ # using fileremove or else all other tests using an env
+ # will immediately fail.
+ #
+ if { $is_qnx_test == 1 } {
+ set region_files [glob -nocomplain /dev/shmem/$dir*]
+ if { [llength $region_files] != 0 } {
+ foreach f $region_files {
+ fileremove -f $f
+ }
+ }
+ }
+ log_cleanup $dir
+ cleanup $dir NULL
+}
+
+proc remote_cleanup { server dir localdir } {
+ set home [file tail $dir]
+ error_check_good cleanup:remove [berkdb envremove -home $home \
+ -server $server] 0
+ catch {exec rsh $server rm -f $dir/*} ret
+ cleanup $localdir NULL
+}
+
+proc help { cmd } {
+ if { [info command $cmd] == $cmd } {
+ set is_proc [lsearch [info procs $cmd] $cmd]
+ if { $is_proc == -1 } {
+ # Not a procedure; must be a C command
+ # Let's hope that it takes some parameters
+ # and that it prints out a message
+ puts "Usage: [eval $cmd]"
+ } else {
+ # It is a tcl procedure
+ puts -nonewline "Usage: $cmd"
+ set args [info args $cmd]
+ foreach a $args {
+ set is_def [info default $cmd $a val]
+ if { $is_def != 0 } {
+ # Default value
+ puts -nonewline " $a=$val"
+ } elseif {$a == "args"} {
+ # Print out flag values
+ puts " options"
+ args
+ } else {
+ # No default value
+ puts -nonewline " $a"
+ }
+ }
+ puts ""
+ }
+ } else {
+ puts "$cmd is not a command"
+ }
+}
+
+# Run a recovery test for a particular operation
+# Notice that we catch the return from CP and do not do anything with it.
+# This is because Solaris CP seems to exit non-zero on occasion, but
+# everything else seems to run just fine.
+#
+# We split it into two functions so that the preparation and command
+# could be executed in a different process than the recovery.
+#
+proc op_codeparse { encodedop op } {
+ set op1 ""
+ set op2 ""
+ switch $encodedop {
+ "abort" {
+ set op1 $encodedop
+ set op2 ""
+ }
+ "commit" {
+ set op1 $encodedop
+ set op2 ""
+ }
+ "prepare-abort" {
+ set op1 "prepare"
+ set op2 "abort"
+ }
+ "prepare-commit" {
+ set op1 "prepare"
+ set op2 "commit"
+ }
+ "prepare-discard" {
+ set op1 "prepare"
+ set op2 "discard"
+ }
+ }
+
+ if { $op == "op" } {
+ return $op1
+ } else {
+ return $op2
+ }
+}
+
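+# Example (illustrative):
+#
+#	op_codeparse prepare-abort op	;# => prepare
+#	op_codeparse prepare-abort sub	;# => abort
+#	op_codeparse commit sub		;# => "" (no second phase)
+#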
+proc op_recover { encodedop dir env_cmd dbfile cmd msg } {
+ source ./include.tcl
+
+ set op [op_codeparse $encodedop "op"]
+ set op2 [op_codeparse $encodedop "sub"]
+ puts "\t$msg $encodedop"
+ set gidf ""
+ if { $op == "prepare" } {
+ sentinel_init
+
+ # Fork off a child to run the cmd
+ # We append the gid, so start here making sure
+ # we don't have old gid's around.
+ set outfile $testdir/childlog
+ fileremove -f $testdir/gidfile
+ set gidf $testdir/gidfile
+ set pidlist {}
+ # puts "$tclsh_path $test_path/recdscript.tcl $testdir/recdout \
+ # $op $dir $env_cmd $dbfile $gidf $cmd"
+ set p [exec $tclsh_path $test_path/wrap.tcl recdscript.tcl \
+ $testdir/recdout $op $dir $env_cmd $dbfile $gidf $cmd &]
+ lappend pidlist $p
+ watch_procs $pidlist 5
+ set f1 [open $testdir/recdout r]
+ set r [read $f1]
+ puts -nonewline $r
+ close $f1
+ fileremove -f $testdir/recdout
+ } else {
+ op_recover_prep $op $dir $env_cmd $dbfile $gidf $cmd
+ }
+ op_recover_rec $op $op2 $dir $env_cmd $dbfile $gidf
+}
+
+proc op_recover_prep { op dir env_cmd dbfile gidf cmd } {
+ global log_log_record_types
+ global recd_debug
+ global recd_id
+ global recd_op
+ source ./include.tcl
+
+ #puts "op_recover: $op $dir $env $dbfile $cmd"
+
+ set init_file $dir/t1
+ set afterop_file $dir/t2
+ set final_file $dir/t3
+
+ # Keep track of the log types we've seen
+ if { $log_log_record_types == 1} {
+ logtrack_read $dir
+ }
+
+ # Save the initial file and open the environment and the file
+ catch { file copy -force $dir/$dbfile $dir/$dbfile.init } res
+ copy_extent_file $dir $dbfile init
+
+ convert_encrypt $env_cmd
+ set env [eval $env_cmd]
+ error_check_good envopen [is_valid_env $env] TRUE
+
+ set db [berkdb open -auto_commit -env $env $dbfile]
+ error_check_good dbopen [is_valid_db $db] TRUE
+
+ # Dump out file contents for initial case
+ open_and_dump_file $dbfile $env $init_file nop \
+ dump_file_direction "-first" "-next"
+
+ set t [$env txn]
+ error_check_bad txn_begin $t NULL
+ error_check_good txn_begin [is_substr $t "txn"] 1
+
+ # Now fill in the db, tmgr, and the txnid in the command
+ set exec_cmd $cmd
+
+ set i [lsearch $cmd ENV]
+ if { $i != -1 } {
+ set exec_cmd [lreplace $exec_cmd $i $i $env]
+ }
+
+ set i [lsearch $cmd TXNID]
+ if { $i != -1 } {
+ set exec_cmd [lreplace $exec_cmd $i $i $t]
+ }
+
+ set i [lsearch $exec_cmd DB]
+ if { $i != -1 } {
+ set exec_cmd [lreplace $exec_cmd $i $i $db]
+ }
+
+ # To test DB_CONSUME, we need to expect a record return, not "0".
+ set i [lsearch $exec_cmd "-consume"]
+ if { $i != -1 } {
+ set record_exec_cmd_ret 1
+ } else {
+ set record_exec_cmd_ret 0
+ }
+
+ # For the DB_APPEND test, we need to expect a return other than
+ # 0; set this flag to be more lenient in the error_check_good.
+ set i [lsearch $exec_cmd "-append"]
+ if { $i != -1 } {
+ set lenient_exec_cmd_ret 1
+ } else {
+ set lenient_exec_cmd_ret 0
+ }
+
+ # Execute command and commit/abort it.
+ set ret [eval $exec_cmd]
+ if { $record_exec_cmd_ret == 1 } {
+ error_check_good "\"$exec_cmd\"" [llength [lindex $ret 0]] 2
+ } elseif { $lenient_exec_cmd_ret == 1 } {
+ error_check_good "\"$exec_cmd\"" [expr $ret > 0] 1
+ } else {
+ error_check_good "\"$exec_cmd\"" $ret 0
+ }
+
+ set record_exec_cmd_ret 0
+ set lenient_exec_cmd_ret 0
+
+ # Sync the file so that we can capture a snapshot to test recovery.
+ error_check_good sync:$db [$db sync] 0
+
+ catch { file copy -force $dir/$dbfile $dir/$dbfile.afterop } res
+ copy_extent_file $dir $dbfile afterop
+ open_and_dump_file $dir/$dbfile.afterop NULL \
+ $afterop_file nop dump_file_direction "-first" "-next"
+
+ #puts "\t\t\tExecuting txn_$op:$t"
+ if { $op == "prepare" } {
+ set gid [make_gid global:$t]
+ set gfd [open $gidf w+]
+ puts $gfd $gid
+ close $gfd
+ error_check_good txn_$op:$t [$t $op $gid] 0
+ } else {
+ error_check_good txn_$op:$t [$t $op] 0
+ }
+
+ switch $op {
+ "commit" { puts "\t\tCommand executed and committed." }
+ "abort" { puts "\t\tCommand executed and aborted." }
+ "prepare" { puts "\t\tCommand executed and prepared." }
+ }
+
+ # Sync the file so that we can capture a snapshot to test recovery.
+ error_check_good sync:$db [$db sync] 0
+
+ catch { file copy -force $dir/$dbfile $dir/$dbfile.final } res
+ copy_extent_file $dir $dbfile final
+ open_and_dump_file $dir/$dbfile.final NULL \
+ $final_file nop dump_file_direction "-first" "-next"
+
+ # If this is an abort or prepare-abort, it should match the
+ # original file.
+ # If this was a commit or prepare-commit, then this file should
+ # match the afterop file.
+ # If this was a prepare without an abort or commit, we still
+ # have transactions active, and peering at the database from
+ # another environment will show data from uncommitted transactions.
+ # Thus we just skip this in the prepare-only case; what
+ # we care about are the results of a prepare followed by a
+ # recovery, which we test later.
+ if { $op == "commit" } {
+ filesort $afterop_file $afterop_file.sort
+ filesort $final_file $final_file.sort
+ error_check_good \
+ diff(post-$op,pre-commit):diff($afterop_file,$final_file) \
+ [filecmp $afterop_file.sort $final_file.sort] 0
+ } elseif { $op == "abort" } {
+ filesort $init_file $init_file.sort
+ filesort $final_file $final_file.sort
+ error_check_good \
+ diff(initial,post-$op):diff($init_file,$final_file) \
+ [filecmp $init_file.sort $final_file.sort] 0
+ } else {
+ # Make sure this really is one of the prepare tests
+ error_check_good assert:prepare-test $op "prepare"
+ }
+
+ # Running recovery on this database should not do anything.
+ # Flush all data to disk, close the environment and save the
+ # file.
+ # XXX DO NOT CLOSE FILE ON PREPARE -- if you are prepared,
+ # you really have an active transaction and you're not allowed
+ # to close files that are being acted upon by in-process
+ # transactions.
+ if { $op != "prepare" } {
+ error_check_good close:$db [$db close] 0
+ }
+
+ #
+ # If we are running 'prepare' don't close the env with an
+ # active transaction. Leave it alone so the close won't
+ # quietly abort it on us.
+ if { [is_substr $op "prepare"] != 1 } {
+ error_check_good envclose [$env close] 0
+ }
+ return
+}
+
+proc op_recover_rec { op op2 dir env_cmd dbfile gidf} {
+ global log_log_record_types
+ global recd_debug
+ global recd_id
+ global recd_op
+ global encrypt
+ global passwd
+ source ./include.tcl
+
+ #puts "op_recover_rec: $op $op2 $dir $env_cmd $dbfile $gidf"
+
+ set init_file $dir/t1
+ set afterop_file $dir/t2
+ set final_file $dir/t3
+
+ # Keep track of the log types we've seen
+ if { $log_log_record_types == 1} {
+ logtrack_read $dir
+ }
+
+ berkdb debug_check
+ puts -nonewline "\t\top_recover_rec: Running recovery ... "
+ flush stdout
+
+ set recargs "-h $dir -c "
+ if { $encrypt > 0 } {
+ append recargs " -P $passwd "
+ }
+ set stat [catch {eval exec $util_path/db_recover -e $recargs} result]
+ if { $stat == 1 } {
+ error "FAIL: Recovery error: $result."
+ }
+ puts -nonewline "complete ... "
+
+ #
+ # We cannot run db_recover here because that will open an env, run
+ # recovery, then close it, which will abort the outstanding txns.
+ # We want to do it ourselves.
+ #
+ set env [eval $env_cmd]
+ error_check_good dbenv [is_valid_widget $env env] TRUE
+
+ error_check_good db_verify [verify_dir $testdir "\t\t" 0 1] 0
+ puts "verified"
+
+ # If we left a txn as prepared, but not aborted or committed,
+ # we need to do a txn_recover. Make sure we have the same
+ # number of txns we want.
+ if { $op == "prepare"} {
+ set txns [$env txn_recover]
+ error_check_bad txnrecover [llength $txns] 0
+ set gfd [open $gidf r]
+ set origgid [read -nonewline $gfd]
+ close $gfd
+ set txnlist [lindex $txns 0]
+ set t [lindex $txnlist 0]
+ set gid [lindex $txnlist 1]
+ error_check_good gidcompare $gid $origgid
+ puts "\t\t\tExecuting txn_$op2:$t"
+ error_check_good txn_$op2:$t [$t $op2] 0
+ #
+ # If we are testing discard, we do need to resolve
+ # the txn, so get the list again and now abort it.
+ #
+ if { $op2 == "discard" } {
+ set txns [$env txn_recover]
+ error_check_bad txnrecover [llength $txns] 0
+ set txnlist [lindex $txns 0]
+ set t [lindex $txnlist 0]
+ set gid [lindex $txnlist 1]
+ error_check_good gidcompare $gid $origgid
+ puts "\t\t\tExecuting txn_abort:$t"
+ error_check_good disc_txn_abort:$t [$t abort] 0
+ }
+ }
+
+ open_and_dump_file $dir/$dbfile NULL $final_file nop \
+ dump_file_direction "-first" "-next"
+ if { $op == "commit" || $op2 == "commit" } {
+ filesort $afterop_file $afterop_file.sort
+ filesort $final_file $final_file.sort
+ error_check_good \
+ diff(post-$op,pre-commit):diff($afterop_file,$final_file) \
+ [filecmp $afterop_file.sort $final_file.sort] 0
+ } else {
+ filesort $init_file $init_file.sort
+ filesort $final_file $final_file.sort
+ error_check_good \
+ diff(initial,post-$op):diff($init_file,$final_file) \
+ [filecmp $init_file.sort $final_file.sort] 0
+ }
+
+ # Now close the environment, substitute a file that will need
+ # recovery and try running recovery again.
+ reset_env $env
+ if { $op == "commit" || $op2 == "commit" } {
+ catch { file copy -force $dir/$dbfile.init $dir/$dbfile } res
+ move_file_extent $dir $dbfile init copy
+ } else {
+ catch { file copy -force $dir/$dbfile.afterop $dir/$dbfile } res
+ move_file_extent $dir $dbfile afterop copy
+ }
+
+ berkdb debug_check
+ puts -nonewline "\t\tRunning recovery on pre-op database ... "
+ flush stdout
+
+ set stat [catch {eval exec $util_path/db_recover $recargs} result]
+ if { $stat == 1 } {
+ error "FAIL: Recovery error: $result."
+ }
+ puts -nonewline "complete ... "
+
+ error_check_good db_verify_preop [verify_dir $testdir "\t\t" 0 1] 0
+
+ puts "verified"
+
+ set env [eval $env_cmd]
+
+ open_and_dump_file $dir/$dbfile NULL $final_file nop \
+ dump_file_direction "-first" "-next"
+ if { $op == "commit" || $op2 == "commit" } {
+ filesort $final_file $final_file.sort
+ filesort $afterop_file $afterop_file.sort
+ error_check_good \
+ diff(post-$op,recovered):diff($afterop_file,$final_file) \
+ [filecmp $afterop_file.sort $final_file.sort] 0
+ } else {
+ filesort $init_file $init_file.sort
+ filesort $final_file $final_file.sort
+ error_check_good \
+ diff(initial,post-$op):diff($init_file,$final_file) \
+ [filecmp $init_file.sort $final_file.sort] 0
+ }
+
+ # This should just close the environment, not blow it away.
+ reset_env $env
+}
+
+proc populate { db method txn n dups bigdata } {
+ source ./include.tcl
+
+ set did [open $dict]
+ set count 0
+ while { [gets $did str] != -1 && $count < $n } {
+ if { [is_record_based $method] == 1 } {
+ set key [expr $count + 1]
+ } elseif { $dups == 1 } {
+ set key duplicate_key
+ } else {
+ set key $str
+ }
+ if { $bigdata == 1 && [berkdb random_int 1 3] == 1} {
+ set str [replicate $str 1000]
+ }
+
+ set ret [$db put -txn $txn $key $str]
+ error_check_good db_put:$key $ret 0
+ incr count
+ }
+ close $did
+ return 0
+}
+
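+# Usage sketch (illustrative; assumes an open database $db and an
+# open transaction $t created by this framework):
+#
+#	populate $db -btree $t 1000 0 0	;# 1000 dictionary words, no dups
+#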
+proc big_populate { db txn n } {
+ source ./include.tcl
+
+ set did [open $dict]
+ set count 0
+ while { [gets $did str] != -1 && $count < $n } {
+ set key [replicate $str 50]
+ set ret [$db put -txn $txn $key $str]
+ error_check_good db_put:$key $ret 0
+ incr count
+ }
+ close $did
+ return 0
+}
+
+proc unpopulate { db txn num } {
+ source ./include.tcl
+
+ set c [eval {$db cursor} "-txn $txn"]
+ error_check_bad $db:cursor $c NULL
+ error_check_good $db:cursor [is_substr $c $db] 1
+
+ set i 0
+ for {set d [$c get -first] } { [llength $d] != 0 } {
+ set d [$c get -next] } {
+ $c del
+ incr i
+		if { $num != 0 && $i >= $num } {
+ break
+ }
+ }
+ error_check_good cursor_close [$c close] 0
+ return 0
+}
+
+proc reset_env { env } {
+ error_check_good env_close [$env close] 0
+}
+
+proc minlocks { myenv locker_id obj_id num } {
+ return [countlocks $myenv $locker_id $obj_id $num ]
+}
+
+proc maxlocks { myenv locker_id obj_id num } {
+ return [countlocks $myenv $locker_id $obj_id $num ]
+}
+
+proc minwrites { myenv locker_id obj_id num } {
+ return [countlocks $myenv $locker_id $obj_id $num ]
+}
+
+proc countlocks { myenv locker_id obj_id num } {
+ set locklist ""
+ for { set i 0} {$i < [expr $obj_id * 4]} { incr i } {
+ set r [catch {$myenv lock_get read $locker_id \
+ [expr $obj_id * 1000 + $i]} l ]
+ if { $r != 0 } {
+ puts $l
+ return ERROR
+ } else {
+ error_check_good lockget:$obj_id [is_substr $l $myenv] 1
+ lappend locklist $l
+ }
+ }
+
+ # Now acquire a write lock
+ if { $obj_id != 1 } {
+ set r [catch {$myenv lock_get write $locker_id \
+ [expr $obj_id * 1000 + 10]} l ]
+ if { $r != 0 } {
+ puts $l
+ return ERROR
+ } else {
+ error_check_good lockget:$obj_id [is_substr $l $myenv] 1
+ lappend locklist $l
+ }
+ }
+
+ set ret [ring $myenv $locker_id $obj_id $num]
+
+ foreach l $locklist {
+ error_check_good lockput:$l [$l put] 0
+ }
+
+ return $ret
+}
+
+# This routine will let us obtain a ring of deadlocks.
+# Each locker will get a lock on obj_id, then sleep, and
+# then try to lock (obj_id + 1) % num.
+# When the lock is finally granted, we release our locks and
+# return 1 if we got both locks and DEADLOCK if we deadlocked.
+# The results here should be that 1 locker deadlocks and the
+# rest all finish successfully.
+proc ring { myenv locker_id obj_id num } {
+ source ./include.tcl
+
+ if {[catch {$myenv lock_get write $locker_id $obj_id} lock1] != 0} {
+ puts $lock1
+ return ERROR
+ } else {
+ error_check_good lockget:$obj_id [is_substr $lock1 $myenv] 1
+ }
+
+ tclsleep 30
+ set nextobj [expr ($obj_id + 1) % $num]
+ set ret 1
+ if {[catch {$myenv lock_get write $locker_id $nextobj} lock2] != 0} {
+ if {[string match "*DEADLOCK*" $lock2] == 1} {
+ set ret DEADLOCK
+ } else {
+ puts $lock2
+ set ret ERROR
+ }
+ } else {
+ error_check_good lockget:$obj_id [is_substr $lock2 $myenv] 1
+ }
+
+ # Now release the first lock
+ error_check_good lockput:$lock1 [$lock1 put] 0
+
+ if {$ret == 1} {
+ error_check_bad lockget:$obj_id $lock2 NULL
+ error_check_good lockget:$obj_id [is_substr $lock2 $myenv] 1
+ error_check_good lockput:$lock2 [$lock2 put] 0
+ }
+ return $ret
+}
+
+# This routine will create massive deadlocks.
+# Each locker will get a readlock on obj_id, then sleep, and
+# then try to upgrade the readlock to a write lock.
+# When the lock is finally granted, we release our first lock and
+# return 1 if we got both locks and DEADLOCK if we deadlocked.
+# The results here should be that 1 locker succeeds in getting all
+# the locks and everyone else deadlocks.
+proc clump { myenv locker_id obj_id num } {
+ source ./include.tcl
+
+ set obj_id 10
+ if {[catch {$myenv lock_get read $locker_id $obj_id} lock1] != 0} {
+ puts $lock1
+ return ERROR
+ } else {
+ error_check_good lockget:$obj_id \
+ [is_valid_lock $lock1 $myenv] TRUE
+ }
+
+ tclsleep 30
+ set ret 1
+ if {[catch {$myenv lock_get write $locker_id $obj_id} lock2] != 0} {
+ if {[string match "*DEADLOCK*" $lock2] == 1} {
+ set ret DEADLOCK
+ } else {
+ set ret ERROR
+ }
+ } else {
+ error_check_good \
+ lockget:$obj_id [is_valid_lock $lock2 $myenv] TRUE
+ }
+
+ # Now release the first lock
+ error_check_good lockput:$lock1 [$lock1 put] 0
+
+ if {$ret == 1} {
+ error_check_good \
+ lockget:$obj_id [is_valid_lock $lock2 $myenv] TRUE
+ error_check_good lockput:$lock2 [$lock2 put] 0
+ }
+ return $ret
+ }
+
+proc dead_check { t procs timeout dead clean other } {
+ error_check_good $t:$procs:other $other 0
+ switch $t {
+ ring {
+ # with timeouts the number of deadlocks is unpredictable
+ if { $timeout != 0 && $dead > 1 } {
+ set clean [ expr $clean + $dead - 1]
+ set dead 1
+ }
+ error_check_good $t:$procs:deadlocks $dead 1
+ error_check_good $t:$procs:success $clean \
+ [expr $procs - 1]
+ }
+ clump {
+ error_check_good $t:$procs:deadlocks $dead \
+ [expr $procs - 1]
+ error_check_good $t:$procs:success $clean 1
+ }
+ oldyoung {
+ error_check_good $t:$procs:deadlocks $dead 1
+ error_check_good $t:$procs:success $clean \
+ [expr $procs - 1]
+ }
+ minlocks {
+ error_check_good $t:$procs:deadlocks $dead 1
+ error_check_good $t:$procs:success $clean \
+ [expr $procs - 1]
+ }
+ maxlocks {
+ error_check_good $t:$procs:deadlocks $dead 1
+ error_check_good $t:$procs:success $clean \
+ [expr $procs - 1]
+ }
+ minwrites {
+ error_check_good $t:$procs:deadlocks $dead 1
+ error_check_good $t:$procs:success $clean \
+ [expr $procs - 1]
+ }
+ default {
+ error "Test $t not implemented"
+ }
+ }
+}
+
+proc rdebug { id op where } {
+ global recd_debug
+ global recd_id
+ global recd_op
+
+ set recd_debug $where
+ set recd_id $id
+ set recd_op $op
+}
+
+proc rtag { msg id } {
+ set tag [lindex $msg 0]
+ set tail [expr [string length $tag] - 2]
+ set tag [string range $tag $tail $tail]
+ if { $id == $tag } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc zero_list { n } {
+ set ret ""
+ while { $n > 0 } {
+ lappend ret 0
+ incr n -1
+ }
+ return $ret
+}
+
+proc check_dump { k d } {
+ puts "key: $k data: $d"
+}
+
+proc reverse { s } {
+ set res ""
+ for { set i 0 } { $i < [string length $s] } { incr i } {
+ set res "[string index $s $i]$res"
+ }
+
+ return $res
+}
+
+#
+# This is an internal-only proc.  All tests should use 'is_valid_db' etc.
+#
+proc is_valid_widget { w expected } {
+ # First N characters must match "expected"
+ set l [string length $expected]
+ incr l -1
+ if { [string compare [string range $w 0 $l] $expected] != 0 } {
+ return $w
+ }
+
+ # Remaining characters must be digits
+ incr l 1
+ for { set i $l } { $i < [string length $w] } { incr i} {
+ set c [string index $w $i]
+ if { $c < "0" || $c > "9" } {
+ return $w
+ }
+ }
+
+ return TRUE
+}
+
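+# Example (illustrative): a valid handle is the expected prefix
+# followed only by digits.
+#
+#	is_valid_widget db0 db		;# => TRUE
+#	is_valid_widget db0c db		;# => db0c (handle returned on failure)
+#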
+proc is_valid_db { db } {
+ return [is_valid_widget $db db]
+}
+
+proc is_valid_env { env } {
+ return [is_valid_widget $env env]
+}
+
+proc is_valid_cursor { dbc db } {
+ return [is_valid_widget $dbc $db.c]
+}
+
+proc is_valid_lock { lock env } {
+ return [is_valid_widget $lock $env.lock]
+}
+
+proc is_valid_logc { logc env } {
+ return [is_valid_widget $logc $env.logc]
+}
+
+proc is_valid_mpool { mpool env } {
+ return [is_valid_widget $mpool $env.mp]
+}
+
+proc is_valid_page { page mpool } {
+ return [is_valid_widget $page $mpool.pg]
+}
+
+proc is_valid_txn { txn env } {
+ return [is_valid_widget $txn $env.txn]
+}
+
+proc is_valid_mutex { m env } {
+ return [is_valid_widget $m $env.mutex]
+}
+
+proc is_valid_locker {l } {
+ return [is_valid_widget $l ""]
+}
+
+proc send_cmd { fd cmd {sleep 2}} {
+ source ./include.tcl
+
+ puts $fd "if \[catch {set v \[$cmd\] ; puts \$v} ret\] { \
+ puts \"FAIL: \$ret\" \
+ }"
+ puts $fd "flush stdout"
+ flush $fd
+ berkdb debug_check
+ tclsleep $sleep
+
+ set r [rcv_result $fd]
+ return $r
+}
+
+proc rcv_result { fd } {
+ set r [gets $fd result]
+ error_check_bad remote_read $r -1
+
+ return $result
+}
+
+proc send_timed_cmd { fd rcv_too cmd } {
+ set c1 "set start \[timestamp -r\]; "
+ set c2 "puts \[expr \[timestamp -r\] - \$start\]"
+ set full_cmd [concat $c1 $cmd ";" $c2]
+
+ puts $fd $full_cmd
+ puts $fd "flush stdout"
+ flush $fd
+ return 0
+}
+
+#
+# The rationale behind why we have *two* "data padding" routines is outlined
+# below:
+#
+# Both pad_data and chop_data truncate data that is too long. However,
+# pad_data also adds the pad character to pad data out to the fixed length
+# record length.
+#
+# Which routine you call does not depend on the length of the data you're
+# using, but on whether you're doing a put or a get. When we do a put, we
+# have to make sure the data isn't longer than the size of a record because
+# otherwise we'll get an error (use chop_data). When we do a get, we want to
+# check that db padded everything correctly (use pad_data on the value against
+# which we are comparing).
+#
+# We don't want to just use the pad_data routine for both purposes, because
+# we want to be able to test whether or not db is padding correctly. For
+# example, the queue access method had a bug where when a record was
+# overwritten (*not* a partial put), only the first n bytes of the new entry
+# were written, n being the new entry's (unpadded) length. So, if we did
+# a put with key,value pair (1, "abcdef") and then a put (1, "z"), we'd get
+# back (1,"zbcdef"). If we had used pad_data instead of chop_data, we would
+# have gotten the "correct" result, but we wouldn't have found this bug.
+proc chop_data {method data} {
+ global fixed_len
+
+ if {[is_fixed_length $method] == 1 && \
+ [string length $data] > $fixed_len} {
+ return [eval {binary format a$fixed_len $data}]
+ } else {
+ return $data
+ }
+}
+
+proc pad_data {method data} {
+ global fixed_len
+
+ if {[is_fixed_length $method] == 1} {
+ return [eval {binary format a$fixed_len $data}]
+ } else {
+ return $data
+ }
+}
+
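+# Sketch (illustrative; assumes fixed_len is 20 and a fixed-length
+# method such as -queue): chop_data truncates data before a put, and
+# pad_data NUL-pads the expected value before comparing a get result.
+#
+#	set fixed_len 20
+#	chop_data -queue [repeat x 30]	;# => 20 x's
+#	pad_data -queue abc		;# => "abc" plus 17 NUL pad bytes
+#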
+proc make_fixed_length {method data {pad 0}} {
+ global fixed_len
+ global fixed_pad
+
+ if {[is_fixed_length $method] == 1} {
+ if {[string length $data] > $fixed_len } {
+ error_check_bad make_fixed_len:TOO_LONG 1 1
+ }
+ while { [string length $data] < $fixed_len } {
+ set data [format $data%c $fixed_pad]
+ }
+ }
+ return $data
+}
+
+# Create a global transaction ID by padding the given data out to the
+# full 128-byte XA gid size with trailing zeroes.
+proc make_gid {data} {
+	while { [string length $data] < 128 } {
+		set data [format ${data}0]
+	}
+	return $data
+}
+
+# shift data for partial
+# pad with fixed pad (which is NULL)
+proc partial_shift { data offset direction} {
+ global fixed_len
+
+ set len [expr $fixed_len - 1]
+
+ if { [string compare $direction "right"] == 0 } {
+ for { set i 1} { $i <= $offset } {incr i} {
+ set data [binary format x1a$len $data]
+ }
+ } elseif { [string compare $direction "left"] == 0 } {
+ for { set i 1} { $i <= $offset } {incr i} {
+ set data [string range $data 1 end]
+ set data [binary format a$len $data]
+ }
+ }
+ return $data
+}
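+
+# Illustrative example (not in the original source): with fixed_len 4,
+# [partial_shift "abc" 1 right] yields "\0abc", and
+# [partial_shift "abc" 1 left] yields "bc\0" (length fixed_len - 1).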
+
+# string compare does not always work to compare
+# this data, nor does expr (==)
+# specialized routine for comparison
+# (for use in fixed len recno and q)
+proc binary_compare { data1 data2 } {
+ if { [string length $data1] != [string length $data2] || \
+ [string compare -length \
+ [string length $data1] $data1 $data2] != 0 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc convert_method { method } {
+ switch -- $method {
+ -btree -
+ -dbtree -
+ dbtree -
+ -ddbtree -
+ ddbtree -
+ -rbtree -
+ BTREE -
+ DB_BTREE -
+ DB_RBTREE -
+ RBTREE -
+ bt -
+ btree -
+ db_btree -
+ db_rbtree -
+ rbt -
+ rbtree { return "-btree" }
+
+ -dhash -
+ -ddhash -
+ -hash -
+ DB_HASH -
+ HASH -
+ dhash -
+ ddhash -
+ db_hash -
+ h -
+ hash { return "-hash" }
+
+ -queue -
+ DB_QUEUE -
+ QUEUE -
+ db_queue -
+ q -
+ qam -
+ queue { return "-queue" }
+
+ -queueextent -
+ QUEUEEXTENT -
+ qe -
+ qamext -
+ -queueext -
+ queueextent -
+ queueext { return "-queue" }
+
+ -frecno -
+ -recno -
+ -rrecno -
+ DB_FRECNO -
+ DB_RECNO -
+ DB_RRECNO -
+ FRECNO -
+ RECNO -
+ RRECNO -
+ db_frecno -
+ db_recno -
+ db_rrecno -
+ frec -
+ frecno -
+ rec -
+ recno -
+ rrec -
+ rrecno { return "-recno" }
+
+ default { error "FAIL:[timestamp] $method: unknown method" }
+ }
+}
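+
+# For example (illustrative): [convert_method rbt], [convert_method
+# DB_RBTREE] and [convert_method -btree] all return "-btree", so tests
+# may name a method by any of the aliases above and still open the
+# database consistently.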
+
+proc split_encargs { largs encargsp } {
+ global encrypt
+ upvar $encargsp e
+ set eindex [lsearch $largs "-encrypta*"]
+ if { $eindex == -1 } {
+ set e ""
+ set newl $largs
+ } else {
+ set eend [expr $eindex + 1]
+ set e [lrange $largs $eindex $eend]
+ set newl [lreplace $largs $eindex $eend "-encrypt"]
+ }
+ return $newl
+}
+
+proc convert_encrypt { largs } {
+ global encrypt
+ global old_encrypt
+
+ set old_encrypt $encrypt
+ set encrypt 0
+ if { [lsearch $largs "-encrypt*"] != -1 } {
+ set encrypt 1
+ }
+}
+
+# If recno-with-renumbering or btree-with-renumbering is specified, then
+# fix the arguments to specify the DB_RENUMBER/DB_RECNUM option for the
+# -flags argument.
+proc convert_args { method {largs ""} } {
+ global fixed_len
+ global fixed_pad
+ global gen_upgrade
+ global upgrade_be
+ source ./include.tcl
+
+ if { [string first - $largs] == -1 &&\
+ [string compare $largs ""] != 0 &&\
+ [string compare $largs {{}}] != 0 } {
+ set errstring "args must contain a hyphen; does this test\
+ have no numeric args?"
+ puts "FAIL:[timestamp] $errstring (largs was $largs)"
+ return -code return
+ }
+
+ convert_encrypt $largs
+ if { $gen_upgrade == 1 && $upgrade_be == 1 } {
+ append largs " -lorder 4321 "
+ } elseif { $gen_upgrade == 1 && $upgrade_be != 1 } {
+ append largs " -lorder 1234 "
+ }
+
+ if { [is_rrecno $method] == 1 } {
+ append largs " -renumber "
+ } elseif { [is_rbtree $method] == 1 } {
+ append largs " -recnum "
+ } elseif { [is_dbtree $method] == 1 } {
+ append largs " -dup "
+ } elseif { [is_ddbtree $method] == 1 } {
+ append largs " -dup "
+ append largs " -dupsort "
+ } elseif { [is_dhash $method] == 1 } {
+ append largs " -dup "
+ } elseif { [is_ddhash $method] == 1 } {
+ append largs " -dup "
+ append largs " -dupsort "
+ } elseif { [is_queueext $method] == 1 } {
+ append largs " -extent 2 "
+ }
+
+ if {[is_fixed_length $method] == 1} {
+ append largs " -len $fixed_len -pad $fixed_pad "
+ }
+ return $largs
+}
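+
+# For example (illustrative): [convert_args -rrecno] returns a string
+# containing " -renumber ", and for fixed-length methods such as
+# -frecno it also appends " -len $fixed_len -pad $fixed_pad ".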
+
+proc is_btree { method } {
+ set names { -btree BTREE DB_BTREE bt btree }
+ if { [lsearch $names $method] >= 0 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_dbtree { method } {
+ set names { -dbtree dbtree }
+ if { [lsearch $names $method] >= 0 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_ddbtree { method } {
+ set names { -ddbtree ddbtree }
+ if { [lsearch $names $method] >= 0 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_rbtree { method } {
+ set names { -rbtree rbtree RBTREE db_rbtree DB_RBTREE rbt }
+ if { [lsearch $names $method] >= 0 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_recno { method } {
+ set names { -recno DB_RECNO RECNO db_recno rec recno}
+ if { [lsearch $names $method] >= 0 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_rrecno { method } {
+ set names { -rrecno rrecno RRECNO db_rrecno DB_RRECNO rrec }
+ if { [lsearch $names $method] >= 0 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_frecno { method } {
+ set names { -frecno frecno frec FRECNO db_frecno DB_FRECNO}
+ if { [lsearch $names $method] >= 0 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_hash { method } {
+ set names { -hash DB_HASH HASH db_hash h hash }
+ if { [lsearch $names $method] >= 0 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_dhash { method } {
+ set names { -dhash dhash }
+ if { [lsearch $names $method] >= 0 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_ddhash { method } {
+ set names { -ddhash ddhash }
+ if { [lsearch $names $method] >= 0 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_queue { method } {
+ if { [is_queueext $method] == 1 } {
+ return 1
+ }
+
+ set names { -queue DB_QUEUE QUEUE db_queue q queue qam }
+ if { [lsearch $names $method] >= 0 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_queueext { method } {
+ set names { -queueextent queueextent QUEUEEXTENT qe qamext \
+ queueext -queueext }
+ if { [lsearch $names $method] >= 0 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_record_based { method } {
+ if { [is_recno $method] || [is_frecno $method] ||
+ [is_rrecno $method] || [is_queue $method] } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_fixed_length { method } {
+ if { [is_queue $method] || [is_frecno $method] } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+# Sort lines in file $in and write results to file $out.
+# This is a more portable alternative to execing the sort command,
+# which has assorted issues on NT [#1576].
+# The addition of a "-n" argument will sort numerically.
+proc filesort { in out { arg "" } } {
+ set i [open $in r]
+
+ set ilines {}
+ while { [gets $i line] >= 0 } {
+ lappend ilines $line
+ }
+
+ if { [string compare $arg "-n"] == 0 } {
+ set olines [lsort -integer $ilines]
+ } else {
+ set olines [lsort $ilines]
+ }
+
+ close $i
+
+ set o [open $out w]
+ foreach line $olines {
+ puts $o $line
+ }
+
+ close $o
+}
+
+# Print lines up to the nth line of infile out to outfile, inclusive.
+# The optional beg argument tells us where to start.
+proc filehead { n infile outfile { beg 0 } } {
+ set in [open $infile r]
+ set out [open $outfile w]
+
+ # Sed uses 1-based line numbers, and so we do too.
+ for { set i 1 } { $i < $beg } { incr i } {
+ if { [gets $in junk] < 0 } {
+ break
+ }
+ }
+
+ for { } { $i <= $n } { incr i } {
+ if { [gets $in line] < 0 } {
+ break
+ }
+ puts $out $line
+ }
+
+ close $in
+ close $out
+}
+
+# Remove file (this replaces $RM).
+# Usage: fileremove filenames =~ rm; fileremove -f filenames =~ rm -rf.
+proc fileremove { args } {
+ set forceflag ""
+ foreach a $args {
+ if { [string first - $a] == 0 } {
+ # It's a flag. Better be f.
+ if { [string first f $a] != 1 } {
+ return -code error "bad flag to fileremove"
+ } else {
+ set forceflag "-force"
+ }
+ } else {
+ eval {file delete $forceflag $a}
+ }
+ }
+}
+
+proc findfail { args } {
+ foreach a $args {
+ if { [file exists $a] == 0 } {
+ continue
+ }
+ set f [open $a r]
+ while { [gets $f line] >= 0 } {
+ if { [string first FAIL $line] == 0 } {
+ close $f
+ return 1
+ }
+ }
+ close $f
+ }
+ return 0
+}
+
+# Sleep for s seconds.
+proc tclsleep { s } {
+ # On Windows, the system time-of-day clock may update as much
+ # as 55 ms late due to interrupt timing. Don't take any
+ # chances; sleep extra-long so that when tclsleep 1 returns,
+ # it's guaranteed to be a new second.
+ after [expr $s * 1000 + 56]
+}
+
+# Kill a process.
+proc tclkill { id } {
+ source ./include.tcl
+
+ while { [ catch {exec $KILL -0 $id} ] == 0 } {
+ catch {exec $KILL -9 $id}
+ tclsleep 5
+ }
+}
+
+# Compare two files, a la diff. Returns 1 if non-identical, 0 if identical.
+proc filecmp { file_a file_b } {
+ set fda [open $file_a r]
+ set fdb [open $file_b r]
+
+ set nra 0
+ set nrb 0
+
+ # The gets can't be in the while condition because we'll
+ # get short-circuit evaluated.
+ while { $nra >= 0 && $nrb >= 0 } {
+ set nra [gets $fda aline]
+ set nrb [gets $fdb bline]
+
+ if { $nra != $nrb || [string compare $aline $bline] != 0} {
+ close $fda
+ close $fdb
+ return 1
+ }
+ }
+
+ close $fda
+ close $fdb
+ return 0
+}
+
+# Given two SORTED files, one of which is a complete superset of the
+# other, extract the portions unique to the superset and write them to
+# the given outfile.
+proc fileextract { superset subset outfile } {
+ set sup [open $superset r]
+ set sub [open $subset r]
+ set outf [open $outfile w]
+
+ # The gets can't be in the while condition because we'll
+ # get short-circuit evaluated.
+ set nrp [gets $sup pline]
+ set nrb [gets $sub bline]
+ while { $nrp >= 0 } {
+ if { $nrp != $nrb || [string compare $pline $bline] != 0} {
+ puts $outf $pline
+ } else {
+ set nrb [gets $sub bline]
+ }
+ set nrp [gets $sup pline]
+ }
+
+ close $sup
+ close $sub
+ close $outf
+ return 0
+}
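+
+# Illustrative example: if the superset file holds the sorted lines
+# {a b c d} and the subset holds {b d}, the outfile receives {a c}.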
+
+# Verify all .db files in the specified directory.
+proc verify_dir { {directory $testdir} \
+ { pref "" } { noredo 0 } { quiet 0 } { nodump 0 } { cachesize 0 } } {
+ global encrypt
+ global passwd
+
+ # If we're doing database verification between tests, we don't
+ # want to do verification twice without an intervening cleanup--some
+ # test was skipped. Always verify by default (noredo == 0) so
+ # that explicit calls to verify_dir during tests don't require
+ # cleanup commands.
+ if { $noredo == 1 } {
+ if { [file exists $directory/NOREVERIFY] == 1 } {
+ if { $quiet == 0 } {
+ puts "Skipping verification."
+ }
+ return
+ }
+ set f [open $directory/NOREVERIFY w]
+ close $f
+ }
+
+ if { [catch {glob $directory/*.db} dbs] != 0 } {
+ # No files matched
+ return
+ }
+ if { [file exists /dev/stderr] == 1 } {
+ set errfilearg "-errfile /dev/stderr "
+ } else {
+ set errfilearg ""
+ }
+ set errpfxarg {-errpfx "FAIL: verify" }
+ set errarg $errfilearg$errpfxarg
+ set ret 0
+
+ # Open an env, so that we have a large enough cache. Pick
+ # a fairly generous default if we haven't specified something else.
+
+ if { $cachesize == 0 } {
+ set cachesize [expr 1024 * 1024]
+ }
+ set encarg ""
+ if { $encrypt != 0 } {
+ set encarg "-encryptaes $passwd"
+ }
+
+ set env [eval {berkdb_env -create -private} $encarg \
+ {-cachesize [list 0 $cachesize 0]}]
+ set earg " -env $env $errarg "
+
+ foreach db $dbs {
+ if { [catch {eval {berkdb dbverify} $earg $db} res] != 0 } {
+ puts $res
+ puts "FAIL:[timestamp] Verification of $db failed."
+ set ret 1
+ continue
+ } else {
+ error_check_good verify:$db $res 0
+ if { $quiet == 0 } {
+ puts "${pref}Verification of $db succeeded."
+ }
+ }
+
+ # Skip the dump if it's dangerous to do it.
+ if { $nodump == 0 } {
+ if { [catch {eval dumploadtest $db} res] != 0 } {
+ puts $res
+ puts "FAIL:[timestamp] Dump/load of $db failed."
+ set ret 1
+ continue
+ } else {
+ error_check_good dumpload:$db $res 0
+ if { $quiet == 0 } {
+ puts \
+ "${pref}Dump/load of $db succeeded."
+ }
+ }
+ }
+ }
+
+ error_check_good vrfyenv_close [$env close] 0
+
+ return $ret
+}
+
+# Is the database handle in $db a master database containing subdbs?
+proc check_for_subdbs { db } {
+ set stat [$db stat]
+ for { set i 0 } { [string length [lindex $stat $i]] > 0 } { incr i } {
+ set elem [lindex $stat $i]
+ if { [string compare [lindex $elem 0] Flags] == 0 } {
+ # This is the list of flags; look for
+ # "subdatabases".
+ if { [is_substr [lindex $elem 1] subdatabases] } {
+ return 1
+ }
+ }
+ }
+ return 0
+}
+
+proc dumploadtest { db {subdb ""} } {
+ global util_path
+ global encrypt
+ global passwd
+
+ set newdbname $db-dumpload.db
+
+ # Open original database, or subdb if we have one.
+ set dbarg ""
+ set utilflag ""
+ if { $encrypt != 0 } {
+ set dbarg "-encryptany $passwd"
+ set utilflag "-P $passwd"
+ }
+ set max_size [expr 15 * 1024]
+ if { [string length $subdb] == 0 } {
+ set olddb [eval {berkdb_open -rdonly} $dbarg $db]
+ error_check_good olddb($db) [is_valid_db $olddb] TRUE
+
+ if { [check_for_subdbs $olddb] } {
+ # If $db has subdatabases, dumploadtest each one
+ # separately.
+ set oc [$olddb cursor]
+ error_check_good orig_cursor($db) \
+ [is_valid_cursor $oc $olddb] TRUE
+
+ for { set dbt [$oc get -first] } \
+ { [llength $dbt] > 0 } \
+ { set dbt [$oc get -next] } {
+ set subdb [lindex [lindex $dbt 0] 0]
+
+				# Skip any subdatabase whose name is
+				# over this size. The problem is that
+				# when we dump/load it, names that are
+				# too long result in E2BIG errors
+				# because the arguments to db_dump are
+				# too long. 64K seems to be the limit
+				# (on FreeBSD); stay well below it just
+				# to be safe.
+ if {[string length $subdb] < $max_size && \
+ [string length $subdb] != 0} {
+ dumploadtest $db $subdb
+ }
+ }
+ error_check_good oldcclose [$oc close] 0
+ error_check_good olddbclose [$olddb close] 0
+ return 0
+ }
+ # No subdatabase
+ set have_subdb 0
+ } else {
+ set olddb [eval {berkdb_open -rdonly} $dbarg {$db $subdb}]
+ error_check_good olddb($db) [is_valid_db $olddb] TRUE
+
+ set have_subdb 1
+ }
+
+ # Do a db_dump test. Dump/load each file.
+ if { $have_subdb } {
+ set rval [catch {eval {exec $util_path/db_dump} $utilflag -k \
+ -s {$subdb} $db | \
+ $util_path/db_load $utilflag $newdbname} res]
+ } else {
+ set rval [catch {eval {exec $util_path/db_dump} $utilflag -k \
+ $db | $util_path/db_load $utilflag $newdbname} res]
+ }
+ error_check_good db_dump/db_load($db:$res) $rval 0
+
+ # Now open new database.
+ set newdb [eval {berkdb_open -rdonly} $dbarg $newdbname]
+ error_check_good newdb($db) [is_valid_db $newdb] TRUE
+
+ # Walk through olddb and newdb and make sure their contents
+ # are identical.
+ set oc [$olddb cursor]
+ set nc [$newdb cursor]
+ error_check_good orig_cursor($db) \
+ [is_valid_cursor $oc $olddb] TRUE
+ error_check_good new_cursor($db) \
+ [is_valid_cursor $nc $newdb] TRUE
+
+ for { set odbt [$oc get -first] } { [llength $odbt] > 0 } \
+ { set odbt [$oc get -next] } {
+ set ndbt [$nc get -get_both \
+ [lindex [lindex $odbt 0] 0] [lindex [lindex $odbt 0] 1]]
+ error_check_good db_compare($db/$newdbname) $ndbt $odbt
+ }
+
+ for { set ndbt [$nc get -first] } { [llength $ndbt] > 0 } \
+ { set ndbt [$nc get -next] } {
+ set odbt [$oc get -get_both \
+ [lindex [lindex $ndbt 0] 0] [lindex [lindex $ndbt 0] 1]]
+ error_check_good db_compare_back($db) $odbt $ndbt
+ }
+
+ error_check_good orig_cursor_close($db) [$oc close] 0
+ error_check_good new_cursor_close($db) [$nc close] 0
+
+ error_check_good orig_db_close($db) [$olddb close] 0
+ error_check_good new_db_close($db) [$newdb close] 0
+
+ eval berkdb dbremove $dbarg $newdbname
+
+ return 0
+}
+
+# Generate randomly ordered, guaranteed-unique four-character strings that can
+# be used to differentiate duplicates without creating duplicate duplicates
+# (test031 & test032). randstring_init must be called before the first call
+# to randstring; it initializes things for up to $i distinct strings, and
+# randstring returns the next string.
+proc randstring_init { i } {
+ global rs_int_list alphabet
+
+ # Fail if we can't generate sufficient unique strings.
+ if { $i > [expr 26 * 26 * 26 * 26] } {
+ set errstring\
+ "Duplicate set too large for random string generator"
+ puts "FAIL:[timestamp] $errstring"
+ return -code return $errstring
+ }
+
+ set rs_int_list {}
+
+ # generate alphabet array
+ for { set j 0 } { $j < 26 } { incr j } {
+ set a($j) [string index $alphabet $j]
+ }
+
+ # Generate a list with $i elements, { aaaa, aaab, ... aaaz, aaba ...}
+ for { set d1 0 ; set j 0 } { $d1 < 26 && $j < $i } { incr d1 } {
+ for { set d2 0 } { $d2 < 26 && $j < $i } { incr d2 } {
+ for { set d3 0 } { $d3 < 26 && $j < $i } { incr d3 } {
+ for { set d4 0 } { $d4 < 26 && $j < $i } \
+ { incr d4 } {
+ lappend rs_int_list \
+ $a($d1)$a($d2)$a($d3)$a($d4)
+ incr j
+ }
+ }
+ }
+ }
+
+ # Randomize the list.
+ set rs_int_list [randomize_list $rs_int_list]
+}
+
+# Randomize a list. Returns a randomly-reordered copy of l.
+proc randomize_list { l } {
+ set i [llength $l]
+
+ for { set j 0 } { $j < $i } { incr j } {
+ # Pick a random element from $j to the end
+ set k [berkdb random_int $j [expr $i - 1]]
+
+ # Swap it with element $j
+ set t1 [lindex $l $j]
+ set t2 [lindex $l $k]
+
+ set l [lreplace $l $j $j $t2]
+ set l [lreplace $l $k $k $t1]
+ }
+
+ return $l
+}
+
+proc randstring {} {
+ global rs_int_list
+
+ if { [info exists rs_int_list] == 0 || [llength $rs_int_list] == 0 } {
+ set errstring "randstring uninitialized or used too often"
+ puts "FAIL:[timestamp] $errstring"
+ return -code return $errstring
+ }
+
+ set item [lindex $rs_int_list 0]
+ set rs_int_list [lreplace $rs_int_list 0 0]
+
+ return $item
+}
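+
+# Sketch of intended use (not part of the original suite); the count of
+# 10 is hypothetical.
+proc randstring_example {} {
+	set out {}
+	# Initialize for up to 10 distinct strings, then draw them all.
+	randstring_init 10
+	for { set i 0 } { $i < 10 } { incr i } {
+		lappend out [randstring]
+	}
+	return $out
+}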
+
+# Takes a variable-length arg list and returns a two-element list: the
+# non-flag arguments, followed by a list of each single-character flag
+# found in the hyphenated arguments.
+proc extractflags { args } {
+ set inflags 1
+ set flags {}
+ while { $inflags == 1 } {
+ set curarg [lindex $args 0]
+ if { [string first "-" $curarg] == 0 } {
+ set i 1
+ while {[string length [set f \
+ [string index $curarg $i]]] > 0 } {
+ incr i
+ if { [string compare $f "-"] == 0 } {
+ set inflags 0
+ break
+ } else {
+ lappend flags $f
+ }
+ }
+ set args [lrange $args 1 end]
+ } else {
+ set inflags 0
+ }
+ }
+ return [list $args $flags]
+}
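+
+# For example (illustrative): [extractflags -ab foo bar] returns
+# {{foo bar} {a b}}; a "-" appearing inside a flag word stops flag
+# processing.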
+
+# Wrapper for berkdb open, used throughout the test suite so that we can
+# set an errfile/errpfx as appropriate.
+proc berkdb_open { args } {
+ global is_envmethod
+
+ if { [info exists is_envmethod] == 0 } {
+ set is_envmethod 0
+ }
+
+ set errargs {}
+ if { $is_envmethod == 0 && [file exists /dev/stderr] == 1 } {
+ append errargs " -errfile /dev/stderr "
+ append errargs " -errpfx \\F\\A\\I\\L"
+ }
+
+ eval {berkdb open} $errargs $args
+}
+
+# Version without errpfx/errfile, used when we're expecting a failure.
+proc berkdb_open_noerr { args } {
+ eval {berkdb open} $args
+}
+
+# Wrapper for berkdb env, used throughout the test suite so that we can
+# set an errfile/errpfx as appropriate.
+proc berkdb_env { args } {
+ global is_envmethod
+
+ if { [info exists is_envmethod] == 0 } {
+ set is_envmethod 0
+ }
+
+ set errargs {}
+ if { $is_envmethod == 0 && [file exists /dev/stderr] == 1 } {
+ append errargs " -errfile /dev/stderr "
+ append errargs " -errpfx \\F\\A\\I\\L"
+ }
+
+ eval {berkdb env} $errargs $args
+}
+
+# Version without errpfx/errfile, used when we're expecting a failure.
+proc berkdb_env_noerr { args } {
+ eval {berkdb env} $args
+}
+
+proc check_handles { {outf stdout} } {
+ global ohandles
+
+ set handles [berkdb handles]
+ if {[llength $handles] != [llength $ohandles]} {
+ puts $outf "WARNING: Open handles during cleanup: $handles"
+ }
+ set ohandles $handles
+}
+
+proc open_handles { } {
+ return [llength [berkdb handles]]
+}
+
+proc move_file_extent { dir dbfile tag op } {
+ set curfiles [get_extfiles $dir $dbfile ""]
+ set tagfiles [get_extfiles $dir $dbfile $tag]
+ #
+ # We want to copy or rename only those that have been saved,
+ # so delete all the current extent files so that we don't
+ # end up with extra ones we didn't restore from our saved ones.
+ foreach extfile $curfiles {
+ file delete -force $extfile
+ }
+ foreach extfile $tagfiles {
+ set i [string last "." $extfile]
+ incr i
+ set extnum [string range $extfile $i end]
+ set dbq [make_ext_filename $dir $dbfile $extnum]
+ #
+ # We can either copy or rename
+ #
+ file $op -force $extfile $dbq
+ }
+}
+
+proc copy_extent_file { dir dbfile tag { op copy } } {
+ set files [get_extfiles $dir $dbfile ""]
+ foreach extfile $files {
+ set i [string last "." $extfile]
+ incr i
+ set extnum [string range $extfile $i end]
+ file $op -force $extfile $dir/__dbq.$dbfile.$tag.$extnum
+ }
+}
+
+proc get_extfiles { dir dbfile tag } {
+ if { $tag == "" } {
+ set filepat $dir/__dbq.$dbfile.\[0-9\]*
+ } else {
+ set filepat $dir/__dbq.$dbfile.$tag.\[0-9\]*
+ }
+ return [glob -nocomplain -- $filepat]
+}
+
+proc make_ext_filename { dir dbfile extnum } {
+ return $dir/__dbq.$dbfile.$extnum
+}
+
+# All pids for Windows 9X are negative values. When we want to have
+# unsigned int values, unique to the process, we'll take the absolute
+# value of the pid. This avoids unsigned/signed mistakes, yet
+# guarantees uniqueness, since each system has pids that are all
+# either positive or negative.
+#
+proc sanitized_pid { } {
+ set mypid [pid]
+ if { $mypid < 0 } {
+ set mypid [expr - $mypid]
+ }
+ puts "PID: [pid] $mypid\n"
+ return $mypid
+}
+
+#
+# Extract the page size field from a stat record. Return -1 if
+# none is found.
+#
+proc get_pagesize { stat } {
+ foreach field $stat {
+ set title [lindex $field 0]
+ if {[string compare $title "Page size"] == 0} {
+ return [lindex $field 1]
+ }
+ }
+ return -1
+}
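+
+# Typical use (illustrative; $db is any open database handle):
+#	set pgsz [get_pagesize [$db stat]]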
+
+# Get a globbed list of source files and executables to use as large
+# data items in overflow page tests.
+proc get_file_list { {small 0} } {
+ global is_windows_test
+ global is_qnx_test
+ global src_root
+
+ if { $is_qnx_test } {
+ set small 1
+ }
+ if { $small && $is_windows_test } {
+ return [glob $src_root/*/*.c */env*.obj]
+ } elseif { $small } {
+ return [glob $src_root/*/*.c ./env*.o]
+ } elseif { $is_windows_test } {
+ return \
+ [glob $src_root/*/*.c */*.obj */libdb??.dll */libdb??d.dll]
+ } else {
+ return [glob $src_root/*/*.c ./*.o ./.libs/libdb-?.?.s?]
+ }
+}
+
+proc is_cdbenv { env } {
+ set sys [$env attributes]
+ if { [lsearch $sys -cdb] != -1 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_lockenv { env } {
+ set sys [$env attributes]
+ if { [lsearch $sys -lock] != -1 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_logenv { env } {
+ set sys [$env attributes]
+ if { [lsearch $sys -log] != -1 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_mpoolenv { env } {
+ set sys [$env attributes]
+ if { [lsearch $sys -mpool] != -1 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_rpcenv { env } {
+ set sys [$env attributes]
+ if { [lsearch $sys -rpc] != -1 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_secenv { env } {
+ set sys [$env attributes]
+ if { [lsearch $sys -crypto] != -1 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc is_txnenv { env } {
+ set sys [$env attributes]
+ if { [lsearch $sys -txn] != -1 } {
+ return 1
+ } else {
+ return 0
+ }
+}
+
+proc get_home { env } {
+ set sys [$env attributes]
+ set h [lsearch $sys -home]
+ if { $h == -1 } {
+ return NULL
+ }
+ incr h
+ return [lindex $sys $h]
+}
+
+proc reduce_dups { nent ndp } {
+ upvar $nent nentries
+ upvar $ndp ndups
+
+ # If we are using a txnenv, assume it is using
+ # the default maximum number of locks, cut back
+ # so that we don't run out of locks. Reduce
+ # by 25% until we fit.
+ #
+ while { [expr $nentries * $ndups] > 5000 } {
+ set nentries [expr ($nentries / 4) * 3]
+ set ndups [expr ($ndups / 4) * 3]
+ }
+}
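+
+# Illustrative example: starting from nentries 1000 and ndups 10
+# (product 10000), one pass of the loop gives 750 and 6 (product 4500),
+# which fits under the 5000 cap.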
+
+proc getstats { statlist field } {
+ foreach pair $statlist {
+ set txt [lindex $pair 0]
+ if { [string equal $txt $field] == 1 } {
+ return [lindex $pair 1]
+ }
+ }
+ return -1
+}
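+
+# Typical use (illustrative):
+#	getstats [$env log_stat] {Times log written}
+# returns the counter's value, or -1 if the field is not present.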
+
+proc big_endian { } {
+ global tcl_platform
+ set e $tcl_platform(byteOrder)
+ if { [string compare $e littleEndian] == 0 } {
+ return 0
+ } elseif { [string compare $e bigEndian] == 0 } {
+ return 1
+ } else {
+ error "FAIL: Unknown endianness $e"
+ }
+}
diff --git a/storage/bdb/test/txn001.tcl b/storage/bdb/test/txn001.tcl
new file mode 100644
index 00000000000..406ef35751c
--- /dev/null
+++ b/storage/bdb/test/txn001.tcl
@@ -0,0 +1,116 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: txn001.tcl,v 11.35 2002/05/10 17:44:28 sue Exp $
+#
+
+# TEST txn001
+# TEST Begin, commit, abort testing.
+proc txn001 { {tnum "01"} { max 1024 } { ntxns 50 } } {
+ source ./include.tcl
+ global txn_curid
+ global txn_maxid
+
+ puts -nonewline "Txn0$tnum: Basic begin, commit, abort"
+
+ if { $tnum != "01"} {
+ puts " (with ID wrap)"
+ } else {
+ puts ""
+ }
+
+ # Open environment
+ env_cleanup $testdir
+
+ set env [eval {berkdb_env -create -mode 0644 -txn \
+ -txn_max $max -home $testdir}]
+	error_check_good env_open [is_valid_env $env] TRUE
+ error_check_good txn_id_set \
+ [ $env txn_id_set $txn_curid $txn_maxid ] 0
+ txn001_suba $ntxns $env $tnum
+ txn001_subb $ntxns $env $tnum
+ txn001_subc $ntxns $env $tnum
+ # Close and unlink the file
+ error_check_good env_close:$env [$env close] 0
+}
+
+proc txn001_suba { ntxns env tnum } {
+ source ./include.tcl
+
+ # We will create a bunch of transactions and commit them.
+ set txn_list {}
+ set tid_list {}
+ puts "\tTxn0$tnum.a: Beginning/Committing $ntxns Transactions in $env"
+ for { set i 0 } { $i < $ntxns } { incr i } {
+ set txn [$env txn]
+ error_check_good txn_begin [is_valid_txn $txn $env] TRUE
+
+ lappend txn_list $txn
+
+ set tid [$txn id]
+ error_check_good tid_check [lsearch $tid_list $tid] -1
+
+ lappend tid_list $tid
+ }
+
+ # Now commit them all
+ foreach t $txn_list {
+ error_check_good txn_commit:$t [$t commit] 0
+ }
+}
+
+proc txn001_subb { ntxns env tnum } {
+ # We will create a bunch of transactions and abort them.
+ set txn_list {}
+ set tid_list {}
+ puts "\tTxn0$tnum.b: Beginning/Aborting Transactions"
+ for { set i 0 } { $i < $ntxns } { incr i } {
+ set txn [$env txn]
+ error_check_good txn_begin [is_valid_txn $txn $env] TRUE
+
+ lappend txn_list $txn
+
+ set tid [$txn id]
+ error_check_good tid_check [lsearch $tid_list $tid] -1
+
+ lappend tid_list $tid
+ }
+
+ # Now abort them all
+ foreach t $txn_list {
+ error_check_good txn_abort:$t [$t abort] 0
+ }
+}
+
+proc txn001_subc { ntxns env tnum } {
+	# We will create a bunch of transactions, prepare them, and
+	# commit them.
+ set txn_list {}
+ set tid_list {}
+	puts "\tTxn0$tnum.c: Beginning/Preparing/Committing Transactions"
+ for { set i 0 } { $i < $ntxns } { incr i } {
+ set txn [$env txn]
+ error_check_good txn_begin [is_valid_txn $txn $env] TRUE
+
+ lappend txn_list $txn
+
+ set tid [$txn id]
+ error_check_good tid_check [lsearch $tid_list $tid] -1
+
+ lappend tid_list $tid
+ }
+
+ # Now prepare them all
+ foreach t $txn_list {
+ error_check_good txn_prepare:$t \
+ [$t prepare [make_gid global:$t]] 0
+ }
+
+ # Now commit them all
+ foreach t $txn_list {
+ error_check_good txn_commit:$t [$t commit] 0
+ }
+}
+
diff --git a/storage/bdb/test/txn002.tcl b/storage/bdb/test/txn002.tcl
new file mode 100644
index 00000000000..5107472644d
--- /dev/null
+++ b/storage/bdb/test/txn002.tcl
@@ -0,0 +1,91 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: txn002.tcl,v 11.38 2002/05/10 17:44:29 sue Exp $
+#
+
+# TEST txn002
+# TEST Verify that read-only transactions do not write log records.
+proc txn002 { {tnum "02" } { max 1024 } { ntxns 50 } } {
+ source ./include.tcl
+ global txn_curid
+ global txn_maxid
+
+ puts -nonewline "Txn0$tnum: Read-only transaction test ($max) ($ntxns)"
+
+ if { $tnum != "02" } {
+ puts " (with ID wrap)"
+ } else {
+ puts ""
+ }
+
+ env_cleanup $testdir
+ set env [berkdb \
+ env -create -mode 0644 -txn -txn_max $max -home $testdir]
+ error_check_good dbenv [is_valid_env $env] TRUE
+ error_check_good txn_id_set \
+ [$env txn_id_set $txn_curid $txn_maxid ] 0
+
+ # Save the current bytes in the log.
+ set off_start [txn002_logoff $env]
+
+ # We will create a bunch of transactions and commit them.
+ set txn_list {}
+ set tid_list {}
+ puts "\tTxn0$tnum.a: Beginning/Committing Transactions"
+ for { set i 0 } { $i < $ntxns } { incr i } {
+ set txn [$env txn]
+ error_check_good txn_begin [is_valid_txn $txn $env] TRUE
+
+ lappend txn_list $txn
+
+ set tid [$txn id]
+ error_check_good tid_check [lsearch $tid_list $tid] -1
+
+ lappend tid_list $tid
+ }
+ foreach t $txn_list {
+ error_check_good txn_commit:$t [$t commit] 0
+ }
+
+ # Make sure we haven't written any new log records except
+ # potentially some recycle records if we were wrapping txnids.
+ set off_stop [txn002_logoff $env]
+ if { $off_stop != $off_start } {
+ txn002_recycle_only $testdir
+ }
+
+ error_check_good env_close [$env close] 0
+}
+
+proc txn002_logoff { env } {
+ set stat [$env log_stat]
+ foreach i $stat {
+ foreach {txt val} $i {break}
+ if { [string compare \
+ $txt {Current log file offset}] == 0 } {
+ return $val
+ }
+ }
+}
+
+# Make sure that the only log records found are txn_recycle records
+proc txn002_recycle_only { dir } {
+ global util_path
+
+ set tmpfile $dir/printlog.out
+ set stat [catch {exec $util_path/db_printlog -h $dir > $tmpfile} ret]
+ error_check_good db_printlog $stat 0
+
+ set f [open $tmpfile r]
+ while { [gets $f record] >= 0 } {
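+		# Each printlog line looks like "[file][offset]name: ...";
+		# capture the record name so we can check it below.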
+ set r [regexp {\[[^\]]*\]\[[^\]]*\]([^\:]*)\:} $record whl name]
+ if { $r == 1 } {
+ error_check_good record_type __txn_recycle $name
+ }
+ }
+ close $f
+ fileremove $tmpfile
+}
diff --git a/storage/bdb/test/txn003.tcl b/storage/bdb/test/txn003.tcl
new file mode 100644
index 00000000000..71e450cf9ce
--- /dev/null
+++ b/storage/bdb/test/txn003.tcl
@@ -0,0 +1,238 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: txn003.tcl,v 11.40 2002/09/05 17:23:08 sandstro Exp $
+#
+
+# TEST txn003
+# TEST Test abort/commit/prepare of txns with outstanding child txns.
+proc txn003 { {tnum "03"} } {
+ source ./include.tcl
+ global txn_curid
+ global txn_maxid
+
+ puts -nonewline "Txn0$tnum: Outstanding child transaction test"
+
+ if { $tnum != "03" } {
+ puts " (with ID wrap)"
+ } else {
+ puts ""
+ }
+ env_cleanup $testdir
+ set testfile txn003.db
+
+ set env_cmd "berkdb_env_noerr -create -txn -home $testdir"
+ set env [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $env] TRUE
+ error_check_good txn_id_set \
+ [$env txn_id_set $txn_curid $txn_maxid] 0
+
+ set oflags {-auto_commit -create -btree -mode 0644 -env $env $testfile}
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ #
+ # Put some data so that we can check commit or abort of child
+ #
+ set key 1
+ set origdata some_data
+ set newdata this_is_new_data
+ set newdata2 some_other_new_data
+
+ error_check_good db_put [$db put -auto_commit $key $origdata] 0
+ error_check_good dbclose [$db close] 0
+
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ txn003_check $db $key "Origdata" $origdata
+
+ puts "\tTxn0$tnum.a: Parent abort"
+ set parent [$env txn]
+ error_check_good txn_begin [is_valid_txn $parent $env] TRUE
+ set child [$env txn -parent $parent]
+ error_check_good txn_begin [is_valid_txn $child $env] TRUE
+ error_check_good db_put [$db put -txn $child $key $newdata] 0
+ error_check_good parent_abort [$parent abort] 0
+ txn003_check $db $key "parent_abort" $origdata
+ # Check child handle is invalid
+ set stat [catch {$child abort} ret]
+ error_check_good child_handle $stat 1
+ error_check_good child_h2 [is_substr $ret "invalid command name"] 1
+
+ puts "\tTxn0$tnum.b: Parent commit"
+ set parent [$env txn]
+ error_check_good txn_begin [is_valid_txn $parent $env] TRUE
+ set child [$env txn -parent $parent]
+ error_check_good txn_begin [is_valid_txn $child $env] TRUE
+ error_check_good db_put [$db put -txn $child $key $newdata] 0
+ error_check_good parent_commit [$parent commit] 0
+ txn003_check $db $key "parent_commit" $newdata
+ # Check child handle is invalid
+ set stat [catch {$child abort} ret]
+ error_check_good child_handle $stat 1
+ error_check_good child_h2 [is_substr $ret "invalid command name"] 1
+ error_check_good dbclose [$db close] 0
+ error_check_good env_close [$env close] 0
+
+ #
+ # Since the data check assumes what has come before, the 'commit'
+ # operation must be last.
+ #
+ set hdr "\tTxn0$tnum"
+ set rlist {
+ {begin ".c"}
+ {prepare ".d"}
+ {abort ".e"}
+ {commit ".f"}
+ }
+ set count 0
+ foreach pair $rlist {
+ incr count
+ set op [lindex $pair 0]
+ set msg [lindex $pair 1]
+ set msg $hdr$msg
+ txn003_body $env_cmd $testfile $testdir $key $newdata2 $msg $op
+ set env [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $env] TRUE
+
+ berkdb debug_check
+ set db [eval {berkdb_open} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+		#
+		# For prepare we end up aborting after we test what we
+		# need to, so set gooddata to the same value as for abort.
+ switch $op {
+ abort {
+ set gooddata $newdata
+ }
+ begin {
+ set gooddata $newdata
+ }
+ commit {
+ set gooddata $newdata2
+ }
+ prepare {
+ set gooddata $newdata
+ }
+ }
+ txn003_check $db $key "parent_$op" $gooddata
+ error_check_good dbclose [$db close] 0
+ error_check_good env_close [$env close] 0
+ }
+
+ # We can't do the attempted child discard on Windows
+ # because it will leave open files that can't be removed.
+ # Skip the remainder of the test for Windows.
+ if { $is_windows_test == 1 } {
+ puts "Skipping remainder of test for Windows"
+ return
+ }
+ puts "\tTxn0$tnum.g: Attempt child prepare"
+ set env [eval $env_cmd]
+ error_check_good dbenv [is_valid_env $env] TRUE
+ berkdb debug_check
+ set db [eval {berkdb_open_noerr} $oflags]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ set parent [$env txn]
+ error_check_good txn_begin [is_valid_txn $parent $env] TRUE
+ set child [$env txn -parent $parent]
+ error_check_good txn_begin [is_valid_txn $child $env] TRUE
+ error_check_good db_put [$db put -txn $child $key $newdata] 0
+ set gid [make_gid child_prepare:$child]
+ set stat [catch {$child prepare $gid} ret]
+ error_check_good child_prepare $stat 1
+ error_check_good child_prep_err [is_substr $ret "txn prepare"] 1
+
+ puts "\tTxn0$tnum.h: Attempt child discard"
+ set stat [catch {$child discard} ret]
+ error_check_good child_discard $stat 1
+
+ # We just panic'd the region, so the next operations will fail.
+ # No matter, we still have to clean up all the handles.
+
+ set stat [catch {$parent commit} ret]
+ error_check_good parent_commit $stat 1
+ error_check_good parent_commit:fail [is_substr $ret "DB_RUNRECOVERY"] 1
+
+ set stat [catch {$db close} ret]
+ error_check_good db_close $stat 1
+ error_check_good db_close:fail [is_substr $ret "DB_RUNRECOVERY"] 1
+
+ set stat [catch {$env close} ret]
+ error_check_good env_close $stat 1
+ error_check_good env_close:fail [is_substr $ret "DB_RUNRECOVERY"] 1
+}
+
+proc txn003_body { env_cmd testfile dir key newdata2 msg op } {
+ source ./include.tcl
+
+ berkdb debug_check
+ sentinel_init
+ set gidf $dir/gidfile
+ fileremove -f $gidf
+ set pidlist {}
+ puts "$msg.0: Executing child script to prepare txns"
+ berkdb debug_check
+ set p [exec $tclsh_path $test_path/wrap.tcl txnscript.tcl \
+ $testdir/txnout $env_cmd $testfile $gidf $key $newdata2 &]
+ lappend pidlist $p
+ watch_procs $pidlist 5
+ set f1 [open $testdir/txnout r]
+ set r [read $f1]
+ puts $r
+ close $f1
+ fileremove -f $testdir/txnout
+
+ berkdb debug_check
+ puts -nonewline "$msg.1: Running recovery ... "
+ flush stdout
+ berkdb debug_check
+ set env [eval $env_cmd "-recover"]
+ error_check_good dbenv-recover [is_valid_env $env] TRUE
+ puts "complete"
+
+ puts "$msg.2: getting txns from txn_recover"
+ set txnlist [$env txn_recover]
+ error_check_good txnlist_len [llength $txnlist] 1
+ set tpair [lindex $txnlist 0]
+
+ set gfd [open $gidf r]
+ set ret [gets $gfd parentgid]
+ close $gfd
+ set txn [lindex $tpair 0]
+ set gid [lindex $tpair 1]
+	if { $op == "begin" } {
+		puts "$msg.3: $op new txn"
+	} else {
+		puts "$msg.3: $op parent"
+	}
+ error_check_good gidcompare $gid $parentgid
+ if { $op == "prepare" } {
+ set gid [make_gid prepare_recover:$txn]
+ set stat [catch {$txn $op $gid} ret]
+ error_check_good prep_error $stat 1
+ error_check_good prep_err \
+ [is_substr $ret "transaction already prepared"] 1
+ error_check_good txn:prep_abort [$txn abort] 0
+ } elseif { $op == "begin" } {
+ set stat [catch {$env txn} ret]
+ error_check_good begin_error $stat 1
+ error_check_good begin_err \
+ [is_substr $ret "not yet committed transactions is incomplete"] 1
+ error_check_good txn:prep_abort [$txn abort] 0
+ } else {
+ error_check_good txn:$op [$txn $op] 0
+ }
+ error_check_good envclose [$env close] 0
+}
+
+proc txn003_check { db key msg gooddata } {
+ set kd [$db get $key]
+ set data [lindex [lindex $kd 0] 1]
+ error_check_good $msg $data $gooddata
+}
diff --git a/storage/bdb/test/txn004.tcl b/storage/bdb/test/txn004.tcl
new file mode 100644
index 00000000000..75e1b40043f
--- /dev/null
+++ b/storage/bdb/test/txn004.tcl
@@ -0,0 +1,62 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: txn004.tcl,v 11.39 2002/05/15 17:14:06 sandstro Exp $
+#
+
+# TEST txn004
+# TEST Test of wraparound txnids (txn001)
+proc txn004 { } {
+ source ./include.tcl
+ global txn_curid
+ global txn_maxid
+
+ set orig_curid $txn_curid
+ set orig_maxid $txn_maxid
+ puts "\tTxn004.1: wraparound txnids"
+ set txn_curid [expr $txn_maxid - 2]
+ txn001 "04.1"
+ puts "\tTxn004.2: closer wraparound txnids"
+ set txn_curid [expr $txn_maxid - 3]
+ set txn_maxid [expr $txn_maxid - 2]
+ txn001 "04.2"
+
+ puts "\tTxn004.3: test wraparound txnids"
+ txn_idwrap_check $testdir
+ set txn_curid $orig_curid
+ set txn_maxid $orig_maxid
+ return
+}
+
+proc txn_idwrap_check { testdir } {
+ global txn_curid
+ global txn_maxid
+
+ env_cleanup $testdir
+
+ # Open/create the txn region
+ set e [berkdb_env -create -txn -home $testdir]
+ error_check_good env_open [is_substr $e env] 1
+
+ set txn1 [$e txn]
+ error_check_good txn1 [is_valid_txn $txn1 $e] TRUE
+ error_check_good txn_id_set \
+ [$e txn_id_set [expr $txn_maxid - 1] $txn_maxid] 0
+
+ set txn2 [$e txn]
+ error_check_good txn2 [is_valid_txn $txn2 $e] TRUE
+
+ # txn3 will require a wraparound txnid
+ # XXX How can we test it has a wrapped id?
+ set txn3 [$e txn]
+ error_check_good wrap_txn3 [is_valid_txn $txn3 $e] TRUE
+
+ error_check_good free_txn1 [$txn1 commit] 0
+ error_check_good free_txn2 [$txn2 commit] 0
+ error_check_good free_txn3 [$txn3 commit] 0
+
+ error_check_good close [$e close] 0
+}
+
diff --git a/storage/bdb/test/txn005.tcl b/storage/bdb/test/txn005.tcl
new file mode 100644
index 00000000000..604f3ad7de4
--- /dev/null
+++ b/storage/bdb/test/txn005.tcl
@@ -0,0 +1,75 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: txn005.tcl,v 11.35 2002/08/08 15:38:14 bostic Exp $
+#
+
+# TEST txn005
+# TEST Test transaction ID wraparound and recovery.
+proc txn005 {} {
+ source ./include.tcl
+ global txn_curid
+ global txn_maxid
+
+ env_cleanup $testdir
+ puts "Txn005: Test transaction wraparound recovery"
+
+ # Open/create the txn region
+ puts "\tTxn005.a: Create environment"
+ set e [berkdb_env -create -txn -home $testdir]
+ error_check_good env_open [is_valid_env $e] TRUE
+
+ set txn1 [$e txn]
+ error_check_good txn1 [is_valid_txn $txn1 $e] TRUE
+
+ set db [berkdb_open -env $e -txn $txn1 -create -btree txn005.db]
+ error_check_good db [is_valid_db $db] TRUE
+ error_check_good txn1_commit [$txn1 commit] 0
+
+ puts "\tTxn005.b: Set txn ids"
+ error_check_good txn_id_set \
+ [$e txn_id_set [expr $txn_maxid - 1] $txn_maxid] 0
+
+ # txn2 and txn3 will require a wraparound txnid
+ set txn2 [$e txn]
+ error_check_good txn2 [is_valid_txn $txn2 $e] TRUE
+
+ error_check_good put [$db put -txn $txn2 "a" ""] 0
+ error_check_good txn2_commit [$txn2 commit] 0
+
+ error_check_good get_a [$db get "a"] "{a {}}"
+
+ error_check_good close [$db close] 0
+
+ set txn3 [$e txn]
+ error_check_good txn3 [is_valid_txn $txn3 $e] TRUE
+
+ set db [berkdb_open -env $e -txn $txn3 -btree txn005.db]
+ error_check_good db [is_valid_db $db] TRUE
+
+ error_check_good put2 [$db put -txn $txn3 "b" ""] 0
+ error_check_good sync [$db sync] 0
+ error_check_good txn3_abort [$txn3 abort] 0
+ error_check_good dbclose [$db close] 0
+ error_check_good eclose [$e close] 0
+
+ puts "\tTxn005.c: Run recovery"
+ set stat [catch {exec $util_path/db_recover -h $testdir -e -c} result]
+ if { $stat == 1 } {
+ error "FAIL: Recovery error: $result."
+ }
+
+ puts "\tTxn005.d: Check data"
+ set e [berkdb_env -txn -home $testdir]
+ error_check_good env_open [is_valid_env $e] TRUE
+
+ set db [berkdb_open -env $e -auto_commit -btree txn005.db]
+ error_check_good db [is_valid_db $db] TRUE
+
+ error_check_good get_a [$db get "a"] "{a {}}"
+ error_check_bad get_b [$db get "b"] "{b {}}"
+ error_check_good dbclose [$db close] 0
+ error_check_good eclose [$e close] 0
+}
diff --git a/storage/bdb/test/txn006.tcl b/storage/bdb/test/txn006.tcl
new file mode 100644
index 00000000000..7bf37d34dfc
--- /dev/null
+++ b/storage/bdb/test/txn006.tcl
@@ -0,0 +1,47 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: txn006.tcl,v 1.5 2002/08/01 19:59:19 sue Exp $
+#
+#
+#TEST txn006
+#TEST Test dump/load in transactional environment.
+proc txn006 { { iter 50 } } {
+ source ./include.tcl
+ set testfile txn006.db
+
+ puts "Txn006: Test dump/load in transaction environment"
+ env_cleanup $testdir
+
+ puts "\tTxn006.a: Create environment and database"
+ # Open/create the txn region
+ set e [berkdb_env -create -home $testdir -txn]
+ error_check_good env_open [is_valid_env $e] TRUE
+
+ # Open/create database
+ set db [berkdb_open -auto_commit -env $e \
+ -create -btree -dup $testfile]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ # Start a transaction
+ set txn [$e txn]
+ error_check_good txn [is_valid_txn $txn $e] TRUE
+
+ puts "\tTxn006.b: Put data"
+ # Put some data
+ for { set i 1 } { $i < $iter } { incr i } {
+ error_check_good put [$db put -txn $txn key$i data$i] 0
+ }
+
+ # End transaction, close db
+ error_check_good txn_commit [$txn commit] 0
+ error_check_good db_close [$db close] 0
+ error_check_good env_close [$e close] 0
+
+ puts "\tTxn006.c: dump/load"
+ # Dump and load
+ exec $util_path/db_dump -p -h $testdir $testfile | \
+ $util_path/db_load -h $testdir $testfile
+}
diff --git a/storage/bdb/test/txn007.tcl b/storage/bdb/test/txn007.tcl
new file mode 100644
index 00000000000..f67dc209f92
--- /dev/null
+++ b/storage/bdb/test/txn007.tcl
@@ -0,0 +1,57 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: txn007.tcl,v 11.3 2002/08/08 15:38:14 bostic Exp $
+#
+#TEST txn007
+#TEST Test of DB_TXN_WRITE_NOSYNC
+proc txn007 { { iter 50 } } {
+ source ./include.tcl
+ set testfile txn007.db
+
+ puts "Txn007: DB_TXN_WRITE_NOSYNC"
+ env_cleanup $testdir
+
+ # Open/create the txn region
+ puts "\tTxn007.a: Create env and database with -wrnosync"
+ set e [berkdb_env -create -home $testdir -txn -wrnosync]
+ error_check_good env_open [is_valid_env $e] TRUE
+
+ # Open/create database
+ set db [berkdb open -auto_commit -env $e \
+ -create -btree -dup $testfile]
+ error_check_good db_open [is_valid_db $db] TRUE
+
+ # Put some data
+ puts "\tTxn007.b: Put $iter data items in individual transactions"
+ for { set i 1 } { $i < $iter } { incr i } {
+ # Start a transaction
+ set txn [$e txn]
+ error_check_good txn [is_valid_txn $txn $e] TRUE
+ $db put -txn $txn key$i data$i
+ error_check_good txn_commit [$txn commit] 0
+ }
+ set stat [$e log_stat]
+ puts "\tTxn007.c: Check log stats"
+ foreach i $stat {
+ set txt [lindex $i 0]
+ if { [string equal $txt {Times log written}] == 1 } {
+ set wrval [lindex $i 1]
+ }
+ if { [string equal $txt {Times log flushed}] == 1 } {
+ set syncval [lindex $i 1]
+ }
+ }
+ error_check_good wrval [expr $wrval >= $iter] 1
+ #
+ # We should have written at least 'iter' number of times,
+ # but not synced on any of those.
+ #
+ set val [expr $wrval - $iter]
+ error_check_good syncval [expr $syncval <= $val] 1
+
+ error_check_good db_close [$db close] 0
+ error_check_good env_close [$e close] 0
+}
diff --git a/storage/bdb/test/txn008.tcl b/storage/bdb/test/txn008.tcl
new file mode 100644
index 00000000000..ad57ea0eeaa
--- /dev/null
+++ b/storage/bdb/test/txn008.tcl
@@ -0,0 +1,32 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: txn008.tcl,v 11.3 2002/05/10 17:55:54 sue Exp $
+#
+
+# TEST txn008
+# TEST Test of wraparound txnids (txn002)
+proc txn008 { } {
+ source ./include.tcl
+ global txn_curid
+ global txn_maxid
+
+ set orig_curid $txn_curid
+ set orig_maxid $txn_maxid
+ puts "\tTxn008.1: wraparound txnids"
+ set txn_curid [expr $txn_maxid - 2]
+ txn002 "08.1"
+ puts "\tTxn008.2: closer wraparound txnids"
+ set txn_curid [expr $txn_maxid - 3]
+ set txn_maxid [expr $txn_maxid - 2]
+ txn002 "08.2"
+
+ puts "\tTxn008.3: test wraparound txnids"
+ txn_idwrap_check $testdir
+ set txn_curid $orig_curid
+ set txn_maxid $orig_maxid
+ return
+}
+
diff --git a/storage/bdb/test/txn009.tcl b/storage/bdb/test/txn009.tcl
new file mode 100644
index 00000000000..784c0068a41
--- /dev/null
+++ b/storage/bdb/test/txn009.tcl
@@ -0,0 +1,32 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: txn009.tcl,v 11.3 2002/05/10 17:55:55 sue Exp $
+#
+
+# TEST txn009
+# TEST Test of wraparound txnids (txn003)
+proc txn009 { } {
+ source ./include.tcl
+ global txn_curid
+ global txn_maxid
+
+ set orig_curid $txn_curid
+ set orig_maxid $txn_maxid
+ puts "\tTxn009.1: wraparound txnids"
+ set txn_curid [expr $txn_maxid - 2]
+ txn003 "09.1"
+ puts "\tTxn009.2: closer wraparound txnids"
+ set txn_curid [expr $txn_maxid - 3]
+ set txn_maxid [expr $txn_maxid - 2]
+ txn003 "09.2"
+
+ puts "\tTxn009.3: test wraparound txnids"
+ txn_idwrap_check $testdir
+ set txn_curid $orig_curid
+ set txn_maxid $orig_maxid
+ return
+}
+
diff --git a/storage/bdb/test/txnscript.tcl b/storage/bdb/test/txnscript.tcl
new file mode 100644
index 00000000000..1a4a1b6f2ec
--- /dev/null
+++ b/storage/bdb/test/txnscript.tcl
@@ -0,0 +1,67 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1996-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: txnscript.tcl,v 11.3 2002/01/23 15:33:40 bostic Exp $
+#
+# Txn003 script - outstanding child prepare script
+# Usage: txnscript envcmd dbfile gidfile key data
+# envcmd: command to open env
+# dbfile: name of database file
+# gidfile: name of global id file
+# key: key to use
+# data: new data to use
+
+source ./include.tcl
+source $test_path/test.tcl
+source $test_path/testutils.tcl
+
+set usage "txnscript envcmd dbfile gidfile key data"
+
+# Verify usage
+if { $argc != 5 } {
+ puts stderr "FAIL:[timestamp] Usage: $usage"
+ exit
+}
+
+# Initialize arguments
+set envcmd [ lindex $argv 0 ]
+set dbfile [ lindex $argv 1 ]
+set gidfile [ lindex $argv 2 ]
+set key [ lindex $argv 3 ]
+set data [ lindex $argv 4 ]
+
+set dbenv [eval $envcmd]
+error_check_good envopen [is_valid_env $dbenv] TRUE
+
+set usedb 1
+set db [berkdb_open -auto_commit -env $dbenv $dbfile]
+error_check_good dbopen [is_valid_db $db] TRUE
+
+puts "\tTxnscript.a: begin parent and child txn"
+set parent [$dbenv txn]
+error_check_good parent [is_valid_txn $parent $dbenv] TRUE
+set child [$dbenv txn -parent $parent]
+error_check_good parent [is_valid_txn $child $dbenv] TRUE
+
+puts "\tTxnscript.b: Modify data"
+error_check_good db_put [$db put -txn $child $key $data] 0
+
+set gfd [open $gidfile w+]
+set gid [make_gid txnscript:$parent]
+puts $gfd $gid
+puts "\tTxnscript.c: Prepare parent only"
+error_check_good txn_prepare:$parent [$parent prepare $gid] 0
+close $gfd
+
+puts "\tTxnscript.d: Check child handle"
+set stat [catch {$child abort} ret]
+error_check_good child_handle $stat 1
+error_check_good child_h2 [is_substr $ret "invalid command name"] 1
+
+#
+# We do not close the db or env, but exit with the txns outstanding.
+#
+puts "\tTxnscript completed successfully"
+flush stdout
diff --git a/storage/bdb/test/update.tcl b/storage/bdb/test/update.tcl
new file mode 100644
index 00000000000..2bedfacc793
--- /dev/null
+++ b/storage/bdb/test/update.tcl
@@ -0,0 +1,93 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: update.tcl,v 11.11 2002/01/11 15:53:58 bostic Exp $
+
+source ./include.tcl
+global update_dir
+set update_dir "$test_path/update_test"
+
+proc update { } {
+ source ./include.tcl
+ global update_dir
+
+ foreach version [glob $update_dir/*] {
+ regexp \[^\/\]*$ $version version
+ foreach method [glob $update_dir/$version/*] {
+ regexp \[^\/\]*$ $method method
+ foreach file [glob $update_dir/$version/$method/*] {
+ regexp (\[^\/\]*)\.tar\.gz$ $file dummy name
+ foreach endianness {"le" "be"} {
+ puts "Update:\
+ $version $method $name $endianness"
+ set ret [catch {_update $update_dir $testdir $version $method $name $endianness 1 1} message]
+ if { $ret != 0 } {
+ puts $message
+ }
+ }
+ }
+ }
+ }
+}
+
+proc _update { source_dir temp_dir \
+ version method file endianness do_db_load_test do_update_test } {
+ source include.tcl
+ global errorInfo
+
+ cleanup $temp_dir NULL
+
+ exec sh -c \
+"gzcat $source_dir/$version/$method/$file.tar.gz | (cd $temp_dir && tar xf -)"
+
+ if { $do_db_load_test } {
+ set ret [catch \
+ {exec $util_path/db_load -f "$temp_dir/$file.dump" \
+ "$temp_dir/update.db"} message]
+ error_check_good \
+ "Update load: $version $method $file $message" $ret 0
+
+ set ret [catch \
+ {exec $util_path/db_dump -f "$temp_dir/update.dump" \
+ "$temp_dir/update.db"} message]
+ error_check_good \
+ "Update dump: $version $method $file $message" $ret 0
+
+ error_check_good "Update diff.1.1: $version $method $file" \
+ [filecmp "$temp_dir/$file.dump" "$temp_dir/update.dump"] 0
+ error_check_good \
+ "Update diff.1.2: $version $method $file" $ret ""
+ }
+
+ if { $do_update_test } {
+ set ret [catch \
+ {berkdb open -update "$temp_dir/$file-$endianness.db"} db]
+ if { $ret == 1 } {
+ if { ![is_substr $errorInfo "version upgrade"] } {
+ set fnl [string first "\n" $errorInfo]
+ set theError \
+ [string range $errorInfo 0 [expr $fnl - 1]]
+ error $theError
+ }
+ } else {
+ error_check_good dbopen [is_valid_db $db] TRUE
+ error_check_good dbclose [$db close] 0
+
+ set ret [catch \
+ {exec $util_path/db_dump -f \
+ "$temp_dir/update.dump" \
+ "$temp_dir/$file-$endianness.db"} message]
+ error_check_good "Update\
+ dump: $version $method $file $message" $ret 0
+
+ error_check_good \
+ "Update diff.2: $version $method $file" \
+ [filecmp "$temp_dir/$file.dump" \
+ "$temp_dir/update.dump"] 0
+ error_check_good \
+ "Update diff.2: $version $method $file" $ret ""
+ }
+ }
+}
diff --git a/storage/bdb/test/upgrade.tcl b/storage/bdb/test/upgrade.tcl
new file mode 100644
index 00000000000..1c0ffc5461a
--- /dev/null
+++ b/storage/bdb/test/upgrade.tcl
@@ -0,0 +1,294 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 1999-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: upgrade.tcl,v 11.22 2002/07/28 03:22:41 krinsky Exp $
+
+source ./include.tcl
+
+global upgrade_dir
+# set upgrade_dir "$test_path/upgrade_test"
+set upgrade_dir "$test_path/upgrade/databases"
+
+global gen_upgrade
+set gen_upgrade 0
+
+global upgrade_dir
+global upgrade_be
+global upgrade_method
+global upgrade_name
+
+proc upgrade { { archived_test_loc "DEFAULT" } } {
+ source ./include.tcl
+ global upgrade_dir
+
+ set saved_upgrade_dir $upgrade_dir
+
+ puts -nonewline "Upgrade test: "
+ if { $archived_test_loc == "DEFAULT" } {
+ puts "using default archived databases in $upgrade_dir."
+ } else {
+ set upgrade_dir $archived_test_loc
+ puts "using archived databases in $upgrade_dir."
+ }
+
+ foreach version [glob $upgrade_dir/*] {
+ if { [string first CVS $version] != -1 } { continue }
+ regexp \[^\/\]*$ $version version
+ foreach method [glob $upgrade_dir/$version/*] {
+ regexp \[^\/\]*$ $method method
+ foreach file [glob $upgrade_dir/$version/$method/*] {
+ regexp (\[^\/\]*)\.tar\.gz$ $file dummy name
+
+ cleanup $testdir NULL 1
+ #puts "$upgrade_dir/$version/$method/$name.tar.gz"
+ set curdir [pwd]
+ cd $testdir
+ set tarfd [open "|tar xf -" w]
+ cd $curdir
+
+ catch {exec gunzip -c "$upgrade_dir/$version/$method/$name.tar.gz" >@$tarfd}
+ close $tarfd
+
+ set f [open $testdir/$name.tcldump {RDWR CREAT}]
+ close $f
+
+ # It may seem suboptimal to exec a separate
+ # tclsh for each subtest, but this is
+ # necessary to keep the testing process
+ # from consuming a tremendous amount of
+ # memory.
+ if { [file exists $testdir/$name-le.db] } {
+ set ret [catch {exec $tclsh_path\
+ << "source $test_path/test.tcl;\
+ _upgrade_test $testdir $version\
+ $method\
+ $name le"} message]
+ puts $message
+ if { $ret != 0 } {
+ #exit
+ }
+ }
+
+ if { [file exists $testdir/$name-be.db] } {
+ set ret [catch {exec $tclsh_path\
+ << "source $test_path/test.tcl;\
+ _upgrade_test $testdir $version\
+ $method\
+ $name be"} message]
+ puts $message
+ if { $ret != 0 } {
+ #exit
+ }
+ }
+
+ set ret [catch {exec $tclsh_path\
+ << "source $test_path/test.tcl;\
+ _db_load_test $testdir $version $method\
+ $name"} message]
+ puts $message
+ if { $ret != 0 } {
+ #exit
+ }
+
+ }
+ }
+ }
+ set upgrade_dir $saved_upgrade_dir
+
+ # Don't provide a return value.
+ return
+}
+
+proc _upgrade_test { temp_dir version method file endianness } {
+ source include.tcl
+ global errorInfo
+
+ puts "Upgrade: $version $method $file $endianness"
+
+ set ret [berkdb upgrade "$temp_dir/$file-$endianness.db"]
+ error_check_good dbupgrade $ret 0
+
+ error_check_good dbupgrade_verify [verify_dir $temp_dir "" 0 0 1] 0
+
+ upgrade_dump "$temp_dir/$file-$endianness.db" "$temp_dir/temp.dump"
+
+ error_check_good "Upgrade diff.$endianness: $version $method $file" \
+ [filecmp "$temp_dir/$file.tcldump" "$temp_dir/temp.dump"] 0
+}
+
+proc _db_load_test { temp_dir version method file } {
+ source include.tcl
+ global errorInfo
+
+ puts "db_load: $version $method $file"
+
+ set ret [catch \
+ {exec $util_path/db_load -f "$temp_dir/$file.dump" \
+ "$temp_dir/upgrade.db"} message]
+ error_check_good \
+ "Upgrade load: $version $method $file $message" $ret 0
+
+ upgrade_dump "$temp_dir/upgrade.db" "$temp_dir/temp.dump"
+
+ error_check_good "Upgrade diff.1.1: $version $method $file" \
+ [filecmp "$temp_dir/$file.tcldump" "$temp_dir/temp.dump"] 0
+}
+
+proc gen_upgrade { dir } {
+ global gen_upgrade
+ global upgrade_dir
+ global upgrade_be
+ global upgrade_method
+ global upgrade_name
+ global num_test
+ global parms
+ source ./include.tcl
+
+ set gen_upgrade 1
+ set upgrade_dir $dir
+
+ foreach i "btree rbtree hash recno rrecno frecno queue queueext" {
+ puts "Running $i tests"
+ set upgrade_method $i
+ set start 1
+ for { set j $start } { $j <= $num_test(test) } { incr j } {
+ set upgrade_name [format "test%03d" $j]
+ if { [info exists parms($upgrade_name)] != 1 } {
+ continue
+ }
+
+ foreach upgrade_be { 0 1 } {
+ if [catch {exec $tclsh_path \
+ << "source $test_path/test.tcl;\
+ global gen_upgrade upgrade_be;\
+ global upgrade_method upgrade_name;\
+ set gen_upgrade 1;\
+ set upgrade_be $upgrade_be;\
+ set upgrade_method $upgrade_method;\
+ set upgrade_name $upgrade_name;\
+ run_method -$i $j $j"} res] {
+ puts "FAIL: $upgrade_name $i"
+ }
+ puts $res
+ cleanup $testdir NULL 1
+ }
+ }
+ }
+ set gen_upgrade 0
+}
+
+proc upgrade_dump { database file {stripnulls 0} } {
+ global errorInfo
+
+ set db [berkdb open $database]
+ set dbc [$db cursor]
+
+ set f [open $file w+]
+ fconfigure $f -encoding binary -translation binary
+
+ #
+ # Get a sorted list of keys
+ #
+ set key_list ""
+ set pair [$dbc get -first]
+
+ while { 1 } {
+ if { [llength $pair] == 0 } {
+ break
+ }
+ set k [lindex [lindex $pair 0] 0]
+ lappend key_list $k
+ set pair [$dbc get -next]
+ }
+
+ # Discard duplicated keys; we now have a key for each
+ # duplicate, not each unique key, and we don't want to get each
+ # duplicate multiple times when we iterate over key_list.
+ set uniq_keys ""
+ foreach key $key_list {
+ if { [info exists existence_list($key)] == 0 } {
+ lappend uniq_keys $key
+ }
+ set existence_list($key) 1
+ }
+ set key_list $uniq_keys
+
+ set key_list [lsort -command _comp $key_list]
+
+ #
+ # Get the data for each key
+ #
+ set i 0
+ foreach key $key_list {
+ set pair [$dbc get -set $key]
+ if { $stripnulls != 0 } {
+ # the Tcl interface to db versions before 3.X
+ # added nulls at the end of all keys and data, so
+ # we provide functionality to strip that out.
+ set key [strip_null $key]
+ }
+ set data_list {}
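+ # Run the loop under catch: -nextdup can fail for access
+ # methods that do not support duplicates.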
+ catch { while { [llength $pair] != 0 } {
+ set data [lindex [lindex $pair 0] 1]
+ if { $stripnulls != 0 } {
+ set data [strip_null $data]
+ }
+ lappend data_list [list $data]
+ set pair [$dbc get -nextdup]
+ } }
+ set data_list [lsort -command _comp $data_list]
+ puts -nonewline $f [binary format i [string length $key]]
+ puts -nonewline $f $key
+ puts -nonewline $f [binary format i [llength $data_list]]
+ for { set j 0 } { $j < [llength $data_list] } { incr j } {
+ puts -nonewline $f [binary format i \
+     [string length [concat [lindex $data_list $j]]]]
+ puts -nonewline $f [concat [lindex $data_list $j]]
+ }
+ if { [llength $data_list] == 0 } {
+ puts "WARNING: zero-length data list"
+ }
+ incr i
+ }
+
+ close $f
+ error_check_good upgrade_dump_c_close [$dbc close] 0
+ error_check_good upgrade_dump_db_close [$db close] 0
+}
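+
+# For reference, a minimal sketch (not part of the tests) of reading
+# one record back from a dump written above; "fd" is a hypothetical
+# channel opened with -encoding binary -translation binary:
+#
+#	binary scan [read $fd 4] i klen
+#	set key [read $fd $klen]
+#	binary scan [read $fd 4] i ndata
+#	set data_list {}
+#	for { set j 0 } { $j < $ndata } { incr j } {
+#		binary scan [read $fd 4] i dlen
+#		lappend data_list [read $fd $dlen]
+#	}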
+
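+# lsort comparison command: compare two strings as they appeared
+# before the pre-3.X Tcl interface appended trailing nulls, i.e.
+# ignoring everything from the first null byte onward.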
+proc _comp { a b } {
+ # Truncate each string at its first null byte, if any.
+ set an [string first "\0" $a]
+ set bn [string first "\0" $b]
+
+ if { $an != -1 } {
+ set a [string range $a 0 [expr $an - 1]]
+ }
+ if { $bn != -1 } {
+ set b [string range $b 0 [expr $bn - 1]]
+ }
+
+ return [string compare $a $b]
+}
+
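+# Remove a single trailing null byte from a string, if present.
+# (Pre-3.X versions of the Tcl interface null-terminated all keys
+# and data.)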
+proc strip_null { str } {
+ set len [string length $str]
+ set last [expr $len - 1]
+
+ set termchar [string range $str $last $last]
+ if { [string compare $termchar \0] == 0 } {
+ set ret [string range $str 0 [expr $last - 1]]
+ } else {
+ set ret $str
+ }
+
+ return $ret
+}
diff --git a/storage/bdb/test/wordlist b/storage/bdb/test/wordlist
new file mode 100644
index 00000000000..03ea15f7277
--- /dev/null
+++ b/storage/bdb/test/wordlist
@@ -0,0 +1,10001 @@
+cooperate
+benighted
+apologist's
+addresser
+cataract
+colonially
+atoned
+avow
+bathroom
+anaesthesia
+columnated
+bogs
+astral
+barbed
+captives
+acclaims
+adjutants
+affidavits
+baptisms
+bubbling
+classic
+allaying
+component
+battlement
+backtrack
+
+courage
+bore
+advertisement
+attests
+bunny's
+airlifts
+cajole
+cataloging
+airily
+collected
+abridged
+compel
+aftermath
+barrow
+approve
+chillier
+bequest
+attendant
+abjures
+adjudication
+banished
+asymptotes
+borrower
+caustic
+claim
+cohabitation
+corporacies
+buoy
+benchmark's
+averting
+anecdote's
+caress
+annihilate
+cajoles
+anywhere
+apparitions
+coves
+bribed
+casually
+clue's
+asserted
+architects
+abstained
+attitude
+accumulating
+coalesced
+angelic
+agnostic
+breathed
+bother
+congregating
+amatory
+caging
+countryside
+chapel
+buttonhole
+bartenders
+bridging
+bombardment
+accurately
+confirmed
+alleviated
+acquiring
+bruise
+antelope
+albums
+allusive
+corker
+cavity's
+compliment
+climb
+caterpillar
+almond
+authenticated
+balkan
+assembly's
+acidity
+abases
+bonny
+been
+abbots
+abductor's
+aerials
+cancels
+chalked
+beeps
+affirms
+contrariness
+clearest
+appropriations
+critiquing
+affluence
+bouts
+abiding
+comprises
+brunches
+biology
+conceptualization's
+assaying
+abutter
+adorable
+beatable
+appenders
+aggressors
+agrarian
+bottleneck
+angled
+beholds
+bereaved
+creation
+animated
+candied
+bar
+aeronautics
+cousin's
+cleaver
+alienation
+billet
+bungler
+contention
+businessman
+braids
+assert
+boisterous
+consolidate
+breathing
+ballot
+averted
+conscientiously
+bellow
+brazenness
+coaches
+bulldog
+classify
+checksum
+almond's
+cornered
+caskets
+capacitors
+beefer
+connoisseurs
+consisted
+adore
+circumvented
+colonels
+addenda
+boost
+compatibility's
+bumblebee
+commonest
+containment
+active
+absorption's
+creaks
+administer
+beset
+aborted
+aforesaid
+aridity
+broken
+azimuths
+aerial
+addition's
+aggrieve
+anthology
+circuitous
+checks
+alley's
+beam
+boss
+corrupting
+absolutes
+asteroid's
+bandstands
+beatitude's
+analogue's
+busts
+confession
+bedstead
+affairs
+blackmailers
+collared
+buckboard
+assassin
+accessor
+adjudging
+binders
+constituent's
+blister
+aromas
+approved
+absorbent
+barbarously
+cat's
+builder
+brandish
+assailing
+constitute
+christening
+acutely
+amount
+blurry
+blocks
+advertise
+chain
+brigade's
+confusion
+beds
+arrangers
+colonizers
+beautifying
+bankruptcy
+bedazzles
+candidates
+clearness
+admonishment's
+behind
+abbreviations
+basting
+ballasts
+amateurism
+celled
+constituted
+bonfire
+bugled
+advisee's
+battled
+budded
+burners
+causeway's
+calibrate
+brambly
+befuddles
+azure
+busiest
+admiringly
+appropriator
+accumulator
+cables
+abhor
+civil
+botulinus
+creaked
+bismuth
+astronomical
+abscissas
+bodice
+aunt
+cascades
+cares
+comradeship
+assemblages
+boater
+bellmen
+admission's
+ambitious
+baldness
+abortive
+controlled
+chinked
+coded
+courtrooms
+arteriolar
+cooler's
+cared
+brewer
+christians
+barbecues
+contacts
+blackjack's
+buzzing
+blasters
+accords
+braziers
+allegretto
+catered
+breveting
+cleaning
+amicably
+bummed
+consulted
+allegro's
+accumulator's
+compartmented
+condemned
+concludes
+bitwise
+cheered
+appropriator's
+accessors
+casting
+carolina's
+accompanying
+budding
+correspond
+bach's
+angel's
+bearing
+arresters
+biweekly
+character
+badgering
+cantankerous
+avalanching
+adjudges
+barometer
+append
+continuations
+burped
+boxtop's
+abstention
+amp
+axiomatized
+bimonthlies
+aghast
+arresting
+breakwater's
+continuing
+bridle
+bobbin's
+antagonistically
+blindly
+biochemical
+biologically
+antifundamentalist
+confer
+cloudiness
+bonded
+comfortingly
+caption
+blackmailed
+bidders
+breakpoint
+brigadier
+criminals
+coyotes
+casserole's
+annex
+cereals
+breadboxes
+belgian
+conductivity
+counterexample
+anarchist
+couches
+atavistic
+clipped
+button
+axiomatic
+capping
+correcting
+chase
+chastise
+angle
+burnished
+beauteously
+antipodes
+crippling
+crowns
+amends
+bah
+brigadiers
+alleged
+correctives
+bristles
+buzzards
+barbs
+bagel
+adaptation
+caliber
+browner
+apprehensions
+bonnet
+anachronistically
+composites
+bothered
+assurer
+arc
+chaser
+bastards
+calmed
+bunches
+apocalypse
+countably
+crowned
+contrivance
+boomerang's
+airplane's
+boarded
+consumption
+attuning
+blamed
+cooing
+annihilation
+abused
+absence
+coin
+coronaries
+applicatively
+binomial
+ablates
+banishes
+boating
+companions
+bilking
+captivate
+comment
+claimants
+admonish
+ameliorated
+bankruptcies
+author
+cheat
+chocolates
+botch
+averring
+beneath
+crudely
+creeping
+acolytes
+ass's
+cheese's
+checksum's
+chillers
+bracelet
+archenemy
+assistantship
+baroque
+butterfly
+coolie's
+anecdote
+coring
+cleansing
+accreditation
+ceaselessly
+attitudes
+bag
+belong
+assented
+aped
+constrains
+balalaikas
+consent
+carpeting
+conspiracy
+allude
+contradictory
+adverb's
+constitutive
+arterial
+admirable
+begot
+affectation
+antiquate
+attribution
+competition's
+bovine
+commodores
+alerters
+abatements
+corks
+battlements
+cave
+buoys
+credible
+bowdlerizes
+connector
+amorphously
+boredom
+bashing
+creams
+arthropods
+amalgamated
+ballets
+chafe
+autograph
+age
+aid
+colleague's
+atrocious
+carbonizing
+chutes
+barbecued
+circuits
+bandages
+corporations
+beehive
+bandwagon
+accommodated
+councillor's
+belted
+airdrop
+confrontations
+chieftain's
+canonicalization
+amyl
+abjectness
+choke
+consider
+adjuster
+crossover's
+agreeing
+consolations
+capitalizers
+binges
+annihilating
+callers
+coordinate
+banshees
+biscuits
+absorbency
+corollary
+corresponded
+aristocrat's
+banally
+cruiser
+bathtub's
+abbreviated
+balkiness
+crew
+acidulous
+air
+birdies
+canvassing
+concretion
+blackjacks
+controller's
+aquarius
+charm
+clip
+awarder
+consistently
+calibrated
+bushwhacking
+avaricious
+ceaselessness
+basically
+accolades
+adduction
+commending
+consulates
+certifiable
+admire
+bankers
+appropriateness
+bandlimits
+chill
+adds
+constable
+chirping
+cologne
+cowardice
+baklava
+amusedly
+blackberry
+crises
+bedeviling
+botching
+backbend
+attaining
+continuity
+artistry
+beginner
+cleaner's
+adores
+commemorating
+amusement
+burial
+bungalow's
+abstinence
+contractually
+advancement's
+conjecture
+buckling
+conferrer
+cherub's
+belonged
+classifications
+baseball
+carbonation
+craved
+bans
+aphid
+arbor
+ague
+acropolis
+applied
+aspired
+calibrating
+abundance
+appeased
+chanted
+ascent
+convenes
+beep
+bottles
+aborigines
+clips
+acquainting
+aiming
+creditor's
+abolitionists
+cloves
+containments
+bungling
+bunt
+anchors
+brazed
+communicator's
+brew
+accumulate
+addicting
+actively
+befog
+anachronisms
+bumblers
+closest
+calculators
+absurdity
+colleagues
+college
+assesses
+conflicted
+associational
+betide
+conceptualization
+adjutant
+alliances
+corresponding
+barometers
+cot
+brooch's
+coiled
+arboreal
+convicted
+artless
+certificates
+bourbon
+astonish
+bust
+correlate
+amounts
+anal
+abstraction's
+corns
+conqueror's
+boldly
+bob's
+beer
+blanks
+corpses
+contingent
+blackly
+backed
+appearances
+cancers
+actuating
+apprehension's
+colorings
+anglicanism
+armament
+armer
+bizarre
+begotten
+actions
+archly
+capriciously
+clue
+contractor
+contributions
+agendas
+coached
+blamable
+annoyers
+coupons
+brooked
+assortment
+axes
+celebrates
+courageously
+baroqueness
+blasphemous
+asserter
+contents
+correctly
+challenged
+bulldoze
+casement
+acknowledge
+bitterness
+belongs
+allotments
+chalice's
+bequest's
+adjacent
+consumer's
+conservatively
+coalition
+background's
+backache
+befouls
+brushfire's
+analysts
+branch
+airways
+awaiting
+breakfast
+anoints
+baying
+contrary
+bilge
+chasm's
+babes
+afresh
+centerpiece's
+barked
+coffin
+assumed
+actresses
+accentuating
+aching
+abet
+balancers
+consumptively
+cagers
+backing
+angiography
+chord's
+cheapened
+bewailed
+arson
+begged
+convergent
+bowlers
+conflicting
+confiscated
+bitch
+bloody
+brushfires
+bleach
+computation's
+choppers
+circuitously
+chancing
+bunker
+concept's
+alacrity
+boyhood
+ammo
+bobwhites
+carter
+ardent
+bier
+airway's
+brownies
+aura
+cannibalizing
+confirms
+australian
+barrage
+closures
+assertive
+abstainer
+bicarbonate
+clone
+back
+cipher
+crown
+cannibalizes
+away
+crafty
+airings
+amtrak
+comical
+burnish
+continuum
+apparition
+apologizing
+blot
+blacker
+characters
+built
+apparent
+applicative
+assiduous
+attorneys
+affectionately
+bobbing
+baggy
+comic's
+attempt
+appealers
+amortize
+bonanza
+backwards
+bowers
+anemometer
+ambulance's
+creeps
+abduction's
+coal
+chiller
+adjudications
+clogging
+ascending
+bookkeeper
+crawlers
+battery's
+artifacts
+attributions
+amusements
+aftermost
+allophones
+bemoaned
+comptroller
+bugger's
+buoyancy
+booboo
+award
+amplifying
+certify
+bivariate
+attunes
+asteroidal
+chant
+collectively
+chasteness
+chapels
+copiousness
+benign
+armies
+competing
+buss
+awakened
+breakpoint's
+conceptualizing
+cleansers
+acorns
+conveyance's
+bluer
+battle
+budges
+characteristically
+be
+contour
+beguiling
+awarding
+armhole
+airship's
+bathtub
+breathable
+crowded
+compiles
+certain
+brutalizing
+bacteria
+baronies
+abode
+blacksmith
+brinkmanship
+capitalizations
+cousin
+botany
+avionic
+companion
+consists
+connoisseur's
+avalanched
+claimant's
+backstitches
+affixes
+bikes
+atomically
+cowed
+asleep
+becomingly
+acorn's
+complainers
+appreciated
+cross
+cringed
+booting
+attitudinal
+broadcasting
+childishly
+breeze's
+craven
+boll
+clause's
+burden
+appendages
+atemporal
+allah
+carnival's
+anchorage
+adjures
+besought
+abounding
+crucifying
+arrangements
+antiquarians
+burrows
+antipode
+canvas
+constable's
+coopers
+ascended
+companionship
+bakery's
+bayonets
+conclusively
+boasters
+beneficiaries
+conspicuous
+contriver
+architecture
+breakthroughs
+brownie's
+blur
+academics
+antagonist
+contemplates
+arena
+caravan's
+administers
+comprehensively
+convey
+bigot
+blitz
+bibliography's
+coerced
+assail
+amazons
+banned
+alabaster
+concluding
+bouquet
+barks
+acquaintances
+astonishment
+constraint
+backpack's
+breakthroughes
+blocking
+accomplishers
+catastrophe
+bushels
+algae
+ailment's
+anemometers
+beginning's
+chefs
+converse
+cornerstone
+astound
+assuring
+adornment
+anyone
+alumni
+club
+bestselling
+businessmen
+constructed
+attendee's
+cooped
+ablute
+chronicler
+alaska
+clam
+canonicals
+concerned
+aligned
+creek
+burrow
+allay
+admirals
+blackens
+compressing
+confirm
+cows
+battleship's
+belched
+affixing
+chalices
+choirs
+absentee's
+baseboard's
+apportionment
+adheres
+accounts
+chef
+access
+clearings
+accompanists
+concentrating
+ado
+bathos
+bailiff
+continuance
+ball
+bearer
+congress
+cites
+can't
+balloon
+crams
+consults
+bungled
+bike's
+apes
+assassinations
+colt's
+consecrate
+ancients
+chick
+analyst
+adsorbing
+burntly
+accompanist's
+apprehensive
+bengal
+boughs
+ankles
+anchored
+benefits
+accommodation
+amiss
+brink
+chewers
+blueberry's
+chairs
+adjoin
+bivalve
+autobiography's
+automated
+comparisons
+climbed
+artists
+congruent
+cold
+atonement
+cashier
+armageddon
+allocations
+bereavements
+bumblebees
+blew
+busboys
+bottoming
+alternations
+apprenticed
+bestial
+cinder's
+consumption's
+abbey's
+amended
+continued
+birefringent
+barbados
+ability's
+compulsory
+antler
+centerpieces
+accountant's
+arrogant
+ballads
+ascenders
+appliers
+adjustment's
+blabbed
+baits
+activity's
+clod's
+adjudicating
+bleak
+commutes
+bumming
+beating
+cohesiveness
+branded
+acknowledger
+communications
+blockhouses
+booklets
+consenters
+creek's
+consulting
+binary
+coaster
+ascription
+bushwhack
+boggles
+affidavit's
+arrangement's
+congressionally
+convenient
+avoider
+abaft
+bootlegger's
+befriending
+ceases
+carbonizes
+clumps
+commented
+competence
+conversing
+butting
+astonishing
+armful
+allegory's
+crisis
+critiques
+concurred
+conservative
+aristotelian
+blizzard's
+corner
+amateur's
+compare
+affiliations
+bestseller
+batch
+cleanly
+assayed
+bravos
+bowls
+conceptualized
+babe's
+algorithm's
+baptist
+cheeks
+conquerer
+bidder's
+behaving
+briefcase's
+analogues
+amply
+attitude's
+apple
+crossable
+ambushed
+besmirches
+creditors
+bandwagons
+continentally
+adjuncts
+concerns
+agers
+cop
+amoebas
+bisected
+bombing
+appendices
+cocking
+bused
+babied
+compounds
+asserts
+believably
+alert
+apostate
+catalysts
+aureomycin
+convex
+beetle's
+banishing
+agitating
+bystanders
+bow
+connotes
+blanch
+charmingly
+animal's
+baritones
+brier
+astronomer
+company's
+balding
+actually
+aunt's
+avalanches
+acquisition
+base
+compilations
+bathtubs
+actualization
+chanced
+atom
+banged
+befuddled
+apologized
+componentwise
+britisher
+began
+conservationist
+actuate
+crosser
+appended
+bitten
+ambivalence
+acetate
+conversions
+buzzwords
+askance
+abolishing
+birdied
+creeds
+anglers
+colossal
+bereft
+chock
+apprentice
+cooper
+besmirching
+allocating
+antiques
+bikini's
+bonders
+afflictive
+augmentation
+atheist
+bucket
+bibliophile
+annexes
+beguiles
+birdbaths
+amendments
+animators
+asymptotically
+communally
+barber
+biographers
+arguable
+confidant
+apologies
+adorns
+contacting
+coarsest
+artichokes
+arraign
+absorbing
+alden
+commercially
+cabbage's
+coincides
+clumping
+cents
+alleviater
+buzzard
+braked
+anesthetized
+bugling
+capitalist
+befriended
+appreciatively
+boomtown's
+cozier
+critic's
+correspondent
+bard
+attenuator
+bake
+brings
+chews
+anechoic
+brutal
+colder
+buckshot
+canvassers
+analytic
+allies
+alloys
+awake
+alienates
+bin's
+crimes
+constructible
+classifiers
+bulb
+cream
+banquet
+axiomatize
+adjourn
+converted
+auditioned
+comfortably
+bandwidth
+cannibalize
+ascensions
+bussing
+balloons
+contenders
+commemoration
+aspersions
+consultation
+cashes
+belting
+augurs
+architectural
+bluebird's
+breastworks
+absconded
+bullets
+bloodstain's
+blunder
+astronautics
+coo
+approves
+authority
+assure
+amsterdam
+acquitted
+adversity
+celebrate
+bred
+bridged
+bloc's
+bullied
+affinity
+breezes
+baptistry's
+constitutions
+avouch
+amazingly
+consolation
+abnormality
+clashes
+buttes
+buzzard's
+breathers
+chipmunk
+contented
+carol's
+armers
+amazedly
+comprehends
+canonicalize
+breakthrough
+arbitrator
+butterfat
+cases
+besiegers
+affianced
+amelia
+bush
+airplane
+annulled
+bike
+alternated
+attackers
+convene
+aficionado
+anachronism's
+crude
+carelessness
+akin
+combated
+assisting
+clocker
+attacked
+briefed
+antic's
+attendants
+attracting
+cope
+allotting
+bandwidths
+add
+assaulting
+breakage
+climes
+arrival's
+burp
+accelerator
+capacitance
+arabians
+bankruptcy's
+archeological
+coins
+browbeating
+chasm
+cardinalities
+compartmentalize
+courter
+assess
+abreaction
+brakes
+compatibly
+compression
+characterizable
+briefing's
+alto's
+classifiable
+contrast
+correlation
+colonial
+applying
+authorizers
+contesters
+basely
+cherries
+clicking
+cornfield's
+alarmingly
+conferences
+business's
+banker
+bloomed
+airfield
+attracts
+building
+commutative
+atomization
+competitions
+boatsmen
+acquirable
+arkansas
+command
+beings
+compactors
+anodize
+arguments
+conforming
+adsorption
+accustomed
+blends
+bowstring's
+blackout
+appender
+buggy
+bricklaying
+chart
+calmer
+cage
+attractive
+causation's
+athenian
+advise
+cranks
+containers
+besotter
+beret
+attender
+cone
+bills
+aligns
+brushlike
+brownest
+bosom's
+berth
+accountably
+bequeathed
+affirmatively
+boundless
+alleyways
+commute
+bendable
+abhors
+calculation
+affidavit
+answerable
+bellicose
+counterfeiting
+admiral's
+chisel
+bridesmaids
+believers
+aggregated
+conspicuously
+abased
+armenian
+conspirator
+canonical
+assignable
+barrage's
+clearance's
+casts
+administratively
+befoul
+chaffer
+amazer
+colorer
+broaching
+crevice
+aniline
+coursing
+compassionate
+adhesive
+bibliographies
+corrects
+augments
+between
+causer
+amorist
+cellist's
+acoustical
+baseless
+cigarettes
+astuteness
+appropriators
+convincing
+bellhop's
+bemoaning
+calmingly
+chronologically
+castles
+algebraically
+appointees
+academic
+blunderings
+assassins
+barrel
+accuracy
+amortized
+ballpark
+acrobat's
+brazier's
+abortively
+coarser
+airfields
+contester
+circus's
+creased
+amorphous
+accomplisher
+blabs
+butchers
+crackles
+bachelor
+aviators
+chariot's
+circumflex
+binocular
+alienating
+artificially
+agreement's
+aglow
+afghan
+abrupt
+annihilates
+apologetic
+barge
+betters
+algorithms
+conjurer
+chargeable
+brindle
+alphabetizes
+coder
+availing
+bandpass
+arrogance
+convent's
+advertiser
+connected
+basso
+breakfaster
+comic
+congenial
+beau
+courters
+adapters
+abruptly
+chemicals
+bringed
+creaming
+butterer
+attained
+actuals
+averred
+brainwash
+centerpiece
+blabbermouth
+byproduct's
+adaptable
+automata
+art
+cheery
+beheld
+beehive's
+claimed
+crucial
+brokenness
+agility
+combating
+cleft
+amenity
+after
+configuration
+contrasting
+coarsely
+brass
+barnstormed
+bowel
+bridesmaid's
+cornfield
+crazing
+autocracies
+adult
+conceptualizations
+corroboration
+bedders
+arroyo
+alarmist
+boatman
+chests
+burglary
+budgets
+canary's
+arraigning
+chin
+barnstorms
+blamers
+brimful
+calculate
+cellular
+contended
+challenges
+brusque
+bikinis
+arithmetics
+chairpersons
+class
+aircraft
+capably
+centralize
+awhile
+compacting
+courteous
+archaeologist's
+cram
+adagio
+affronts
+amplitude's
+bureau's
+audaciously
+autism
+blueberries
+an
+chips
+confiner
+chopper's
+chronology
+breaching
+bead
+amass
+camouflage
+compensation
+aspect
+broker
+atrophy
+balk
+bloodless
+barnyard
+benefactor's
+airdrops
+caused
+anthem
+activist's
+bottomless
+arrogates
+avoided
+bouncy
+clarified
+articulate
+almoner
+communists
+blokes
+butternut
+clockings
+barium
+blows
+criticism's
+associations
+brute
+bleeds
+alliteration's
+bluestocking
+boxwood
+clearer
+allegiance
+conceptualizes
+captivating
+bolshevik's
+belabored
+biographic
+contaminates
+chanticleer's
+adjusted
+childhood
+arguing
+cape
+conversantly
+compensating
+collaborations
+arraignment's
+blasted
+charging
+aggregation
+apprentices
+bird
+codifiers
+ballistic
+breve
+bells
+carolina
+chalk
+buckles
+boyfriend's
+adorn
+accoutrements
+availability
+antisymmetry
+blades
+alluded
+asterisks
+bookcases
+additive
+consents
+advanced
+balalaika
+coders
+caliph
+alundum
+are
+controllable
+blazing
+clattered
+asiatic
+axiomatizes
+ace
+coining
+column
+auditor's
+carol
+concatenated
+arrayed
+capital
+cautioner
+clan
+beauteous
+abbreviate
+asteroids
+canal's
+consolidation
+closets
+concealer
+crevices
+abed
+complex
+conviction's
+abide
+arrests
+begrudges
+adolescent
+conceals
+cells
+circles
+bravest
+compromiser
+bagels
+areas
+afore
+allergies
+arrangement
+attraction's
+amulets
+abstraction
+captured
+crouched
+brothers
+cash
+achieving
+bastard
+compete
+boiling
+beaching
+amphetamines
+clerking
+congestion
+alleviates
+angry
+bared
+comprehended
+bloodstain
+constituency's
+automating
+aerial's
+counterfeit
+besotted
+basses
+biofeedback
+compilation's
+band
+consulate
+appellant
+cough
+antennae
+contend
+anniversary
+boor
+artifactually
+aerobics
+booths
+chubbiest
+consumable
+assignments
+bromide's
+confined
+breakers
+alongside
+courtier
+boisterously
+bilaterally
+alternation
+auspiciously
+arbitrated
+condemning
+burns
+correspondents
+composition
+cavalierly
+coverlets
+capacities
+clatter
+apotheoses
+cartography
+ceased
+capitalized
+auditor
+appendicitis
+chops
+barony
+anemometry
+befouled
+briefer
+chest
+begetting
+bloats
+bookseller's
+commitment
+confides
+carcass's
+battering
+altruistically
+ballots
+adornments
+broaden
+angularly
+coefficient
+cataloged
+brae
+advantage
+anthems
+calculated
+counseling
+agitate
+accentuated
+camel
+ambivalent
+bedposts
+beacons
+chubbier
+cheerer
+assumes
+concord
+autumns
+convention's
+alpha
+adulterates
+arbiters
+archaically
+criteria
+achilles
+cheaper
+bulling
+associators
+bloater
+brawler
+ability
+adherents
+commonwealth
+coyote's
+centrally
+bequeathing
+abandonment
+circumstantially
+courteously
+borrow
+countermeasure's
+capricious
+allied
+anagram's
+absorptive
+assuage
+asset
+booked
+aspects
+commits
+crates
+capacitive
+condones
+assimilates
+carriage
+competitor's
+cocoons
+aggravated
+caravans
+arbitrator's
+baked
+balanced
+annihilated
+addressable
+autonomous
+bandwagon's
+contesting
+burrowing
+coroutines
+abjection
+correctable
+applauded
+bragged
+code
+aggressiveness
+cluttered
+attacking
+chide
+am
+coasters
+blizzard
+contentment
+altruism
+certifier
+capturing
+combinators
+carefree
+activate
+blindfolding
+assassinating
+approximate
+biplane's
+aplenty
+arteriosclerosis
+concentrates
+antisymmetric
+assurances
+anarchist's
+ascend
+advancing
+atrocities
+butt's
+bearable
+craftiness
+categorized
+barn
+contributor's
+arises
+bushy
+bisque
+coasted
+bargaining
+area's
+couples
+cabs
+barter
+bulletin
+chisels
+broadcasters
+contingency
+bywords
+antimicrobial
+coexisted
+blinding
+arithmetize
+coweringly
+convince
+competed
+bauble's
+crab
+boggling
+advocacy
+atlas
+assembled
+ancient
+bloodstream
+balking
+bin
+bully
+affirm
+cruelest
+atone
+conserved
+confession's
+bat
+captive
+aster
+blames
+colonel's
+bones
+borderline
+cleanses
+classified
+crudest
+contiguity
+bailing
+ablaze
+bender
+attendee
+clobbers
+aliasing
+autopilot
+coolers
+cache
+allayed
+barnyards
+britons
+appointment
+adaptor
+blockers
+abridges
+bloodiest
+betrothal
+bombards
+bony
+bus
+canary
+antinomy
+awash
+comrades
+ablating
+collectible
+boats
+brand
+church
+bandy
+adhering
+barred
+ammunition
+chime
+accompaniment's
+battleground's
+composing
+caveats
+armor
+amoeba
+composure
+collides
+avowed
+banding
+counsels
+asymmetric
+abbreviates
+balky
+adjudicates
+anointing
+accursed
+copse
+action
+construction's
+accents
+ambition's
+caressing
+cosmetic
+accession
+clutters
+censures
+allusions
+belittled
+armchair
+abode's
+conception's
+ascribe
+aliases
+ancestry
+ax
+companionable
+aright
+boxed
+brighteners
+alloy's
+checkable
+arraignments
+bed
+bunkhouses
+abbeys
+ceasing
+companies
+cherishing
+chunk's
+barony's
+chinning
+burdens
+briskness
+beggarly
+beloved
+clambered
+constitutionality
+beguiled
+archers
+alleyway
+apostle's
+consulate's
+antiformant
+categories
+construct
+aliments
+acquired
+blotted
+alterations
+adolescent's
+cranes
+bluntest
+accusation
+chafer
+airstrips
+abolished
+bothersome
+churchly
+airy
+bedded
+awareness
+alliterative
+arose
+amputates
+civilization's
+arenas
+certifying
+aspirators
+carbon's
+bunching
+aerates
+bilked
+checking
+cloned
+administrations
+canvasses
+colorless
+chamber
+circumspectly
+benedictine
+advisedly
+classifier
+approachable
+banners
+concurrently
+chores
+agape
+convention
+bindings
+budget
+comedies
+ants
+ambassadors
+chroniclers
+carrots
+colorful
+bulkhead's
+coherence
+buyer
+aggressions
+congressional
+commoners
+cheapen
+concealed
+columnates
+anarchy
+actress's
+baseboards
+creature's
+centuries
+barbarian
+concrete
+bicycles
+acceptably
+acclimating
+biceps
+bloodhound's
+becalmed
+apostle
+bible
+conjunctive
+comb
+ballers
+bickering
+adulterous
+austrian
+applicable
+blackberries
+creasing
+catalogs
+avert
+asparagus
+cambridge
+bird's
+belgians
+admonished
+admirations
+conscientious
+crescent's
+connectives
+blissful
+commenting
+bagged
+assimilate
+abounded
+copyright's
+advancement
+axiom's
+compilation
+circumlocution's
+catheter
+chances
+concretely
+codification
+browned
+clustering
+bum's
+clauses
+boundlessness
+arteriole's
+alfresco
+begrudged
+blustered
+anglican
+adjoined
+bamboo
+bathed
+consortium
+carrot's
+cloak
+album
+bunglers
+approbate
+colored
+aim
+cowboy
+alienate
+cleverest
+ambiguous
+confrontation's
+clear
+africa
+bowline's
+astronauts
+belayed
+censorship
+animation
+bedrooms
+chasms
+compared
+cogitated
+barbarians
+accomplices
+columnizes
+beaming
+busied
+counterpointing
+aluminum
+coconut's
+acclamation
+chokers
+biomedicine
+basalt
+buckwheat
+cardinality's
+bafflers
+arid
+chap's
+abound
+biblical
+backbone
+anticipation
+condemner
+angular
+advisability
+believing
+boiler
+arclike
+abetter
+bespeaks
+axiomatically
+coarse
+auditions
+bludgeoning
+clam's
+chief
+arrow
+cementing
+anxiety
+aberrations
+brushes
+cherub
+corollary's
+bunters
+beefers
+barbiturate
+circumlocution
+conjoined
+charities
+coverage
+campaigner
+burrowed
+barracks
+bristling
+accomplice
+abandoned
+bull
+caked
+century's
+bantu
+bristled
+airer
+bench
+bevy
+chamberlain's
+attention
+cloning
+camouflaging
+alder
+counter
+credibly
+approvingly
+breakup
+artillery
+celestially
+bail
+baker
+bullish
+canvass
+conversationally
+bringers
+augment
+creditably
+butterers
+botswana
+contemptible
+bribing
+adumbrate
+barb
+calico
+alludes
+amplified
+chills
+cloak's
+aver
+arthropod's
+budgeter
+bereavement
+cellars
+crewing
+blackmailer
+ayes
+bedsteads
+breachers
+bazaar
+centered
+celebrity
+blameless
+abscissa
+aerators
+awaited
+british
+adversary
+cowslip
+buttons
+confusing
+buggy's
+belts
+canceled
+addresses
+bribes
+condoning
+bonneted
+coarsen
+amazement
+angels
+chemise
+carbonates
+apostolic
+bandit's
+contending
+consummate
+counterclockwise
+beneficence
+benefitted
+contradicts
+comfortabilities
+anemone
+conductive
+articles
+bookcase
+burst
+baptizes
+countless
+costs
+agonizes
+byte
+creeper
+begs
+bunnies
+attract
+able
+calories
+baskets
+american
+brunt
+cognition
+closing
+chef's
+backbone's
+complicates
+cloister
+bedsprings
+arrays
+brigs
+archbishop
+buckler
+clove
+catholic's
+bellboys
+chairmen
+clap
+clarifications
+ambuscade
+bight
+bellyfull
+allowance's
+academy's
+acquiescence
+ambush
+catches
+at
+billion
+contact
+bees
+adopters
+approximately
+chiseled
+attributively
+criers
+codification's
+cowslips
+contradictions
+buttock's
+categorically
+counterpart's
+confessor
+appreciably
+adjusts
+altitude
+construe
+cancer
+bay
+aristocratic
+alleviaters
+binoculars
+axiomatizing
+changer
+bustle
+civic
+bostonians
+crops
+authorizations
+cogitation
+baptize
+caressed
+abase
+ariser
+axiomatization
+aggravates
+confiscation
+bowdlerize
+backspaced
+alters
+clarity
+blots
+bland
+belligerent's
+burgher
+cardinally
+bookcase's
+buggers
+byte's
+avarice
+crowding
+beriberi
+allegories
+coronets
+cell
+calculative
+adduce
+amperes
+bladders
+adages
+contests
+cognizant
+actuates
+ambiguity
+brighten
+concert
+conviction
+booty
+ashtray
+braves
+blouses
+avoiders
+confederate
+bombings
+couplings
+convictions
+attractiveness
+chronicled
+corers
+anger
+covertly
+aural
+asynchrony
+arrowheads
+breakdown's
+bulletins
+ceremonialness
+clipper
+bracelets
+anthropomorphically
+benedict
+connecting
+bacterium
+achievers
+abutter's
+autocorrelate
+coupling
+blanketer
+continental
+assignment
+conundrum
+arab
+besides
+cheerful
+blowup
+bastion
+arrive
+combines
+agar
+cookie
+astronaut's
+constraint's
+article's
+confiscations
+bounded
+adjudicate
+belligerently
+boron
+brownness
+adept
+creep
+abduction
+accosting
+asylum
+autographed
+clash
+chiseler
+clumsily
+capitally
+braking
+absenting
+bagatelle's
+comet
+basked
+anything
+buffeted
+absentia
+bounty
+carols
+characteristic's
+constructive
+comforting
+aflame
+brainwashed
+booby
+aspirations
+adjudge
+behaviorism
+computability
+assessment
+consultations
+bowstring
+acknowledgment
+arranger
+chancellor
+attest
+compresses
+concessions
+asymmetrically
+administering
+clamoring
+arraigned
+archived
+admonition
+actor's
+aimers
+colorers
+booklet
+calibers
+affix
+bushel's
+atomizes
+creeks
+bleedings
+casuals
+archives
+certainly
+animate
+cons
+affiliate
+answered
+coyote
+coughed
+alligator's
+antagonized
+arousal
+assisted
+aerated
+competently
+conquering
+acclaimed
+assign
+announcer
+controllers
+amalgamation
+comfort
+antihistorical
+availed
+balsa
+annoyed
+basted
+asymptomatically
+cropped
+combinational
+barging
+conversant
+causality
+botches
+bedspread
+considerately
+bookstores
+climate
+blessing
+accordion's
+cdr
+bonanza's
+construing
+bearings
+bluster
+backspaces
+babyish
+countermeasure
+crime
+battered
+audit
+associating
+corps
+application
+archangel's
+aided
+breasted
+compelled
+acrobats
+breakfasts
+chronologies
+beet's
+averts
+convergence
+attributable
+adverbial
+churns
+arrest
+breastwork
+beefs
+brownie
+create
+contradistinctions
+coordinators
+abandoning
+byline
+beatitude
+autosuggestibility
+bipartite
+annals
+assents
+conceives
+amalgams
+cleft's
+clicked
+appointers
+bible's
+boots
+caret
+attaches
+controversy's
+combinatorial
+bazaars
+cardinals
+bored
+catering
+christian's
+ashman
+consequence's
+austere
+clay
+birthday's
+amongst
+arbitrariness
+brainstorms
+chateaus
+coaxer
+applause
+cautiousness
+adorned
+compromises
+creatures
+compliance
+apartheid
+archiving
+amoeba's
+communal
+comedian's
+aggressive
+crop
+ante
+better
+chalice
+aristocrats
+circling
+belittle
+abortion's
+coldly
+certification
+befriends
+courthouse
+anesthesia
+accorder
+athletic
+blithe
+bedder
+abasements
+councils
+beware
+abductor
+assonant
+clench
+aspersion
+abortion
+abating
+birches
+breakpoints
+acyclic
+ablate
+canners
+cistern
+boxtop
+composite
+cloudless
+computation
+chastely
+abusing
+bunker's
+compounding
+alveolar
+chaplains
+bias
+audiological
+capability's
+bangle
+barren
+antidote's
+cranking
+baptizing
+bond
+borders
+automobile's
+allegoric
+chargers
+baltic
+autumn
+columns
+absolute
+connoisseur
+cranberry
+contiguous
+consoled
+confirmations
+argot
+blouse
+annotated
+callous
+astounded
+crashed
+autonavigators
+chivalry
+columnating
+beefed
+convincer
+allegorical
+bagger
+assume
+containable
+artistically
+calibration
+architectonic
+campaigns
+addressability
+crazier
+buy
+brightener
+bastion's
+blurb
+awaits
+commands
+chocolate
+bleaching
+antenna
+blowers
+chorused
+composers
+assigners
+aspires
+coils
+bid
+application's
+clamped
+bedding
+awkwardly
+coppers
+costumes
+borax
+caged
+candler
+badges
+clutches
+consign
+apprised
+buys
+adiabatically
+aggregately
+canned
+abstract
+acrimony
+coax
+analytically
+absurd
+alluring
+contradicted
+aspersion's
+bribe
+boos
+chattererz
+backache's
+complying
+continent
+cohabitate
+causation
+astronomer's
+cities
+bookie
+bleating
+cracking
+bicameral
+convoluted
+adjustable
+ambulance
+can
+boulders
+consideration
+announces
+briars
+antipode's
+bartered
+ancestor
+biplanes
+characterize
+crested
+bum
+bridling
+consolable
+bungles
+coffee
+buffets
+congratulation
+commitment's
+adequately
+clown
+capacitor's
+broomsticks
+agglutinate
+activations
+asians
+canon's
+authenticity
+complexities
+cripple
+bracket
+counselor's
+beatably
+bounced
+baton's
+crankiest
+barbell's
+caster
+casseroles
+ballad's
+bob
+batched
+attenuated
+beakers
+biologist
+bleary
+condescend
+blondes
+augustness
+boldface
+battlefronts
+acumen
+bolting
+articulatory
+butyrate
+bowel's
+backwater's
+colonel
+creating
+authorized
+bijection
+accruing
+admirably
+correctness
+citadels
+clasps
+bandlimit
+bib
+appalachia
+contrives
+bundle
+audiology
+circumventing
+blinker
+choked
+bilks
+clears
+affirmations
+arbitrating
+bites
+bootstraps
+capitals
+commuters
+billeted
+authentication
+choice
+attentively
+aggressor
+arterioles
+crowds
+chestnut
+backstitched
+attachments
+assimilating
+bewilderment
+atrophied
+chintz
+blackjack
+armadillos
+bonfire's
+ballast
+agonies
+busier
+coefficient's
+adventurous
+ballet's
+coil
+chewed
+come
+bonder
+catalogue
+coursed
+arise
+biennium
+ceremony's
+blanching
+appraisers
+acolyte
+argues
+beholden
+appanage
+astatine
+banana's
+coons
+civilians
+bodyguard
+archipelago
+bug's
+candles
+antique's
+accidently
+blighted
+belgium
+besieged
+burned
+abuse
+asian
+chute
+awkwardness
+abasing
+bottler
+ardently
+blab
+breakwater
+cavity
+cheated
+befall
+according
+chronicle
+airframes
+bats
+choring
+authorize
+consumed
+chatter
+annunciated
+capers
+anomalous
+clustered
+burner
+acquaintance's
+badger's
+basic
+affectations
+buzzy
+coast
+attendances
+activating
+beams
+cohesive
+attainable
+barbecueing
+beautiful
+acronyms
+communion
+client
+atypical
+antagonists
+conservations
+arguers
+agglomerate
+antigen
+battalion
+ambition
+countered
+assistant
+classed
+arming
+alveoli
+buff's
+backplanes
+busted
+bermuda
+converting
+brutish
+boot
+acidities
+confrontation
+chapel's
+berlin
+ascender
+behead
+buddy's
+commandment
+actuated
+brilliancy
+chance
+bedrock's
+bridgeheads
+arable
+avid
+arteries
+caresser
+ballyhoo
+attested
+african
+comradely
+consciences
+commencing
+antennas
+annulments
+bobolink's
+advisee
+acceptance
+crack
+ascendent
+appendage's
+accommodates
+accumulated
+clones
+apocryphal
+ages
+cluster
+capitols
+camper
+beading
+amble
+buffeting
+circumspect
+advances
+analyzes
+courier's
+aperiodic
+appealer
+atonally
+attentive
+conspire
+appropriating
+armed
+allergic
+agglomeration
+consternation
+blinks
+audibly
+aspirins
+bunions
+adverbs
+armload
+bet's
+caring
+carryover
+coordinator's
+afterthoughts
+allays
+abided
+brownish
+baiting
+capitalism
+coined
+conspirators
+automatic
+contradistinction
+conductor's
+backstitching
+conjure
+casings
+accountant
+clinched
+constrain
+alcohol
+bee
+anticompetitive
+britain
+bade
+camera's
+antimony
+activated
+burglarizes
+compatible
+cotyledon's
+artificiality
+bath
+citadel
+archivist
+chandelier
+addiction
+ampersand
+bitterer
+constructively
+afield
+bing
+attractor's
+cringe
+allergy's
+bigots
+assimilation
+ate
+capitalization
+abridge
+buzzword
+befit
+bandlimited
+commandant
+alabama
+acculturated
+brightening
+bulldozing
+cooky
+bunks
+centers
+bespectacled
+adherent's
+abducts
+another's
+condensation
+billeting
+bye
+chess
+craziest
+ballgown's
+archaism
+consorted
+chinned
+cowl
+beat
+bootlegger
+bravado
+classically
+bulging
+browbeat
+accommodate
+borne
+bronzed
+artifice
+arcade
+become
+backlog
+addressers
+amphitheaters
+befogging
+crochet
+aiding
+celebrated
+conversational
+backbends
+authentications
+advertisement's
+blockade
+bulldozes
+contraction's
+bricklayer's
+brain
+conveying
+anemia
+chronology's
+channeling
+caution
+commanding
+crosses
+artisan
+conditions
+admired
+authenticator
+airships
+blunter
+bridesmaid
+counseled
+cheeriness
+chiefs
+boils
+clerical
+atrocity's
+balls
+ambled
+canvases
+consoles
+abscessed
+abetting
+blitzkrieg
+bottlers
+beveled
+condemn
+alumna
+cords
+admittance
+annotates
+citing
+corrector
+appreciative
+branching
+betrays
+buttoned
+ailment
+boulevards
+bottlenecks
+chamberlains
+bedbug
+covenant's
+crispness
+considering
+broadcasts
+audubon
+arousing
+correction
+barrack
+closure
+contrastingly
+brittleness
+assassin's
+bursa
+bungalows
+balked
+conceptual
+carcasses
+arabia
+blueprint's
+affectingly
+consorting
+buses
+auger
+appointed
+brute's
+bosoms
+anyway
+arrowed
+anaphorically
+clarify
+approachability
+assistance
+buzzes
+commonplace
+bluebonnet's
+adroitness
+availers
+aquifers
+architecture's
+action's
+backgrounds
+abduct
+attired
+briber
+admissibility
+cease
+beck
+auctioneers
+birdbath's
+atomic
+crossing
+considerate
+biconvex
+bulge
+bedridden
+arising
+aggression's
+cherish
+bureaucratic
+abater
+amputating
+atop
+climber
+clutched
+afford
+bisections
+bonnets
+commendations
+bloke
+abundant
+clamp
+aloes
+aboard
+atheistic
+advantageously
+buffs
+chimney's
+cheerily
+benefactor
+ample
+bushwhacked
+captain
+buckskins
+contextually
+antiquarian's
+browns
+bubble
+ban's
+brine
+acculturates
+anhydrously
+beaver's
+advantaged
+bibliographic
+clasping
+clattering
+coerce
+colorado
+airmen
+bandlimiting
+balks
+boners
+attached
+chosen
+convened
+bordello
+composer
+botanist
+backtracks
+civilization
+commutativity
+bloodshed
+cohere
+bunkhouse
+archdiocese
+boycotted
+crosswords
+bedspread's
+anteaters
+cove
+apothecary
+chute's
+addressee
+climatically
+blower
+bane
+cask's
+beetling
+ambiguities
+before
+abstain
+arachnids
+bucket's
+amateurs
+blackouts
+adverb
+butchery
+conjunction's
+barricade
+audiologists
+aphorism
+complete
+butts
+bishops
+allotment's
+confusingly
+channeller's
+blanches
+bragging
+bathe
+comedians
+celestial
+citizens
+couple
+backpack
+aphasic
+brothels
+axles
+cancellations
+bonus's
+consolidates
+authoritative
+axle's
+acclimatization
+carolinas
+chime's
+antibiotic
+bisons
+biographically
+achieve
+bleachers
+bicentennial
+behavioral
+accomplish
+concealment
+biddies
+antitoxins
+arriving
+apprehend
+affluent
+cliffs
+bleached
+astronomers
+connection
+bride
+backs
+bog's
+casket's
+continual
+ampere
+cat
+alternator
+cotton
+athletes
+communicant's
+best
+befuddling
+benefactors
+appease
+annoyingly
+context
+astonished
+cracked
+amnesty
+autumn's
+binder
+babying
+contributory
+assumption
+cowls
+cocks
+airless
+consummated
+atypically
+beneficially
+chairing
+accusative
+commanded
+bufferrer's
+alerter
+arbiter
+civilly
+charms
+backscattering
+cheater
+bushes
+caverns
+chieftain
+calf
+comparing
+aurora
+butyl
+cower
+bemoans
+baptistry
+carpenter's
+capes
+bordered
+arrows
+blocker
+crest
+appeal
+arabic
+conventions
+axis
+brains
+bookkeeper's
+circle
+cooks
+circumlocutions
+adventists
+barringer
+affording
+anatomically
+basements
+barbarities
+configuration's
+contributes
+collaborating
+beach
+comet's
+bakes
+assigns
+ballerina
+cheapens
+clinging
+conquered
+bisecting
+closenesses
+bugle
+boatmen
+beatings
+complicator
+bight's
+banister's
+archaic
+anthropologists
+clams
+beginners
+committee's
+communicants
+alone
+bounteously
+bastes
+ascertain
+alphabetical
+bringing
+batters
+amazon's
+constituent
+benders
+being
+constitutionally
+audiometric
+blast
+copings
+bailiffs
+colts
+coolies
+airlift's
+boomerang
+bifocal
+clothes
+cashiers
+congenially
+billows
+boilerplate
+biochemistry
+betting
+brimmed
+complementers
+breading
+bragger
+adducting
+bisectors
+abrogates
+criticized
+comrade
+bucolic
+birthright
+blurs
+challenger
+complicated
+bluebonnet
+biscuit's
+classmates
+campus's
+boundary
+bedbug's
+adjustor's
+acre
+bicycling
+awe
+additions
+baiter
+authorizes
+beautify
+copier
+buffet
+belfries
+acquisitions
+brooch
+crickets
+caterpillars
+beefsteak
+complicating
+bedpost
+criminal
+celebrity's
+bookseller
+christened
+coerces
+clamors
+all
+boatyard's
+canoe's
+begin
+anaerobic
+bushing
+agreers
+concedes
+countermeasures
+beg
+agglutinin
+bunted
+ammonium
+aspiration's
+bathrobes
+changeable
+beached
+bestowal
+beaner
+catsup
+admires
+clockwise
+agile
+alarms
+ached
+chinks
+buffer's
+cartesian
+annunciate
+chanticleer
+avenue
+anchor
+alliterations
+blanking
+bargained
+breathtaking
+crime's
+assiduity
+argentina
+contiguously
+aqua
+bested
+borderlands
+appetite
+captive's
+bipolar
+conceal
+counters
+costumed
+arrestingly
+bunting
+blight
+champagne
+brusquely
+address
+bloodhounds
+associative
+creed
+arithmetical
+balustrade's
+belabors
+complementing
+checkout
+archivers
+badlands
+behaviors
+ampoules
+bridgehead's
+antiquarian
+clumsiness
+considerable
+apportions
+anglicans
+appealingly
+barfly's
+absorptions
+awards
+congregates
+cloister's
+armour
+avoid
+correctively
+chucks
+burps
+bums
+berry
+batches
+administration
+atones
+bishop's
+blonde's
+casualty's
+cores
+bodied
+alter
+assonance
+apprise
+antitoxin
+avariciously
+checkpoint's
+affirmative
+conjures
+angstrom
+aesthetically
+canyon
+binge
+crazed
+breastwork's
+aids
+boston
+conceits
+announcement's
+beechen
+accessory
+authorities
+constrained
+automation
+anaplasmosis
+commander
+commendation's
+belabor
+cornfields
+artemis
+asphalt
+contracted
+brochure
+crafted
+allegedly
+alien's
+auditory
+blowfish
+adducible
+confederations
+annuals
+britches
+acquaintance
+appallingly
+abounds
+burglarproof
+crossers
+bayous
+brisk
+authority's
+covetousness
+averse
+accomplished
+aromatic
+admiral
+bijective
+avenging
+bran
+boatyards
+beseeching
+challenging
+bares
+acts
+abductions
+compendium
+compulsion's
+calendar's
+clad
+blockage
+conventional
+craze
+cajoling
+acceptability
+bungalow
+buff
+cramps
+attackable
+calculator's
+asp
+braved
+colors
+balling
+contaminate
+crackling
+comes
+complimenters
+across
+astronomy
+aborigine
+bobwhite's
+autopilot's
+chattered
+appall
+autonavigator
+bashed
+acoustics
+beachhead's
+apartments
+convenience
+blackout's
+bands
+autonomously
+amounters
+centripetal
+achievable
+astringency
+attuned
+concatenating
+copyright
+coding
+assumption's
+anastomoses
+confiscate
+asking
+beneficial
+adhesions
+busboy
+bronzes
+audacity
+bruises
+crash
+beau's
+circuit's
+aborts
+baubles
+beliefs
+assuaged
+costed
+blinking
+characterized
+bowled
+block
+conquests
+confesses
+amusers
+ceiling
+berets
+berliner
+abstentions
+child
+authoritatively
+closeness
+bushel
+considered
+communicates
+cheerlessly
+autofluorescence
+aquarium
+affects
+appurtenances
+airbag
+approaches
+admonishments
+bets
+bounden
+courtly
+bodybuilder's
+campus
+brainstorm
+americans
+chairperson's
+botanical
+askew
+amazon
+bleed
+clime's
+cooperations
+commonness
+boatloads
+blinked
+courtyard
+adapted
+aforethought
+backwater
+burr
+cathode
+awaking
+buzzed
+bridgeable
+arrives
+adventuring
+beseech
+attrition
+copied
+colon
+client's
+bandstand's
+advice
+baptistries
+antithetical
+alcohol's
+contradicting
+ambidextrous
+belches
+category
+bluntness
+coupon's
+assimilations
+comfortable
+caller
+affliction's
+attends
+compactest
+baler
+beacon
+blind
+bleakness
+beseeches
+courts
+couch
+consequential
+adulterers
+craving
+biggest
+astray
+bigoted
+barfly
+charges
+ambiguity's
+commentary
+crankily
+cowerer
+carnival
+bachelor's
+bituminous
+continuance's
+calamities
+claws
+apiece
+century
+ascendancy
+charts
+animations
+aggression
+chickadee's
+carve
+confidence
+actor
+bubbled
+becalming
+convulsion
+chivalrous
+brightest
+centralized
+beautifies
+amateurishness
+birthrights
+alligator
+circumstantial
+constructors
+conceptions
+arranging
+cart
+cent
+ager
+congruence
+carrot
+chariots
+cloudier
+captivity
+conquerers
+compartmentalizes
+condensing
+celebrities
+chalks
+accordance
+chilled
+conversations
+apples
+conceiving
+average
+blessed
+creator
+ant
+cling
+annoyer
+aviation
+cohesively
+correspondences
+boor's
+apprehended
+bessel
+both
+characterizes
+bards
+cots
+acculturating
+cemeteries
+carting
+alcohols
+bitterest
+ascetic's
+conducts
+caking
+airspace
+autocrats
+ashes
+chimes
+broadcaster
+commuter
+basket
+borderland's
+broadened
+boyish
+allegretto's
+ban
+bidder
+christen
+blessings
+bury
+arranged
+choir's
+apathetic
+boring
+aryan
+appearing
+binds
+cooperates
+bounces
+airspeed
+complicators
+adapting
+babbled
+agglomerates
+bedraggled
+addictions
+bolt
+calmly
+blur's
+boatload's
+anesthetic
+bugs
+colt
+completing
+boxer
+billers
+affronting
+absurdity's
+chides
+comparatively
+braided
+clipper's
+cot's
+calves
+articulations
+branchings
+attraction
+concatenates
+alligators
+cake
+boom
+crashing
+afar
+abler
+beamed
+adverse
+adrenaline
+agriculture
+beehives
+crankier
+courthouses
+advises
+consigns
+bisect
+azimuth's
+carpets
+arthropod
+brewery's
+commonalities
+altruist
+astride
+appreciate
+carved
+briefs
+admitter
+celery
+congregate
+clocking
+assassinated
+adding
+canvasser
+civics
+contemptuously
+calculates
+advisees
+bumbling
+algorithmically
+cloudy
+algebras
+addiction's
+cop's
+assurers
+confidently
+affector
+analyzers
+chimneys
+burdening
+antitrust
+admix
+avoidance
+choking
+coexists
+accustoms
+cellar
+anchovy
+constructor's
+confinements
+consequently
+accelerations
+accoutrement
+churchman
+biller
+affected
+brigades
+cremating
+corridor's
+bagging
+ah
+berating
+collective
+acuteness
+arrestors
+cab's
+border
+agitation
+animism
+arches
+alveolus
+cessation's
+averrer
+abash
+counterrevolution
+attesting
+animateness
+bawdy
+americana
+bloodstained
+applicator
+annotating
+annunciator
+clamored
+acting
+aerosols
+axiomatization's
+brags
+coalesces
+avocation
+combining
+crazily
+bravery
+burying
+adored
+airfield's
+accounting
+broadeners
+anise
+chimney
+added
+avenges
+bellicosity
+cranberries
+arsenic
+communities
+comparable
+bunkered
+architect
+alphabetically
+beautified
+apogees
+communist
+anatomical
+complexity
+accost
+autographing
+browsing
+ameliorate
+bookers
+bandaging
+clinical
+appellants
+counteract
+clairvoyantly
+bootstrap's
+canner
+boastful
+attainer
+ash
+beaded
+brake
+barest
+befriend
+burglarproofing
+allegorically
+bunts
+believes
+accession's
+buck
+boathouse's
+byword's
+anthracite
+accuse
+conjunction
+burping
+commandant's
+creativity
+affirming
+bark
+amuses
+balcony's
+auditors
+counsel
+clamber
+borates
+cowboy's
+bickered
+boors
+combing
+biting
+breeze
+crowder
+corn
+bloke's
+bombast
+bookstore
+blared
+bedlam
+carbohydrate
+coops
+bundles
+blistering
+antarctic
+anterior
+bilinear
+chocolate's
+context's
+alternating
+annoyance
+constancy
+ambivalently
+buddy
+brutalize
+bobbin
+alleles
+commotion
+attributes
+airborne
+creed's
+bolstering
+coaxed
+airframe
+breaker
+accept
+abashes
+attentional
+contributor
+comparability
+auscultating
+cocked
+computationally
+buffered
+career's
+analyzable
+absently
+courtyard's
+buildups
+apportioned
+balkanized
+annulling
+cremation
+buffetings
+conditional
+confided
+airliner
+bulldozer
+approaching
+anagram
+apollonian
+canaries
+bloat
+bluebird
+collision
+cool
+connectedness
+abasement
+artisan's
+avoidably
+clerks
+afflict
+briton
+corroborates
+cameras
+counted
+boldest
+burglars
+brutes
+brows
+abhorrent
+configuring
+averaged
+ace's
+buying
+abandon
+bayou
+cottons
+auditioning
+amplifies
+clippers
+brainstorm's
+alto
+brutalities
+bunch
+agricultural
+bursts
+blunting
+archer
+activity
+carefulness
+bedroom's
+concomitant
+balm's
+artificer
+barking
+breathy
+babies
+acacia
+bodies
+cap's
+criticised
+conversed
+crewed
+ascendant
+budgeting
+coroutine's
+charmed
+bellboy's
+conservatism
+butler
+acculturation
+conclusion's
+adapt
+cellist
+contempt
+adumbrates
+borrowed
+confounds
+allegiance's
+blabbermouths
+accrues
+captor
+coop
+baseballs
+cottages
+apartment's
+assertiveness
+assent
+artfully
+bagger's
+abolishment
+acetylene
+accessory's
+blackbird
+baptist's
+consist
+cavern
+buttock
+corporal's
+autoregressive
+bailiff's
+birds
+corder
+bracketing
+antlered
+barbiturates
+county's
+addicted
+agglutinated
+abashed
+competitively
+captains
+bloating
+accepts
+choose
+ashamed
+backyard's
+apiary
+contradiction
+balalaika's
+arctic
+broom
+anvils
+coffee's
+alliance's
+agitator's
+change
+adjusters
+cremates
+complexes
+bodyguard's
+burl
+antithyroid
+ambient
+airfoil
+apricots
+athleticism
+abjectly
+bankrupts
+answerers
+alternatively
+confronter
+breaking
+baronial
+cannibalized
+appetites
+breaded
+blackboard's
+battlegrounds
+cosine
+barrenness
+abbreviation
+budging
+boolean
+acrobatics
+again
+ashtrays
+clashed
+contingent's
+compulsion
+bedazzled
+collapsing
+comparison's
+businesses
+compassionately
+achievement
+buffering
+candlesticks
+austerely
+awls
+associate
+absolved
+annexed
+airway
+clipping
+counselors
+conscience
+attempters
+constructing
+biases
+cautioners
+comma's
+cosines
+char
+auscultates
+afire
+comely
+amity
+beverage's
+anew
+ballplayer's
+adulterated
+authorship
+alterers
+burdened
+attributive
+afflictions
+blinded
+barrier's
+attachment
+brotherhood
+bridegroom
+atoms
+cobweb's
+copes
+controversies
+complexion
+crawling
+atomized
+adjust
+accuracies
+concern
+cinders
+authorization
+appraisingly
+bladder's
+cooked
+cowers
+batter
+commissioner
+close
+burglar's
+allocated
+anvil
+aftershock
+abrogating
+chemistries
+advisable
+conduct
+committee
+blaring
+appalling
+braveness
+alertly
+artificialities
+brevet
+collision's
+arizona
+bower
+creamers
+awnings
+arsenals
+crane
+city
+contemplative
+catheters
+administrators
+attorney
+churned
+attractions
+columnation
+bobbed
+centipedes
+bostonian's
+apprises
+buries
+allege
+botulism
+adobe
+ambassador's
+covenants
+boon
+asynchronously
+bigness
+axial
+chaffing
+battleships
+ant's
+anthropological
+accent
+brushing
+brassy
+consumptions
+battleship
+absorb
+beckons
+brook
+connectors
+clinches
+accesses
+beaters
+archaicness
+bursitis
+chided
+bomb
+assimilated
+addicts
+convening
+arianists
+counting
+altar's
+confusions
+attachment's
+clipping's
+amazing
+corset
+bossed
+attach
+commandingly
+animatedly
+allegations
+assuages
+annulment
+compress
+aptitude
+absurdities
+autobiographic
+aspect's
+concentrator
+burgesses
+anagrams
+bedeviled
+assemblers
+convinced
+commentary's
+agglomerated
+biological
+callousness
+axolotl's
+atmospheres
+authoritarian
+cancer's
+above
+charting
+aldermen
+battler
+cistern's
+bouncer
+amassed
+conquest
+altering
+arrogantly
+brokenly
+comparator
+counsellor's
+attenders
+cackle
+criticize
+authored
+ably
+believed
+compelling
+accepter
+cleansed
+afflicted
+backslash
+computed
+almighty
+attache
+braes
+carriage's
+benediction
+brigadier's
+contemporariness
+boomtown
+amplitudes
+breakwaters
+clod
+catch
+bar's
+activist
+caves
+assenting
+camp
+attainments
+brotherliness
+continuances
+appearance
+applicator's
+browbeats
+banjos
+addendum
+became
+adduces
+armadillo
+brothel
+almanac
+courageous
+assault
+chunk
+coaching
+atheist's
+blunted
+aperiodicity
+congresses
+boastfully
+burglarproofed
+broadest
+bashfulness
+affect
+acne
+bottleneck's
+criticisms
+corrupts
+colonized
+closeted
+canonicalizing
+auditorium
+antenna's
+awfully
+anti
+consumes
+agonize
+algebra's
+championing
+blush
+bugger
+antagonize
+beethoven
+blase
+boycotts
+compensatory
+bugged
+boroughs
+anatomic
+batons
+arguably
+affricates
+appreciations
+cavalry
+alumna's
+arcing
+backpacks
+braces
+contextual
+coupon
+chillingly
+allocates
+abuts
+contribution
+commodity
+admonishing
+coolly
+cabinet's
+collapsed
+confessions
+adjured
+capriciousness
+chastising
+babe
+aerodynamics
+accepting
+concept
+contour's
+consequentialities
+birthday
+bankrupted
+birthed
+benefit
+concentrations
+azalea
+channels
+chestnuts
+contenting
+antedate
+censors
+contagious
+abbot's
+channellers
+apt
+commend
+avocation's
+admonition's
+abolition
+confederation
+carried
+clumsy
+coincidences
+bumper
+burr's
+bugles
+bribers
+attainably
+consume
+comma
+creativeness
+accuser
+bombs
+abbey
+baffled
+aside
+clip's
+appeases
+compass
+bundling
+abstractionism
+confide
+creases
+apropos
+confronted
+corrective
+concurrencies
+autocratic
+alien
+attending
+antagonistic
+broadcast
+asymptote's
+belied
+breasts
+contrapositives
+coiner
+accordingly
+cohering
+computers
+cow
+bibs
+ancestral
+controller
+attacker
+alerts
+coconut
+agency
+alerted
+alcoholism
+ammoniac
+actinometers
+acquitter
+bud
+cessation
+alleging
+centralizes
+articulators
+council's
+carvings
+arduously
+blown
+anode's
+arrogate
+bisects
+centimeters
+burgeoning
+course
+appointee's
+ascribable
+communicate
+contrivance's
+adoptions
+attune
+acres
+abyss's
+corporal
+certifiers
+analyze
+augusta
+bestseller's
+checkpoint
+coexist
+attainers
+argon
+bearded
+crudeness
+averaging
+brick
+adducing
+annulment's
+chicks
+blocked
+cisterns
+afoul
+affiliates
+briskly
+adhesion
+ascertainable
+appeasement
+blueprints
+agreements
+blindfolds
+communicator
+characterization
+annoyances
+breeches
+brushed
+clinic
+competes
+chuckled
+cradled
+balmy
+antisubmarine
+alternate
+armpits
+barn's
+conjuncts
+adhere
+allows
+counteracted
+appetizer
+capturers
+cleanse
+avant
+abbe
+corpse's
+arduousness
+badge
+begets
+contemplated
+caveat
+copiously
+athena
+aggrieving
+alibi
+accumulation
+basket's
+aftershocks
+bass
+conjuncted
+chaps
+brunch
+colonials
+bibbed
+clusters
+antagonizing
+constituencies
+combings
+bearish
+continuously
+adequacy
+brow's
+catalog
+alderman
+comedic
+chemists
+concernedly
+conceded
+alarm
+arced
+buckle
+confidingly
+coherent
+closes
+buffoon
+brace
+adjustably
+crackers
+contamination
+burgess's
+aerobic
+constitutes
+baptismal
+broadness
+blimps
+concatenation
+claiming
+bard's
+aerosolize
+adjoins
+copies
+coats
+boggle
+corroborated
+concreteness
+bill
+cautions
+bantam
+bearably
+armchair's
+birthright's
+cravat's
+cone's
+courtiers
+asunder
+bulletin's
+biopsies
+alley
+contrive
+blasphemies
+amuser
+ballerinas
+blushed
+causticly
+brandy
+blinkers
+complimenting
+crimsoning
+angola
+apprehensiveness
+bolster
+columnate
+byproducts
+berths
+accusal
+chubby
+arrived
+camps
+blemish's
+anaconda
+cook
+airfoils
+atlantic
+boosted
+converge
+availer
+appalachians
+coffin's
+boarding
+alga
+crouch
+columnizing
+consul's
+chastises
+angling
+apple's
+billiard
+attentiveness
+adroit
+apprehensible
+cereal
+blouse's
+browning
+bodybuilder
+coaxing
+assertion's
+connective's
+commemorated
+accountability
+crooked
+blips
+chandeliers
+aristocracy
+bangs
+coke
+abutment
+community
+calculus
+congregated
+crepe
+compromised
+airlines
+contributing
+contingencies
+coordinated
+alginate
+batted
+contender
+alma
+antagonisms
+accompanied
+airport
+administrator's
+appraisal
+breadbox
+condemnation
+backlog's
+available
+consequents
+crooks
+commonwealths
+barring
+channeller
+crucially
+archaeological
+charming
+adventist
+credits
+appetizing
+breads
+clients
+climbing
+aloneness
+abstractness
+appearer
+astute
+clockers
+antagonizes
+agonized
+bastard's
+conjectured
+aqueducts
+aureole
+boatswains
+conjured
+chauffeur
+complementer
+behold
+bustards
+bivouac
+cluck
+anus
+bless
+catastrophic
+bounty's
+allowed
+answer
+concealers
+brainchild's
+coercion
+buzzword's
+bordellos
+appertain
+applier
+couriers
+aesthetic's
+craft
+capacitances
+capped
+coupler
+category's
+anvil's
+conquest's
+checksums
+clucking
+bronchus
+acrimonious
+changeably
+accenting
+argued
+conditioning
+brewing
+backwardness
+cascaded
+atomize
+contours
+arianist
+apart
+conflict
+carefully
+banshee's
+conveys
+arbitrates
+amphitheater's
+amen
+alimony
+bound
+buzz
+courtroom
+apparently
+coalescing
+circulating
+amounter
+bypasses
+breadth
+choral
+completion
+arisen
+anticipating
+bilges
+contractions
+bedspring
+commune
+blacklisted
+beagle
+alkaline
+atolls
+carelessly
+blimp
+corking
+brevity
+alterable
+canada
+bear
+bluntly
+cartridges
+connoted
+countries
+corroborate
+consecration
+corrupted
+appreciating
+combatant's
+alkalis
+affecting
+blues
+casserole
+ballad
+bewitches
+common
+as
+because
+bathroom's
+anchorages
+beguile
+connect
+convenience's
+counteracting
+assorted
+care
+contains
+centimeter
+ancestors
+briefings
+busses
+churchyards
+breakable
+amortizing
+courthouse's
+click
+courses
+ajar
+county
+covet
+confidences
+capitalizer
+agog
+backtracking
+copious
+bestsellers
+chilliness
+bringer
+browse
+centipede
+bawled
+bricklayer
+breath
+assailants
+abysses
+command's
+characterizer
+calculating
+america's
+aurally
+contain
+alias
+commentators
+confounded
+appending
+accidents
+chatters
+coordinates
+bleeder
+blueness
+badger
+bolsters
+astounding
+capitalist's
+conservation's
+commences
+aimed
+bun
+comparators
+competition
+bauble
+backbend's
+bled
+assassinate
+chop
+anemometer's
+cobbler
+coldness
+audiometry
+affinity's
+amalgamates
+cowardly
+consolidating
+beads
+brackish
+bookings
+accuses
+bog
+compartmentalizing
+clutching
+calming
+collars
+clambers
+banqueting
+beaked
+authoring
+correspondence
+apostrophes
+affirmation's
+bespeak
+costing
+brought
+complainer
+battalions
+asymmetry
+boathouse
+canyon's
+awarded
+amplitude
+anarchical
+anticipatory
+bolder
+cooperatives
+caterer
+adviser
+balkanizing
+augur
+cannibal's
+balustrades
+attaching
+collector's
+commercials
+capaciously
+coincidence's
+bumps
+ascot
+bale
+blackmail
+baby
+aftereffect
+bloomers
+buttresses
+avenues
+climaxes
+aqueduct
+cater
+brainchild
+avail
+bypassed
+bowl
+california
+cements
+boxes
+brained
+bedevils
+captors
+acuity
+ascends
+breakthrough's
+assigner
+caner
+bequests
+ceilings
+axers
+bookshelf
+autistic
+celebrations
+axons
+chiding
+asterisk
+allophonic
+blindingly
+cherubim
+boaster
+confining
+anxious
+clowning
+advisement
+approach
+anesthetic's
+crescent
+alertedly
+birdbath
+beardless
+bras
+auspices
+choosers
+approval's
+afflicts
+corrosion
+arpeggio's
+bodyweight
+cranky
+battlefront
+affirmation
+churchyard's
+aeroacoustic
+anders
+adjustment
+baneful
+citation's
+acetone
+blend
+binuclear
+boner
+annotation
+announce
+claimable
+contemporary
+clothing
+acquitting
+choosing
+attacher
+bananas
+binaural
+arrestor's
+aches
+conclude
+collaborators
+await
+blaspheme
+bequeaths
+crows
+balconies
+begging
+conducting
+abstracts
+assignee's
+causations
+approximation
+articulated
+considerably
+apricot's
+afferent
+assertively
+bonding
+calms
+cranberry's
+cost
+captaining
+agenda
+corridors
+complaint
+christens
+aggravate
+countess
+arbitrators
+ascribing
+breech's
+bellwether's
+burglarized
+confinement's
+animating
+adjectives
+cannister's
+bemoan
+cleanest
+acme
+cheapest
+activities
+allophone
+boy
+belaboring
+captions
+compactor's
+actuator's
+befouling
+arachnid's
+computerizes
+compile
+absorption
+bridled
+absorber
+convicts
+birch
+alkaloid's
+cannot
+bacilli
+charitableness
+abated
+ceaseless
+beavers
+bookshelves
+commensurate
+appreciates
+basil
+cartoons
+aides
+buxom
+cages
+cantor's
+acceptances
+antiquated
+amalgamate
+babyhood
+beers
+conforms
+bouquets
+canner's
+baste
+cashed
+argue
+butcher
+backbones
+absolve
+crib's
+cafes
+abstracted
+book
+committees
+authentically
+conference
+antisera
+bourgeoisie
+attribute
+biddy
+autobiographies
+chivalrousness
+coverlet
+ambiguously
+calorie
+anhydrous
+alignments
+around
+archfool
+advance
+bedpost's
+affective
+contained
+amain
+bromides
+clogs
+bricker
+arduous
+consistent
+amidst
+confess
+complain
+anniversaries
+coasting
+cobwebs
+aries
+benchmark
+aviaries
+bombard
+boxers
+ashtray's
+assyriology
+blaze
+ablative
+chaos
+burro
+arguer
+ashamedly
+crier
+allocator's
+aggressively
+carts
+advisory
+airship
+alkali's
+backup
+chaining
+continue
+cartoon
+circumference
+breadwinners
+autonomy
+banking
+armored
+cabin
+chunks
+antigens
+blistered
+airers
+breakaway
+belief's
+belays
+coveting
+auburn
+careful
+anybody
+bumbled
+cautious
+adopter
+ballplayers
+anteater
+citadel's
+avails
+agent's
+caliphs
+bridgehead
+already
+caterpillar's
+coachman
+centralizing
+alphabet
+concede
+barbell
+breadboard
+ballast's
+activators
+attendance
+blandly
+calculator
+codeword
+addressee's
+avenue's
+alcoves
+alternately
+admonishes
+concentrate
+crossbars
+adjoining
+basset
+carbons
+beast
+blonde
+castle
+clarification
+bitch's
+abrasion's
+books
+amputate
+bicycler
+aphonic
+arraigns
+acquiesce
+buster
+chaperon
+advisements
+buyer's
+attack
+birthdays
+blazed
+confuser
+crag
+ballet
+airports
+bison
+counterexamples
+arteriole
+colony's
+adamantly
+blunders
+chivalrously
+adult's
+authors
+amplifiers
+counterfeited
+complicity
+astrophysical
+axolotl
+bash
+battleground
+butterfly's
+axioms
+allegory
+blitzes
+blindfold
+bufferrers
+approximating
+byways
+computations
+alight
+avoiding
+assurance's
+barrages
+canonicalized
+callously
+auditing
+authenticating
+bag's
+asters
+artistic
+bonanzas
+applaud
+certainties
+auto's
+concession's
+cascade
+chubbiness
+churchyard
+afternoons
+antigen's
+baron's
+amphibian
+banister
+capitalize
+approval
+appropriated
+bureaucrat's
+covets
+cloisters
+circulate
+bivalve's
+beta
+collector
+among
+cane
+birdlike
+attenuating
+conjunctions
+appliance's
+coral
+crucify
+abnormal
+combined
+classroom
+buckskin
+commissions
+abolishments
+arching
+croak
+americium
+associates
+car's
+assuringly
+agreer
+anticoagulation
+closure's
+corkers
+attend
+alphabet's
+awakening
+composedly
+attracted
+construed
+cricket's
+applicability
+autonavigator's
+chloroplast's
+ashen
+beggars
+corporation
+another
+conflicts
+bootlegs
+archeologist
+alcove's
+agitates
+cargoes
+creditor
+cops
+advisably
+coronation
+bourgeois
+crochets
+cropper's
+cramp's
+adulterer's
+corroborations
+changing
+combinatorics
+calm
+comprehensible
+blooms
+coolness
+copying
+blacksmiths
+commodore
+compulsions
+clump
+afterward
+crucified
+brooder
+buckets
+accelerating
+accented
+boat
+adventitious
+baseline's
+courier
+calamity's
+atoll's
+brutalizes
+bundled
+chairperson
+cheeses
+continuation
+celebrating
+apologists
+behest
+bumpers
+consonants
+circulation
+betraying
+commuting
+breezily
+circumstance
+coughing
+benefiting
+conquerors
+chemically
+commencement
+adjustors
+angel
+congratulate
+conspired
+causally
+bud's
+conquers
+augmented
+bereaving
+advisor
+articulation
+angler
+admission
+bide
+competitors
+amusement's
+collecting
+adder
+arithmetized
+cheek's
+apostrophe
+blockages
+clockwork
+bubbly
+apricot
+adjudicated
+banter
+amused
+breacher
+bracketed
+aimer
+comprehending
+bunkers
+canton
+arcane
+absent
+capitol
+consequence
+cognitive
+abjuring
+clever
+coronet
+anathema
+artichoke
+controls
+credulous
+acid
+crawled
+coupled
+boomtowns
+aspen
+acted
+anyhow
+burdensome
+backdrop's
+apocalyptic
+cornerstone's
+cautiously
+blisters
+conveniences
+arbor's
+accessories
+alleges
+clubs
+accompaniment
+blazes
+annually
+clique's
+beamers
+ballgown
+autumnal
+acreage
+conjunct
+balances
+consoling
+canvas's
+competent
+aggrieves
+although
+afraid
+clearly
+cognizance
+acoustic
+colleague
+causing
+absences
+closers
+airs
+cinder
+adversaries
+altruistic
+brews
+ceremonially
+appraisal's
+commissioners
+army's
+assists
+acceptor
+comparison
+cooling
+conveniently
+couching
+changes
+clinic's
+confronting
+adjunct's
+blandness
+alternates
+bunter
+consequent
+clean
+autos
+accumulators
+carver
+aprons
+awful
+bobbins
+blasphemy
+assuming
+abscess
+assemble
+cabinet
+atomics
+blacklists
+audacious
+assay
+anthropology
+barnstorm
+awl
+bumping
+assembles
+capture
+compensates
+coverable
+amend
+array
+continually
+absented
+cigarette
+antiresonance
+backspace
+branched
+appellate
+courtroom's
+alienated
+austerity
+cement
+asked
+antelopes
+cottager
+bluebonnets
+booze
+amendment's
+backslashes
+begun
+bijections
+cafe's
+boatload
+collect
+appeals
+belittles
+befit's
+beauty
+arrogated
+academia
+contagion
+blemishes
+coverlet's
+comfortability
+antecedent
+controllably
+congressman
+complicate
+coincide
+arrears
+clumped
+credited
+buffoon's
+catholic
+accompanist
+beauty's
+aster's
+blatantly
+bothering
+bewilder
+canceling
+carbonizer
+accentuation
+backstairs
+anticipations
+bestowed
+civilian
+blooming
+blunts
+airlocks
+argo
+blueprint
+aristocrat
+cakes
+complements
+ale
+camping
+army
+adrift
+bengali
+barely
+blasphemes
+briefcase
+brooches
+ailments
+blazers
+crevice's
+bankrupt
+archiver
+articulator
+alphabets
+bonds
+colliding
+candidate
+cashier's
+bellwethers
+airstrip
+announcers
+calendars
+corrupter
+aqueduct's
+axiom
+bathing
+blusters
+ascribed
+admittedly
+angrily
+analytical
+contraption
+convertibility
+abysmal
+cathedral's
+aversion's
+algol
+articulately
+breveted
+bickers
+chatterer
+adoptive
+bijectively
+cloudiest
+coarseness
+carted
+cocktail's
+capacious
+anion
+buffoons
+bleeding
+bedrock
+adventurer
+compositions
+camouflages
+brittle
+chip's
+aloe
+chorus
+cargo
+critical
+biographer's
+abject
+blasphemousness
+charmer
+betray
+blacking
+awoke
+allele
+bags
+claimant
+clover
+biographies
+confound
+advertises
+crafter
+cripples
+bygone
+concentric
+couldn't
+contentions
+acrid
+costume
+aft
+aesthetic
+bandits
+adducts
+constellations
+coffer's
+created
+commercial
+art's
+cookie's
+ammonia
+adjunct
+articulateness
+congratulated
+crags
+brandishes
+annual
+byword
+affection's
+college's
+aboriginal
+bikini
+buttering
+allotter
+console
+advent
+activates
+beverage
+april
+acceptable
+barrel's
+boys
+attractor
+azimuth
+critics
+ballooner
+aren't
+adulterating
+criticise
+abeyance
+automatically
+collaborative
+capabilities
+crawls
+anomaly's
+climaxed
+animately
+aroma
+belie
+attires
+argumentation
+baseboard
+bluebirds
+cactus
+byproduct
+balancer
+beholder
+conservationist's
+betrayer
+agony
+accusingly
+convict
+coaxes
+breeds
+agitated
+championship
+brevets
+auscultate
+counselling
+cornerstones
+america
+canoes
+aspirator
+compensate
+antiseptic
+bereave
+absinthe
+compose
+collide
+alabamian
+candid
+civilized
+clamps
+authoritarianism
+colonist
+bugging
+bins
+abashing
+battlers
+canning
+berate
+assembler
+amateurish
+boasted
+angriest
+bluffs
+colonize
+balcony
+bleat
+bustard's
+attenuate
+contagiously
+bicep
+babel
+beatniks
+brush
+analogy's
+audiologist
+assessment's
+camera
+arbitrary
+alleyway's
+concession
+constructions
+accompanies
+accretion's
+aroused
+charcoaled
+belated
+bottom
+bloodshot
+bisques
+advocate
+arabs
+cathodes
+adamant
+challenge
+absurdly
+abolitionist
+cleavers
+bludgeons
+bassinet
+clause
+coiling
+cask
+boob
+azalea's
+afghanistan
+carriages
+blade's
+bobby
+asinine
+acclaiming
+absorbed
+blacken
+cheating
+bootleg
+anonymous
+addict
+astonishes
+awry
+adequate
+categorization
+casks
+blaster
+aspirants
+abscesses
+airing
+assumptions
+capitalists
+board
+asynchronism
+body
+aye
+contraction
+athens
+arsine
+cohabitations
+below
+bows
+aviator's
+ampoule
+connective
+adapter
+authenticate
+blackboard
+brilliant
+appoints
+attics
+conquer
+boning
+comestible
+camped
+blonds
+aisle
+coals
+billboards
+characterizers
+crow
+clout
+admirer
+actuarially
+abstruse
+accessing
+bonfires
+clenched
+characteristic
+catching
+chars
+canons
+barrier
+championed
+butterflies
+completely
+calendar
+artwork
+abjections
+burgher's
+correlates
+arrivals
+accepters
+circuses
+breadboards
+accomplishment
+analyzed
+appropriates
+cancel
+bordering
+aperture
+civilizing
+assortments
+blackest
+blitz's
+copy
+commenced
+admirers
+cheers
+croppers
+cliff's
+circumstance's
+bibles
+buttressed
+consecutively
+birefringence
+automaton
+cheerless
+chopping
+ballooned
+convent
+acknowledgers
+appointing
+belies
+comeliness
+bangle's
+communication
+bisector
+avocations
+clique
+brainstem
+campusses
+allocators
+bramble's
+assaults
+commemorate
+appendix
+agent
+apportioning
+bottled
+artifact's
+block's
+archery
+bagatelles
+candies
+catched
+cognitively
+creepers
+concentrated
+bout
+balustrade
+abodes
+carrying
+confirming
+cannibal
+chinners
+carbonate
+anguish
+butt
+colons
+ablated
+corporation's
+cock
+convincers
+beret's
+bluish
+compressive
+authenticates
+commemorative
+bureaucracies
+coinage
+coach
+assigning
+concentrators
+capitalizing
+appraisals
+belaying
+candy
+blossomed
+bricks
+atonal
+analogue
+caters
+barbaric
+applique
+clink
+audio
+actress
+assyrian
+apprehension
+conversation
+apsis
+bedevil
+comics
+affricate
+comings
+buttress
+angering
+buckboards
+bombed
+adversely
+adequacies
+commended
+causeways
+adherers
+codes
+aquaria
+ape
+bulks
+compactly
+brainwashes
+bleats
+commandants
+conditionally
+adjourns
+clobbering
+allowances
+buildings
+complemented
+blanker
+algeria
+brief
+creak
+adductor
+categorizer
+approacher
+argument's
+clocked
+bedazzle
+cause
+coordinator
+buildup
+countenance
+abhorrer
+backtracked
+bogus
+closer
+broilers
+chirps
+adjournment
+belles
+bitingly
+befogged
+contexts
+amorous
+breeding
+abortions
+blockage's
+alternatives
+bouncing
+beryl
+ballistics
+banters
+carpenters
+auction
+bowdlerizing
+brazen
+bonuses
+circulated
+adultery
+archival
+bears
+baptized
+burglaries
+borrowing
+barbarous
+casher
+adolescents
+atrophic
+busily
+aerating
+coatings
+athenians
+casing
+consuming
+alphanumeric
+beaches
+bisection's
+conjecturing
+aspirate
+biography's
+accompany
+bureaucrat
+broomstick's
+colony
+coalesce
+clock
+bequeath
+collaborates
+belonging
+configured
+burlesques
+anode
+consenter
+bug
+counterpoint
+counts
+bangladesh
+analogical
+accident
+bulky
+affinities
+abysmally
+boorish
+assiduously
+cannisters
+autocollimator
+bassinet's
+barrelling
+blurts
+carbonize
+candle
+act
+addressees
+constraints
+boast
+complaining
+coziness
+avocado
+coolest
+blank
+beadles
+anytime
+covetous
+appellant's
+angers
+academies
+ageless
+chased
+constitution
+consonant's
+boosting
+ascetics
+aerosol
+apse
+blushes
+clang
+confers
+confidentiality
+coolie
+colon's
+chickadees
+badminton
+argonaut
+constituting
+aloha
+contracts
+broomstick
+brackets
+attendant's
+connection's
+conciseness
+abstractor's
+composes
+chaste
+assures
+conjuring
+barbital
+bunion
+bases
+clowns
+barrelled
+audience
+auctioneer
+complexly
+aviator
+conjectures
+backscatters
+cheerfulness
+communicating
+agreement
+bricklayers
+bilabial
+abstruseness
+cobol
+cooperating
+admit
+blundering
+accelerates
+assaulted
+concealing
+anachronism
+bowels
+butane
+anniversary's
+converts
+convoyed
+climates
+barriers
+clubbing
+additives
+bask
+confessing
+caravan
+colonizes
+continuous
+cheerlessness
+boggled
+armpit's
+bridgework
+allegro
+cricket
+cannon
+adoption
+clanging
+auscultations
+billowed
+alphabetize
+airlift
+appointee
+boyfriend
+chaotic
+corrections
+bonus
+contrasted
+convulsion's
+confessors
+adumbrating
+autocrat's
+coronary
+authentic
+barley
+brawling
+aegis
+appends
+bolshevism
+charted
+applicant
+aileron
+considers
+chin's
+alkyl
+amendment
+boulevard's
+avian
+breather
+canyons
+cannon's
+apportion
+badgered
+augers
+advisers
+censuses
+beveling
+aught
+arthogram
+anonymity
+appliance
+atmospheric
+anesthetizing
+ambulances
+blustering
+burnt
+chestnut's
+collects
+aliment
+anxieties
+championship's
+channeled
+arrival
+amassing
+corpse
+bedtime
+blackbirds
+cats
+constants
+chemistry
+brewery
+brother's
+boasts
+accentual
+bellwether
+bely
+courted
+baroness
+configure
+collection
+aviary
+achieves
+belfry's
+beech
+baseman
+bacterial
+contestable
+blond
+contracting
+comparably
+consultation's
+booster
+conspiracies
+belief
+candidate's
+boardinghouses
+connectivity
+check
+crazy
+collided
+assistant's
+critic
+bilateral
+cheapening
+appalled
+autopsy
+balled
+abnormally
+acquires
+aloofness
+backwaters
+combative
+computerizing
+craters
+contributorily
+behaved
+comers
+axiomatizations
+analogously
+banjo's
+cleanser
+capitalizes
+chamberlain
+aggregates
+amenorrhea
+begins
+condone
+cleaved
+bustard
+adsorb
+airedale
+bridles
+audited
+could
+amour
+checkbooks
+admiring
+arrested
+commerce
+asbestos
+can's
+clamping
+bathers
+acknowledgments
+census
+acrobat
+bargains
+apogee
+creaking
+busboy's
+additional
+chants
+circumvents
+afloat
+anyplace
+alumnae
+anions
+classroom's
+ballerina's
+convents
+angered
+climbers
+citation
+cools
+clamor
+capaciousness
+beatific
+abrades
+advocating
+coverings
+claims
+brethren
+advertised
+atrophies
+coffer
+beagle's
+brazenly
+bitterly
+clergyman
+braiding
+compressible
+convicting
+agreeableness
+antithesis
+cogently
+botanist's
+bidirectional
+bewilders
+airlock
+costumer
+blamelessness
+agglutinins
+catalyst's
+allocation
+annunciates
+borderings
+accomplishes
+confronters
+clinically
+breadbox's
+canvassed
+communicative
+coercing
+backpointer's
+bramble
+congregations
+crave
+courtesy's
+cocoon's
+admitting
+chieftains
+acclimate
+consequences
+cones
+contradict
+axolotls
+contractual
+artist
+atrociously
+consecutive
+berated
+bluing
+attacks
+choruses
+blatant
+balance
+amplifier
+assist
+analyst's
+ambler
+conveyance
+compromising
+baffler
+corridor
+bed's
+condoned
+boulevard
+anomie
+averages
+basics
+apologia
+cabbages
+concretes
+alcoholic
+aliased
+chocks
+balsam
+collies
+censor
+arouses
+conundrum's
+academically
+bent
+codings
+coastal
+allots
+acclaim
+citations
+cantor
+circularly
+boarder
+caribou
+biologist's
+cowling
+connects
+chasing
+bootstrap
+backscatter
+abstractly
+corrupt
+alleviating
+biasing
+abrade
+arraignment
+beaten
+blanketing
+compactness
+adage
+coincided
+borate
+bra's
+concepts
+bootleger
+christian
+argos
+basal
+abate
+campuses
+abridging
+confusers
+cabin's
+audition's
+amphibians
+attractively
+adhesive's
+ascendency
+beforehand
+ache
+brokers
+bowler
+criminally
+american's
+chock's
+artillerist
+appropriation
+characterization's
+artifices
+annoys
+constituents
+bottle
+beaned
+consisting
+beholding
+ceremony
+carpeted
+absolutely
+anorexia
+accredited
+azaleas
+amaze
+commit
+afflicting
+contriving
+adventure
+blood
+blabbing
+absoluteness
+appreciable
+approachers
+bumptious
+behavioristic
+anticipates
+adults
+barnyard's
+banging
+banana
+bilge's
+aware
+coheres
+bronchi
+commissioned
+arrogation
+confines
+core
+attenuation
+afterwards
+clearing
+applies
+alphabetized
+cemetery's
+campaigning
+abolishes
+brig
+cheer
+combers
+backtracker
+clinker
+clouds
+clog
+berries
+advising
+childish
+clobbered
+bride's
+astrophysics
+canker
+concatenate
+bite
+chagrin
+bodybuilders
+calamity
+admiralty
+councillors
+competitive
+assessments
+copper's
+cabling
+casket
+conducted
+backplane
+boyfriends
+bingo
+broader
+confiscates
+communicated
+baton
+cocktails
+albanians
+boardinghouse's
+brats
+akimbo
+categorizers
+comparator's
+blackbird's
+accidentally
+companion's
+clippings
+accosted
+bell's
+burly
+aggregations
+boathouses
+airmails
+abreactions
+changers
+carbon
+cleaners
+bookkeeping
+correlations
+backer
+conclusions
+brainstem's
+anecdotes
+chateau
+cogitating
+amphibious
+compounded
+completeness
+comptroller's
+boatswain's
+bolstered
+acquiescing
+actors
+calorie's
+adaptability
+abstractor
+bimolecular
+belly's
+automobile
+automotive
+analyticities
+awesome
+colonizer
+approximated
+chemist
+coronet's
+classmate
+anteater's
+altars
+adulthood
+amid
+assails
+blizzards
+corroborative
+biographer
+compartment
+blooded
+bipartisan
+bluff
+aloof
+bronchiole
+clincher
+congratulations
+ablation
+caught
+collier
+chooses
+antidotes
+artery
+clearance
+civility
+basketball
+auscultated
+behaviorally
+crowning
+autobiographical
+cheaply
+brutally
+agonizing
+clerk
+comprising
+baller
+confuses
+acquiesced
+astonishingly
+birthplace
+covered
+chopper
+combinator
+benignly
+bedside
+blasts
+billboard
+appraise
+aboveground
+comforter
+credulousness
+battlefield
+barefoot
+cleverness
+apparatus
+bartering
+bromine
+aerodynamic
+crabs
+chains
+airflow
+allegrettos
+armchairs
+blacklist
+approvals
+bait
+collections
+antecedent's
+airbags
+casted
+content
+conferrer's
+crouching
+coughs
+canal
+amphetamine
+augustly
+bedraggle
+arithmetic
+cataloger
+alluding
+credulity
+coffees
+crueler
+beautifully
+caresses
+correlative
+consul
+criticizing
+couched
+baths
+alchemy
+bargain
+accomplishments
+conveyer
+benevolence
+broil
+chilling
+axed
+attire
+collisions
+categorizes
+cited
+aeration
+accommodating
+coordinations
+boxcar
+cattle
+bullion
+afternoon's
+captures
+afghans
+comets
+component's
+ark
+bounds
+adjusting
+bravely
+capability
+chap
+absolving
+aspirating
+arcs
+conspires
+collaborated
+admonishment
+astounds
+brasses
+compromise
+changed
+consumers
+connoting
+buttonholes
+cordial
+anionic
+chastisers
+archive
+alleviate
+burglarize
+acquainted
+copiers
+cashers
+antisocial
+creations
+bookie's
+censure
+beadle's
+banded
+circled
+bulged
+cheapness
+attorney's
+chewer
+bookshelf's
+councillor
+assertion
+broom's
+contemplations
+club's
+balkans
+cherubs
+alas
+chair
+apologizes
+compartments
+beyond
+aptly
+censured
+allegros
+boosts
+card
+arithmetizes
+attainment's
+arrester
+anding
+asker
+compatibilities
+confidentially
+commissioning
+cleaner
+aversion
+cooperative
+battalion's
+cemented
+charity's
+conceited
+capable
+anymore
+computing
+aping
+chiefly
+affair
+beaners
+allying
+caption's
+antipathy
+causal
+abyss
+botchers
+burglarizing
+confidant's
+activator
+continent's
+census's
+brat's
+antagonism
+bedspring's
+antiserum
+charge
+connector's
+alike
+believable
+belfry
+cast's
+bureaus
+beneficiary
+abolisher
+artichoke's
+broadly
+concurrent
+alteration
+bookies
+crafts
+bays
+ass
+bouquet's
+ave
+chords
+crazes
+anemic
+appoint
+beets
+billing
+contest
+assassination
+allot
+brindled
+acute
+absolves
+adsorbed
+auxiliaries
+belatedly
+businesslike
+assassinates
+bookkeepers
+bevel
+adders
+automate
+archangels
+breakfasted
+changeability
+contested
+cradles
+combatants
+besieging
+certainty
+attempts
+bankrupting
+compiler's
+complications
+banquets
+ancestor's
+ail
+abbreviating
+compacter
+approvers
+acknowledges
+comically
+almonds
+counsellors
+calmness
+assailed
+crane's
+baser
+big
+corruption
+circuitry
+briefness
+community's
+banquetings
+alms
+bass's
+bellowing
+adoption's
+blockading
+compellingly
+builders
+befallen
+bombproof
+cartons
+chore
+crimson
+anther
+clucks
+assemblies
+beatitudes
+aspiration
+compels
+angst
+balancing
+bowstrings
+bayonet's
+butte
+biomedical
+casualness
+accolade
+blackberry's
+bunched
+affright
+clung
+burlesque
+bare
+corrected
+arbitrate
+cropping
+coherently
+bloodhound
+circularity
+courtesies
+articulating
+concluded
+analogy
+brutalized
+airmail
+cooperator
+cousins
+centralization
+bibbing
+beside
+bravo
+abductors
+cars
+bovines
+bump
+absconding
+chins
+chasers
+boundary's
+antecedents
+awed
+counselled
+aback
+attenuator's
+blazer
+bettered
+awaken
+abreast
+beagles
+artisans
+buckled
+credence
+control's
+bewhiskered
+calloused
+breathe
+collaring
+blossoms
+bring
+actualities
+bivalves
+animals
+cowboys
+constituency
+affordable
+acrobatic
+attiring
+boatswain
+concurrence
+abrasions
+babel's
+cowerers
+chiffon
+bostonian
+criterion
+blinds
+cased
+affections
+conditioners
+clutter
+accrued
+attractors
+botcher
+compunction
+bludgeoned
+censored
+allah's
+chronic
+burrs
+commodity's
+appraiser
+asserters
+cheaters
+besting
+anchorite
+combine
+afforded
+cigarette's
+bathrooms
+apostles
+chloroplast
+bootlegging
+bibliographical
+beans
+bylaw
+benefited
+brochure's
+cordially
+brashly
+beastly
+bologna
+alderman's
+burning
+billow
+convert
+buffaloes
+comparatives
+assistances
+camouflaged
+announcement
+bobwhite
+brawl
+adducted
+cavern's
+affectation's
+bandying
+brunette
+architect's
+aphorisms
+cremate
+bray
+billed
+conception
+battlefield's
+bandaged
+broaches
+bazaar's
+beatification
+bigotry
+clergy
+abstains
+befits
+bantering
+conceivable
+attachers
+analogies
+bimonthly
+august
+additionally
+confirmation's
+ballooning
+cardboard
+belle's
+counterparts
+candor
+bishop
+comprehension
+affronted
+bravura
+courting
+antidote
+buggies
+arisings
+appendix's
+bright
+categorize
+cooking
+agnostic's
+billets
+amok
+bewitching
+audiograms
+column's
+bussed
+checkbook
+alteration's
+atherosclerosis
+broached
+based
+cacti
+boardinghouse
+bowdlerized
+anchoritism
+achievement's
+bald
+cover
+codifications
+capacitor
+brashness
+causes
+acyclically
+argument
+boarders
+audiometer
+compute
+contribute
+crisply
+bitters
+circumvent
+assailant
+bosun
+buyers
+alibis
+blurting
+coasts
+bivouacs
+arrogating
+albanian
+attempted
+acquisitiveness
+applauding
+alfalfa
+cantors
+canonicalizes
+alkaloid
+bruising
+associativity
+budgetary
+carbolic
+clashing
+buffalo
+acorn
+analyzing
+backyards
+comedian
+betwixt
+aces
+chartered
+additivity
+becalm
+combat
+characterizations
+clinics
+bulbs
+bloc
+amenable
+civilian's
+breech
+attainment
+bounding
+compiler
+cotyledons
+billboard's
+caper
+aphasia
+chester
+combats
+biddable
+articulates
+caps
+assignees
+bifocals
+beady
+chinese
+assertions
+allegation
+championships
+accrue
+containment's
+croaking
+classifying
+annum
+brightened
+bits
+appointer
+besieger
+citizen's
+cerebral
+canto
+bakers
+capitol's
+authorizer
+blockaded
+anodizes
+alarmed
+buttressing
+attenuates
+bumptiously
+chronological
+colleges
+coward
+contraption's
+abstractions
+controversial
+boric
+bids
+agents
+backpointer
+bumped
+bottoms
+bowlines
+captivated
+article
+cliche's
+chases
+choker
+bremsstrahlung
+consult
+adjudged
+auctioneer's
+covers
+accurateness
+clues
+bugler
+bareness
+cedar
+alleviation
+anesthetically
+backpointers
+arched
+administered
+arrowhead
+continues
+asks
+confessor's
+allure
+backlogs
+childishness
+appointive
+covering
+conscience's
+bellows
+blanked
+considerations
+appalachian
+aerate
+budged
+city's
+accordion
+cliche
+collectors
+comprehensive
+boomed
+chariot
+baffling
+bunkmate's
+bumbles
+contaminating
+corroborating
+applications
+bursting
+cabbage
+befalling
+acquittal
+compromisers
+components
+arpeggio
+brothel's
+credibility
+begrudge
+confirmation
+academy
+appertains
+calibrates
+bureaucrats
+bawl
+costuming
+biography
+adoration
+cloaks
+aggregating
+business
+aphorism's
+carters
+admixture
+coexistence
+anomalously
+adapts
+amide
+affiliation
+capillary
+biscuit
+brainy
+bellhops
+chartings
+cohered
+austria
+champions
+basin's
+cascading
+consultants
+bison's
+admixed
+arithmetically
+clothed
+betterments
+conspirator's
+addition
+adolescence
+bolsheviks
+abominable
+breathless
+cozy
+arouse
+bumble
+about
+apace
+astronaut
+asteroid
+cable
+crab's
+beachhead
+assets
+analyses
+bisection
+coconuts
+alleys
+armament's
+bloodstains
+arpeggios
+apologist
+blithely
+anabaptist's
+beadle
+channelled
+confuse
+annoy
+beautifiers
+cheats
+clenches
+amuse
+bewail
+constitutional
+birth
+appendixes
+amazed
+berry's
+bilingual
+blustery
+amplification
+clogged
+blackmailing
+breakables
+adduct
+bondsmen
+conferred
+codewords
+bequeathal
+abundantly
+banner's
+atrocity
+congested
+closely
+absolution
+concatenations
+anarchic
+crag's
+communicators
+cavities
+comptrollers
+backstage
+bewailing
+charcoal
+conveyances
+collar
+bores
+briefest
+comments
+awning's
+associator's
+antarctica
+correspondingly
+bidden
+ad
+clings
+bit's
+apollo
+bulldogs
+chateau's
+amounting
+cogitates
+bellhop
+bookish
+bout's
+cannister
+bicep's
+asses
+beef
+battlefields
+consort
+auspicious
+breezy
+buried
+beverages
+approximates
+conduction
+bleakly
+blanketers
+ascertained
+absentminded
+bolivia
+births
+behave
+bilk
+breaths
+charter
+abstaining
+appareled
+boulder's
+breadwinner's
+correct
+accessed
+befitted
+adulterer
+axe
+activation
+betrothed
+asymptote
+bullet's
+clusterings
+baud
+bustling
+ballplayer
+constraining
+cleared
+brown
+affirmed
+agencies
+churches
+backyard
+burntness
+bronchioles
+charmers
+backscattered
+abridgment
+claw
+blow
+adjourning
+constantly
+brightens
+autobiography
+cards
+bypassing
+alcibiades
+concurrency
+chuckles
+bests
+belligerents
+adjustments
+bolshevik
+cabins
+astronomically
+cartridge
+boxcars
+boned
+bottomed
+burgeoned
+adjourned
+apprenticeship
+chastiser
+breached
+boycott
+butchered
+coordinating
+cottage
+brainwashing
+confinement
+bandies
+absentee
+collapses
+cruel
+along
+alloy
+convoying
+assignment's
+crisp
+ambidextrously
+blindfolded
+chilly
+condenses
+avers
+broiler
+anesthetics
+beaker
+cholera
+brag
+coffins
+cranked
+allocator
+brutality
+acquire
+blushing
+briar
+abolish
+crossovers
+broiling
+consolers
+beatify
+almanac's
+cooled
+commencements
+clasp
+committing
+condemnations
+altar
+by
+bombastic
+confederates
+bong
+concerted
+compilers
+counterproductive
+brig's
+accurate
+avidity
+cleavage
+blame
+conceive
+assessor
+consolingly
+concise
+computes
+alliance
+clucked
+axon's
+annunciating
+baseball's
+allusion
+brays
+auras
+blond's
+bronchitis
+ciphers
+blowing
+broth
+canonically
+baseness
+byline's
+appetite's
+colonists
+condensed
+cawing
+beaning
+broadening
+colonist's
+apocrypha
+chauffeured
+cored
+branding
+carrier
+assessed
+collegiate
+chirped
+accounted
+clubbed
+antibodies
+behalf
+alphabetizing
+conqueror
+alpine
+budgeters
+casements
+appropriate
+compliments
+cast
+accountancy
+cathedral
+conserve
+accorders
+arbitrarily
+cowing
+bars
+bagel's
+climax
+attention's
+cautioning
+centipede's
+almost
+abstractionist
+carpenter
+containing
+arab's
+courtesy
+carton
+accelerated
+bowman
+boastings
+banal
+bucking
+accomplishment's
+classification
+baldly
+abruptness
+calibrations
+blocs
+biking
+assenter
+adversities
+compartmentalized
+chemical
+attic
+audiogram's
+applauds
+crests
+bad
+bounce
+accelerators
+contemptuous
+attentions
+cancellation
+battles
+aging
+advantages
+anthologies
+answers
+bruised
+castes
+any
+coped
+arcade's
+adaptively
+arsenal's
+confessed
+controllability
+acceptor's
+abrogated
+abutted
+amusingly
+apology
+broils
+court
+boundaries
+bode
+collie
+adiabatic
+ambitions
+charged
+awfulness
+consorts
+botanists
+blurring
+absents
+batten
+backwoods
+breaks
+certified
+chattering
+admitted
+bathrobe's
+analogous
+corporacy
+bijection's
+combatant
+checked
+condition
+amoral
+bayed
+bedroom
+chanting
+antics
+charity
+blip's
+biped
+brilliance
+catchers
+booted
+anabaptist
+clothe
+comforted
+complaints
+coacher
+admissible
+bang
+concisely
+cookery
+capita
+assurance
+codifying
+benchmarks
+aunts
+commentaries
+anon
+applicators
+constructor
+associated
+abuses
+choicest
+confiding
+antislavery
+apron
+ashore
+cheerfully
+betterment
+administration's
+campaign
+cremated
+ambulatory
+bleacher
+afterthought
+barkers
+choir
+crossly
+conducive
+cache's
+battery
+actinium
+countryman
+cajoled
+appeasing
+beamer
+cleaves
+anthem's
+clearing's
+cooperated
+barker
+crowing
+apprising
+accusation's
+beginning
+associator
+booking
+caved
+amicable
+codify
+clairvoyant
+bevels
+becalms
+brawn
+bunkhouse's
+arms
+antiredeposition
+belt
+antiphonal
+cried
+brae's
+bridal
+acronym
+clay's
+checkers
+auxiliary
+bind
+compares
+agilely
+askers
+blankly
+antagonist's
+bimodal
+captivation
+creditable
+concentration
+calling
+bartender's
+autopsied
+correspondent's
+carnivals
+abjure
+bystander's
+bungle
+chanticleers
+conceding
+burghers
+boards
+accessions
+compensations
+arabian
+churn
+crowed
+centering
+abnormalities
+courtier's
+congregation
+aberrant
+annexing
+blockhouse
+anthropomorphic
+bedder's
+abutting
+conundrums
+affiliated
+cancellation's
+bolts
+ballgowns
+augmenting
+bureaucracy's
+bootlegged
+audiometers
+blueberry
+affliction
+appreciation
+codifier
+amasses
+countering
+crackle
+canoe
+consuls
+breathes
+broiled
+amalgam's
+bodes
+ballooners
+coating
+corollaries
+amphibology
+agenda's
+chafing
+alcoholics
+accredit
+anisotropy
+anchovies
+carriers
+acceptors
+betrayed
+buttocks
+busy
+bunny
+cropper
+accreditations
+bumblebee's
+adhesives
+civilize
+accedes
+abroad
+arch
+crept
+cotyledon
+alphabetic
+braille
+amateur
+adjure
+ascertaining
+budge
+adulterate
+additive's
+cardiac
+born
+brewed
+borneo
+bun's
+blue
+cackled
+acclimates
+airline
+blinder
+brokerage
+communicant
+central
+aggrieved
+asynchronous
+bough's
+acidly
+archaeology
+complementary
+animator's
+bodyguards
+climbs
+apathy
+constellation's
+acculturate
+archaeologists
+contingents
+control
+anglophilia
+billings
+corporate
+athlete
+accusing
+appear
+announcing
+accordions
+computerize
+combinations
+bile
+abut
+charger
+columnize
+computer
+blacks
+converges
+blamer
+bulked
+convincingly
+checker
+correspondence's
+accelerate
+accessible
+conceivably
+abscissa's
+adsorbs
+anglophobia
+anomic
+casters
+churning
+crease
+brood
+appendage
+bulwark
+bombers
+arcaded
+breadboard's
+aphrodite
+color
+commodore's
+answerer
+bobolink
+cloth
+conversion
+clime
+artery's
+birthplaces
+compiled
+arrack
+beetles
+bobs
+compatibility
+cocoon
+counterpart
+audible
+colonies
+airport's
+beige
+cogent
+bromide
+begrudging
+acids
+crucifies
+beggary
+archipelagoes
+availably
+counterfeiter
+blanketed
+amending
+accelerometer's
+advisors
+byway
+alignment
+amber
+austin
+copyrights
+beaus
+brigantine
+comforts
+appointment's
+crawler
+bangles
+contemplation
+concur
+characterizing
+censoring
+charters
+catalogues
+appropriately
+builds
+aeronautic
+confused
+comber
+axially
+cackler
+coercive
+ambassador
+arcades
+brash
+amorality
+belittling
+battling
+bloodied
+acrylic
+bantered
+clasped
+carcass
+archangel
+annunciators
+aristotle
+boulder
+burglarproofs
+chooser
+abilities
+calmest
+bach
+always
+blaspheming
+crossover
+bakeries
+clocks
+ankle's
+accidental
+arbitration
+chirp
+aeronautical
+boy's
+acidic
+bowline
+anonymously
+cod
+couplers
+beautifications
+bluffing
+backarrows
+brow
+covenant
+acronym's
+banning
+albeit
+ascetic
+burn
+animator
+beatnik's
+coveted
+cipher's
+broke
+cap
+bellman
+bulldozed
+clarifies
+bathes
+blip
+availabilities
+booth
+clangs
+audiences
+cathedrals
+confounding
+bigot's
+beecher
+arts
+company
+attributed
+avenged
+bawling
+caustics
+alee
+bordello's
+banks
+affords
+complied
+commas
+collaborate
+aquatic
+ambitiously
+burro's
+beard
+bittersweet
+candlestick
+bylaws
+broadcastings
+believe
+barrels
+braying
+certifications
+contrasts
+crashes
+audition
+confine
+bucks
+abates
+bureaucracy
+ambles
+besiege
+broccoli
+antibiotics
+attenuators
+accelerometer
+caste
+bib's
+browbeaten
+appurtenance
+bauxite
+asceticism
+case
+chewing
+aerator
+achievements
+barricade's
+agglutinates
+bewildering
+cartridge's
+children
+bufferrer
+actuator
+converging
+bolted
+chat
+combs
+chemist's
+adduced
+algebraic
+circular
+bloated
+conclusion
+burgess
+certifies
+absconds
+comprise
+benzedrine
+bumbler
+banjo
+allow
+appealing
+cooperation
+abraded
+chaperoned
+biracial
+braced
+censurer
+acoustician
+appraised
+benefitting
+constructs
+convertible
+administrative
+asocial
+area
+creature
+besetting
+crater
+begrudgingly
+blanket
+ablest
+alba
+airplanes
+allowing
+briefly
+beneficences
+concurring
+adjective's
+cork
+aerospace
+anomalies
+asher
+auger's
+boilers
+abhorring
+broadenings
+bladder
+belay
+approver
+abdominal
+commends
+cringing
+billiards
+beater
+auspice
+contrasters
+bights
+absentees
+atoll
+cooler
+activator's
+basement
+burgeon
+allusiveness
+codeword's
+bandage
+contemplate
+adopted
+coping
+carving
+baptism
+colds
+altos
+background
+closet
+commuted
+acre's
+aliens
+council
+cans
+cheese
+ally
+aseptic
+belgian's
+crossbar
+addressed
+commons
+call
+careers
+breakfasting
+brazilian
+catholics
+bachelors
+consultant
+brighter
+crossword's
+burglar
+avoidable
+batting
+cigar
+amps
+axiological
+combed
+comforters
+albumin
+cookies
+booming
+archaize
+canton's
+bunkmate
+combination
+bondsman
+anxiously
+affixed
+associatively
+cigar's
+backstitch
+calls
+captivates
+commodities
+atmosphere's
+asserting
+beaver
+beatnik
+container
+activists
+consoler
+commoner
+buttonhole's
+abhorred
+aggregate
+cliff
+antidisestablishmentarianism
+broach
+ambling
+comer
+bited
+advocated
+behaves
+bosom
+continents
+conserves
+bashful
+ago
+backarrow
+circumventable
+avocados
+briar's
+annuls
+barnstorming
+aired
+carry
+crossbar's
+aspire
+beards
+abides
+cliques
+completes
+brassiere
+absorbs
+annul
+chairman
+baron
+battens
+africans
+abatement
+colonization
+carries
+borough
+allurement
+breakfasters
+alkali
+acoustically
+corners
+capturer
+casualties
+asphyxia
+animized
+administrator
+belying
+basketballs
+bylines
+bandit
+autopsies
+braining
+contradiction's
+antic
+butted
+bacillus
+blurt
+conditioned
+backers
+agreeable
+almanacs
+cider
+chicken
+chambers
+clutch
+assailant's
+conveyers
+amazers
+beribboned
+breeder
+caveat's
+buffers
+combination's
+ampersand's
+crafting
+clanged
+caving
+aspirant
+butlers
+adjective
+auckland
+announced
+creators
+caches
+baseline
+codifies
+baptism's
+coarsened
+cohesion
+airman
+avenge
+backaches
+budgeted
+armpit
+bicycled
+converged
+besmirched
+autonomic
+coming
+assemblage's
+chained
+admissions
+alcoholic's
+branches
+bunk
+anciently
+bloods
+adventurers
+amazes
+coloring
+abstractors
+adaptation's
+boar
+amulet
+agglutination
+conquerable
+booker
+confronts
+barometer's
+bedbugs
+barricades
+cheap
+bewitch
+circus
+backward
+archeology
+automobiles
+bending
+amino
+beckoning
+admits
+berliners
+borer
+clambering
+atomizing
+banner
+blissfully
+catchable
+breakdown
+abjured
+computerized
+chaplain's
+amphitheater
+ballot's
+craziness
+croaks
+counties
+adopting
+breast
+airstrip's
+basin
+contemplating
+commitments
+critique
+appears
+bellies
+baccalaureate
+abducted
+blackened
+animosity
+appraising
+antiquity
+assistants
+asthma
+bootstrapping
+bounties
+agleam
+advertisements
+benches
+artful
+broadens
+chuck's
+betrayal
+blasphemed
+brooms
+castled
+coroutine
+conscious
+beetle
+banshee
+advertising
+baring
+awakens
+balm
+billions
+compromisingly
+ballroom's
+burrower
+bayou's
+ambiance
+beheading
+bought
+adagios
+adornment's
+anointed
+abolishment's
+anesthetizes
+badly
+boyishness
+consultant's
+cheek
+cannibals
+breakdowns
+assured
+agates
+bicker
+appliances
+cafe
+bagpipes
+adrenal
+combinatorially
+belligerence
+bricked
+adjacency
+aimless
+crook
+cherry's
+assessing
+brushfire
+cormorant
+captained
+blundered
+conceptually
+congress's
+contraster
+ambushes
+bronze
+autotransformer
+corded
+brisker
+contently
+announcements
+bullet
+apportionments
+columnized
+canon
+conservation
+algaecide
+blackening
+compassion
+beaks
+constructibility
+chapter
+abscond
+costly
+bacon
+coldest
+aptness
+billionth
+altercation
+approbation
+alternator's
+criticizes
+befell
+canopy
+buoyant
+brazil
+anticipate
+absenteeism
+champion
+aesthetics
+cadence
+betroth
+confidants
+bean
+braid
+aphids
+cluttering
+cantankerously
+bloom
+barbarity
+clawing
+bogged
+agreed
+asia
+abrasion
+corporals
+baselines
+box
+chartering
+apotheosis
+ampersands
+conceit
+creamer
+adhered
+circuit
+carpet
+accompaniments
+boomerangs
+blindness
+chipmunks
+bewitched
+allocate
+bicycle
+compacted
+cab
+calcium
+cellists
+apex
+borrows
+completed
+brightly
+constables
+ascertains
+conspiracy's
+badgers
+bunion's
+anabaptists
+broadband
+clefts
+accepted
+benched
+catalogued
+cadenced
+alliteration
+acquiesces
+boxcar's
+athlete's
+bracing
+cremations
+analysis
+crossings
+assorts
+apologize
+brazier
+configurable
+basking
+craves
+belle
+conversation's
+belligerent
+anesthetize
+brewers
+cackles
+adventures
+airlock's
+booklet's
+apply
+anecdotal
+bewails
+computer's
+autographs
+acclimated
+coefficients
+avidly
+beckoned
+broadener
+bulk
+blacklisting
+belly
+acquit
+convoy
+achiever
+aversions
+advisor's
+captor's
+camel's
+asset's
+advantageous
+basement's
+confident
+crescents
+compiling
+butler's
+cartoon's
+adaptive
+chlorine
+abets
+cruelly
+amiable
+baleful
+ceiling's
+adumbrated
+cherry
+aspirant's
+cashing
+candidly
+chaff
+bitter
+brim
+alcove
+bulb's
+carbonizers
+citizen
+attic's
+breed
+consumer
+conferrers
+accommodations
+contrapositive
+beget
+brilliantly
+attentionality
+continuation's
+bosses
+brave
+configurations
+benediction's
+conferring
+accessor's
+bobolinks
+bulled
+cleanness
+algorithm
+advancements
+altogether
+accumulations
+albacore
+bowing
+belching
+apical
+consequentiality
+bagpipe's
+ambrosial
+bullying
+cleans
+attendance's
+complimenter
+blink
+cager
+assembling
+coat
+allowable
+astringent
+antiresonator
+cardinal
+clicks
+commentator's
+blossom
+categorizing
+amphibian's
+commonality
+consonant
+classics
+affable
+accorded
+aimlessly
+archetype
+administerings
+boldness
+anatomy
+apprehensively
+absence's
+actuality
+attempting
+categorical
+checkpoints
+allemande
+corer
+behoove
+bleaches
+bough
+blended
+blotting
+baptists
+courtship
+benevolent
+bumptiousness
+chum
+anguished
+auto
+career
+bookstore's
+carbonized
+autocratically
+cherishes
+attendees
+contends
+anastomotic
+attributing
+abbot
+came
+blunt
+battlement's
+affection
+coordination
+annotate
+besets
+bucked
+boasting
+benedictions
+adherent
+blimp's
+acknowledging
+cleverly
+applejack
+annexation
+bat's
+cantons
+beetled
+closed
+country
+creatively
+bakery
+blasphemously
+chalking
+bold
+attended
+crasher
+backtrackers
+artist's
+bracelet's
+allowably
+affiliating
+arrant
+brayed
+barbells
+consigned
+abolishers
+climatic
+atrophying
+amigo
+arsenal
+ascribes
+converses
+aura's
+allotted
+bliss
+classical
+bigger
+ahead
+chopped
+blade
+casualty
+acceded
+bottling
+axon
+casement's
+battlefront's
+convinces
+alerting
+advertisers
+blemish
+agglutinating
+commonplaces
+autocorrelation
+armistice
+crediting
+besmirch
+amplify
+auscultation
+befalls
+called
+alnico
+arbiter's
+abort
+argonauts
+cessations
+cribs
+blare
+aforementioned
+condemners
+contaminated
+complained
+bootstrapped
+criticism
+cooperatively
+binding
+bullies
+basins
+contrived
+assort
+adulterously
+booms
+abandons
+also
+appealed
+count
+contributed
+beet
+crashers
+carryovers
+clays
+blackness
+cosmetics
+awkward
+blurted
+bothers
+analyzer
+backups
+alarming
+bicyclers
+credit
+abrogate
+audience's
+architecturally
+alibi's
+complicator's
+chuckle
+corporately
+banishment
+communist's
+birdie
+asymptotic
+break
+braze
+benzene
+bridgework's
+beak
+agitators
+collateral
+arranges
+bayonet
+breathlessly
+counsellor
+creates
+convulsions
+backdrops
+applicants
+altercation's
+commission
+breathtakingly
+corresponds
+backdrop
+armaments
+build
+biannual
+buttoning
+computational
+chaired
+bather
+critically
+amanuensis
+bantus
+confidential
+annoyance's
+carder
+authorizing
+acquits
+bipeds
+cocktail
+cinnamon
+burros
+brocade
+abdomen's
+creative
+acquisition's
+abdomen
+baited
+aristocratically
+alive
+committed
+arrestor
+cleaving
+comedy's
+baggage
+bra
+adaptors
+afoot
+bulls
+contoured
+amalgam
+comprehensibility
+amortizes
+biographical
+confront
+covert
+cravat
+animates
+booksellers
+bypass
+bootleggers
+bedfast
+affair's
+buzzer
+bellowed
+aligning
+bystander
+acclimatized
+accomplishing
+against
+blankness
+adopt
+addressing
+croaked
+boaters
+behooves
+audits
+boatyard
+cruise
+agnostics
+ailing
+anchorage's
+adaptations
+conceptualize
+advised
+cries
+bank
+actuators
+brazing
+catalyst
+beachheads
+aplomb
+compressed
+amputated
+contractor's
+bedspreads
+bowed
+coon
+chaplain
+cannons
+coffers
+assembly
+bouffant
+converters
+ampoule's
+borderland
+archaeologist
+blankets
+conserving
+avalanche
+assortment's
+aspic
+axle
+bereaves
+allowance
+carbonization
+bartender
+clawed
+coincidental
+appeared
+chipmunk's
+countable
+authenticators
+bestow
+alps
+caw
+aniseikonic
+avows
+blackmails
+controlling
+correlating
+audiologist's
+bit
+approving
+collapse
+coon's
+cleave
+atheists
+brigade
+autopilots
+bounteous
+commercialness
+accede
+cavalierness
+accustoming
+burnishing
+clobber
+aspirates
+brochures
+cellar's
+communes
+berkelium
+chickadee
+cobweb
+circumstances
+chose
+comprehend
+baritone's
+aggravation
+adopts
+cruelty
+and
+axer
+cautioned
+carbonic
+babbles
+bet
+charitable
+computable
+cardinality
+amenities
+confiscating
+catcher
+audaciousness
+complaint's
+cooperator's
+buddies
+baking
+constant
+classmate's
+accentuate
+choices
+crop's
+authorization's
+comedy
+brushy
+brotherly
+canals
+ads
+causeway
+abrading
+cemetery
+autocrat
+briefing
+abdomens
+apparition's
+consummately
+alkaloids
+bulkheads
+cravats
+bales
+campaigners
+bagpipe
+accentuates
+arm
+barometric
+bas
+agitator
+behavior
+abutters
+blockades
+alertness
+civilizes
+chinner
+anthropologist
+artificialness
+balkanize
+automates
+cackling
+anarchists
+amounted
+cereal's
+anodized
+cobblers
+acknowledgment's
+blear
+copper
+alphabetics
+blackboards
+apish
+answering
+afternoon
+arbors
+accused
+chickens
+agency's
+contractors
+contraptions
+cosmology
+anomaly
+bandstand
+attempter
+account
+challengers
+admiration
+calculations
+autocracy
+analyticity
+accord
+buildup's
+commonly
+babbling
+adjudication's
+attain
+ameliorating
+candlestick's
+chronicles
+align
+consensus
+agate
+adulation
+aspirated
+conclusive
+biologists
+cracks
+conform
+chambered
+beryllium
+connote
+amusing
+aquifer
+ankle
+batteries
+conservationists
+accountants
+apiaries
+actinometer
+beckon
+clearances
+clouded
+antitoxin's
+consolation's
+collectives
+boxtops
+bombarded
+bombarding
+bluest
+allusion's
+construction
+ballpark's
+codified
+coincidence
+celebration
+chip
+beginner's
+algerian
+boo
+athletics
+condenser
+bytes
+beauties
+concerts
+conductors
+awl's
+agitations
+buttered
+codifier's
+armory
+ascii
+aspirin
+arthritis
+bylaw's
+conformity
+blasting
+coinciding
+aphid's
+ceremonial
+banisters
+bristle
+bid's
+buckboard's
+bandied
+biopsy
+ballrooms
+chloroplasts
+bidding
+boil
+algebra
+constellation
+chuck
+cringes
+cleanliness
+apron's
+cosmopolitan
+bashes
+abusive
+believer
+conductor
+butters
+breweries
+allotment
+artfulness
+bunkmates
+blares
+connections
+anticipated
+classifies
+commandments
+beginnings
+bend
+brambles
+blacked
+basketball's
+affectionate
+cocoa
+anacondas
+busing
+bone
+birchen
+creamed
+aged
+commemorates
+brother
+aberration
+crawl
+actuarial
+apology's
+alumnus
+adversary's
+anaphoric
+aspiring
+consciousness
+cokes
+assignee
+boxing
+blanched
+camels
+contemporaries
+carnivorous
+assigned
+apologetically
+corpus
+accusations
+beefing
+champaign
+claps
+adherence
+aloft
+complication
+citizenship
+becomes
+compound
+arabesque
+bronchiole's
+appraises
+breach
+collection's
+botched
+bitches
+biblically
+bronchial
+amalgamating
+commoner's
+barbarian's
+arrange
+cradle
+conformed
+complimentary
+anodes
+cowering
+anoint
+brocaded
+bedazzling
+avionics
+burnishes
+bulkhead
+chink
+consciously
+contract
+clinch
+applicant's
+awning
+aloud
+chandelier's
+cathode's
+babble
+arachnid
+biplane
+clamorous
+assuredly
+consented
+axing
+avenger
+commence
+braving
+brandishing
+careless
+burningly
+boatsman
+channelling
+clarifying
+beggar
+berates
+cite
+cowered
+buffer
+condescending
+admixes
+bettering
+bedazzlement
+cord
+burglary's
+characteristics
+aptitudes
+adieu
+agree
+bends
+ceremonies
+accustom
+accessibly
+commanders
+ask
+cavalier
+brayer
+affront
+courser
+becoming
+carves
+configures
+beasts
+biters
+conditionals
+bodybuilding
+accretions
+chapter's
+cleverer
+corning
+brat
+classes
+almsman
+consumptive
+antique
+comprised
+beholders
+anthropologically
+buns
+bridge
+accretion
+acceptance's
+confederacy
+armorer
+argumentative
+crossword
+cowslip's
+analog
+counselor
+chastised
+barters
+clerked
+americas
+cloud
+aide
+alternators
+admitters
+bagatelle
+bridges
+civilizations
+anion's
+briton's
+apartment
+acquaints
+consummation
+chord
+coated
+barer
+carnivorously
+cheering
+allergy
+capacity
+classrooms
+assistantships
+complimented
+amphibiously
+commandment's
+audiogram
+corked
+badness
+bewildered
+assemblage
+backplane's
+asterisk's
+blob
+coexisting
+approximations
+counteractive
+barns
+adherer
+aborigine's
+brooding
+conceived
+adjustor
+cabled
+belongings
+breadwinner
+blot's
+brightness
+consigning
+barflies
+bisector's
+basing
+complement
+conditioner
+brazes
+crank
+antinomian
+crowd
+accelerometers
+befitting
+backlash
+bastions
+acceleration
+briefcases
+correlated
+baffle
+chew
+accosts
+agreeably
+bassinets
+cogitate
+concerning
+contouring
+broadside
+compact
+brainstems
+atom's
+bondage
+biter
+archdioceses
+basis
+bellboy
+blobs
+barons
+clods
+campaigned
+assessors
+bubbles
+annal
+casual
+altercations
+clog's
+biased
+arianism
+ancillary
+collaborator
+butter
+bureau
+blending
+antiquities
+brands
+activism
+crews
+beats
+broad
+buds
+baggers
+cobbler's
+condemns
+cabinets
+bomber
+blinders
+center
+contacted
+bewilderingly
+circulates
+burnings
+achieved
+belch
+barbecue
+angles
+comparative
+befuddle
+cherished
+chapters
+chanter
+allegation's
+armstrong
+converter
+combinatoric
+angrier
+brooks
+clinked
+blubber
+appointments
+compactor
+cleaned
+car
+contention's
+artificial
+cramp
+consistency
+aborting
+collaboration
+awarders
+crippled
+anaphora
+creamy
+buoyed
+baptistery
+altered
+anchoring
+alterer
+adjuring
+beacon's
+commencement's
+ascension
+candidness
+clouding
+cigars
+boiled
+christmas
+contingency's
+alum
+apparel
+contributors
+anisotropic
+annotations
+bushwhacks
+brides
+continuities
+carton's
+blurred
+antibody
+aorta
+blankest
+combinator's
+banish
+breaches
+accumulates
+bowling
+braver
+antibacterial
+cooperators
+banked
+compensated
+chartable
+conjunctively
+antelope's
+bluefish
+annoying
+composed
+barges
+biconcave
+australia
+ballparks
+bearers
+acknowledged
+advocates
+crossed
+competitor
+blaming
+andorra
+baritone
+collaborator's
+accessibility
+complains
+commentator
+bibliography
+conference's
+atmosphere
+agrees
+bedstead's
+ardor
+character's
+conventionally
+arena's
+chokes
+channel
+bludgeon
+convoys
+condense
+beautifier
+ailerons
+compacts
+black
+bell
+completions
+ballroom
+besotting
+conservatives
+adventured
+bulldog's
+conversely
+arroyos
+compositional
+alternative
+association
+broods
+beefy
+consolidated
+balms
+acquaint
+animal
+certificate
+combustion
+aims
+cracker
+abetted
+cautionings
+bread
+attains
+agriculturally
+courtyards
+bawls
+country's
+creator's
+checkbook's
+cliches
+colonizing
+biennial
+aqueous
+craftsman
+contrivances
+algorithmic
+crate
+barefooted
+bodily
+anthropologist's
+but
+climate's
+campers
+crackled
+awakes
+conveyed
+borrowers
+approached
+avoids
+crib
+albania
+bathrobe
+admonitions
+architectures
+consenting
+anastomosis
+blob's
+actual
+arrowhead's
+accountable
+allegiances
+commendation
+appearers
+comply
+concurs
+controversy
+abstracting
+artifact
diff --git a/storage/bdb/test/wrap.tcl b/storage/bdb/test/wrap.tcl
new file mode 100644
index 00000000000..aaceb4f74e6
--- /dev/null
+++ b/storage/bdb/test/wrap.tcl
@@ -0,0 +1,71 @@
+# See the file LICENSE for redistribution information.
+#
+# Copyright (c) 2000-2002
+# Sleepycat Software. All rights reserved.
+#
+# $Id: wrap.tcl,v 11.6 2002/04/25 13:35:02 bostic Exp $
+#
+# Sentinel file wrapper for multi-process tests. This is designed to avoid a
+# set of nasty bugs, primarily on Windows, where pid reuse causes watch_procs
+# to sit around waiting for some random process that's not DB's and is not
+# exiting.
+
+source ./include.tcl
+source $test_path/testutils.tcl
+
+# Arguments:
+if { $argc < 3 } {
+ puts "FAIL: wrap.tcl: Usage: wrap.tcl script log scriptargs"
+ exit
+}
+
+set script [lindex $argv 0]
+set logfile [lindex $argv 1]
+set args [lrange $argv 2 end]
+
+# Create a sentinel file to mark our creation and signal that watch_procs
+# should look for us.
+set parentpid [pid]
+set parentsentinel $testdir/begin.$parentpid
+set f [open $parentsentinel w]
+close $f
+
+# Create a Tcl subprocess that will actually run the test.
+set t [open "|$tclsh_path >& $logfile" w]
+
+# Create a sentinel for the subprocess.
+set childpid [pid $t]
+puts "Script watcher process $parentpid launching $script process $childpid."
+set childsentinel $testdir/begin.$childpid
+set f [open $childsentinel w]
+close $f
+
+puts $t "source $test_path/test.tcl"
+puts $t "set script $script"
+
+# Set up argv for the subprocess, since the args aren't passed in as true
+# arguments thanks to the pipe structure.
+puts $t "set argc [llength $args]"
+puts $t "set argv [list $args]"
+
+puts $t {set ret [catch { source $test_path/$script } result]}
+puts $t {if { [string length $result] > 0 } { puts $result }}
+puts $t {error_check_good "$test_path/$script run: pid [pid]" $ret 0}
+
+# Close the pipe. This will flush the above commands and actually run the
+# test, and will also return an error a la exec if anything bad happens
+# to the subprocess. The magic here is that closing a pipe blocks
+# and waits for the exit of processes in the pipeline, at least according
+# to Ousterhout (p. 115).
+
+set ret [catch {close $t} res]
+
+# Write ending sentinel files--we're done.
+set f [open $testdir/end.$childpid w]
+close $f
+set f [open $testdir/end.$parentpid w]
+close $f
+
+error_check_good "Pipe close ($childpid: $script $argv: logfile $logfile)"\
+ $ret 0
+exit $ret
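
For context, a minimal sketch of how a driver script might invoke wrap.tcl (the test name, log path, and trailing argument below are hypothetical; the suite's actual callers, such as parallel.tcl, may pass different arguments). Note that the argc check above requires at least one script argument after the log name, and that wrap.tcl sources ./include.tcl, so it must be run from the test build directory where $tclsh_path and $testdir are defined:

    # Launch wrap.tcl in the background; it runs test001.tcl in a fresh
    # Tcl subprocess, mirrors that subprocess's output to the logfile,
    # and drops begin.<pid>/end.<pid> sentinel files in $testdir.
    # The trailing "1" is a placeholder script argument.
    set pids [exec $tclsh_path wrap.tcl test001.tcl $testdir/test001.log 1 &]

    # watch_procs can then wait on the sentinel files rather than on raw
    # pids, which is the point of the wrapper: pid reuse cannot make it
    # block on an unrelated process.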