author     Oran Agra <oran@redislabs.com>            2022-02-28 15:35:46 +0200
committer  GitHub <noreply@github.com>               2022-02-28 15:35:46 +0200
commit     d2b5a579dd8b785690aa7714df8776ffc452d242 (patch)
tree       1c54c71bae68eaa44efbf89020d75399a88dee40 /tests
parent     d5915a167f696644e210ee85e549c7ceb41b5791 (diff)
parent     10dc57ab226155bbdbfb0b0d914e681aa346d7de (diff)
Merge pull request #10355 from oranagra/release-7.0-rc2 (tag: 7.0-rc2)
Release 7.0 RC2
Diffstat (limited to 'tests')
45 files changed, 2184 insertions, 263 deletions
diff --git a/tests/cluster/tests/15-cluster-slots.tcl b/tests/cluster/tests/15-cluster-slots.tcl index f154b7270..93b64b408 100644 --- a/tests/cluster/tests/15-cluster-slots.tcl +++ b/tests/cluster/tests/15-cluster-slots.tcl @@ -49,6 +49,34 @@ test "client can handle keys with hash tag" { $cluster close } +test "slot migration is valid from primary to another primary" { + set cluster [redis_cluster 127.0.0.1:[get_instance_attrib redis 0 port]] + set key order1 + set slot [$cluster cluster keyslot $key] + array set nodefrom [$cluster masternode_for_slot $slot] + array set nodeto [$cluster masternode_notfor_slot $slot] + + assert_equal {OK} [$nodefrom(link) cluster setslot $slot node $nodeto(id)] + assert_equal {OK} [$nodeto(link) cluster setslot $slot node $nodeto(id)] +} + +test "slot migration is invalid from primary to replica" { + set cluster [redis_cluster 127.0.0.1:[get_instance_attrib redis 0 port]] + set key order1 + set slot [$cluster cluster keyslot $key] + array set nodefrom [$cluster masternode_for_slot $slot] + + # Get replica node serving slot. + set replicanodeinfo [$cluster cluster replicas $nodefrom(id)] + puts $replicanodeinfo + set args [split $replicanodeinfo " "] + set replicaid [lindex [split [lindex $args 0] \{] 1] + puts $replicaid + + catch {[$nodefrom(link) cluster setslot $slot node $replicaid]} err + assert_match "*Target node is not a master" $err +} + if {$::tls} { test {CLUSTER SLOTS from non-TLS client in TLS cluster} { set slots_tls [R 0 cluster slots] @@ -60,4 +88,4 @@ if {$::tls} { # Compare the ports in the first row assert_no_match [lindex $slots_tls 0 3 1] [lindex $slots_plain 0 3 1] } -} +}
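The two tests added above drive slot reassignment with CLUSTER SETSLOT <slot> NODE <node-id> alone, and check that pointing a slot at a replica is refused with "Target node is not a master". In a full resharding the same subcommand family is combined with the IMPORTING/MIGRATING states and key movement; the following is a minimal sketch in the same test style, not part of this diff, and it assumes the nodefrom/nodeto arrays also expose host and port fields:

    # Mark the slot importing on the target and migrating on the source.
    assert_equal {OK} [$nodeto(link) cluster setslot $slot importing $nodefrom(id)]
    assert_equal {OK} [$nodefrom(link) cluster setslot $slot migrating $nodeto(id)]

    # Move the keys that hash to the slot (100 is an arbitrary batch size),
    # then finalize ownership on both ends, as the tests above do.
    foreach k [$nodefrom(link) cluster getkeysinslot $slot 100] {
        $nodefrom(link) migrate $nodeto(host) $nodeto(port) $k 0 5000
    }
    assert_equal {OK} [$nodefrom(link) cluster setslot $slot node $nodeto(id)]
    assert_equal {OK} [$nodeto(link) cluster setslot $slot node $nodeto(id)]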
\ No newline at end of file diff --git a/tests/cluster/tests/23-multiple-slot-operations.tcl b/tests/cluster/tests/23-multiple-slot-operations.tcl index 965ecd5af..060ab577a 100644 --- a/tests/cluster/tests/23-multiple-slot-operations.tcl +++ b/tests/cluster/tests/23-multiple-slot-operations.tcl @@ -33,7 +33,6 @@ set master3 [Rn 2] set master4 [Rn 3] set master5 [Rn 4] - test "Continuous slots distribution" { assert_match "* 0-3276*" [$master1 CLUSTER NODES] assert_match "* 3277-6552*" [$master2 CLUSTER NODES] @@ -66,3 +65,51 @@ test "Continuous slots distribution" { assert_match "* 13105-13500 14001-15000 16001-16383*" [$master5 CLUSTER NODES] assert_match "*13105 13500*14001 15000*16001 16383*" [$master5 CLUSTER SLOTS] } + +test "ADDSLOTSRANGE command with several boundary conditions test suite" { + # Add multiple slots with incorrect argument number + assert_error "ERR wrong number of arguments for 'cluster|addslotsrange' command" {R 0 cluster ADDSLOTSRANGE 3001 3020 3030} + + # Add multiple slots with invalid input slot + assert_error "ERR Invalid or out of range slot" {R 0 cluster ADDSLOTSRANGE 3001 3020 3030 aaa} + assert_error "ERR Invalid or out of range slot" {R 0 cluster ADDSLOTSRANGE 3001 3020 3030 70000} + assert_error "ERR Invalid or out of range slot" {R 0 cluster ADDSLOTSRANGE 3001 3020 -1000 3030} + + # Add multiple slots when start slot number is greater than the end slot + assert_error "ERR start slot number 3030 is greater than end slot number 3025" {R 0 cluster ADDSLOTSRANGE 3001 3020 3030 3025} + + # Add multiple slots with busy slot + assert_error "ERR Slot 3200 is already busy" {R 0 cluster ADDSLOTSRANGE 3001 3020 3200 3250} + + # Add multiple slots with assigned multiple times + assert_error "ERR Slot 3001 specified multiple times" {R 0 cluster ADDSLOTSRANGE 3001 3020 3001 3020} +} + +test "DELSLOTSRANGE command with several boundary conditions test suite" { + # Delete multiple slots with incorrect argument number + assert_error "ERR wrong number of arguments for 'cluster|delslotsrange' command" {R 0 cluster DELSLOTSRANGE 1000 2000 2100} + assert_match "* 0-3000 3051-3276*" [$master1 CLUSTER NODES] + assert_match "*0 3000*3051 3276*" [$master1 CLUSTER SLOTS] + + # Delete multiple slots with invalid input slot + assert_error "ERR Invalid or out of range slot" {R 0 cluster DELSLOTSRANGE 1000 2000 2100 aaa} + assert_error "ERR Invalid or out of range slot" {R 0 cluster DELSLOTSRANGE 1000 2000 2100 70000} + assert_error "ERR Invalid or out of range slot" {R 0 cluster DELSLOTSRANGE 1000 2000 -2100 2200} + assert_match "* 0-3000 3051-3276*" [$master1 CLUSTER NODES] + assert_match "*0 3000*3051 3276*" [$master1 CLUSTER SLOTS] + + # Delete multiple slots when start slot number is greater than the end slot + assert_error "ERR start slot number 5800 is greater than end slot number 5750" {R 1 cluster DELSLOTSRANGE 5600 5700 5800 5750} + assert_match "* 3277-5000 5501-6552*" [$master2 CLUSTER NODES] + assert_match "*3277 5000*5501 6552*" [$master2 CLUSTER SLOTS] + + # Delete multiple slots with already unassigned + assert_error "ERR Slot 7001 is already unassigned" {R 2 cluster DELSLOTSRANGE 7001 7100 9000 9200} + assert_match "* 6553-7000 7101-8000 8501-9828*" [$master3 CLUSTER NODES] + assert_match "*6553 7000*7101 8000*8501 9828*" [$master3 CLUSTER SLOTS] + + # Delete multiple slots with assigned multiple times + assert_error "ERR Slot 12500 specified multiple times" {R 3 cluster DELSLOTSRANGE 12500 12600 12500 12600} + assert_match "* 9829-11000 12001-12100 12201-13104*" 
[$master4 CLUSTER NODES] + assert_match "*9829 11000*12001 12100*12201 13104*" [$master4 CLUSTER SLOTS] +} diff --git a/tests/integration/aof-multi-part.tcl b/tests/integration/aof-multi-part.tcl index 52877404c..982b6907b 100644 --- a/tests/integration/aof-multi-part.tcl +++ b/tests/integration/aof-multi-part.tcl @@ -45,7 +45,7 @@ tags {"external:skip"} { fail "AOF loading didn't fail" } - assert_equal 1 [count_message_lines $server_path/stdout "appendonly.aof.1.incr.aof doesn't exist"] + assert_equal 1 [count_message_lines $server_path/stdout "appendonly.aof.1.incr.aof .*No such file or directory"] } clean_aof_persistence $aof_dirpath @@ -100,7 +100,7 @@ tags {"external:skip"} { fail "AOF loading didn't fail" } - assert_equal 1 [count_message_lines $server_path/stdout "The AOF manifest file is invalid format"] + assert_equal 1 [count_message_lines $server_path/stdout "Invalid AOF manifest file format"] } clean_aof_persistence $aof_dirpath @@ -186,7 +186,7 @@ tags {"external:skip"} { fail "AOF loading didn't fail" } - assert_equal 2 [count_message_lines $server_path/stdout "The AOF manifest file is invalid format"] + assert_equal 2 [count_message_lines $server_path/stdout "Invalid AOF manifest file format"] } clean_aof_persistence $aof_dirpath @@ -213,7 +213,7 @@ tags {"external:skip"} { fail "AOF loading didn't fail" } - assert_equal 3 [count_message_lines $server_path/stdout "The AOF manifest file is invalid format"] + assert_equal 3 [count_message_lines $server_path/stdout "Invalid AOF manifest file format"] } clean_aof_persistence $aof_dirpath @@ -267,7 +267,7 @@ tags {"external:skip"} { fail "AOF loading didn't fail" } - assert_equal 4 [count_message_lines $server_path/stdout "The AOF manifest file is invalid format"] + assert_equal 4 [count_message_lines $server_path/stdout "Invalid AOF manifest file format"] } clean_aof_persistence $aof_dirpath @@ -584,7 +584,7 @@ tags {"external:skip"} { fail "AOF loading didn't fail" } - assert_equal 1 [count_message_lines $server_path/stdout "appendonly.aof doesn't exist"] + assert_equal 1 [count_message_lines $server_path/stdout "appendonly.aof .*No such file or directory"] } clean_aof_persistence $aof_dirpath diff --git a/tests/integration/aof.tcl b/tests/integration/aof.tcl index cab1d3745..3d8c08c51 100644 --- a/tests/integration/aof.tcl +++ b/tests/integration/aof.tcl @@ -4,6 +4,7 @@ set server_path [tmpdir server.aof] set aof_dirname "appendonlydir" set aof_basename "appendonly.aof" set aof_dirpath "$server_path/$aof_dirname" +set aof_base_file "$server_path/$aof_dirname/${aof_basename}.1$::base_aof_sufix$::aof_format_suffix" set aof_file "$server_path/$aof_dirname/${aof_basename}.1$::incr_aof_sufix$::aof_format_suffix" set aof_manifest_file "$server_path/$aof_dirname/$aof_basename$::manifest_suffix" @@ -142,7 +143,7 @@ tags {"aof external:skip"} { ## Test that redis-check-aof indeed sees this AOF is not valid test "Short read: Utility should confirm the AOF is not valid" { catch { - exec src/redis-check-aof $aof_file + exec src/redis-check-aof $aof_manifest_file } result assert_match "*not valid*" $result } @@ -154,13 +155,13 @@ tags {"aof external:skip"} { } catch { - exec src/redis-check-aof $aof_file + exec src/redis-check-aof $aof_manifest_file } result assert_match "*ok_up_to_line=8*" $result } test "Short read: Utility should be able to fix the AOF" { - set result [exec src/redis-check-aof --fix $aof_file << "y\n"] + set result [exec src/redis-check-aof --fix $aof_manifest_file << "y\n"] assert_match "*Successfully truncated AOF*" 
$result } @@ -444,7 +445,7 @@ tags {"aof external:skip"} { test {Truncate AOF to specific timestamp} { # truncate to timestamp 1628217473 - exec src/redis-check-aof --truncate-to-timestamp 1628217473 $aof_file + exec src/redis-check-aof --truncate-to-timestamp 1628217473 $aof_manifest_file start_server_aof [list dir $server_path] { set c [redis [dict get $srv host] [dict get $srv port] 0 $::tls] wait_done_loading $c @@ -454,7 +455,7 @@ tags {"aof external:skip"} { } # truncate to timestamp 1628217471 - exec src/redis-check-aof --truncate-to-timestamp 1628217471 $aof_file + exec src/redis-check-aof --truncate-to-timestamp 1628217471 $aof_manifest_file start_server_aof [list dir $server_path] { set c [redis [dict get $srv host] [dict get $srv port] 0 $::tls] wait_done_loading $c @@ -464,7 +465,7 @@ tags {"aof external:skip"} { } # truncate to timestamp 1628217470 - exec src/redis-check-aof --truncate-to-timestamp 1628217470 $aof_file + exec src/redis-check-aof --truncate-to-timestamp 1628217470 $aof_manifest_file start_server_aof [list dir $server_path] { set c [redis [dict get $srv host] [dict get $srv port] 0 $::tls] wait_done_loading $c @@ -473,7 +474,7 @@ tags {"aof external:skip"} { } # truncate to timestamp 1628217469 - catch {exec src/redis-check-aof --truncate-to-timestamp 1628217469 $aof_file} e + catch {exec src/redis-check-aof --truncate-to-timestamp 1628217469 $aof_manifest_file} e assert_match {*aborting*} $e } @@ -515,4 +516,120 @@ tags {"aof external:skip"} { assert_equal [r get foo] 102 } } + + test {Test redis-check-aof for old style resp AOF} { + create_aof $aof_dirpath $aof_file { + append_to_aof [formatCommand set foo hello] + append_to_aof [formatCommand set bar world] + } + + catch { + exec src/redis-check-aof $aof_file + } result + assert_match "*Start checking Old-Style AOF*is valid*" $result + } + + test {Test redis-check-aof for old style rdb-preamble AOF} { + catch { + exec src/redis-check-aof tests/assets/rdb-preamble.aof + } result + assert_match "*Start checking Old-Style AOF*RDB preamble is OK, proceeding with AOF tail*is valid*" $result + } + + test {Test redis-check-aof for Multi Part AOF with resp AOF base} { + create_aof $aof_dirpath $aof_base_file { + append_to_aof [formatCommand set foo hello] + append_to_aof [formatCommand set bar world] + } + + create_aof $aof_dirpath $aof_file { + append_to_aof [formatCommand set foo hello] + append_to_aof [formatCommand set bar world] + } + + create_aof_manifest $aof_dirpath $aof_manifest_file { + append_to_manifest "file appendonly.aof.1.base.aof seq 1 type b\n" + append_to_manifest "file appendonly.aof.1.incr.aof seq 1 type i\n" + } + + catch { + exec src/redis-check-aof $aof_manifest_file + } result + assert_match "*Start checking Multi Part AOF*Start to check BASE AOF (RESP format)*BASE AOF*is valid*Start to check INCR files*INCR AOF*is valid*All AOF files and manifest are valid*" $result + } + + test {Test redis-check-aof for Multi Part AOF with rdb-preamble AOF base} { + exec cp tests/assets/rdb-preamble.aof $aof_base_file + + create_aof $aof_dirpath $aof_file { + append_to_aof [formatCommand set foo hello] + append_to_aof [formatCommand set bar world] + } + + create_aof_manifest $aof_dirpath $aof_manifest_file { + append_to_manifest "file appendonly.aof.1.base.aof seq 1 type b\n" + append_to_manifest "file appendonly.aof.1.incr.aof seq 1 type i\n" + } + + catch { + exec src/redis-check-aof $aof_manifest_file + } result + assert_match "*Start checking Multi Part AOF*Start to check BASE AOF (RDB format)*DB 
preamble is OK, proceeding with AOF tail*BASE AOF*is valid*Start to check INCR files*INCR AOF*is valid*All AOF files and manifest are valid*" $result + } + + test {Test redis-check-aof only truncates the last file for Multi Part AOF in fix mode} { + create_aof $aof_dirpath $aof_base_file { + append_to_aof [formatCommand set foo hello] + append_to_aof [formatCommand multi] + append_to_aof [formatCommand set bar world] + } + + create_aof $aof_dirpath $aof_file { + append_to_aof [formatCommand set foo hello] + append_to_aof [formatCommand set bar world] + } + + create_aof_manifest $aof_dirpath $aof_manifest_file { + append_to_manifest "file appendonly.aof.1.base.aof seq 1 type b\n" + append_to_manifest "file appendonly.aof.1.incr.aof seq 1 type i\n" + } + + catch { + exec src/redis-check-aof $aof_manifest_file + } result + assert_match "*not valid*" $result + + catch { + exec src/redis-check-aof --fix $aof_manifest_file + } result + assert_match "*Failed to truncate AOF*because it is not the last file*" $result + } + + test {Test redis-check-aof only truncates the last file for Multi Part AOF in truncate-to-timestamp mode} { + create_aof $aof_dirpath $aof_base_file { + append_to_aof "#TS:1628217470\r\n" + append_to_aof [formatCommand set foo1 bar1] + append_to_aof "#TS:1628217471\r\n" + append_to_aof [formatCommand set foo2 bar2] + append_to_aof "#TS:1628217472\r\n" + append_to_aof "#TS:1628217473\r\n" + append_to_aof [formatCommand set foo3 bar3] + append_to_aof "#TS:1628217474\r\n" + } + + create_aof $aof_dirpath $aof_file { + append_to_aof [formatCommand set foo hello] + append_to_aof [formatCommand set bar world] + } + + create_aof_manifest $aof_dirpath $aof_manifest_file { + append_to_manifest "file appendonly.aof.1.base.aof seq 1 type b\n" + append_to_manifest "file appendonly.aof.1.incr.aof seq 1 type i\n" + } + + catch { + exec src/redis-check-aof --truncate-to-timestamp 1628217473 $aof_manifest_file + } result + assert_match "*Failed to truncate AOF*to timestamp*because it is not the last file*" $result + } } diff --git a/tests/integration/corrupt-dump.tcl b/tests/integration/corrupt-dump.tcl index 86c7dd246..d2491306a 100644 --- a/tests/integration/corrupt-dump.tcl +++ b/tests/integration/corrupt-dump.tcl @@ -193,9 +193,8 @@ test {corrupt payload: listpack invalid size header} { test {corrupt payload: listpack too long entry len} { start_server [list overrides [list loglevel verbose use-exit-on-panic yes crash-memcheck-enabled no] ] { r config set sanitize-dump-payload no - r restore key 0 "\x0F\x01\x10\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x02\x40\x55\x55\x00\x00\x00\x0F\x00\x01\x01\x00\x01\x02\x01\x88\x31\x00\x00\x00\x00\x00\x00\x00\x09\x88\x32\x00\x00\x00\x00\x00\x00\x00\x09\x00\x01\x00\x01\x00\x01\x00\x01\x02\x02\x89\x31\x00\x00\x00\x00\x00\x00\x00\x09\x88\x61\x00\x00\x00\x00\x00\x00\x00\x09\x88\x32\x00\x00\x00\x00\x00\x00\x00\x09\x88\x62\x00\x00\x00\x00\x00\x00\x00\x09\x08\x01\xFF\x0A\x01\x00\x00\x09\x00\x40\x63\xC9\x37\x03\xA2\xE5\x68" catch { - r xinfo stream key full + r restore key 0 
"\x0F\x01\x10\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x02\x40\x55\x55\x00\x00\x00\x0F\x00\x01\x01\x00\x01\x02\x01\x88\x31\x00\x00\x00\x00\x00\x00\x00\x09\x88\x32\x00\x00\x00\x00\x00\x00\x00\x09\x00\x01\x00\x01\x00\x01\x00\x01\x02\x02\x89\x31\x00\x00\x00\x00\x00\x00\x00\x09\x88\x61\x00\x00\x00\x00\x00\x00\x00\x09\x88\x32\x00\x00\x00\x00\x00\x00\x00\x09\x88\x62\x00\x00\x00\x00\x00\x00\x00\x09\x08\x01\xFF\x0A\x01\x00\x00\x09\x00\x40\x63\xC9\x37\x03\xA2\xE5\x68" } err assert_equal [count_log_message 0 "crashed by signal"] 0 assert_equal [count_log_message 0 "ASSERTION FAILED"] 1 @@ -205,9 +204,9 @@ test {corrupt payload: listpack too long entry len} { test {corrupt payload: listpack very long entry len} { start_server [list overrides [list loglevel verbose use-exit-on-panic yes crash-memcheck-enabled no] ] { r config set sanitize-dump-payload no - r restore key 0 "\x0F\x01\x10\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x02\x40\x55\x55\x00\x00\x00\x0F\x00\x01\x01\x00\x01\x02\x01\x88\x31\x00\x00\x00\x00\x00\x00\x00\x09\x88\x32\x00\x00\x00\x00\x00\x00\x00\x09\x00\x01\x00\x01\x00\x01\x00\x01\x02\x02\x88\x31\x00\x00\x00\x00\x00\x00\x00\x09\x88\x61\x00\x00\x00\x00\x00\x00\x00\x09\x88\x32\x00\x00\x00\x00\x00\x00\x00\x09\x9C\x62\x00\x00\x00\x00\x00\x00\x00\x09\x08\x01\xFF\x0A\x01\x00\x00\x09\x00\x63\x6F\x42\x8E\x7C\xB5\xA2\x9D" catch { - r xinfo stream key full + # This will catch migrated payloads from v6.2.x + r restore key 0 "\x0F\x01\x10\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x02\x40\x55\x55\x00\x00\x00\x0F\x00\x01\x01\x00\x01\x02\x01\x88\x31\x00\x00\x00\x00\x00\x00\x00\x09\x88\x32\x00\x00\x00\x00\x00\x00\x00\x09\x00\x01\x00\x01\x00\x01\x00\x01\x02\x02\x88\x31\x00\x00\x00\x00\x00\x00\x00\x09\x88\x61\x00\x00\x00\x00\x00\x00\x00\x09\x88\x32\x00\x00\x00\x00\x00\x00\x00\x09\x9C\x62\x00\x00\x00\x00\x00\x00\x00\x09\x08\x01\xFF\x0A\x01\x00\x00\x09\x00\x63\x6F\x42\x8E\x7C\xB5\xA2\x9D" } err assert_equal [count_log_message 0 "crashed by signal"] 0 assert_equal [count_log_message 0 "ASSERTION FAILED"] 1 diff --git a/tests/integration/replication-4.tcl b/tests/integration/replication-4.tcl index 8cccd9678..b8c50308a 100644 --- a/tests/integration/replication-4.tcl +++ b/tests/integration/replication-4.tcl @@ -46,6 +46,12 @@ start_server {tags {"repl external:skip"}} { set master_port [srv -1 port] set slave [srv 0 client] + # Load some functions to be used later + $master FUNCTION load lua test replace { + redis.register_function{function_name='f_default_flags', callback=function(keys, args) return redis.call('get',keys[1]) end, flags={}} + redis.register_function{function_name='f_no_writes', callback=function(keys, args) return redis.call('get',keys[1]) end, flags={'no-writes'}} + } + test {First server should have role slave after SLAVEOF} { $slave slaveof $master_host $master_port wait_replica_online $master @@ -54,28 +60,46 @@ start_server {tags {"repl external:skip"}} { test {With min-slaves-to-write (1,3): master should be writable} { $master config set min-slaves-max-lag 3 $master config set min-slaves-to-write 1 - $master set foo bar - } {OK} + assert_equal OK [$master set foo 123] + assert_equal OK [$master eval "return redis.call('set','foo',12345)" 0] + } test {With min-slaves-to-write (2,3): master should not be writable} { $master config set min-slaves-max-lag 3 $master config set min-slaves-to-write 2 - catch {$master set foo bar} e - set e - } {NOREPLICAS*} + assert_error "*NOREPLICAS*" {$master set foo bar} + assert_error 
"*NOREPLICAS*" {$master eval "redis.call('set','foo','bar')" 0} + } + + test {With min-slaves-to-write function without no-write flag} { + assert_error "*NOREPLICAS*" {$master fcall f_default_flags 1 foo} + assert_equal "12345" [$master fcall f_no_writes 1 foo] + } + + test {With not enough good slaves, read in Lua script is still accepted} { + $master config set min-slaves-max-lag 3 + $master config set min-slaves-to-write 1 + $master eval "redis.call('set','foo','bar')" 0 + + $master config set min-slaves-to-write 2 + $master eval "return redis.call('get','foo')" 0 + } {bar} test {With min-slaves-to-write: master not writable with lagged slave} { $master config set min-slaves-max-lag 2 $master config set min-slaves-to-write 1 - assert {[$master set foo bar] eq {OK}} + assert_equal OK [$master set foo 123] + assert_equal OK [$master eval "return redis.call('set','foo',12345)" 0] + # Killing a slave to make it become a lagged slave. exec kill -SIGSTOP [srv 0 pid] + # Waiting for slave kill. wait_for_condition 100 100 { - [catch {$master set foo bar}] != 0 + [catch {$master set foo 123}] != 0 } else { fail "Master didn't become readonly" } - catch {$master set foo bar} err - assert_match {NOREPLICAS*} $err + assert_error "*NOREPLICAS*" {$master set foo 123} + assert_error "*NOREPLICAS*" {$master eval "return redis.call('set','foo',12345)" 0} exec kill -SIGCONT [srv 0 pid] } } diff --git a/tests/integration/replication.tcl b/tests/integration/replication.tcl index 2af07e806..05f62d5e8 100644 --- a/tests/integration/replication.tcl +++ b/tests/integration/replication.tcl @@ -318,7 +318,7 @@ foreach mdl {no yes} { stop_write_load $load_handle4 # Make sure no more commands processed - wait_load_handlers_disconnected + wait_load_handlers_disconnected -3 wait_for_ofs_sync $master [lindex $slaves 0] wait_for_ofs_sync $master [lindex $slaves 1] @@ -428,7 +428,7 @@ foreach testType {Successful Aborted} { } else { fail "Replica didn't get into loading mode" } - + assert_equal [s -1 async_loading] 0 } @@ -444,7 +444,7 @@ foreach testType {Successful Aborted} { } else { fail "Replica didn't disconnect" } - + test {Diskless load swapdb (different replid): old database is exposed after replication fails} { # Ensure we see old values from replica assert_equal [$replica get mykey] "myvalue" @@ -551,10 +551,10 @@ foreach testType {Successful Aborted} { } else { fail "Replica didn't get into async_loading mode" } - + assert_equal [s -1 loading] 0 } - + test {Diskless load swapdb (async_loading): old database is exposed while async replication is in progress} { # Ensure we still see old values while async_loading is in progress and also not LOADING status assert_equal [$replica get mykey] "myvalue" @@ -598,7 +598,7 @@ foreach testType {Successful Aborted} { } else { fail "Replica didn't disconnect" } - + test {Diskless load swapdb (async_loading): old database is exposed after async replication fails} { # Ensure we see old values from replica assert_equal [$replica get mykey] "myvalue" @@ -1222,14 +1222,100 @@ test {replica can handle EINTR if use diskless load} { # Wait for the replica to start reading the rdb set res [wait_for_log_messages -1 {"*Loading DB in memory*"} 0 200 10] set loglines [lindex $res 1] - + # Wait till we see the watchgod log line AFTER the loading started wait_for_log_messages -1 {"*WATCHDOG TIMER EXPIRED*"} $loglines 200 10 - + # Make sure we're still loading, and that there was just one full sync attempt - assert ![log_file_matches [srv -1 stdout] "*Reconnecting to MASTER*"] + assert 
![log_file_matches [srv -1 stdout] "*Reconnecting to MASTER*"] assert_equal 1 [s 0 sync_full] assert_equal 1 [s -1 loading] } } } {} {external:skip} + +start_server {tags {"repl" "external:skip"}} { + test "replica do not write the reply to the replication link - SYNC (_addReplyToBufferOrList)" { + set rd [redis_deferring_client] + set lines [count_log_lines 0] + + $rd sync + $rd ping + catch {$rd read} e + if {$::verbose} { puts "SYNC _addReplyToBufferOrList: $e" } + assert_equal "PONG" [r ping] + + # Check we got the warning logs about the PING command. + verify_log_message 0 "*Replica generated a reply to command 'ping', disconnecting it: *" $lines + + $rd close + catch {exec kill -9 [get_child_pid 0]} + waitForBgsave r + } + + test "replica do not write the reply to the replication link - SYNC (addReplyDeferredLen)" { + set rd [redis_deferring_client] + set lines [count_log_lines 0] + + $rd sync + $rd xinfo help + catch {$rd read} e + if {$::verbose} { puts "SYNC addReplyDeferredLen: $e" } + assert_equal "PONG" [r ping] + + # Check we got the warning logs about the XINFO HELP command. + verify_log_message 0 "*Replica generated a reply to command 'xinfo|help', disconnecting it: *" $lines + + $rd close + catch {exec kill -9 [get_child_pid 0]} + waitForBgsave r + } + + test "replica do not write the reply to the replication link - PSYNC (_addReplyToBufferOrList)" { + set rd [redis_deferring_client] + set lines [count_log_lines 0] + + $rd psync replicationid -1 + assert_match {FULLRESYNC * 0} [$rd read] + $rd get foo + catch {$rd read} e + if {$::verbose} { puts "PSYNC _addReplyToBufferOrList: $e" } + assert_equal "PONG" [r ping] + + # Check we got the warning logs about the GET command. + verify_log_message 0 "*Replica generated a reply to command 'get', disconnecting it: *" $lines + verify_log_message 0 "*== CRITICAL == This master is sending an error to its replica: *" $lines + verify_log_message 0 "*Replica can't interact with the keyspace*" $lines + + $rd close + catch {exec kill -9 [get_child_pid 0]} + waitForBgsave r + } + + test "replica do not write the reply to the replication link - PSYNC (addReplyDeferredLen)" { + set rd [redis_deferring_client] + set lines [count_log_lines 0] + + $rd psync replicationid -1 + assert_match {FULLRESYNC * 0} [$rd read] + $rd slowlog get + catch {$rd read} e + if {$::verbose} { puts "PSYNC addReplyDeferredLen: $e" } + assert_equal "PONG" [r ping] + + # Check we got the warning logs about the SLOWLOG GET command. + verify_log_message 0 "*Replica generated a reply to command 'slowlog|get', disconnecting it: *" $lines + + $rd close + catch {exec kill -9 [get_child_pid 0]} + waitForBgsave r + } + + test "PSYNC with wrong offset should throw error" { + # It used to accept the FULL SYNC, but also replied with an error. 
+ assert_error {ERR value is not an integer or out of range} {r psync replicationid offset_str} + set logs [exec tail -n 100 < [srv 0 stdout]] + assert_match {*Replica * asks for synchronization but with a wrong offset} $logs + assert_equal "PONG" [r ping] + } +} diff --git a/tests/modules/Makefile b/tests/modules/Makefile index ec8e6de89..ce842f3af 100644 --- a/tests/modules/Makefile +++ b/tests/modules/Makefile @@ -2,11 +2,12 @@ # find the OS uname_S := $(shell sh -c 'uname -s 2>/dev/null || echo not') +warning_cflags = -W -Wall -Wno-missing-field-initializers ifeq ($(uname_S),Darwin) - SHOBJ_CFLAGS ?= -W -Wall -dynamic -fno-common -g -ggdb -std=c99 -O2 + SHOBJ_CFLAGS ?= $(warning_cflags) -dynamic -fno-common -g -ggdb -std=c99 -O2 SHOBJ_LDFLAGS ?= -bundle -undefined dynamic_lookup else # Linux, others - SHOBJ_CFLAGS ?= -W -Wall -fno-common -g -ggdb -std=c99 -O2 + SHOBJ_CFLAGS ?= $(warning_cflags) -fno-common -g -ggdb -std=c99 -O2 SHOBJ_LDFLAGS ?= -shared endif @@ -40,6 +41,7 @@ TEST_MODULES = \ keyspace_events.so \ blockedclient.so \ getkeys.so \ + getchannels.so \ test_lazyfree.so \ timer.so \ defragtest.so \ @@ -51,6 +53,7 @@ TEST_MODULES = \ list.so \ subcommands.so \ reply.so \ + cmdintrospection.so \ eventloop.so .PHONY: all diff --git a/tests/modules/aclcheck.c b/tests/modules/aclcheck.c index cc8d263fd..0e9c9af29 100644 --- a/tests/modules/aclcheck.c +++ b/tests/modules/aclcheck.c @@ -15,11 +15,13 @@ int set_aclcheck_key(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) { const char *flags = RedisModule_StringPtrLen(argv[1], NULL); if (!strcasecmp(flags, "W")) { - permissions = REDISMODULE_KEY_PERMISSION_WRITE; + permissions = REDISMODULE_CMD_KEY_UPDATE; } else if (!strcasecmp(flags, "R")) { - permissions = REDISMODULE_KEY_PERMISSION_READ; + permissions = REDISMODULE_CMD_KEY_ACCESS; } else if (!strcasecmp(flags, "*")) { - permissions = REDISMODULE_KEY_PERMISSION_ALL; + permissions = REDISMODULE_CMD_KEY_UPDATE | REDISMODULE_CMD_KEY_ACCESS; + } else if (!strcasecmp(flags, "~")) { + permissions = 0; /* Requires either read or write */ } else { RedisModule_ReplyWithError(ctx, "INVALID FLAGS"); return REDISMODULE_OK; @@ -58,7 +60,7 @@ int publish_aclcheck_channel(RedisModuleCtx *ctx, RedisModuleString **argv, int /* Check that the pubsub channel can be accessed */ RedisModuleString *user_name = RedisModule_GetCurrentUserName(ctx); RedisModuleUser *user = RedisModule_GetModuleUserFromUserName(user_name); - int ret = RedisModule_ACLCheckChannelPermissions(user, argv[1], 1); + int ret = RedisModule_ACLCheckChannelPermissions(user, argv[1], REDISMODULE_CMD_CHANNEL_SUBSCRIBE); if (ret != 0) { RedisModule_ReplyWithError(ctx, "DENIED CHANNEL"); RedisModule_FreeModuleUser(user); diff --git a/tests/modules/cmdintrospection.c b/tests/modules/cmdintrospection.c new file mode 100644 index 000000000..aba817e14 --- /dev/null +++ b/tests/modules/cmdintrospection.c @@ -0,0 +1,157 @@ +#include "redismodule.h" + +#define UNUSED(V) ((void) V) + +int cmd_xadd(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) { + UNUSED(argv); + UNUSED(argc); + RedisModule_ReplyWithSimpleString(ctx, "OK"); + return REDISMODULE_OK; +} + +int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) { + REDISMODULE_NOT_USED(argv); + REDISMODULE_NOT_USED(argc); + if (RedisModule_Init(ctx, "cmdintrospection", 1, REDISMODULE_APIVER_1) == REDISMODULE_ERR) + return REDISMODULE_ERR; + + if (RedisModule_CreateCommand(ctx,"cmdintrospection.xadd",cmd_xadd,"write deny-oom random fast",0,0,0) == 
REDISMODULE_ERR) + return REDISMODULE_ERR; + + RedisModuleCommand *xadd = RedisModule_GetCommand(ctx,"cmdintrospection.xadd"); + + RedisModuleCommandInfo info = { + .version = REDISMODULE_COMMAND_INFO_VERSION, + .arity = -5, + .summary = "Appends a new entry to a stream", + .since = "5.0.0", + .complexity = "O(1) when adding a new entry, O(N) when trimming where N being the number of entries evicted.", + .tips = "nondeterministic_output", + .history = (RedisModuleCommandHistoryEntry[]){ + /* NOTE: All versions specified should be the module's versions, not + * Redis'! We use Redis versions in this example for the purpose of + * testing (comparing the output with the output of the vanilla + * XADD). */ + {"6.2.0", "Added the `NOMKSTREAM` option, `MINID` trimming strategy and the `LIMIT` option."}, + {"7.0.0", "Added support for the `<ms>-*` explicit ID form."}, + {0} + }, + .key_specs = (RedisModuleCommandKeySpec[]){ + { + .notes = "UPDATE instead of INSERT because of the optional trimming feature", + .flags = REDISMODULE_CMD_KEY_RW | REDISMODULE_CMD_KEY_UPDATE, + .begin_search_type = REDISMODULE_KSPEC_BS_INDEX, + .bs.index.pos = 1, + .find_keys_type = REDISMODULE_KSPEC_FK_RANGE, + .fk.range = {0,1,0} + }, + {0} + }, + .args = (RedisModuleCommandArg[]){ + { + .name = "key", + .type = REDISMODULE_ARG_TYPE_KEY, + .key_spec_index = 0 + }, + { + .name = "nomkstream", + .type = REDISMODULE_ARG_TYPE_PURE_TOKEN, + .token = "NOMKSTREAM", + .since = "6.2.0", + .flags = REDISMODULE_CMD_ARG_OPTIONAL + }, + { + .name = "trim", + .type = REDISMODULE_ARG_TYPE_BLOCK, + .flags = REDISMODULE_CMD_ARG_OPTIONAL, + .subargs = (RedisModuleCommandArg[]){ + { + .name = "strategy", + .type = REDISMODULE_ARG_TYPE_ONEOF, + .subargs = (RedisModuleCommandArg[]){ + { + .name = "maxlen", + .type = REDISMODULE_ARG_TYPE_PURE_TOKEN, + .token = "MAXLEN", + }, + { + .name = "minid", + .type = REDISMODULE_ARG_TYPE_PURE_TOKEN, + .token = "MINID", + .since = "6.2.0", + }, + {0} + } + }, + { + .name = "operator", + .type = REDISMODULE_ARG_TYPE_ONEOF, + .flags = REDISMODULE_CMD_ARG_OPTIONAL, + .subargs = (RedisModuleCommandArg[]){ + { + .name = "equal", + .type = REDISMODULE_ARG_TYPE_PURE_TOKEN, + .token = "=" + }, + { + .name = "approximately", + .type = REDISMODULE_ARG_TYPE_PURE_TOKEN, + .token = "~" + }, + {0} + } + }, + { + .name = "threshold", + .type = REDISMODULE_ARG_TYPE_STRING, + }, + { + .name = "count", + .type = REDISMODULE_ARG_TYPE_INTEGER, + .token = "LIMIT", + .since = "6.2.0", + .flags = REDISMODULE_CMD_ARG_OPTIONAL + }, + {0} + } + }, + { + .name = "id_or_auto", + .type = REDISMODULE_ARG_TYPE_ONEOF, + .subargs = (RedisModuleCommandArg[]){ + { + .name = "auto_id", + .type = REDISMODULE_ARG_TYPE_PURE_TOKEN, + .token = "*" + }, + { + .name = "id", + .type = REDISMODULE_ARG_TYPE_STRING, + }, + {0} + } + }, + { + .name = "field_value", + .type = REDISMODULE_ARG_TYPE_BLOCK, + .flags = REDISMODULE_CMD_ARG_MULTIPLE, + .subargs = (RedisModuleCommandArg[]){ + { + .name = "field", + .type = REDISMODULE_ARG_TYPE_STRING, + }, + { + .name = "value", + .type = REDISMODULE_ARG_TYPE_STRING, + }, + {0} + } + }, + {0} + } + }; + if (RedisModule_SetCommandInfo(xadd, &info) == REDISMODULE_ERR) + return REDISMODULE_ERR; + + return REDISMODULE_OK; +} diff --git a/tests/modules/getchannels.c b/tests/modules/getchannels.c new file mode 100644 index 000000000..330531d1a --- /dev/null +++ b/tests/modules/getchannels.c @@ -0,0 +1,69 @@ +#include "redismodule.h" +#include <strings.h> +#include <assert.h> +#include <unistd.h> +#include 
<errno.h> + +/* A sample with declarable channels, that are used to validate against ACLs */ +int getChannels_subscribe(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) { + if ((argc - 1) % 3 != 0) { + RedisModule_WrongArity(ctx); + return REDISMODULE_OK; + } + char *err = NULL; + + /* getchannels.command [[subscribe|unsubscribe|publish] [pattern|literal] <channel> ...] + * This command marks the given channel is accessed based on the + * provided modifiers. */ + for (int i = 1; i < argc; i += 3) { + const char *operation = RedisModule_StringPtrLen(argv[i], NULL); + const char *type = RedisModule_StringPtrLen(argv[i+1], NULL); + int flags = 0; + + if (!strcasecmp(operation, "subscribe")) { + flags |= REDISMODULE_CMD_CHANNEL_SUBSCRIBE; + } else if (!strcasecmp(operation, "unsubscribe")) { + flags |= REDISMODULE_CMD_CHANNEL_UNSUBSCRIBE; + } else if (!strcasecmp(operation, "publish")) { + flags |= REDISMODULE_CMD_CHANNEL_PUBLISH; + } else { + err = "Invalid channel operation"; + break; + } + + if (!strcasecmp(type, "literal")) { + /* No op */ + } else if (!strcasecmp(type, "pattern")) { + flags |= REDISMODULE_CMD_CHANNEL_PATTERN; + } else { + err = "Invalid channel type"; + break; + } + if (RedisModule_IsChannelsPositionRequest(ctx)) { + RedisModule_ChannelAtPosWithFlags(ctx, i+2, flags); + } + } + + if (!RedisModule_IsChannelsPositionRequest(ctx)) { + if (err) { + RedisModule_ReplyWithError(ctx, err); + } else { + /* Normal implementation would go here, but for tests just return okay */ + RedisModule_ReplyWithSimpleString(ctx, "OK"); + } + } + + return REDISMODULE_OK; +} + +int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) { + REDISMODULE_NOT_USED(argv); + REDISMODULE_NOT_USED(argc); + if (RedisModule_Init(ctx, "getchannels", 1, REDISMODULE_APIVER_1) == REDISMODULE_ERR) + return REDISMODULE_ERR; + + if (RedisModule_CreateCommand(ctx, "getchannels.command", getChannels_subscribe, "getchannels-api", 0, 0, 0) == REDISMODULE_ERR) + return REDISMODULE_ERR; + + return REDISMODULE_OK; +} diff --git a/tests/modules/getkeys.c b/tests/modules/getkeys.c index acb8a1295..cee3b3e13 100644 --- a/tests/modules/getkeys.c +++ b/tests/modules/getkeys.c @@ -44,6 +44,40 @@ int getkeys_command(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) return REDISMODULE_OK; } +int getkeys_command_with_flags(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) +{ + int i; + int count = 0; + + /* Handle getkeys-api introspection */ + if (RedisModule_IsKeysPositionRequest(ctx)) { + for (i = 0; i < argc; i++) { + size_t len; + const char *str = RedisModule_StringPtrLen(argv[i], &len); + + if (len == 3 && !strncasecmp(str, "key", 3) && i + 1 < argc) + RedisModule_KeyAtPosWithFlags(ctx, i + 1, REDISMODULE_CMD_KEY_RO | REDISMODULE_CMD_KEY_ACCESS); + } + + return REDISMODULE_OK; + } + + /* Handle real command invocation */ + RedisModule_ReplyWithArray(ctx, REDISMODULE_POSTPONED_LEN); + for (i = 0; i < argc; i++) { + size_t len; + const char *str = RedisModule_StringPtrLen(argv[i], &len); + + if (len == 3 && !strncasecmp(str, "key", 3) && i + 1 < argc) { + RedisModule_ReplyWithString(ctx, argv[i+1]); + count++; + } + } + RedisModule_ReplySetArrayLength(ctx, count); + + return REDISMODULE_OK; +} + int getkeys_fixed(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) { int i; @@ -57,19 +91,22 @@ int getkeys_fixed(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) /* Introspect a command using RM_GetCommandKeys() and returns the list * of keys. 
Essentially this is COMMAND GETKEYS implemented in a module. + * INTROSPECT <with-flags> <cmd> <args> */ int getkeys_introspect(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) { - UNUSED(argv); - UNUSED(argc); + long long with_flags = 0; - if (argc < 3) { + if (argc < 4) { RedisModule_WrongArity(ctx); return REDISMODULE_OK; } - int num_keys; - int *keyidx = RedisModule_GetCommandKeys(ctx, &argv[1], argc - 1, &num_keys); + if (RedisModule_StringToLongLong(argv[1],&with_flags) != REDISMODULE_OK) + return RedisModule_ReplyWithError(ctx,"ERR invalid integer"); + + int num_keys, *keyflags = NULL; + int *keyidx = RedisModule_GetCommandKeysWithFlags(ctx, &argv[2], argc - 2, &num_keys, with_flags ? &keyflags : NULL); if (!keyidx) { if (!errno) @@ -93,10 +130,27 @@ int getkeys_introspect(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) int i; RedisModule_ReplyWithArray(ctx, num_keys); - for (i = 0; i < num_keys; i++) - RedisModule_ReplyWithString(ctx, argv[1 + keyidx[i]]); + for (i = 0; i < num_keys; i++) { + if (!with_flags) { + RedisModule_ReplyWithString(ctx, argv[2 + keyidx[i]]); + continue; + } + RedisModule_ReplyWithArray(ctx, 2); + RedisModule_ReplyWithString(ctx, argv[2 + keyidx[i]]); + char* sflags = ""; + if (keyflags[i] & REDISMODULE_CMD_KEY_RO) + sflags = "RO"; + else if (keyflags[i] & REDISMODULE_CMD_KEY_RW) + sflags = "RW"; + else if (keyflags[i] & REDISMODULE_CMD_KEY_OW) + sflags = "OW"; + else if (keyflags[i] & REDISMODULE_CMD_KEY_RM) + sflags = "RM"; + RedisModule_ReplyWithCString(ctx, sflags); + } RedisModule_Free(keyidx); + RedisModule_Free(keyflags); } return REDISMODULE_OK; @@ -111,6 +165,9 @@ int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) if (RedisModule_CreateCommand(ctx,"getkeys.command", getkeys_command,"getkeys-api",0,0,0) == REDISMODULE_ERR) return REDISMODULE_ERR; + if (RedisModule_CreateCommand(ctx,"getkeys.command_with_flags", getkeys_command_with_flags,"getkeys-api",0,0,0) == REDISMODULE_ERR) + return REDISMODULE_ERR; + if (RedisModule_CreateCommand(ctx,"getkeys.fixed", getkeys_fixed,"",2,4,1) == REDISMODULE_ERR) return REDISMODULE_ERR; diff --git a/tests/modules/keyspecs.c b/tests/modules/keyspecs.c index 18291019d..32a6bebaa 100644 --- a/tests/modules/keyspecs.c +++ b/tests/modules/keyspecs.c @@ -1,116 +1,185 @@ #include "redismodule.h" -#ifdef INCLUDE_UNRELEASED_KEYSPEC_API #define UNUSED(V) ((void) V) -int kspec_legacy(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) { +/* This function implements all commands in this module. All we care about is + * the COMMAND metadata anyway. */ +int kspec_impl(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) { UNUSED(argv); UNUSED(argc); RedisModule_ReplyWithSimpleString(ctx, "OK"); return REDISMODULE_OK; } -int kspec_complex1(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) { - UNUSED(argv); - UNUSED(argc); - RedisModule_ReplyWithSimpleString(ctx, "OK"); - return REDISMODULE_OK; -} - -int kspec_complex2(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) { - UNUSED(argv); - UNUSED(argc); - RedisModule_ReplyWithSimpleString(ctx, "OK"); +int createKspecNone(RedisModuleCtx *ctx) { + /* A command without keyspecs; only the legacy (first,last,step) triple (MSET like spec). 
*/ + if (RedisModule_CreateCommand(ctx,"kspec.none",kspec_impl,"",1,-1,2) == REDISMODULE_ERR) + return REDISMODULE_ERR; return REDISMODULE_OK; } -int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) { - REDISMODULE_NOT_USED(argv); - REDISMODULE_NOT_USED(argc); - - int spec_id; - - if (RedisModule_Init(ctx, "keyspecs", 1, REDISMODULE_APIVER_1)== REDISMODULE_ERR) +int createKspecTwoRanges(RedisModuleCtx *ctx) { + /* Test that two position/range-based key specs are combined to produce the + * legacy (first,last,step) values representing both keys. */ + if (RedisModule_CreateCommand(ctx,"kspec.tworanges",kspec_impl,"",0,0,0) == REDISMODULE_ERR) + return REDISMODULE_ERR; + + RedisModuleCommand *command = RedisModule_GetCommand(ctx,"kspec.tworanges"); + RedisModuleCommandInfo info = { + .version = REDISMODULE_COMMAND_INFO_VERSION, + .arity = -2, + .key_specs = (RedisModuleCommandKeySpec[]){ + { + .flags = REDISMODULE_CMD_KEY_RO | REDISMODULE_CMD_KEY_ACCESS, + .begin_search_type = REDISMODULE_KSPEC_BS_INDEX, + .bs.index.pos = 1, + .find_keys_type = REDISMODULE_KSPEC_FK_RANGE, + .fk.range = {0,1,0} + }, + { + .flags = REDISMODULE_CMD_KEY_RW | REDISMODULE_CMD_KEY_UPDATE, + .begin_search_type = REDISMODULE_KSPEC_BS_INDEX, + .bs.index.pos = 2, + /* Omitted find_keys_type is shorthand for RANGE {0,1,0} */ + }, + {0} + } + }; + if (RedisModule_SetCommandInfo(command, &info) == REDISMODULE_ERR) return REDISMODULE_ERR; - /* Test legacy range "gluing" */ - if (RedisModule_CreateCommand(ctx,"kspec.legacy",kspec_legacy,"",0,0,0) == REDISMODULE_ERR) - return REDISMODULE_ERR; - RedisModuleCommand *legacy = RedisModule_GetCommand(ctx,"kspec.legacy"); + return REDISMODULE_OK; +} - if (RedisModule_AddCommandKeySpec(legacy,"RO ACCESS",&spec_id) == REDISMODULE_ERR) - return REDISMODULE_ERR; - if (RedisModule_SetCommandKeySpecBeginSearchIndex(legacy,spec_id,1) == REDISMODULE_ERR) - return REDISMODULE_ERR; - if (RedisModule_SetCommandKeySpecFindKeysRange(legacy,spec_id,0,1,0) == REDISMODULE_ERR) +int createKspecKeyword(RedisModuleCtx *ctx) { + /* Only keyword-based specs. The legacy triple is wiped and set to (0,0,0). */ + if (RedisModule_CreateCommand(ctx,"kspec.keyword",kspec_impl,"",3,-1,1) == REDISMODULE_ERR) + return REDISMODULE_ERR; + + RedisModuleCommand *command = RedisModule_GetCommand(ctx,"kspec.keyword"); + RedisModuleCommandInfo info = { + .version = REDISMODULE_COMMAND_INFO_VERSION, + .key_specs = (RedisModuleCommandKeySpec[]){ + { + .flags = REDISMODULE_CMD_KEY_RO | REDISMODULE_CMD_KEY_ACCESS, + .begin_search_type = REDISMODULE_KSPEC_BS_KEYWORD, + .bs.keyword.keyword = "KEYS", + .bs.keyword.startfrom = 1, + .find_keys_type = REDISMODULE_KSPEC_FK_RANGE, + .fk.range = {-1,1,0} + }, + {0} + } + }; + if (RedisModule_SetCommandInfo(command, &info) == REDISMODULE_ERR) return REDISMODULE_ERR; - if (RedisModule_AddCommandKeySpec(legacy,"RW UPDATE",&spec_id) == REDISMODULE_ERR) - return REDISMODULE_ERR; - if (RedisModule_SetCommandKeySpecBeginSearchIndex(legacy,spec_id,2) == REDISMODULE_ERR) - return REDISMODULE_ERR; - if (RedisModule_SetCommandKeySpecFindKeysRange(legacy,spec_id,0,1,0) == REDISMODULE_ERR) - return REDISMODULE_ERR; + return REDISMODULE_OK; +} - /* First is legacy, rest are new specs */ - if (RedisModule_CreateCommand(ctx,"kspec.complex1",kspec_complex1,"",1,1,1) == REDISMODULE_ERR) +int createKspecComplex1(RedisModuleCtx *ctx) { + /* First is a range a single key. The rest are keyword-based specs. 
*/ + if (RedisModule_CreateCommand(ctx,"kspec.complex1",kspec_impl,"",1,1,1) == REDISMODULE_ERR) + return REDISMODULE_ERR; + + RedisModuleCommand *command = RedisModule_GetCommand(ctx,"kspec.complex1"); + RedisModuleCommandInfo info = { + .version = REDISMODULE_COMMAND_INFO_VERSION, + .key_specs = (RedisModuleCommandKeySpec[]){ + { + .flags = REDISMODULE_CMD_KEY_RO, + .begin_search_type = REDISMODULE_KSPEC_BS_INDEX, + .bs.index.pos = 1, + }, + { + .flags = REDISMODULE_CMD_KEY_RW | REDISMODULE_CMD_KEY_UPDATE, + .begin_search_type = REDISMODULE_KSPEC_BS_KEYWORD, + .bs.keyword.keyword = "STORE", + .bs.keyword.startfrom = 2, + }, + { + .flags = REDISMODULE_CMD_KEY_RO | REDISMODULE_CMD_KEY_ACCESS, + .begin_search_type = REDISMODULE_KSPEC_BS_KEYWORD, + .bs.keyword.keyword = "KEYS", + .bs.keyword.startfrom = 2, + .find_keys_type = REDISMODULE_KSPEC_FK_KEYNUM, + .fk.keynum = {0,1,1} + }, + {0} + } + }; + if (RedisModule_SetCommandInfo(command, &info) == REDISMODULE_ERR) return REDISMODULE_ERR; - RedisModuleCommand *complex1 = RedisModule_GetCommand(ctx,"kspec.complex1"); - if (RedisModule_AddCommandKeySpec(complex1,"RW UPDATE",&spec_id) == REDISMODULE_ERR) - return REDISMODULE_ERR; - if (RedisModule_SetCommandKeySpecBeginSearchKeyword(complex1,spec_id,"STORE",2) == REDISMODULE_ERR) - return REDISMODULE_ERR; - if (RedisModule_SetCommandKeySpecFindKeysRange(complex1,spec_id,0,1,0) == REDISMODULE_ERR) - return REDISMODULE_ERR; - - if (RedisModule_AddCommandKeySpec(complex1,"RO ACCESS",&spec_id) == REDISMODULE_ERR) - return REDISMODULE_ERR; - if (RedisModule_SetCommandKeySpecBeginSearchKeyword(complex1,spec_id,"KEYS",2) == REDISMODULE_ERR) - return REDISMODULE_ERR; - if (RedisModule_SetCommandKeySpecFindKeysKeynum(complex1,spec_id,0,1,1) == REDISMODULE_ERR) - return REDISMODULE_ERR; + return REDISMODULE_OK; +} +int createKspecComplex2(RedisModuleCtx *ctx) { /* First is not legacy, more than STATIC_KEYS_SPECS_NUM specs */ - if (RedisModule_CreateCommand(ctx,"kspec.complex2",kspec_complex2,"",0,0,0) == REDISMODULE_ERR) - return REDISMODULE_ERR; - RedisModuleCommand *complex2 = RedisModule_GetCommand(ctx,"kspec.complex2"); - - if (RedisModule_AddCommandKeySpec(complex2,"RW UPDATE",&spec_id) == REDISMODULE_ERR) - return REDISMODULE_ERR; - if (RedisModule_SetCommandKeySpecBeginSearchKeyword(complex2,spec_id,"STORE",5) == REDISMODULE_ERR) - return REDISMODULE_ERR; - if (RedisModule_SetCommandKeySpecFindKeysRange(complex2,spec_id,0,1,0) == REDISMODULE_ERR) - return REDISMODULE_ERR; - - if (RedisModule_AddCommandKeySpec(complex2,"RO ACCESS",&spec_id) == REDISMODULE_ERR) - return REDISMODULE_ERR; - if (RedisModule_SetCommandKeySpecBeginSearchIndex(complex2,spec_id,1) == REDISMODULE_ERR) - return REDISMODULE_ERR; - if (RedisModule_SetCommandKeySpecFindKeysRange(complex2,spec_id,0,1,0) == REDISMODULE_ERR) + if (RedisModule_CreateCommand(ctx,"kspec.complex2",kspec_impl,"",0,0,0) == REDISMODULE_ERR) + return REDISMODULE_ERR; + + RedisModuleCommand *command = RedisModule_GetCommand(ctx,"kspec.complex2"); + RedisModuleCommandInfo info = { + .version = REDISMODULE_COMMAND_INFO_VERSION, + .key_specs = (RedisModuleCommandKeySpec[]){ + { + .flags = REDISMODULE_CMD_KEY_RW | REDISMODULE_CMD_KEY_UPDATE, + .begin_search_type = REDISMODULE_KSPEC_BS_KEYWORD, + .bs.keyword.keyword = "STORE", + .bs.keyword.startfrom = 5, + .find_keys_type = REDISMODULE_KSPEC_FK_RANGE, + .fk.range = {0,1,0} + }, + { + .flags = REDISMODULE_CMD_KEY_RO | REDISMODULE_CMD_KEY_ACCESS, + .begin_search_type = REDISMODULE_KSPEC_BS_INDEX, + .bs.index.pos 
= 1, + .find_keys_type = REDISMODULE_KSPEC_FK_RANGE, + .fk.range = {0,1,0} + }, + { + .flags = REDISMODULE_CMD_KEY_RO | REDISMODULE_CMD_KEY_ACCESS, + .begin_search_type = REDISMODULE_KSPEC_BS_INDEX, + .bs.index.pos = 2, + .find_keys_type = REDISMODULE_KSPEC_FK_RANGE, + .fk.range = {0,1,0} + }, + { + .flags = REDISMODULE_CMD_KEY_RW | REDISMODULE_CMD_KEY_UPDATE, + .begin_search_type = REDISMODULE_KSPEC_BS_INDEX, + .bs.index.pos = 3, + .find_keys_type = REDISMODULE_KSPEC_FK_KEYNUM, + .fk.keynum = {0,1,1} + }, + { + .flags = REDISMODULE_CMD_KEY_RW | REDISMODULE_CMD_KEY_UPDATE, + .begin_search_type = REDISMODULE_KSPEC_BS_KEYWORD, + .bs.keyword.keyword = "MOREKEYS", + .bs.keyword.startfrom = 5, + .find_keys_type = REDISMODULE_KSPEC_FK_RANGE, + .fk.range = {-1,1,0} + }, + {0} + } + }; + if (RedisModule_SetCommandInfo(command, &info) == REDISMODULE_ERR) return REDISMODULE_ERR; - if (RedisModule_AddCommandKeySpec(complex2,"RO ACCESS",&spec_id) == REDISMODULE_ERR) - return REDISMODULE_ERR; - if (RedisModule_SetCommandKeySpecBeginSearchIndex(complex2,spec_id,2) == REDISMODULE_ERR) - return REDISMODULE_ERR; - if (RedisModule_SetCommandKeySpecFindKeysRange(complex2,spec_id,0,1,0) == REDISMODULE_ERR) - return REDISMODULE_ERR; + return REDISMODULE_OK; +} - if (RedisModule_AddCommandKeySpec(complex2,"RW UPDATE",&spec_id) == REDISMODULE_ERR) - return REDISMODULE_ERR; - if (RedisModule_SetCommandKeySpecBeginSearchIndex(complex2,spec_id,3) == REDISMODULE_ERR) - return REDISMODULE_ERR; - if (RedisModule_SetCommandKeySpecFindKeysKeynum(complex2,spec_id,0,1,1) == REDISMODULE_ERR) - return REDISMODULE_ERR; +int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) { + REDISMODULE_NOT_USED(argv); + REDISMODULE_NOT_USED(argc); - if (RedisModule_AddCommandKeySpec(complex2,"RW UPDATE",&spec_id) == REDISMODULE_ERR) - return REDISMODULE_ERR; - if (RedisModule_SetCommandKeySpecBeginSearchKeyword(complex2,spec_id,"MOREKEYS",5) == REDISMODULE_ERR) - return REDISMODULE_ERR; - if (RedisModule_SetCommandKeySpecFindKeysRange(complex2,spec_id,-1,1,0) == REDISMODULE_ERR) + if (RedisModule_Init(ctx, "keyspecs", 1, REDISMODULE_APIVER_1) == REDISMODULE_ERR) return REDISMODULE_ERR; + if (createKspecNone(ctx) == REDISMODULE_ERR) return REDISMODULE_ERR; + if (createKspecTwoRanges(ctx) == REDISMODULE_ERR) return REDISMODULE_ERR; + if (createKspecKeyword(ctx) == REDISMODULE_ERR) return REDISMODULE_ERR; + if (createKspecComplex1(ctx) == REDISMODULE_ERR) return REDISMODULE_ERR; + if (createKspecComplex2(ctx) == REDISMODULE_ERR) return REDISMODULE_ERR; return REDISMODULE_OK; } -#endif diff --git a/tests/modules/subcommands.c b/tests/modules/subcommands.c index 51a760c9c..7cb337331 100644 --- a/tests/modules/subcommands.c +++ b/tests/modules/subcommands.c @@ -29,11 +29,7 @@ int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) REDISMODULE_NOT_USED(argv); REDISMODULE_NOT_USED(argc); -#ifdef INCLUDE_UNRELEASED_KEYSPEC_API - int spec_id; -#endif - - if (RedisModule_Init(ctx, "subcommands", 1, REDISMODULE_APIVER_1)== REDISMODULE_ERR) + if (RedisModule_Init(ctx, "subcommands", 1, REDISMODULE_APIVER_1) == REDISMODULE_ERR) return REDISMODULE_ERR; if (RedisModule_CreateCommand(ctx,"subcommands.bitarray",NULL,"",0,0,0) == REDISMODULE_ERR) @@ -43,28 +39,40 @@ int RedisModule_OnLoad(RedisModuleCtx *ctx, RedisModuleString **argv, int argc) if (RedisModule_CreateSubcommand(parent,"set",cmd_set,"",0,0,0) == REDISMODULE_ERR) return REDISMODULE_ERR; RedisModuleCommand *subcmd = 
RedisModule_GetCommand(ctx,"subcommands.bitarray|set"); - -#ifdef INCLUDE_UNRELEASED_KEYSPEC_API - if (RedisModule_AddCommandKeySpec(subcmd,"RW UPDATE",&spec_id) == REDISMODULE_ERR) - return REDISMODULE_ERR; - if (RedisModule_SetCommandKeySpecBeginSearchIndex(subcmd,spec_id,1) == REDISMODULE_ERR) - return REDISMODULE_ERR; - if (RedisModule_SetCommandKeySpecFindKeysRange(subcmd,spec_id,0,1,0) == REDISMODULE_ERR) + RedisModuleCommandInfo cmd_set_info = { + .version = REDISMODULE_COMMAND_INFO_VERSION, + .key_specs = (RedisModuleCommandKeySpec[]){ + { + .flags = REDISMODULE_CMD_KEY_RW | REDISMODULE_CMD_KEY_UPDATE, + .begin_search_type = REDISMODULE_KSPEC_BS_INDEX, + .bs.index.pos = 1, + .find_keys_type = REDISMODULE_KSPEC_FK_RANGE, + .fk.range = {0,1,0} + }, + {0} + } + }; + if (RedisModule_SetCommandInfo(subcmd, &cmd_set_info) == REDISMODULE_ERR) return REDISMODULE_ERR; -#endif if (RedisModule_CreateSubcommand(parent,"get",cmd_get,"",0,0,0) == REDISMODULE_ERR) return REDISMODULE_ERR; subcmd = RedisModule_GetCommand(ctx,"subcommands.bitarray|get"); - -#ifdef INCLUDE_UNRELEASED_KEYSPEC_API - if (RedisModule_AddCommandKeySpec(subcmd,"RO ACCESS",&spec_id) == REDISMODULE_ERR) - return REDISMODULE_ERR; - if (RedisModule_SetCommandKeySpecBeginSearchIndex(subcmd,spec_id,1) == REDISMODULE_ERR) - return REDISMODULE_ERR; - if (RedisModule_SetCommandKeySpecFindKeysRange(subcmd,spec_id,0,1,0) == REDISMODULE_ERR) + RedisModuleCommandInfo cmd_get_info = { + .version = REDISMODULE_COMMAND_INFO_VERSION, + .key_specs = (RedisModuleCommandKeySpec[]){ + { + .flags = REDISMODULE_CMD_KEY_RO | REDISMODULE_CMD_KEY_ACCESS, + .begin_search_type = REDISMODULE_KSPEC_BS_INDEX, + .bs.index.pos = 1, + .find_keys_type = REDISMODULE_KSPEC_FK_RANGE, + .fk.range = {0,1,0} + }, + {0} + } + }; + if (RedisModule_SetCommandInfo(subcmd, &cmd_get_info) == REDISMODULE_ERR) return REDISMODULE_ERR; -#endif /* Get the name of the command currently running. 
*/ if (RedisModule_CreateCommand(ctx,"subcommands.parent_get_fullname",cmd_get_fullname,"",0,0,0) == REDISMODULE_ERR) diff --git a/tests/sentinel/tests/03-runtime-reconf.tcl b/tests/sentinel/tests/03-runtime-reconf.tcl index ad9284e41..3e930646a 100644 --- a/tests/sentinel/tests/03-runtime-reconf.tcl +++ b/tests/sentinel/tests/03-runtime-reconf.tcl @@ -44,5 +44,22 @@ test "Sentinel Set with other error situations" { # unknown parameter option assert_error "ERR Unknown option or number of arguments for SENTINEL SET 'fakeoption'" {S 0 SENTINEL SET mymaster fakeoption fakevalue} -} + # save new config to disk failed + set info [S 0 SENTINEL master mymaster] + set origin_quorum [dict get $info quorum] + set update_quorum [expr $origin_quorum+1] + set sentinel_id 0 + set configfilename [file join "sentinel_$sentinel_id" "sentinel.conf"] + set configfilename_bak [file join "sentinel_$sentinel_id" "sentinel.conf.bak"] + + file rename $configfilename $configfilename_bak + file mkdir $configfilename + + catch {[S 0 SENTINEL SET mymaster quorum $update_quorum]} err + + file delete $configfilename + file rename $configfilename_bak $configfilename + + assert_match "ERR Failed to save config file*" $err +} diff --git a/tests/sentinel/tests/13-info-command.tcl b/tests/sentinel/tests/13-info-command.tcl new file mode 100644 index 000000000..ef9dc0113 --- /dev/null +++ b/tests/sentinel/tests/13-info-command.tcl @@ -0,0 +1,47 @@ +source "../tests/includes/init-tests.tcl" + +test "info command with at most one argument" { + set subCommandList {} + foreach arg {"" "all" "default" "everything"} { + if {$arg == ""} { + set info [S 0 info] + } else { + set info [S 0 info $arg] + } + assert { [string match "*redis_version*" $info] } + assert { [string match "*maxclients*" $info] } + assert { [string match "*used_cpu_user*" $info] } + assert { [string match "*sentinel_tilt*" $info] } + assert { ![string match "*used_memory*" $info] } + assert { ![string match "*rdb_last_bgsave*" $info] } + assert { ![string match "*master_repl_offset*" $info] } + assert { ![string match "*cluster_enabled*" $info] } + } +} + +test "info command with one sub-section" { + set info [S 0 info cpu] + assert { [string match "*used_cpu_user*" $info] } + assert { ![string match "*sentinel_tilt*" $info] } + assert { ![string match "*redis_version*" $info] } + + set info [S 0 info sentinel] + assert { [string match "*sentinel_tilt*" $info] } + assert { ![string match "*used_cpu_user*" $info] } + assert { ![string match "*redis_version*" $info] } +} + +test "info command with multiple sub-sections" { + set info [S 0 info server sentinel replication] + assert { [string match "*redis_version*" $info] } + assert { [string match "*sentinel_tilt*" $info] } + assert { ![string match "*used_memory*" $info] } + assert { ![string match "*used_cpu_user*" $info] } + + set info [S 0 info cpu all] + assert { [string match "*used_cpu_user*" $info] } + assert { [string match "*sentinel_tilt*" $info] } + assert { [string match "*redis_version*" $info] } + assert { ![string match "*used_memory*" $info] } + assert { ![string match "*master_repl_offset*" $info] } +} diff --git a/tests/support/redis.tcl b/tests/support/redis.tcl index 2c89de6ce..5743be5f4 100644 --- a/tests/support/redis.tcl +++ b/tests/support/redis.tcl @@ -35,6 +35,7 @@ array set ::redis::addr {} array set ::redis::blocking {} array set ::redis::deferred {} array set ::redis::readraw {} +array set ::redis::attributes {} ;# Holds the RESP3 attributes from the last call array set 
::redis::reconnect {} array set ::redis::tls {} array set ::redis::callback {} @@ -105,6 +106,7 @@ proc ::redis::__dispatch__raw__ {id method argv} { set argv [lrange $argv 0 end-1] } if {[info command ::redis::__method__$method] eq {}} { + catch {unset ::redis::attributes($id)} set cmd "*[expr {[llength $argv]+1}]\r\n" append cmd "$[string length $method]\r\n$method\r\n" foreach a $argv { @@ -165,6 +167,7 @@ proc ::redis::__method__close {id fd} { catch {unset ::redis::blocking($id)} catch {unset ::redis::deferred($id)} catch {unset ::redis::readraw($id)} + catch {unset ::redis::attributes($id)} catch {unset ::redis::reconnect($id)} catch {unset ::redis::tls($id)} catch {unset ::redis::state($id)} @@ -185,6 +188,10 @@ proc ::redis::__method__readraw {id fd val} { set ::redis::readraw($id) $val } +proc ::redis::__method__attributes {id fd} { + set _ $::redis::attributes($id) +} + proc ::redis::redis_write {fd buf} { puts -nonewline $fd $buf } @@ -286,8 +293,8 @@ proc ::redis::redis_read_reply {id fd} { * {return [redis_multi_bulk_read $id $fd]} % {return [redis_read_map $id $fd]} | { - # ignore attributes for now (nowhere to store them) - redis_read_map $id $fd + set attrib [redis_read_map $id $fd] + set ::redis::attributes($id) $attrib continue } default { diff --git a/tests/support/test.tcl b/tests/support/test.tcl index db3a81e06..f5de12256 100644 --- a/tests/support/test.tcl +++ b/tests/support/test.tcl @@ -24,10 +24,10 @@ proc assert_no_match {pattern value} { } } -proc assert_match {pattern value} { +proc assert_match {pattern value {detail ""}} { if {![string match $pattern $value]} { set context "(context: [info frame -1])" - error "assertion:Expected '$value' to match '$pattern' $context" + error "assertion:Expected '$value' to match '$pattern' $context $detail" } } @@ -84,9 +84,9 @@ proc assert_range {value min max {detail ""}} { proc assert_error {pattern code {detail ""}} { if {[catch {uplevel 1 $code} error]} { - assert_match $pattern $error + assert_match $pattern $error $detail } else { - assert_failed "assertion:Expected an error but nothing was caught" $detail + assert_failed "Expected an error matching '$pattern' but got '$error'" $detail } } diff --git a/tests/support/util.tcl b/tests/support/util.tcl index 5fc319254..46c9654c8 100644 --- a/tests/support/util.tcl +++ b/tests/support/util.tcl @@ -89,7 +89,7 @@ proc waitForBgsave r { puts -nonewline "\nWaiting for background save to finish... " flush stdout } - after 1000 + after 50 } else { break } @@ -103,7 +103,7 @@ proc waitForBgrewriteaof r { puts -nonewline "\nWaiting for background AOF rewrite to finish... 
" flush stdout } - after 1000 + after 50 } else { break } @@ -647,7 +647,7 @@ proc latencyrstat_percentiles {cmd r} { proc generate_fuzzy_traffic_on_key {key duration} { # Commands per type, blocking commands removed - # TODO: extract these from help.h or elsewhere, and improve to include other types + # TODO: extract these from COMMAND DOCS, and improve to include other types set string_commands {APPEND BITCOUNT BITFIELD BITOP BITPOS DECR DECRBY GET GETBIT GETRANGE GETSET INCR INCRBY INCRBYFLOAT MGET MSET MSETNX PSETEX SET SETBIT SETEX SETNX SETRANGE LCS STRLEN} set hash_commands {HDEL HEXISTS HGET HGETALL HINCRBY HINCRBYFLOAT HKEYS HLEN HMGET HMSET HSCAN HSET HSETNX HSTRLEN HVALS HRANDFIELD} set zset_commands {ZADD ZCARD ZCOUNT ZINCRBY ZINTERSTORE ZLEXCOUNT ZPOPMAX ZPOPMIN ZRANGE ZRANGEBYLEX ZRANGEBYSCORE ZRANK ZREM ZREMRANGEBYLEX ZREMRANGEBYRANK ZREMRANGEBYSCORE ZREVRANGE ZREVRANGEBYLEX ZREVRANGEBYSCORE ZREVRANK ZSCAN ZSCORE ZUNIONSTORE ZRANDMEMBER} diff --git a/tests/test_helper.tcl b/tests/test_helper.tcl index 1a5096937..277fa3803 100644 --- a/tests/test_helper.tcl +++ b/tests/test_helper.tcl @@ -20,6 +20,7 @@ set ::all_tests { unit/keyspace unit/scan unit/info + unit/info-command unit/type/string unit/type/incr unit/type/list @@ -92,6 +93,7 @@ set ::all_tests { unit/cluster unit/client-eviction unit/violations + unit/replybufsize } # Index to the next test to run in the ::all_tests list. set ::next_test 0 diff --git a/tests/unit/acl.tcl b/tests/unit/acl.tcl index 494c3847e..0a9ffb250 100644 --- a/tests/unit/acl.tcl +++ b/tests/unit/acl.tcl @@ -7,6 +7,11 @@ start_server {tags {"acl external:skip"}} { r ACL setuser newuser } + test {Usernames can not contain spaces or null characters} { + catch {r ACL setuser "a a"} err + set err + } {*Usernames can't contain spaces or null characters*} + test {New users start disabled} { r ACL setuser newuser >passwd1 catch {r AUTH newuser passwd1} err @@ -699,6 +704,23 @@ start_server {tags {"acl external:skip"}} { catch {[r ping]} e assert_match "*I/O error*" $e } + + test {ACL GENPASS command failed test} { + catch {r ACL genpass -236} err1 + catch {r ACL genpass 5000} err2 + assert_match "*ACL GENPASS argument must be the number*" $err1 + assert_match "*ACL GENPASS argument must be the number*" $err2 + } + + test {Default user can not be removed} { + catch {r ACL deluser default} err + set err + } {ERR The 'default' user cannot be removed} + + test {ACL load non-existing configured ACL file} { + catch {r ACL load} err + set err + } {*Redis instance is not configured to use an ACL file*} } set server_path [tmpdir "server.acl"] diff --git a/tests/unit/auth.tcl b/tests/unit/auth.tcl index 6fa5e0c13..4a4d7564c 100644 --- a/tests/unit/auth.tcl +++ b/tests/unit/auth.tcl @@ -3,6 +3,11 @@ start_server {tags {"auth external:skip"}} { catch {r auth foo} err set _ $err } {ERR*any password*} + + test {Arity check for auth command} { + catch {r auth a b c} err + set _ $err + } {*syntax error*} } start_server {tags {"auth external:skip"} overrides {requirepass foobar}} { diff --git a/tests/unit/client-eviction.tcl b/tests/unit/client-eviction.tcl index 0b1b8b281..949ac8f3d 100644 --- a/tests/unit/client-eviction.tcl +++ b/tests/unit/client-eviction.tcl @@ -45,6 +45,10 @@ proc mb {v} { return [expr $v * 1024 * 1024] } +proc kb {v} { + return [expr $v * 1024] +} + start_server {} { set maxmemory_clients 3000000 r config set maxmemory-clients $maxmemory_clients @@ -213,7 +217,7 @@ start_server {} { r debug pause-cron 0 $rr close $redirected_c close - } + } {0} 
{needs:debug} test "client evicted due to client tracking prefixes" { r flushdb @@ -391,6 +395,7 @@ start_server {} { test "evict clients only until below limit" { set client_count 10 set client_mem [mb 1] + r debug replybuffer-peak-reset-time never r config set maxmemory-clients 0 r client setname control r client no-evict on @@ -433,19 +438,23 @@ start_server {} { set connected_clients [llength [lsearch -all [split [string trim [r client list]] "\r\n"] *name=client*]] assert {$connected_clients == [expr $client_count / 2]} + # Restore the peak reset time to default + r debug replybuffer-peak-reset-time reset + foreach rr $rrs {$rr close} - } + } {} {needs:debug} } start_server {} { test "evict clients in right order (large to small)" { # Note that each size step needs to be at least x2 larger than previous step # because of how the client-eviction size bucktting works - set sizes [list 100000 [mb 1] [mb 3]] + set sizes [list [kb 128] [mb 1] [mb 3]] set clients_per_size 3 r client setname control r client no-evict on r config set maxmemory-clients 0 + r debug replybuffer-peak-reset-time never # Run over all sizes and create some clients using up that size set total_client_mem 0 @@ -470,7 +479,6 @@ start_server {} { # Account total client memory usage incr total_mem [expr $clients_per_size * $client_mem] } - incr total_mem [client_field control tot-mem] # Make sure all clients are connected set clients [split [string trim [r client list]] "\r\n"] @@ -481,8 +489,9 @@ start_server {} { # For each size reduce maxmemory-clients so relevant clients should be evicted # do this from largest to smallest foreach size [lreverse $sizes] { + set control_mem [client_field control tot-mem] set total_mem [expr $total_mem - $clients_per_size * $size] - r config set maxmemory-clients $total_mem + r config set maxmemory-clients [expr $total_mem + $control_mem] set clients [split [string trim [r client list]] "\r\n"] # Verify only relevant clients were evicted for {set i 0} {$i < [llength $sizes]} {incr i} { @@ -495,8 +504,12 @@ start_server {} { } } } + + # Restore the peak reset time to default + r debug replybuffer-peak-reset-time reset + foreach rr $rrs {$rr close} - } + } {} {needs:debug} } } diff --git a/tests/unit/geo.tcl b/tests/unit/geo.tcl index bd93ea4ba..e6afb211b 100644 --- a/tests/unit/geo.tcl +++ b/tests/unit/geo.tcl @@ -293,6 +293,31 @@ start_server {tags {"geo"}} { test {GEORADIUSBYMEMBER simple (sorted)} { r georadiusbymember nyc "wtc one" 7 km } {{wtc one} {union square} {central park n/q/r} 4545 {lic market}} + + test {GEORADIUSBYMEMBER search areas contain satisfied points in oblique direction} { + r del k1 + + r geoadd k1 -0.15307903289794921875 85 n1 0.3515625 85.00019260486917005437 n2 + set ret1 [r GEORADIUSBYMEMBER k1 n1 4891.94 m] + assert_equal $ret1 {n1 n2} + + r zrem k1 n1 n2 + r geoadd k1 -4.95211958885192871094 85 n3 11.25 85.0511 n4 + set ret2 [r GEORADIUSBYMEMBER k1 n3 156544 m] + assert_equal $ret2 {n3 n4} + + r zrem k1 n3 n4 + r geoadd k1 -45 65.50900022111811438208 n5 90 85.0511 n6 + set ret3 [r GEORADIUSBYMEMBER k1 n5 5009431 m] + assert_equal $ret3 {n5 n6} + } + + test {GEORADIUSBYMEMBER crossing pole search} { + r del k1 + r geoadd k1 45 65 n1 -135 85.05 n2 + set ret [r GEORADIUSBYMEMBER k1 n1 5009431 m] + assert_equal $ret {n1 n2} + } test {GEOSEARCH FROMMEMBER simple (sorted)} { r geosearch nyc frommember "wtc one" bybox 14 14 km diff --git a/tests/unit/info-command.tcl b/tests/unit/info-command.tcl new file mode 100644 index 000000000..bc24ed256 --- /dev/null +++ 
b/tests/unit/info-command.tcl @@ -0,0 +1,62 @@ +start_server {tags {"info and its relative command"}} { + test "info command with at most one sub command" { + foreach arg {"" "all" "default" "everything"} { + if {$arg == ""} { + set info [r 0 info] + } else { + set info [r 0 info $arg] + } + + assert { [string match "*redis_version*" $info] } + assert { [string match "*used_cpu_user*" $info] } + assert { ![string match "*sentinel_tilt*" $info] } + assert { [string match "*used_memory*" $info] } + if {$arg == "" || $arg == "default"} { + assert { ![string match "*rejected_calls*" $info] } + } else { + assert { [string match "*rejected_calls*" $info] } + } + } + } + + test "info command with one sub-section" { + set info [r info cpu] + assert { [string match "*used_cpu_user*" $info] } + assert { ![string match "*sentinel_tilt*" $info] } + assert { ![string match "*used_memory*" $info] } + + set info [r info sentinel] + assert { ![string match "*sentinel_tilt*" $info] } + assert { ![string match "*used_memory*" $info] } + + set info [r info commandSTATS] ;# test case insensitive compare + assert { ![string match "*used_memory*" $info] } + assert { [string match "*rejected_calls*" $info] } + } + + test "info command with multiple sub-sections" { + set info [r info cpu sentinel] + assert { [string match "*used_cpu_user*" $info] } + assert { ![string match "*sentinel_tilt*" $info] } + assert { ![string match "*master_repl_offset*" $info] } + + set info [r info cpu all] + assert { [string match "*used_cpu_user*" $info] } + assert { ![string match "*sentinel_tilt*" $info] } + assert { [string match "*used_memory*" $info] } + assert { [string match "*master_repl_offset*" $info] } + assert { [string match "*rejected_calls*" $info] } + # check that we didn't get the same info twice + assert { ![string match "*used_cpu_user_children*used_cpu_user_children*" $info] } + + set info [r info cpu default] + assert { [string match "*used_cpu_user*" $info] } + assert { ![string match "*sentinel_tilt*" $info] } + assert { [string match "*used_memory*" $info] } + assert { [string match "*master_repl_offset*" $info] } + assert { ![string match "*rejected_calls*" $info] } + # check that we didn't get the same info twice + assert { ![string match "*used_cpu_user_children*used_cpu_user_children*" $info] } + } + +} diff --git a/tests/unit/info.tcl b/tests/unit/info.tcl index b211e6c91..759e5bc0b 100644 --- a/tests/unit/info.tcl +++ b/tests/unit/info.tcl @@ -238,6 +238,7 @@ start_server {tags {"info" "external:skip"}} { assert_equal [s total_error_replies] 1 r config resetstat assert_match {} [errorstat OOM] + r config set maxmemory 0 } test {errorstats: rejected call by authorization error} { @@ -253,6 +254,25 @@ start_server {tags {"info" "external:skip"}} { assert_equal [s total_error_replies] 1 r config resetstat assert_match {} [errorstat NOPERM] + r auth default "" } + + test {errorstats: blocking commands} { + r config resetstat + set rd [redis_deferring_client] + $rd client id + set rd_id [$rd read] + r del list1{t} + + $rd blpop list1{t} 0 + wait_for_blocked_client + r client unblock $rd_id error + assert_error {UNBLOCKED*} {$rd read} + assert_match {*count=1*} [errorstat UNBLOCKED] + assert_match {*calls=1,*,rejected_calls=0,failed_calls=1} [cmdstat blpop] + assert_equal [s total_error_replies] 1 + $rd close + } + } } diff --git a/tests/unit/introspection-2.tcl b/tests/unit/introspection-2.tcl index 40124e035..46dac50b7 100644 --- a/tests/unit/introspection-2.tcl +++ b/tests/unit/introspection-2.tcl @@ 
-33,6 +33,15 @@ start_server {tags {"introspection"}} { assert_match {} [cmdstat zadd] } {} {needs:config-resetstat} + test {errors stats for GEOADD} { + r config resetstat + # make sure geo command will failed + r set foo 1 + assert_error {WRONGTYPE Operation against a key holding the wrong kind of value*} {r GEOADD foo 0 0 bar} + assert_match {*calls=1*,rejected_calls=0,failed_calls=1*} [cmdstat geoadd] + assert_match {} [cmdstat zadd] + } {} {needs:config-resetstat} + test {command stats for EXPIRE} { r config resetstat r SET foo bar @@ -81,6 +90,13 @@ start_server {tags {"introspection"}} { assert_equal {key} [r command getkeys get key] } + test {COMMAND GETKEYSANDFLAGS} { + assert_equal {{k1 {OW update}}} [r command getkeysandflags set k1 v1] + assert_equal {{k1 {OW update}} {k2 {OW update}}} [r command getkeysandflags mset k1 v1 k2 v2] + assert_equal {{k1 {RW access delete}} {k2 {RW insert}}} [r command getkeysandflags LMOVE k1 k2 left right] + assert_equal {{k1 {RO access}} {k2 {OW update}}} [r command getkeysandflags sort k1 store k2] + } + test {COMMAND GETKEYS MEMORY USAGE} { assert_equal {key} [r command getkeys memory usage key] } diff --git a/tests/unit/introspection.tcl b/tests/unit/introspection.tcl index 33a41ee50..59c96b5ad 100644 --- a/tests/unit/introspection.tcl +++ b/tests/unit/introspection.tcl @@ -7,7 +7,7 @@ start_server {tags {"introspection"}} { test {CLIENT LIST} { r client list - } {id=* addr=*:* laddr=*:* fd=* name=* age=* idle=* flags=N db=* sub=0 psub=0 multi=-1 qbuf=26 qbuf-free=* argv-mem=* multi-mem=0 obl=0 oll=0 omem=0 tot-mem=* events=r cmd=client|list user=* redir=-1 resp=2*} + } {id=* addr=*:* laddr=*:* fd=* name=* age=* idle=* flags=N db=* sub=0 psub=0 multi=-1 qbuf=26 qbuf-free=* argv-mem=* multi-mem=0 rbs=* rbp=* obl=0 oll=0 omem=0 tot-mem=* events=r cmd=client|list user=* redir=-1 resp=2*} test {CLIENT LIST with IDs} { set myid [r client id] @@ -17,7 +17,7 @@ start_server {tags {"introspection"}} { test {CLIENT INFO} { r client info - } {id=* addr=*:* laddr=*:* fd=* name=* age=* idle=* flags=N db=* sub=0 psub=0 multi=-1 qbuf=26 qbuf-free=* argv-mem=* multi-mem=0 obl=0 oll=0 omem=0 tot-mem=* events=r cmd=client|info user=* redir=-1 resp=2*} + } {id=* addr=*:* laddr=*:* fd=* name=* age=* idle=* flags=N db=* sub=0 psub=0 multi=-1 qbuf=26 qbuf-free=* argv-mem=* multi-mem=0 rbs=* rbp=* obl=0 oll=0 omem=0 tot-mem=* events=r cmd=client|info user=* redir=-1 resp=2*} test {CLIENT KILL with illegal arguments} { assert_error "ERR wrong number of arguments for 'client|kill' command" {r client kill} diff --git a/tests/unit/memefficiency.tcl b/tests/unit/memefficiency.tcl index 299dd658b..e6663ce06 100644 --- a/tests/unit/memefficiency.tcl +++ b/tests/unit/memefficiency.tcl @@ -157,6 +157,88 @@ start_server {tags {"defrag external:skip"} overrides {appendonly yes auto-aof-r } r config set appendonly no r config set key-load-delay 0 + + test "Active defrag eval scripts" { + r flushdb + r script flush sync + r config resetstat + r config set hz 100 + r config set activedefrag no + r config set active-defrag-threshold-lower 5 + r config set active-defrag-cycle-min 65 + r config set active-defrag-cycle-max 75 + r config set active-defrag-ignore-bytes 1500kb + r config set maxmemory 0 + + set n 50000 + + # Populate memory with interleaving script-key pattern of same size + set dummy_script "--[string repeat x 400]\nreturn " + set rd [redis_deferring_client] + for {set j 0} {$j < $n} {incr j} { + set val "$dummy_script[format "%06d" $j]" + $rd script load $val + $rd 
set k$j $val + } + for {set j 0} {$j < $n} {incr j} { + $rd read ; # Discard script load replies + $rd read ; # Discard set replies + } + after 120 ;# serverCron only updates the info once in 100ms + if {$::verbose} { + puts "used [s allocator_allocated]" + puts "rss [s allocator_active]" + puts "frag [s allocator_frag_ratio]" + puts "frag_bytes [s allocator_frag_bytes]" + } + assert_lessthan [s allocator_frag_ratio] 1.05 + + # Delete all the keys to create fragmentation + for {set j 0} {$j < $n} {incr j} { $rd del k$j } + for {set j 0} {$j < $n} {incr j} { $rd read } ; # Discard del replies + $rd close + after 120 ;# serverCron only updates the info once in 100ms + if {$::verbose} { + puts "used [s allocator_allocated]" + puts "rss [s allocator_active]" + puts "frag [s allocator_frag_ratio]" + puts "frag_bytes [s allocator_frag_bytes]" + } + assert_morethan [s allocator_frag_ratio] 1.4 + + catch {r config set activedefrag yes} e + if {[r config get activedefrag] eq "activedefrag yes"} { + + # wait for the active defrag to start working (decision once a second) + wait_for_condition 50 100 { + [s active_defrag_running] ne 0 + } else { + fail "defrag not started." + } + + # wait for the active defrag to stop working + wait_for_condition 500 100 { + [s active_defrag_running] eq 0 + } else { + after 120 ;# serverCron only updates the info once in 100ms + puts [r info memory] + puts [r memory malloc-stats] + fail "defrag didn't stop." + } + + # test the the fragmentation is lower + after 120 ;# serverCron only updates the info once in 100ms + if {$::verbose} { + puts "used [s allocator_allocated]" + puts "rss [s allocator_active]" + puts "frag [s allocator_frag_ratio]" + puts "frag_bytes [s allocator_frag_bytes]" + } + assert_lessthan_equal [s allocator_frag_ratio] 1.05 + } + # Flush all script to make sure we don't crash after defragging them + r script flush sync + } {OK} test "Active defrag big keys" { r flushdb diff --git a/tests/unit/moduleapi/aclcheck.tcl b/tests/unit/moduleapi/aclcheck.tcl index a6df4f7c9..953f4bf05 100644 --- a/tests/unit/moduleapi/aclcheck.tcl +++ b/tests/unit/moduleapi/aclcheck.tcl @@ -26,6 +26,12 @@ start_server {tags {"modules acl"}} { catch {r aclcheck.set.check.key "*" v 5} e assert_match "*DENIED KEY*" $e + assert_equal [r aclcheck.set.check.key "~" x 5] OK + assert_equal [r aclcheck.set.check.key "~" y 5] OK + assert_equal [r aclcheck.set.check.key "~" z 5] OK + catch {r aclcheck.set.check.key "~" v 5} e + assert_match "*DENIED KEY*" $e + assert_equal [r aclcheck.set.check.key "W" y 5] OK catch {r aclcheck.set.check.key "W" v 5} e assert_match "*DENIED KEY*" $e diff --git a/tests/unit/moduleapi/blockedclient.tcl b/tests/unit/moduleapi/blockedclient.tcl index 523d7ba69..ea2d6f5a4 100644 --- a/tests/unit/moduleapi/blockedclient.tcl +++ b/tests/unit/moduleapi/blockedclient.tcl @@ -180,6 +180,37 @@ start_server {tags {"modules"}} { assert_no_match "*name=myclient*" $clients } + test {module client error stats} { + r config resetstat + + # simple module command that replies with string error + assert_error "NULL reply returned" {r do_rm_call hgetalllll} + assert_equal [errorrstat NULL r] {count=1} + + # module command that replies with string error from bg thread + assert_error "NULL reply returned" {r do_bg_rm_call hgetalllll} + assert_equal [errorrstat NULL r] {count=2} + + # module command that returns an arity error + r do_rm_call set x x + assert_error "ERR wrong number of arguments for 'do_rm_call' command" {r do_rm_call} + assert_equal [errorrstat ERR r] 
{count=1} + + # RM_Call that propagates an error + assert_error "WRONGTYPE*" {r do_rm_call hgetall x} + assert_equal [errorrstat WRONGTYPE r] {count=1} + assert_match {*calls=1,*,rejected_calls=0,failed_calls=1} [cmdrstat hgetall r] + + # RM_Call from bg thread that propagates an error + assert_error "WRONGTYPE*" {r do_bg_rm_call hgetall x} + assert_equal [errorrstat WRONGTYPE r] {count=2} + assert_match {*calls=2,*,rejected_calls=0,failed_calls=2} [cmdrstat hgetall r] + + assert_equal [s total_error_replies] 5 + assert_match {*calls=4,*,rejected_calls=0,failed_calls=3} [cmdrstat do_rm_call r] + assert_match {*calls=2,*,rejected_calls=0,failed_calls=2} [cmdrstat do_bg_rm_call r] + } + test "Unload the module - blockedclient" { assert_equal {OK} [r module unload blockedclient] } diff --git a/tests/unit/moduleapi/cmdintrospection.tcl b/tests/unit/moduleapi/cmdintrospection.tcl new file mode 100644 index 000000000..375b3e406 --- /dev/null +++ b/tests/unit/moduleapi/cmdintrospection.tcl @@ -0,0 +1,42 @@ +set testmodule [file normalize tests/modules/cmdintrospection.so] + +start_server {tags {"modules"}} { + r module load $testmodule + + # cmdintrospection.xadd mimics XADD with regards to how + # what COMMAND exposes. There are two differences: + # + # 1. cmdintrospection.xadd (and all module commands) do not have ACL categories + # 2. cmdintrospection.xadd's `group` is "module" + # + # This tests verify that, apart from the above differences, the output of + # COMMAND INFO and COMMAND DOCS are identical for the two commands. + test "Module command introspection via COMMAND INFO" { + set redis_reply [lindex [r command info xadd] 0] + set module_reply [lindex [r command info cmdintrospection.xadd] 0] + for {set i 1} {$i < [llength $redis_reply]} {incr i} { + if {$i == 2} { + # Remove the "module" flag + set mylist [lindex $module_reply $i] + set idx [lsearch $mylist "module"] + set mylist [lreplace $mylist $idx $idx] + lset module_reply $i $mylist + } + if {$i == 6} { + # Skip ACL categories + continue + } + assert_equal [lindex $redis_reply $i] [lindex $module_reply $i] + } + } + + test "Module command introspection via COMMAND DOCS" { + set redis_reply [dict create {*}[lindex [r command docs xadd] 1]] + set module_reply [dict create {*}[lindex [r command docs cmdintrospection.xadd] 1]] + # Compare the maps. We need to pop "group" first. 
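The COMMAND DOCS check above boils down to comparing two Tcl dicts after dropping the one field that is expected to differ. A minimal standalone sketch of that pattern in plain Tcl (the proc name and the sample dicts are illustrative, not part of the test suite):

proc dicts_equal_ignoring {d1 d2 ignored} {
    # Drop the keys that are allowed to differ before comparing.
    foreach k $ignored {
        dict unset d1 $k
        dict unset d2 $k
    }
    # Dicts are unordered, so compare key by key rather than as strings.
    if {[dict size $d1] != [dict size $d2]} { return 0 }
    dict for {k v} $d1 {
        if {![dict exists $d2 $k] || [dict get $d2 $k] ne $v} { return 0 }
    }
    return 1
}

# Example: identical except for the group field, as with xadd vs. the module command.
set a {summary {Appends an entry to a stream} since 5.0.0 group stream}
set b {summary {Appends an entry to a stream} since 5.0.0 group module}
puts [dicts_equal_ignoring $a $b {group}]   ;# prints 1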
+ dict unset redis_reply group + dict unset module_reply group + + assert_equal $redis_reply $module_reply + } +} diff --git a/tests/unit/moduleapi/getchannels.tcl b/tests/unit/moduleapi/getchannels.tcl new file mode 100644 index 000000000..e8f557dcc --- /dev/null +++ b/tests/unit/moduleapi/getchannels.tcl @@ -0,0 +1,40 @@ +set testmodule [file normalize tests/modules/getchannels.so] + +start_server {tags {"modules"}} { + r module load $testmodule + + # Channels are currently used to just validate ACLs, so test them here + r ACL setuser testuser +@all resetchannels &channel &pattern* + + test "module getchannels-api with literals - ACL" { + assert_equal "OK" [r ACL DRYRUN testuser getchannels.command subscribe literal channel subscribe literal pattern1] + assert_equal "OK" [r ACL DRYRUN testuser getchannels.command publish literal channel publish literal pattern1] + assert_equal "OK" [r ACL DRYRUN testuser getchannels.command unsubscribe literal channel unsubscribe literal pattern1] + + assert_equal "This user has no permissions to access the 'nopattern1' channel" [r ACL DRYRUN testuser getchannels.command subscribe literal channel subscribe literal nopattern1] + assert_equal "This user has no permissions to access the 'nopattern1' channel" [r ACL DRYRUN testuser getchannels.command publish literal channel subscribe literal nopattern1] + assert_equal "OK" [r ACL DRYRUN testuser getchannels.command unsubscribe literal channel unsubscribe literal nopattern1] + + assert_equal "This user has no permissions to access the 'otherchannel' channel" [r ACL DRYRUN testuser getchannels.command subscribe literal otherchannel subscribe literal pattern1] + assert_equal "This user has no permissions to access the 'otherchannel' channel" [r ACL DRYRUN testuser getchannels.command publish literal otherchannel subscribe literal pattern1] + assert_equal "OK" [r ACL DRYRUN testuser getchannels.command unsubscribe literal otherchannel unsubscribe literal pattern1] + } + + test "module getchannels-api with patterns - ACL" { + assert_equal "OK" [r ACL DRYRUN testuser getchannels.command subscribe pattern pattern*] + assert_equal "OK" [r ACL DRYRUN testuser getchannels.command publish pattern pattern*] + assert_equal "OK" [r ACL DRYRUN testuser getchannels.command unsubscribe pattern pattern*] + + assert_equal "This user has no permissions to access the 'pattern1' channel" [r ACL DRYRUN testuser getchannels.command subscribe pattern pattern1 subscribe pattern pattern*] + assert_equal "This user has no permissions to access the 'pattern1' channel" [r ACL DRYRUN testuser getchannels.command publish pattern pattern1 subscribe pattern pattern*] + assert_equal "OK" [r ACL DRYRUN testuser getchannels.command unsubscribe pattern pattern1 unsubscribe pattern pattern*] + + assert_equal "This user has no permissions to access the 'otherpattern*' channel" [r ACL DRYRUN testuser getchannels.command subscribe pattern otherpattern* subscribe pattern pattern*] + assert_equal "This user has no permissions to access the 'otherpattern*' channel" [r ACL DRYRUN testuser getchannels.command publish pattern otherpattern* subscribe pattern pattern*] + assert_equal "OK" [r ACL DRYRUN testuser getchannels.command unsubscribe pattern otherpattern* unsubscribe pattern pattern*] + } + + test "Unload the module - getchannels" { + assert_equal {OK} [r module unload getchannels] + } +} diff --git a/tests/unit/moduleapi/getkeys.tcl b/tests/unit/moduleapi/getkeys.tcl index 6061fe8cf..734c55fa2 100644 --- a/tests/unit/moduleapi/getkeys.tcl +++ 
b/tests/unit/moduleapi/getkeys.tcl @@ -16,32 +16,64 @@ start_server {tags {"modules"}} { r command getkeys getkeys.command arg1 arg2 key key1 arg3 key key2 key key3 } {key1 key2 key3} + test {COMMAND GETKEYS correctly reports a movable keys module command using flags} { + r command getkeys getkeys.command_with_flags arg1 arg2 key key1 arg3 key key2 key key3 + } {key1 key2 key3} + + test {COMMAND GETKEYSANDFLAGS correctly reports a movable keys module command not using flags} { + r command getkeysandflags getkeys.command arg1 arg2 key key1 arg3 key key2 + } {{key1 {RW access update}} {key2 {RW access update}}} + + test {COMMAND GETKEYSANDFLAGS correctly reports a movable keys module command using flags} { + r command getkeysandflags getkeys.command_with_flags arg1 arg2 key key1 arg3 key key2 key key3 + } {{key1 {RO access}} {key2 {RO access}} {key3 {RO access}}} + test {RM_GetCommandKeys on non-existing command} { - catch {r getkeys.introspect non-command key1 key2} e + catch {r getkeys.introspect 0 non-command key1 key2} e set _ $e } {*ENOENT*} test {RM_GetCommandKeys on built-in fixed keys command} { - r getkeys.introspect set key1 value1 + r getkeys.introspect 0 set key1 value1 } {key1} + test {RM_GetCommandKeys on built-in fixed keys command with flags} { + r getkeys.introspect 1 set key1 value1 + } {{key1 OW}} + test {RM_GetCommandKeys on EVAL} { - r getkeys.introspect eval "" 4 key1 key2 key3 key4 arg1 arg2 + r getkeys.introspect 0 eval "" 4 key1 key2 key3 key4 arg1 arg2 } {key1 key2 key3 key4} test {RM_GetCommandKeys on a movable keys module command} { - r getkeys.introspect getkeys.command arg1 arg2 key key1 arg3 key key2 key key3 + r getkeys.introspect 0 getkeys.command arg1 arg2 key key1 arg3 key key2 key key3 } {key1 key2 key3} test {RM_GetCommandKeys on a non-movable module command} { - r getkeys.introspect getkeys.fixed arg1 key1 key2 key3 arg2 + r getkeys.introspect 0 getkeys.fixed arg1 key1 key2 key3 arg2 } {key1 key2 key3} test {RM_GetCommandKeys with bad arity} { - catch {r getkeys.introspect set key} e + catch {r getkeys.introspect 0 set key} e set _ $e } {*EINVAL*} + # user that can only read from "read" keys, write to "write" keys, and read+write to "RW" keys + r ACL setuser testuser +@all %R~read* %W~write* %RW~rw* + + test "module getkeys-api - ACL" { + # legacy triple didn't provide flags, so they require both read and write + assert_equal "OK" [r ACL DRYRUN testuser getkeys.command key rw] + assert_equal "This user has no permissions to access the 'read' key" [r ACL DRYRUN testuser getkeys.command key read] + assert_equal "This user has no permissions to access the 'write' key" [r ACL DRYRUN testuser getkeys.command key write] + } + + test "module getkeys-api with flags - ACL" { + assert_equal "OK" [r ACL DRYRUN testuser getkeys.command_with_flags key rw] + assert_equal "OK" [r ACL DRYRUN testuser getkeys.command_with_flags key read] + assert_equal "This user has no permissions to access the 'write' key" [r ACL DRYRUN testuser getkeys.command_with_flags key write] + } + test "Unload the module - getkeys" { assert_equal {OK} [r module unload getkeys] } diff --git a/tests/unit/moduleapi/infotest.tcl b/tests/unit/moduleapi/infotest.tcl index 0d07aaa7e..354487a19 100644 --- a/tests/unit/moduleapi/infotest.tcl +++ b/tests/unit/moduleapi/infotest.tcl @@ -64,7 +64,7 @@ start_server {tags {"modules"}} { } test {module info one module} { - set info [r info INFOTEST] + set info [r info INFOtest] ;# test case insensitive compare # info all does not contain modules assert { 
[string match "*Spanish*" $info] } assert { ![string match "*used_memory*" $info] } @@ -72,7 +72,7 @@ start_server {tags {"modules"}} { } {-2} test {module info one section} { - set info [r info INFOTEST_SPANISH] + set info [r info INFOtest_SpanisH] ;# test case insensitive compare assert { ![string match "*used_memory*" $info] } assert { ![string match "*Italian*" $info] } assert { ![string match "*infotest_global*" $info] } @@ -90,6 +90,31 @@ start_server {tags {"modules"}} { assert_match {*infotest_unsafe_field:value=1*} $info } + test {module info multiply sections without all, everything, default keywords} { + set info [r info replication INFOTEST] + assert { [string match "*Spanish*" $info] } + assert { ![string match "*used_memory*" $info] } + assert { [string match "*repl_offset*" $info] } + } + + test {module info multiply sections with all keyword and modules} { + set info [r info all modules] + assert { [string match "*cluster*" $info] } + assert { [string match "*cmdstat_info*" $info] } + assert { [string match "*infotest_global*" $info] } + } + + test {module info multiply sections with everything keyword} { + set info [r info replication everything cpu] + assert { [string match "*client_recent*" $info] } + assert { [string match "*cmdstat_info*" $info] } + assert { [string match "*Italian*" $info] } + # check that we didn't get the same info twice + assert { ![string match "*used_cpu_user_children*used_cpu_user_children*" $info] } + assert { ![string match "*Italian*Italian*" $info] } + field $info infotest_dos + } {2} + test "Unload the module - infotest" { assert_equal {OK} [r module unload infotest] } diff --git a/tests/unit/moduleapi/keyspecs.tcl b/tests/unit/moduleapi/keyspecs.tcl index cb9f1851a..60d3fe5d3 100644 --- a/tests/unit/moduleapi/keyspecs.tcl +++ b/tests/unit/moduleapi/keyspecs.tcl @@ -1,12 +1,26 @@ set testmodule [file normalize tests/modules/keyspecs.so] -if 0 { ; # Test suite disabled due to planned API changes start_server {tags {"modules"}} { r module load $testmodule - test "Module key specs: Legacy" { - set reply [lindex [r command info kspec.legacy] 0] - # Verify (first, last, step) + test "Module key specs: No spec, only legacy triple" { + set reply [lindex [r command info kspec.none] 0] + # Verify (first, last, step) and not movablekeys + assert_equal [lindex $reply 2] {module} + assert_equal [lindex $reply 3] 1 + assert_equal [lindex $reply 4] -1 + assert_equal [lindex $reply 5] 2 + # Verify key-spec auto-generated from the legacy triple + set keyspecs [lindex $reply 8] + assert_equal [llength $keyspecs] 1 + assert_equal [lindex $keyspecs 0] {flags {RW access update variable_flags} begin_search {type index spec {index 1}} find_keys {type range spec {lastkey -1 keystep 2 limit 0}}} + assert_equal [r command getkeys kspec.none key1 val1 key2 val2] {key1 key2} + } + + test "Module key specs: Two ranges" { + set reply [lindex [r command info kspec.tworanges] 0] + # Verify (first, last, step) and not movablekeys + assert_equal [lindex $reply 2] {module} assert_equal [lindex $reply 3] 1 assert_equal [lindex $reply 4] 2 assert_equal [lindex $reply 5] 1 @@ -14,24 +28,41 @@ start_server {tags {"modules"}} { set keyspecs [lindex $reply 8] assert_equal [lindex $keyspecs 0] {flags {RO access} begin_search {type index spec {index 1}} find_keys {type range spec {lastkey 0 keystep 1 limit 0}}} assert_equal [lindex $keyspecs 1] {flags {RW update} begin_search {type index spec {index 2}} find_keys {type range spec {lastkey 0 keystep 1 limit 0}}} + assert_equal [r 
command getkeys kspec.tworanges foo bar baz quux] {foo bar} + } + + test "Module key specs: Keyword-only spec clears the legacy triple" { + set reply [lindex [r command info kspec.keyword] 0] + # Verify (first, last, step) and movablekeys + assert_equal [lindex $reply 2] {module movablekeys} + assert_equal [lindex $reply 3] 0 + assert_equal [lindex $reply 4] 0 + assert_equal [lindex $reply 5] 0 + # Verify key-specs + set keyspecs [lindex $reply 8] + assert_equal [lindex $keyspecs 0] {flags {RO access} begin_search {type keyword spec {keyword KEYS startfrom 1}} find_keys {type range spec {lastkey -1 keystep 1 limit 0}}} + assert_equal [r command getkeys kspec.keyword foo KEYS bar baz] {bar baz} } test "Module key specs: Complex specs, case 1" { set reply [lindex [r command info kspec.complex1] 0] - # Verify (first, last, step) + # Verify (first, last, step) and movablekeys + assert_equal [lindex $reply 2] {module movablekeys} assert_equal [lindex $reply 3] 1 assert_equal [lindex $reply 4] 1 assert_equal [lindex $reply 5] 1 # Verify key-specs set keyspecs [lindex $reply 8] - assert_equal [lindex $keyspecs 0] {flags {} begin_search {type index spec {index 1}} find_keys {type range spec {lastkey 0 keystep 1 limit 0}}} + assert_equal [lindex $keyspecs 0] {flags RO begin_search {type index spec {index 1}} find_keys {type range spec {lastkey 0 keystep 1 limit 0}}} assert_equal [lindex $keyspecs 1] {flags {RW update} begin_search {type keyword spec {keyword STORE startfrom 2}} find_keys {type range spec {lastkey 0 keystep 1 limit 0}}} assert_equal [lindex $keyspecs 2] {flags {RO access} begin_search {type keyword spec {keyword KEYS startfrom 2}} find_keys {type keynum spec {keynumidx 0 firstkey 1 keystep 1}}} + assert_equal [r command getkeys kspec.complex1 foo dummy KEYS 1 bar baz STORE quux] {foo quux bar} } test "Module key specs: Complex specs, case 2" { set reply [lindex [r command info kspec.complex2] 0] - # Verify (first, last, step) + # Verify (first, last, step) and movablekeys + assert_equal [lindex $reply 2] {module movablekeys} assert_equal [lindex $reply 3] 1 assert_equal [lindex $reply 4] 2 assert_equal [lindex $reply 5] 1 @@ -42,17 +73,42 @@ start_server {tags {"modules"}} { assert_equal [lindex $keyspecs 2] {flags {RO access} begin_search {type index spec {index 2}} find_keys {type range spec {lastkey 0 keystep 1 limit 0}}} assert_equal [lindex $keyspecs 3] {flags {RW update} begin_search {type index spec {index 3}} find_keys {type keynum spec {keynumidx 0 firstkey 1 keystep 1}}} assert_equal [lindex $keyspecs 4] {flags {RW update} begin_search {type keyword spec {keyword MOREKEYS startfrom 5}} find_keys {type range spec {lastkey -1 keystep 1 limit 0}}} + assert_equal [r command getkeys kspec.complex2 foo bar 2 baz quux banana STORE dst dummy MOREKEYS hey ho] {dst foo bar baz quux hey ho} } test "Module command list filtering" { ;# Note: we piggyback this tcl file to test the general functionality of command list filtering set reply [r command list filterby module keyspecs] - assert_equal [lsort $reply] {kspec.complex1 kspec.complex2 kspec.legacy} + assert_equal [lsort $reply] {kspec.complex1 kspec.complex2 kspec.keyword kspec.none kspec.tworanges} + assert_equal [r command getkeys kspec.complex2 foo bar 2 baz quux banana STORE dst dummy MOREKEYS hey ho] {dst foo bar baz quux hey ho} + } + + test {COMMAND GETKEYSANDFLAGS correctly reports module key-spec without flags} { + r command getkeysandflags kspec.none key1 val1 key2 val2 + } {{key1 {RW access update variable_flags}} {key2 
{RW access update variable_flags}}} + + test {COMMAND GETKEYSANDFLAGS correctly reports module key-spec flags} { + r command getkeysandflags kspec.keyword keys key1 key2 key3 + } {{key1 {RO access}} {key2 {RO access}} {key3 {RO access}}} + + # user that can only read from "read" keys, write to "write" keys, and read+write to "RW" keys + r ACL setuser testuser +@all %R~read* %W~write* %RW~rw* + + test "Module key specs: No spec, only legacy triple - ACL" { + # legacy triple didn't provide flags, so they require both read and write + assert_equal "OK" [r ACL DRYRUN testuser kspec.none rw val1] + assert_equal "This user has no permissions to access the 'read' key" [r ACL DRYRUN testuser kspec.none read val1] + assert_equal "This user has no permissions to access the 'write' key" [r ACL DRYRUN testuser kspec.none write val1] + } + + test "Module key specs: tworanges - ACL" { + assert_equal "OK" [r ACL DRYRUN testuser kspec.tworanges read write] + assert_equal "OK" [r ACL DRYRUN testuser kspec.tworanges rw rw] + assert_equal "This user has no permissions to access the 'read' key" [r ACL DRYRUN testuser kspec.tworanges rw read] + assert_equal "This user has no permissions to access the 'write' key" [r ACL DRYRUN testuser kspec.tworanges write rw] } test "Unload the module - keyspecs" { assert_equal {OK} [r module unload keyspecs] } } - -} ; # Test suite disabled diff --git a/tests/unit/moduleapi/subcommands.tcl b/tests/unit/moduleapi/subcommands.tcl index d696f9ad2..11d243243 100644 --- a/tests/unit/moduleapi/subcommands.tcl +++ b/tests/unit/moduleapi/subcommands.tcl @@ -8,20 +8,15 @@ start_server {tags {"modules"}} { set command_reply [r command info subcommands.bitarray] set first_cmd [lindex $command_reply 0] set subcmds_in_command [lsort [lindex $first_cmd 9]] - if 0 { ; # Keyspecs disabled due to planned changes in keyspec API - assert_equal [lindex $subcmds_in_command 0] {subcommands.bitarray|get -2 module 1 1 1 {} {} {{flags {RO access} begin_search {type index spec {index 1}} find_keys {type range spec {lastkey 0 keystep 1 limit 0}}}} {}} - assert_equal [lindex $subcmds_in_command 1] {subcommands.bitarray|set -2 module 1 1 1 {} {} {{flags {RW update} begin_search {type index spec {index 1}} find_keys {type range spec {lastkey 0 keystep 1 limit 0}}}} {}} - } else { ; # The same asserts without the key specs - assert_equal [lindex $subcmds_in_command 0] {subcommands.bitarray|get -2 module 0 0 0 {} {} {} {}} - assert_equal [lindex $subcmds_in_command 1] {subcommands.bitarray|set -2 module 0 0 0 {} {} {} {}} - } + assert_equal [lindex $subcmds_in_command 0] {subcommands.bitarray|get -2 module 1 1 1 {} {} {{flags {RO access} begin_search {type index spec {index 1}} find_keys {type range spec {lastkey 0 keystep 1 limit 0}}}} {}} + assert_equal [lindex $subcmds_in_command 1] {subcommands.bitarray|set -2 module 1 1 1 {} {} {{flags {RW update} begin_search {type index spec {index 1}} find_keys {type range spec {lastkey 0 keystep 1 limit 0}}}} {}} # Verify that module subcommands are displayed correctly in COMMAND DOCS set docs_reply [r command docs subcommands.bitarray] set docs [dict create {*}[lindex $docs_reply 1]] set subcmds_in_cmd_docs [dict create {*}[dict get $docs subcommands]] - assert_equal [dict get $subcmds_in_cmd_docs "subcommands.bitarray|get"] {summary {} since {} group module} - assert_equal [dict get $subcmds_in_cmd_docs "subcommands.bitarray|set"] {summary {} since {} group module} + assert_equal [dict get $subcmds_in_cmd_docs "subcommands.bitarray|get"] {group module} + 
assert_equal [dict get $subcmds_in_cmd_docs "subcommands.bitarray|set"] {group module} } test "Module pure-container command fails on arity error" { diff --git a/tests/unit/moduleapi/testrdb.tcl b/tests/unit/moduleapi/testrdb.tcl index 8d76a11bc..a01bcb30b 100644 --- a/tests/unit/moduleapi/testrdb.tcl +++ b/tests/unit/moduleapi/testrdb.tcl @@ -47,6 +47,12 @@ tags "modules" { } } + test {Verify module options info} { + start_server [list overrides [list loadmodule "$testmodule"]] { + assert_match "*\[handle-io-errors|handle-repl-async-load\]*" [r info modules] + } + } + tags {repl} { test {diskless loading short read with module} { start_server [list overrides [list loadmodule "$testmodule"]] { diff --git a/tests/unit/moduleapi/timer.tcl b/tests/unit/moduleapi/timer.tcl index c04f80b23..4e9dd0f09 100644 --- a/tests/unit/moduleapi/timer.tcl +++ b/tests/unit/moduleapi/timer.tcl @@ -26,6 +26,8 @@ start_server {tags {"modules"}} { assert_equal "timer-incr-key" [lindex $info 0] set remaining [lindex $info 1] assert {$remaining < 10000 && $remaining > 1} + # Stop the timer after get timer test + assert_equal 1 [r test.stoptimer $id] } test {RM_StopTimer: basic sanity} { @@ -54,7 +56,43 @@ start_server {tags {"modules"}} { assert_equal {} [r test.gettimer $id] } - test "Unload the module - timer" { + test "Module can be unloaded when timer was finished" { + r set "timer-incr-key" 0 + r test.createtimer 500 timer-incr-key + + # Make sure the Timer has not been fired + assert_equal 0 [r get timer-incr-key] + # Module can not be unloaded since the timer was ongoing + catch {r module unload timer} err + assert_match {*the module holds timer that is not fired*} $err + + # Wait to be sure timer has been finished + wait_for_condition 10 500 { + [r get timer-incr-key] == 1 + } else { + fail "Timer not fired" + } + + # Timer fired, can be unloaded now. + assert_equal {OK} [r module unload timer] + } + + test "Module can be unloaded when timer was stopped" { + r module load $testmodule + r set "timer-incr-key" 0 + set id [r test.createtimer 5000 timer-incr-key] + + # Module can not be unloaded since the timer was ongoing + catch {r module unload timer} err + assert_match {*the module holds timer that is not fired*} $err + + # Stop the timer + assert_equal 1 [r test.stoptimer $id] + + # Make sure the Timer has not been fired + assert_equal 0 [r get timer-incr-key] + + # Timer has stopped, can be unloaded now. 
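The timer tests above wait for the callback to run by polling with the suite's wait_for_condition helper. A simplified, self-contained sketch of that polling pattern, with a plain [after] event standing in for the module timer (the proc and variable names here are illustrative only, not the suite's actual implementation):

proc poll_until {maxtries delay_ms script} {
    # Re-evaluate the condition in the caller's scope until it is true
    # or the retry budget is exhausted.
    for {set i 0} {$i < $maxtries} {incr i} {
        if {[uplevel 1 $script]} { return 1 }
        after $delay_ms
        update   ;# let pending event-loop callbacks (our fake timer) run
    }
    return 0
}

set ::fired 0
after 200 {set ::fired 1}   ;# simulate a timer firing 200ms from now
if {[poll_until 10 100 {expr {$::fired == 1}}]} {
    puts "timer fired, safe to unload"
} else {
    error "Timer not fired"
}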
assert_equal {OK} [r module unload timer] } } diff --git a/tests/unit/multi.tcl b/tests/unit/multi.tcl index 8a0b731d5..63d85d26b 100644 --- a/tests/unit/multi.tcl +++ b/tests/unit/multi.tcl @@ -132,18 +132,61 @@ start_server {tags {"multi"}} { } {} {cluster:skip} test {EXEC fail on lazy expired WATCHed key} { - r flushall + r del key r debug set-active-expire 0 - r del key - r set key 1 px 2 - r watch key + for {set j 0} {$j < 10} {incr j} { + r set key 1 px 100 + r watch key + after 101 + r multi + r incr key + + set res [r exec] + if {$res eq {}} break + } + if {$::verbose} { puts "EXEC fail on lazy expired WATCHed key attempts: $j" } + + r debug set-active-expire 1 + set _ $res + } {} {needs:debug} + + test {WATCH stale keys should not fail EXEC} { + r del x + r debug set-active-expire 0 + r set x foo px 1 + after 2 + r watch x + r multi + r ping + assert_equal {PONG} [r exec] + r debug set-active-expire 1 + } {OK} {needs:debug} - after 100 + test {Delete WATCHed stale keys should not fail EXEC} { + r del x + r debug set-active-expire 0 + r set x foo px 1 + after 2 + r watch x + # EXISTS triggers lazy expiry/deletion + assert_equal 0 [r exists x] + r multi + r ping + assert_equal {PONG} [r exec] + r debug set-active-expire 1 + } {OK} {needs:debug} + test {FLUSHDB while watching stale keys should not fail EXEC} { + r del x + r debug set-active-expire 0 + r set x foo px 1 + after 2 + r watch x + r flushdb r multi - r incr key - assert_equal [r exec] {} + r ping + assert_equal {PONG} [r exec] r debug set-active-expire 1 } {OK} {needs:debug} @@ -245,6 +288,52 @@ start_server {tags {"multi"}} { r exec } {} {singledb:skip} + test {SWAPDB does not touch watched stale keys} { + r flushall + r select 1 + r debug set-active-expire 0 + r set x foo px 1 + after 2 + r watch x + r swapdb 0 1 ; # expired key replaced with no key => no change + r multi + r ping + assert_equal {PONG} [r exec] + r debug set-active-expire 1 + } {OK} {singledb:skip needs:debug} + + test {SWAPDB does not touch non-existing key replaced with stale key} { + r flushall + r select 0 + r debug set-active-expire 0 + r set x foo px 1 + after 2 + r select 1 + r watch x + r swapdb 0 1 ; # no key replaced with expired key => no change + r multi + r ping + assert_equal {PONG} [r exec] + r debug set-active-expire 1 + } {OK} {singledb:skip needs:debug} + + test {SWAPDB does not touch stale key replaced with another stale key} { + r flushall + r debug set-active-expire 0 + r select 1 + r set x foo px 1 + r select 0 + r set x bar px 1 + after 2 + r select 1 + r watch x + r swapdb 0 1 ; # no key replaced with expired key => no change + r multi + r ping + assert_equal {PONG} [r exec] + r debug set-active-expire 1 + } {OK} {singledb:skip needs:debug} + test {WATCH is able to remember the DB a key belongs to} { r select 5 r set x 30 diff --git a/tests/unit/protocol.tcl b/tests/unit/protocol.tcl index ec4a1a4aa..50305bd27 100644 --- a/tests/unit/protocol.tcl +++ b/tests/unit/protocol.tcl @@ -139,13 +139,17 @@ start_server {tags {"protocol network"}} { test {RESP3 attributes} { r hello 3 - set res [r debug protocol attrib] - # currently the parser in redis.tcl ignores the attributes + assert_equal {Some real reply following the attribute} [r debug protocol attrib] + assert_equal {key-popularity {key:123 90}} [r attributes] + + # make sure attributes are not kept from previous command + r ping + assert_error {*attributes* no such element in array} {r attributes} # restore state r hello 2 - set _ $res - } {Some real reply following the 
attribute} {needs:debug resp3} + set _ "" + } {} {needs:debug resp3} test {RESP3 attributes readraw} { r hello 3 diff --git a/tests/unit/replybufsize.tcl b/tests/unit/replybufsize.tcl new file mode 100644 index 000000000..9377a8fd3 --- /dev/null +++ b/tests/unit/replybufsize.tcl @@ -0,0 +1,47 @@ +proc get_reply_buffer_size {cname} { + + set clients [split [string trim [r client list]] "\r\n"] + set c [lsearch -inline $clients *name=$cname*] + if {![regexp rbs=(\[a-zA-Z0-9-\]+) $c - rbufsize]} { + error "field rbus not found in $c" + } + return $rbufsize +} + +start_server {tags {"replybufsize"}} { + + test {verify reply buffer limits} { + # In order to reduce test time we can set the peak reset time very low + r debug replybuffer-peak-reset-time 100 + + # Create a simple idle test client + variable tc [redis_client] + $tc client setname test_client + + # make sure the client is idle for 1 seconds to make it shrink the reply buffer + wait_for_condition 10 100 { + [get_reply_buffer_size test_client] >= 1024 && [get_reply_buffer_size test_client] < 2046 + } else { + set rbs [get_reply_buffer_size test_client] + fail "reply buffer of idle client is $rbs after 1 seconds" + } + + r set bigval [string repeat x 32768] + + # In order to reduce test time we can set the peak reset time very low + r debug replybuffer-peak-reset-time never + + wait_for_condition 10 100 { + [$tc get bigval ; get_reply_buffer_size test_client] >= 16384 && [get_reply_buffer_size test_client] < 32768 + } else { + set rbs [get_reply_buffer_size test_client] + fail "reply buffer of busy client is $rbs after 1 seconds" + } + + # Restore the peak reset time to default + r debug replybuffer-peak-reset-time reset + + $tc close + } {0} {needs:debug} +} +
\ No newline at end of file diff --git a/tests/unit/scripting.tcl b/tests/unit/scripting.tcl index f16555350..6c40844c3 100644 --- a/tests/unit/scripting.tcl +++ b/tests/unit/scripting.tcl @@ -73,10 +73,10 @@ start_server {tags {"scripting"}} { test {EVAL - Lua error reply -> Redis protocol type conversion} { catch { - run_script {return {err='this is an error'}} 0 + run_script {return {err='ERR this is an error'}} 0 } e set _ $e - } {this is an error} + } {ERR this is an error} test {EVAL - Lua table -> Redis protocol type conversion} { run_script {return {1,2,3,'ciao',{1,2}}} 0 @@ -378,7 +378,7 @@ start_server {tags {"scripting"}} { r set foo bar catch {run_script_ro {redis.call('del', KEYS[1]);} 1 foo} e set e - } {*Write commands are not allowed from read-only scripts*} + } {ERR Write commands are not allowed from read-only scripts*} if {$is_eval eq 1} { # script command is only relevant for is_eval Lua @@ -439,12 +439,12 @@ start_server {tags {"scripting"}} { test {Globals protection reading an undeclared global variable} { catch {run_script {return a} 0} e set e - } {*ERR*attempted to access * global*} + } {ERR*attempted to access * global*} test {Globals protection setting an undeclared global*} { catch {run_script {a=10} 0} e set e - } {*ERR*attempted to create global*} + } {ERR*attempted to create global*} test {Test an example script DECR_IF_GT} { set decr_if_gt { @@ -599,8 +599,8 @@ start_server {tags {"scripting"}} { } {ERR Number of keys can't be negative} test {Scripts can handle commands with incorrect arity} { - assert_error "*Wrong number of args calling Redis command from script" {run_script "redis.call('set','invalid')" 0} - assert_error "*Wrong number of args calling Redis command from script" {run_script "redis.call('incr')" 0} + assert_error "ERR Wrong number of args calling Redis command from script*" {run_script "redis.call('set','invalid')" 0} + assert_error "ERR Wrong number of args calling Redis command from script*" {run_script "redis.call('incr')" 0} } test {Correct handling of reused argv (issue #1939)} { @@ -701,6 +701,32 @@ start_server {tags {"scripting"}} { return redis.call("EXISTS", "key") } 0] 0 } + + test "Script ACL check" { + r acl setuser bob on {>123} {+@scripting} {+set} {~x*} + assert_equal [r auth bob 123] {OK} + + # Check permission granted + assert_equal [run_script { + return redis.acl_check_cmd('set','xx',1) + } 1 xx] 1 + + # Check permission denied unauthorised command + assert_equal [run_script { + return redis.acl_check_cmd('hset','xx','f',1) + } 1 xx] {} + + # Check permission denied unauthorised key + # Note: we don't pass the "yy" key as an argument to the script so key acl checks won't block the script + assert_equal [run_script { + return redis.acl_check_cmd('set','yy',1) + } 0] {} + + # Check error due to invalid command + assert_error {ERR *Invalid command passed to redis.acl_check_cmd()*} {run_script { + return redis.acl_check_cmd('invalid-cmd','arg') + } 0} + } } # Start a new server since the last test in this stanza will kill the @@ -1262,7 +1288,7 @@ start_server {tags {"scripting"}} { r config set maxmemory 1 # Fail to execute deny-oom command in OOM condition (backwards compatibility mode without flags) - assert_error {ERR Error running script *OOM command not allowed when used memory > 'maxmemory'.} { + assert_error {OOM command not allowed when used memory > 'maxmemory'*} { r eval { redis.call('set','x',1) return 1 @@ -1293,7 +1319,7 @@ start_server {tags {"scripting"}} { } test "no-writes shebang flag" { - assert_error 
{ERR Error running script *Write commands are not allowed from read-only scripts.} { + assert_error {ERR Write commands are not allowed from read-only scripts*} { r eval {#!lua flags=no-writes redis.call('set','x',1) return 1 @@ -1374,3 +1400,154 @@ start_server {tags {"scripting"}} { set _ {} } {} {external:skip} } + +# Additional eval only tests +start_server {tags {"scripting"}} { + test "Consistent eval error reporting" { + r config resetstat + r config set maxmemory 1 + # Script aborted due to Redis state (OOM) should report script execution error with detailed internal error + assert_error {OOM command not allowed when used memory > 'maxmemory'*} { + r eval {return redis.call('set','x','y')} 1 x + } + assert_equal [errorrstat OOM r] {count=1} + assert_equal [s total_error_replies] {1} + assert_match {calls=0*rejected_calls=1,failed_calls=0*} [cmdrstat set r] + assert_match {calls=1*rejected_calls=0,failed_calls=1*} [cmdrstat eval r] + + # redis.pcall() failure due to Redis state (OOM) returns lua error table with Redis error message without '-' prefix + r config resetstat + assert_equal [ + r eval { + local t = redis.pcall('set','x','y') + if t['err'] == "OOM command not allowed when used memory > 'maxmemory'." then + return 1 + else + return 0 + end + } 1 x + ] 1 + # error stats were not incremented + assert_equal [errorrstat ERR r] {} + assert_equal [errorrstat OOM r] {count=1} + assert_equal [s total_error_replies] {1} + assert_match {calls=0*rejected_calls=1,failed_calls=0*} [cmdrstat set r] + assert_match {calls=1*rejected_calls=0,failed_calls=0*} [cmdrstat eval r] + + # Returning an error object from lua is handled as a valid RESP error result. + r config resetstat + assert_error {OOM command not allowed when used memory > 'maxmemory'.} { + r eval { return redis.pcall('set','x','y') } 1 x + } + assert_equal [errorrstat ERR r] {} + assert_equal [errorrstat OOM r] {count=1} + assert_equal [s total_error_replies] {1} + assert_match {calls=0*rejected_calls=1,failed_calls=0*} [cmdrstat set r] + assert_match {calls=1*rejected_calls=0,failed_calls=1*} [cmdrstat eval r] + + r config set maxmemory 0 + r config resetstat + # Script aborted due to error result of Redis command + assert_error {ERR DB index is out of range*} { + r eval {return redis.call('select',99)} 0 + } + assert_equal [errorrstat ERR r] {count=1} + assert_equal [s total_error_replies] {1} + assert_match {calls=1*rejected_calls=0,failed_calls=1*} [cmdrstat select r] + assert_match {calls=1*rejected_calls=0,failed_calls=1*} [cmdrstat eval r] + + # redis.pcall() failure due to error in Redis command returns lua error table with redis error message without '-' prefix + r config resetstat + assert_equal [ + r eval { + local t = redis.pcall('select',99) + if t['err'] == "ERR DB index is out of range" then + return 1 + else + return 0 + end + } 0 + ] 1 + assert_equal [errorrstat ERR r] {count=1} ; + assert_equal [s total_error_replies] {1} + assert_match {calls=1*rejected_calls=0,failed_calls=1*} [cmdrstat select r] + assert_match {calls=1*rejected_calls=0,failed_calls=0*} [cmdrstat eval r] + + # Script aborted due to scripting specific error state (write cmd with eval_ro) should report script execution error with detailed internal error + r config resetstat + assert_error {ERR Write commands are not allowed from read-only scripts*} { + r eval_ro {return redis.call('set','x','y')} 1 x + } + assert_equal [errorrstat ERR r] {count=1} + assert_equal [s total_error_replies] {1} + assert_match 
{calls=0*rejected_calls=1,failed_calls=0*} [cmdrstat set r] + assert_match {calls=1*rejected_calls=0,failed_calls=1*} [cmdrstat eval_ro r] + + # redis.pcall() failure due to scripting specific error state (write cmd with eval_ro) returns lua error table with Redis error message without '-' prefix + r config resetstat + assert_equal [ + r eval_ro { + local t = redis.pcall('set','x','y') + if t['err'] == "ERR Write commands are not allowed from read-only scripts." then + return 1 + else + return 0 + end + } 1 x + ] 1 + assert_equal [errorrstat ERR r] {count=1} + assert_equal [s total_error_replies] {1} + assert_match {calls=0*rejected_calls=1,failed_calls=0*} [cmdrstat set r] + assert_match {calls=1*rejected_calls=0,failed_calls=0*} [cmdrstat eval_ro r] + + r config resetstat + # make sure geoadd will failed + r set Sicily 1 + assert_error {WRONGTYPE Operation against a key holding the wrong kind of value*} { + r eval {return redis.call('GEOADD', 'Sicily', '13.361389', '38.115556', 'Palermo', '15.087269', '37.502669', 'Catania')} 1 x + } + assert_equal [errorrstat WRONGTYPE r] {count=1} + assert_equal [s total_error_replies] {1} + assert_match {calls=1*rejected_calls=0,failed_calls=1*} [cmdrstat geoadd r] + assert_match {calls=1*rejected_calls=0,failed_calls=1*} [cmdrstat eval r] + } {} {cluster:skip} + + test "LUA redis.error_reply API" { + r config resetstat + assert_error {MY_ERR_CODE custom msg} { + r eval {return redis.error_reply("MY_ERR_CODE custom msg")} 0 + } + assert_equal [errorrstat MY_ERR_CODE r] {count=1} + } + + test "LUA redis.error_reply API with empty string" { + r config resetstat + assert_error {ERR} { + r eval {return redis.error_reply("")} 0 + } + assert_equal [errorrstat ERR r] {count=1} + } + + test "LUA redis.status_reply API" { + r config resetstat + r readraw 1 + assert_equal [ + r eval {return redis.status_reply("MY_OK_CODE custom msg")} 0 + ] {+MY_OK_CODE custom msg} + r readraw 0 + assert_equal [errorrstat MY_ERR_CODE r] {} ;# error stats were not incremented + } + + test "LUA test pcall" { + assert_equal [ + r eval {local status, res = pcall(function() return 1 end); return 'status: ' .. tostring(status) .. ' result: ' .. res} 0 + ] {status: true result: 1} + } + + test "LUA test pcall with error" { + assert_match {status: false result:*Script attempted to access nonexistent global variable 'foo'} [ + r eval {local status, res = pcall(function() return foo end); return 'status: ' .. tostring(status) .. ' result: ' .. res} 0 + ] + } +} + diff --git a/tests/unit/type/stream-cgroups.tcl b/tests/unit/type/stream-cgroups.tcl index ae8da27b8..27cbc686e 100644 --- a/tests/unit/type/stream-cgroups.tcl +++ b/tests/unit/type/stream-cgroups.tcl @@ -281,14 +281,20 @@ start_server { } test {XGROUP DESTROY should unblock XREADGROUP with -NOGROUP} { + r config resetstat r del mystream r XGROUP CREATE mystream mygroup $ MKSTREAM set rd [redis_deferring_client] $rd XREADGROUP GROUP mygroup Alice BLOCK 0 STREAMS mystream ">" wait_for_blocked_clients_count 1 r XGROUP DESTROY mystream mygroup - assert_error "*NOGROUP*" {$rd read} + assert_error "NOGROUP*" {$rd read} $rd close + + # verify command stats, error stats and error counter work on failed blocked command + assert_match {*count=1*} [errorrstat NOGROUP r] + assert_match {*calls=1,*,rejected_calls=0,failed_calls=1} [cmdrstat xreadgroup r] + assert_equal [s total_error_replies] 1 } test {RENAME can unblock XREADGROUP with data} { @@ -355,24 +361,22 @@ start_server { # Delete item 2 from the stream. 
Now consumer 1 has PEL that contains # only item 3. Try to use consumer 2 to claim the deleted item 2 - # from the PEL of consumer 1, this should return nil + # from the PEL of consumer 1, this should be NOP r XDEL mystream $id2 set reply [ r XCLAIM mystream mygroup consumer2 10 $id2 ] - assert {[llength $reply] == 1} - assert_equal "" [lindex $reply 0] + assert {[llength $reply] == 0} # Delete item 3 from the stream. Now consumer 1 has PEL that is empty. # Try to use consumer 2 to claim the deleted item 3 from the PEL - # of consumer 1, this should return nil + # of consumer 1, this should be NOP after 200 r XDEL mystream $id3 set reply [ r XCLAIM mystream mygroup consumer2 10 $id3 ] - assert {[llength $reply] == 1} - assert_equal "" [lindex $reply 0] + assert {[llength $reply] == 0} } test {XCLAIM without JUSTID increments delivery count} { @@ -445,6 +449,7 @@ start_server { set id1 [r XADD mystream * a 1] set id2 [r XADD mystream * b 2] set id3 [r XADD mystream * c 3] + set id4 [r XADD mystream * d 4] r XGROUP CREATE mystream mygroup 0 # Consumer 1 reads item 1 from the stream without acknowledgements. @@ -454,7 +459,7 @@ start_server { assert_equal [lindex $reply 0 1 0 1] {a 1} after 200 set reply [r XAUTOCLAIM mystream mygroup consumer2 10 - COUNT 1] - assert_equal [llength $reply] 2 + assert_equal [llength $reply] 3 assert_equal [lindex $reply 0] "0-0" assert_equal [llength [lindex $reply 1]] 1 assert_equal [llength [lindex $reply 1 0]] 2 @@ -462,7 +467,7 @@ start_server { assert_equal [lindex $reply 1 0 1] {a 1} # Consumer 1 reads another 2 items from stream - r XREADGROUP GROUP mygroup consumer1 count 2 STREAMS mystream > + r XREADGROUP GROUP mygroup consumer1 count 3 STREAMS mystream > # For min-idle-time after 200 @@ -471,33 +476,37 @@ start_server { # only item 3. Try to use consumer 2 to claim the deleted item 2 # from the PEL of consumer 1, this should return nil r XDEL mystream $id2 + + # id1 and id3 are self-claimed here but not id2 ('count' was set to 2) + # we make sure id2 is indeed skipped (the cursor points to id4) set reply [r XAUTOCLAIM mystream mygroup consumer2 10 - COUNT 2] - # id1 is self-claimed here but not id2 ('count' was set to 2) - assert_equal [llength $reply] 2 - assert_equal [lindex $reply 0] $id3 + + assert_equal [llength $reply] 3 + assert_equal [lindex $reply 0] $id4 assert_equal [llength [lindex $reply 1]] 2 assert_equal [llength [lindex $reply 1 0]] 2 assert_equal [llength [lindex $reply 1 0 1]] 2 assert_equal [lindex $reply 1 0 1] {a 1} - assert_equal [lindex $reply 1 1] "" + assert_equal [lindex $reply 1 1 1] {c 3} # Delete item 3 from the stream. Now consumer 1 has PEL that is empty. # Try to use consumer 2 to claim the deleted item 3 from the PEL # of consumer 1, this should return nil after 200 - r XDEL mystream $id3 + + r XDEL mystream $id4 + + # id1 and id3 are self-claimed here but not id2 and id4 ('count' is default 100) set reply [r XAUTOCLAIM mystream mygroup consumer2 10 - JUSTID] - # id1 is self-claimed here but not id2 and id3 ('count' is default 100) # we also test the JUSTID modifier here. note that, when using JUSTID, # deleted entries are returned in reply (consistent with XCLAIM). 
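As the assertions below reflect, the XAUTOCLAIM reply in this version has three elements: the next cursor, the claimed entries, and the IDs whose pending entries were dropped because the underlying messages had been deleted. A small pure-Tcl sketch of unpacking that shape (the reply literal is a hypothetical value matching the asserted structure):

# cursor, claimed entries, and deleted-message IDs removed from the PEL
set reply {3-0 {{1-0 {f v}} {2-0 {f v}}} {5-0 6-0}}
lassign $reply cursor claimed deleted
puts "next cursor: $cursor"
foreach entry $claimed {
    lassign $entry id fields
    puts "claimed $id -> $fields"
}
puts "dropped from PEL (messages already deleted): $deleted"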
- assert_equal [llength $reply] 2
- assert_equal [lindex $reply 0] "0-0"
- assert_equal [llength [lindex $reply 1]] 3
+ assert_equal [llength $reply] 3
+ assert_equal [lindex $reply 0] {0-0}
+ assert_equal [llength [lindex $reply 1]] 2
 assert_equal [lindex $reply 1 0] $id1
- assert_equal [lindex $reply 1 1] $id2
- assert_equal [lindex $reply 1 2] $id3
+ assert_equal [lindex $reply 1 1] $id3
 }

 test {XAUTOCLAIM as an iterator} {
@@ -518,7 +527,7 @@ start_server {
 # Claim 2 entries
 set reply [r XAUTOCLAIM mystream mygroup consumer2 10 - COUNT 2]
- assert_equal [llength $reply] 2
+ assert_equal [llength $reply] 3
 set cursor [lindex $reply 0]
 assert_equal $cursor $id3
 assert_equal [llength [lindex $reply 1]] 2
@@ -527,7 +536,7 @@ start_server {
 # Claim 2 more entries
 set reply [r XAUTOCLAIM mystream mygroup consumer2 10 $cursor COUNT 2]
- assert_equal [llength $reply] 2
+ assert_equal [llength $reply] 3
 set cursor [lindex $reply 0]
 assert_equal $cursor $id5
 assert_equal [llength [lindex $reply 1]] 2
@@ -536,7 +545,7 @@ start_server {
 # Claim last entry
 set reply [r XAUTOCLAIM mystream mygroup consumer2 10 $cursor COUNT 1]
- assert_equal [llength $reply] 2
+ assert_equal [llength $reply] 3
 set cursor [lindex $reply 0]
 assert_equal $cursor {0-0}
 assert_equal [llength [lindex $reply 1]] 1
@@ -548,6 +557,56 @@ start_server {
 assert_error "ERR COUNT must be > 0" {r XAUTOCLAIM key group consumer 1 1 COUNT 0}
 }

+ test {XCLAIM with XDEL} {
+ r DEL x
+ r XADD x 1-0 f v
+ r XADD x 2-0 f v
+ r XADD x 3-0 f v
+ r XGROUP CREATE x grp 0
+ assert_equal [r XREADGROUP GROUP grp Alice STREAMS x >] {{x {{1-0 {f v}} {2-0 {f v}} {3-0 {f v}}}}}
+ r XDEL x 2-0
+ assert_equal [r XCLAIM x grp Bob 0 1-0 2-0 3-0] {{1-0 {f v}} {3-0 {f v}}}
+ assert_equal [r XPENDING x grp - + 10 Alice] {}
+ }
+
+ test {XCLAIM with trimming} {
+ r DEL x
+ r config set stream-node-max-entries 2
+ r XADD x 1-0 f v
+ r XADD x 2-0 f v
+ r XADD x 3-0 f v
+ r XGROUP CREATE x grp 0
+ assert_equal [r XREADGROUP GROUP grp Alice STREAMS x >] {{x {{1-0 {f v}} {2-0 {f v}} {3-0 {f v}}}}}
+ r XTRIM x MAXLEN 1
+ assert_equal [r XCLAIM x grp Bob 0 1-0 2-0 3-0] {{3-0 {f v}}}
+ assert_equal [r XPENDING x grp - + 10 Alice] {}
+ }
+
+ test {XAUTOCLAIM with XDEL} {
+ r DEL x
+ r XADD x 1-0 f v
+ r XADD x 2-0 f v
+ r XADD x 3-0 f v
+ r XGROUP CREATE x grp 0
+ assert_equal [r XREADGROUP GROUP grp Alice STREAMS x >] {{x {{1-0 {f v}} {2-0 {f v}} {3-0 {f v}}}}}
+ r XDEL x 2-0
+ assert_equal [r XAUTOCLAIM x grp Bob 0 0-0] {0-0 {{1-0 {f v}} {3-0 {f v}}} 2-0}
+ assert_equal [r XPENDING x grp - + 10 Alice] {}
+ }
+
+ test {XAUTOCLAIM with trimming} {
+ r DEL x
+ r config set stream-node-max-entries 2
+ r XADD x 1-0 f v
+ r XADD x 2-0 f v
+ r XADD x 3-0 f v
+ r XGROUP CREATE x grp 0
+ assert_equal [r XREADGROUP GROUP grp Alice STREAMS x >] {{x {{1-0 {f v}} {2-0 {f v}} {3-0 {f v}}}}}
+ r XTRIM x MAXLEN 1
+ assert_equal [r XAUTOCLAIM x grp Bob 0 0-0] {0-0 {{3-0 {f v}}} {1-0 2-0}}
+ assert_equal [r XPENDING x grp - + 10 Alice] {}
+ }
+
 test {XINFO FULL output} {
 r del x
 r XADD x 100 a 1
@@ -564,22 +623,30 @@ start_server {
 r XDEL x 103

 set reply [r XINFO STREAM x FULL]
- assert_equal [llength $reply] 12
- assert_equal [lindex $reply 1] 4 ;# stream length
- assert_equal [lindex $reply 9] "{100-0 {a 1}} {101-0 {b 1}} {102-0 {c 1}} {104-0 {f 1}}" ;# entries
- assert_equal [lindex $reply 11 0 1] "g1" ;# first group name
- assert_equal [lindex $reply 11 0 7 0 0] "100-0" ;# first entry in group's PEL
- assert_equal [lindex $reply 11 0 9 0 1] "Alice" ;# first consumer
- assert_equal [lindex $reply 11 0 9 0 7 0 0] "100-0" ;# first entry in first consumer's PEL
- assert_equal [lindex $reply 11 1 1] "g2" ;# second group name
- assert_equal [lindex $reply 11 1 9 0 1] "Charlie" ;# first consumer
- assert_equal [lindex $reply 11 1 9 0 7 0 0] "100-0" ;# first entry in first consumer's PEL
- assert_equal [lindex $reply 11 1 9 0 7 1 0] "101-0" ;# second entry in first consumer's PEL
+ assert_equal [llength $reply] 18
+ assert_equal [dict get $reply length] 4
+ assert_equal [dict get $reply entries] "{100-0 {a 1}} {101-0 {b 1}} {102-0 {c 1}} {104-0 {f 1}}"
+
+ # First consumer group
+ set group [lindex [dict get $reply groups] 0]
+ assert_equal [dict get $group name] "g1"
+ assert_equal [lindex [dict get $group pending] 0 0] "100-0"
+ set consumer [lindex [dict get $group consumers] 0]
+ assert_equal [dict get $consumer name] "Alice"
+ assert_equal [lindex [dict get $consumer pending] 0 0] "100-0" ;# first entry in first consumer's PEL
+
+ # Second consumer group
+ set group [lindex [dict get $reply groups] 1]
+ assert_equal [dict get $group name] "g2"
+ set consumer [lindex [dict get $group consumers] 0]
+ assert_equal [dict get $consumer name] "Charlie"
+ assert_equal [lindex [dict get $consumer pending] 0 0] "100-0" ;# first entry in first consumer's PEL
+ assert_equal [lindex [dict get $consumer pending] 1 0] "101-0" ;# second entry in first consumer's PEL

 set reply [r XINFO STREAM x FULL COUNT 1]
- assert_equal [llength $reply] 12
- assert_equal [lindex $reply 1] 4
- assert_equal [lindex $reply 9] "{100-0 {a 1}}"
+ assert_equal [llength $reply] 18
+ assert_equal [dict get $reply length] 4
+ assert_equal [dict get $reply entries] "{100-0 {a 1}}"
 }

 test {XGROUP CREATECONSUMER: create consumer if does not exist} {
@@ -643,7 +710,7 @@ start_server {
 set grpinfo [r xinfo groups mystream]

 r debug loadaof
- assert {[r xinfo groups mystream] == $grpinfo}
+ assert_equal [r xinfo groups mystream] $grpinfo
 set reply [r xinfo consumers mystream mygroup]
 set consumer_info [lindex $reply 0]
 assert_equal [lindex $consumer_info 1] "Alice" ;# consumer name
@@ -682,6 +749,154 @@ start_server {
 }
 }
+ test {Consumer group read counter and lag in empty streams} {
+ r DEL x
+ r XGROUP CREATE x g1 0 MKSTREAM
+
+ set reply [r XINFO STREAM x FULL]
+ set group [lindex [dict get $reply groups] 0]
+ assert_equal [dict get $reply max-deleted-entry-id] "0-0"
+ assert_equal [dict get $reply entries-added] 0
+ assert_equal [dict get $group entries-read] {}
+ assert_equal [dict get $group lag] 0
+
+ r XADD x 1-0 data a
+ r XDEL x 1-0
+
+ set reply [r XINFO STREAM x FULL]
+ set group [lindex [dict get $reply groups] 0]
+ assert_equal [dict get $reply max-deleted-entry-id] "1-0"
+ assert_equal [dict get $reply entries-added] 1
+ assert_equal [dict get $group entries-read] {}
+ assert_equal [dict get $group lag] 0
+ }
+
+ test {Consumer group read counter and lag sanity} {
+ r DEL x
+ r XADD x 1-0 data a
+ r XADD x 2-0 data b
+ r XADD x 3-0 data c
+ r XADD x 4-0 data d
+ r XADD x 5-0 data e
+ r XGROUP CREATE x g1 0
+
+ set reply [r XINFO STREAM x FULL]
+ set group [lindex [dict get $reply groups] 0]
+ assert_equal [dict get $group entries-read] {}
+ assert_equal [dict get $group lag] 5
+
+ r XREADGROUP GROUP g1 c11 COUNT 1 STREAMS x >
+ set reply [r XINFO STREAM x FULL]
+ set group [lindex [dict get $reply groups] 0]
+ assert_equal [dict get $group entries-read] 1
+ assert_equal [dict get $group lag] 4
+
+ r XREADGROUP GROUP g1 c12 COUNT 10 STREAMS x >
+ set reply [r XINFO STREAM x FULL]
+ set group [lindex [dict get $reply groups] 0]
+ assert_equal [dict get $group entries-read] 5
+ assert_equal [dict get $group lag] 0
+
+ r XADD x 6-0 data f
+ set reply [r XINFO STREAM x FULL]
+ set group [lindex [dict get $reply groups] 0]
+ assert_equal [dict get $group entries-read] 5
+ assert_equal [dict get $group lag] 1
+ }
+
+ test {Consumer group lag with XDELs} {
+ r DEL x
+ r XADD x 1-0 data a
+ r XADD x 2-0 data b
+ r XADD x 3-0 data c
+ r XADD x 4-0 data d
+ r XADD x 5-0 data e
+ r XDEL x 3-0
+ r XGROUP CREATE x g1 0
+ r XGROUP CREATE x g2 0
+
+ set reply [r XINFO STREAM x FULL]
+ set group [lindex [dict get $reply groups] 0]
+ assert_equal [dict get $group entries-read] {}
+ assert_equal [dict get $group lag] {}
+
+ r XREADGROUP GROUP g1 c11 COUNT 1 STREAMS x >
+ set reply [r XINFO STREAM x FULL]
+ set group [lindex [dict get $reply groups] 0]
+ assert_equal [dict get $group entries-read] {}
+ assert_equal [dict get $group lag] {}
+
+ r XREADGROUP GROUP g1 c11 COUNT 1 STREAMS x >
+ set reply [r XINFO STREAM x FULL]
+ set group [lindex [dict get $reply groups] 0]
+ assert_equal [dict get $group entries-read] {}
+ assert_equal [dict get $group lag] {}
+
+ r XREADGROUP GROUP g1 c11 COUNT 1 STREAMS x >
+ set reply [r XINFO STREAM x FULL]
+ set group [lindex [dict get $reply groups] 0]
+ assert_equal [dict get $group entries-read] {}
+ assert_equal [dict get $group lag] {}
+
+ r XREADGROUP GROUP g1 c11 COUNT 1 STREAMS x >
+ set reply [r XINFO STREAM x FULL]
+ set group [lindex [dict get $reply groups] 0]
+ assert_equal [dict get $group entries-read] 5
+ assert_equal [dict get $group lag] 0
+
+ r XADD x 6-0 data f
+ set reply [r XINFO STREAM x FULL]
+ set group [lindex [dict get $reply groups] 0]
+ assert_equal [dict get $group entries-read] 5
+ assert_equal [dict get $group lag] 1
+
+ r XTRIM x MINID = 3-0
+ set reply [r XINFO STREAM x FULL]
+ set group [lindex [dict get $reply groups] 0]
+ assert_equal [dict get $group entries-read] 5
+ assert_equal [dict get $group lag] 1
+ set group [lindex [dict get $reply groups] 1]
+ assert_equal [dict get $group entries-read] {}
+ assert_equal [dict get $group lag] 3
+
+ r XTRIM x MINID = 5-0
+ set reply [r XINFO STREAM x FULL]
+ set group [lindex [dict get $reply groups] 0]
+ assert_equal [dict get $group entries-read] 5
+ assert_equal [dict get $group lag] 1
+ set group [lindex [dict get $reply groups] 1]
+ assert_equal [dict get $group entries-read] {}
+ assert_equal [dict get $group lag] 2
+ }
+
+ test {Loading from legacy (Redis <= v6.2.x, rdb_ver < 10) persistence} {
+ # The payload was DUMPed from a v5 instance after:
+ # XADD x 1-0 data a
+ # XADD x 2-0 data b
+ # XADD x 3-0 data c
+ # XADD x 4-0 data d
+ # XADD x 5-0 data e
+ # XADD x 6-0 data f
+ # XDEL x 3-0
+ # XGROUP CREATE x g1 0
+ # XGROUP CREATE x g2 0
+ # XREADGROUP GROUP g1 c11 COUNT 4 STREAMS x >
+ # XTRIM x MAXLEN = 2
+
+ r DEL x
+ r RESTORE x 0 "\x0F\x01\x10\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\xC3\x40\x4A\x40\x57\x16\x57\x00\x00\x00\x23\x00\x02\x01\x04\x01\x01\x01\x84\x64\x61\x74\x61\x05\x00\x01\x03\x01\x00\x20\x01\x03\x81\x61\x02\x04\x20\x0A\x00\x01\x40\x0A\x00\x62\x60\x0A\x00\x02\x40\x0A\x00\x63\x60\x0A\x40\x22\x01\x81\x64\x20\x0A\x40\x39\x20\x0A\x00\x65\x60\x0A\x00\x05\x40\x0A\x00\x66\x20\x0A\x00\xFF\x02\x06\x00\x02\x02\x67\x31\x05\x00\x04\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x3E\xF7\x83\x43\x7A\x01\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x3E\xF7\x83\x43\x7A\x01\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x3E\xF7\x83\x43\x7A\x01\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x00\x3E\xF7\x83\x43\x7A\x01\x00\x00\x01\x01\x03\x63\x31\x31\x3E\xF7\x83\x43\x7A\x01\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x00\x02\x67\x32\x00\x00\x00\x00\x09\x00\x3D\x52\xEF\x68\x67\x52\x1D\xFA"
+
+ set reply [r XINFO STREAM x FULL]
+ assert_equal [dict get $reply max-deleted-entry-id] "0-0"
+ assert_equal [dict get $reply entries-added] 2
+ set group [lindex [dict get $reply groups] 0]
+ assert_equal [dict get $group entries-read] 1
+ assert_equal [dict get $group lag] 1
+ set group [lindex [dict get $reply groups] 1]
+ assert_equal [dict get $group entries-read] 0
+ assert_equal [dict get $group lag] 2
+ }
+
 start_server {tags {"external:skip"}} {
 set master [srv -1 client]
 set master_host [srv -1 host]
 set master_port [srv -1 port]
@@ -733,6 +948,46 @@ start_server {
 }
 }

+ start_server {tags {"external:skip"}} {
+ set master [srv -1 client]
+ set master_host [srv -1 host]
+ set master_port [srv -1 port]
+ set replica [srv 0 client]
+
+ foreach autoclaim {0 1} {
+ test "Replication tests of XCLAIM with deleted entries (autoclaim=$autoclaim)" {
+ $replica replicaof $master_host $master_port
+ wait_for_condition 50 100 {
+ [s 0 master_link_status] eq {up}
+ } else {
+ fail "Replication not started."
+ }
+
+ $master DEL x
+ $master XADD x 1-0 f v
+ $master XADD x 2-0 f v
+ $master XADD x 3-0 f v
+ $master XADD x 4-0 f v
+ $master XADD x 5-0 f v
+ $master XGROUP CREATE x grp 0
+ assert_equal [$master XREADGROUP GROUP grp Alice STREAMS x >] {{x {{1-0 {f v}} {2-0 {f v}} {3-0 {f v}} {4-0 {f v}} {5-0 {f v}}}}}
+ wait_for_ofs_sync $master $replica
+ assert_equal [llength [$replica XPENDING x grp - + 10 Alice]] 5
+ $master XDEL x 2-0
+ $master XDEL x 4-0
+ if {$autoclaim} {
+ assert_equal [$master XAUTOCLAIM x grp Bob 0 0-0] {0-0 {{1-0 {f v}} {3-0 {f v}} {5-0 {f v}}} {2-0 4-0}}
+ wait_for_ofs_sync $master $replica
+ assert_equal [llength [$replica XPENDING x grp - + 10 Alice]] 0
+ } else {
+ assert_equal [$master XCLAIM x grp Bob 0 1-0 2-0 3-0 4-0] {{1-0 {f v}} {3-0 {f v}}}
+ wait_for_ofs_sync $master $replica
+ assert_equal [llength [$replica XPENDING x grp - + 10 Alice]] 1
+ }
+ }
+ }
+ }
+
 start_server {tags {"stream needs:debug"} overrides {appendonly yes aof-use-rdb-preamble no}} {
 test {Empty stream with no lastid can be rewrite into AOF correctly} {
 r XGROUP CREATE mystream group-name $ MKSTREAM
@@ -742,7 +997,7 @@ start_server {
 waitForBgrewriteaof r
 r debug loadaof
 assert {[dict get [r xinfo stream mystream] length] == 0}
- assert {[r xinfo groups mystream] == $grpinfo}
+ assert_equal [r xinfo groups mystream] $grpinfo
 }
 }
 }
diff --git a/tests/unit/type/stream.tcl b/tests/unit/type/stream.tcl
index 7ba3ed116..bd689cd29 100644
--- a/tests/unit/type/stream.tcl
+++ b/tests/unit/type/stream.tcl
@@ -760,7 +760,9 @@ start_server {tags {"stream xsetid"}} {
 test {XSETID can set a specific ID} {
 r XSETID mystream "200-0"
- assert {[dict get [r xinfo stream mystream] last-generated-id] == "200-0"}
+ set reply [r XINFO stream mystream]
+ assert_equal [dict get $reply last-generated-id] "200-0"
+ assert_equal [dict get $reply entries-added] 1
 }

 test {XSETID cannot SETID with smaller ID} {
@@ -774,6 +776,98 @@ start_server {tags {"stream xsetid"}} {
 catch {r XSETID stream 1-1} err
 set _ $err
 } {ERR no such key}
+
+ test {XSETID cannot run with an offset but without a maximal tombstone} {
+ catch {r XSETID stream 1-1 0} err
+ set _ $err
+ } {ERR syntax error}
+
+ test {XSETID cannot run with a maximal tombstone but without an offset} {
+ catch {r XSETID stream 1-1 0-0} err
+ set _ $err
+ } {ERR syntax error}
+
+ test {XSETID errors on negative offset} {
+ catch {r XSETID stream 1-1 ENTRIESADDED -1 MAXDELETEDID 0-0} err
+ set _ $err
+ } {ERR*must be positive}
+
+ test {XSETID cannot set the maximal tombstone with larger ID} {
+ r DEL x
+ r XADD x 1-0 a b
+
+ catch {r XSETID x "1-0" ENTRIESADDED 1 MAXDELETEDID "2-0" } err
+ r XADD mystream MAXLEN 0 * a b
+ set err
+ } {ERR*smaller*}
+
+ test {XSETID cannot set the offset to less than the length} {
+ r DEL x
+ r XADD x 1-0 a b
+
+ catch {r XSETID x "1-0" ENTRIESADDED 0 MAXDELETEDID "0-0" } err
+ r XADD mystream MAXLEN 0 * a b
+ set err
+ } {ERR*smaller*}
+}
+
+start_server {tags {"stream offset"}} {
+ test {XADD advances the entries-added counter and sets the recorded-first-entry-id} {
+ r DEL x
+ r XADD x 1-0 data a
+
+ set reply [r XINFO STREAM x FULL]
+ assert_equal [dict get $reply entries-added] 1
+ assert_equal [dict get $reply recorded-first-entry-id] "1-0"
+
+ r XADD x 2-0 data a
+ set reply [r XINFO STREAM x FULL]
+ assert_equal [dict get $reply entries-added] 2
+ assert_equal [dict get $reply recorded-first-entry-id] "1-0"
+ }
+
+ test {XDEL/TRIM are reflected by recorded first entry} {
+ r DEL x
+ r XADD x 1-0 data a
+ r XADD x 2-0 data a
+ r XADD x 3-0 data a
+ r XADD x 4-0 data a
+ r XADD x 5-0 data a
+
+ set reply [r XINFO STREAM x FULL]
+ assert_equal [dict get $reply entries-added] 5
+ assert_equal [dict get $reply recorded-first-entry-id] "1-0"
+
+ r XDEL x 2-0
+ set reply [r XINFO STREAM x FULL]
+ assert_equal [dict get $reply recorded-first-entry-id] "1-0"
+
+ r XDEL x 1-0
+ set reply [r XINFO STREAM x FULL]
+ assert_equal [dict get $reply recorded-first-entry-id] "3-0"
+
+ r XTRIM x MAXLEN = 2
+ set reply [r XINFO STREAM x FULL]
+ assert_equal [dict get $reply recorded-first-entry-id] "4-0"
+ }
+
+ test {Maximum XDEL ID behaves correctly} {
+ r DEL x
+ r XADD x 1-0 data a
+ r XADD x 2-0 data b
+ r XADD x 3-0 data c
+
+ set reply [r XINFO STREAM x FULL]
+ assert_equal [dict get $reply max-deleted-entry-id] "0-0"
+
+ r XDEL x 2-0
+ set reply [r XINFO STREAM x FULL]
+ assert_equal [dict get $reply max-deleted-entry-id] "2-0"
+
+ r XDEL x 1-0
+ set reply [r XINFO STREAM x FULL]
+ assert_equal [dict get $reply max-deleted-entry-id] "2-0"
+ }
 }

 start_server {tags {"stream needs:debug"} overrides {appendonly yes aof-use-rdb-preamble no}} {
@@ -796,7 +890,7 @@ start_server {tags {"stream needs:debug"} overrides {appendonly yes aof-use-rdb-
 waitForBgrewriteaof r
 r debug loadaof
 assert {[dict get [r xinfo stream mystream] length] == 1}
- assert {[dict get [r xinfo stream mystream] last-generated-id] == "2-2"}
+ assert_equal [dict get [r xinfo stream mystream] last-generated-id] "2-2"
 }
 }