-
Notifications
You must be signed in to change notification settings - Fork 499
/
15-cluster-slots.tcl
159 lines (137 loc) · 5.41 KB
/
15-cluster-slots.tcl
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
source "../tests/includes/init-tests.tcl"
proc cluster_allocate_mixedSlots {n} {
    # Assign all 16384 hash slots round-robin across the n masters, so each
    # node ends up owning an interleaved (non-contiguous) set of slots.
    # Slots are collected from 16383 down to 0, matching the order in which
    # they are passed to CLUSTER ADDSLOTS.
    for {set s 16383} {$s >= 0} {incr s -1} {
        lappend bucket([expr {$s % $n}]) $s
    }
    for {set j 0} {$j < $n} {incr j} {
        R $j cluster addslots {*}$bucket($j)
    }
}
proc create_cluster_with_mixedSlot {masters slaves} {
    # Build a cluster whose slots are interleaved across the masters,
    # optionally attach replicas, and wait for the cluster to reach OK state.
    #
    # masters: number of master nodes to receive slots.
    # slaves:  total number of replicas to distribute (0 for none).
    cluster_allocate_mixedSlots $masters
    if {$slaves != 0} {
        cluster_allocate_slaves $masters $slaves
    }
    assert_cluster_state ok
}
# Bring up a 5-master cluster with interleaved slot assignment and
# 15 replicas.
test "Create a 5 nodes cluster" {
create_cluster_with_mixedSlot 5 15
}
# Sanity check: every node reports cluster state ok.
test "Cluster is up" {
assert_cluster_state ok
}
# Sanity check: keys can be written through node 0.
test "Cluster is writable" {
cluster_write_test 0
}
# With 5 masters (instances #0-#4), instance #5 must be a replica.
test "Instance #5 is a slave" {
assert {[RI 5 role] eq {slave}}
}
test "client do not break when cluster slot" {
    # Tighten the normal-client output buffer limit, then verify that the
    # CLUSTER SLOTS reply still goes through without the server dropping
    # the connection for exceeding the buffer.
    R 0 config set client-output-buffer-limit "normal 33554432 16777216 60"
    set errored [catch {R 0 cluster slots}]
    if {$errored} {
        fail "output overflow when cluster slots"
    }
}
test "client can handle keys with hash tag" {
    # Write a key containing a {hash tag} through the cluster-aware client;
    # the client must route it by the tag without erroring.
    set node0_port [get_instance_attrib redis 0 port]
    set cluster [redis_cluster 127.0.0.1:$node0_port]
    $cluster set foo{tag} bar
    $cluster close
}
# End-to-end check of a manual slot migration between two masters, plus a
# check that a third, uninvolved master publishes the same MOVED
# notification on the __cluster__:moved channel.
test "slot migration is valid from primary to another primary" {
set startup_port [get_instance_attrib redis 0 port]
set cluster [redis_cluster 127.0.0.1:$startup_port]
set key order1
set slot [$cluster cluster keyslot $key]
# Source master (owns the slot) and destination master (does not).
array set nodefrom [$cluster masternode_for_slot $slot]
array set nodeto [$cluster masternode_notfor_slot $slot]
# A 3rd node for checking that it gets informed about the migration
array set node3 [$cluster masternode_notfor_slot $slot]
# masternode_notfor_slot may return the destination again; retry until
# node3 is distinct from both the source and the destination.
while { $node3(port) eq $nodefrom(port) || $node3(port) eq $nodeto(port) } {
array set node3 [$cluster masternode_notfor_slot $slot]
}
# Check that all nodes are different
assert_not_equal $nodefrom(port) $nodeto(port)
assert_not_equal $nodefrom(port) $node3(port)
assert_not_equal $nodeto(port) $node3(port)
# Test subscribe to moved slot notifications
# Subscribe on all three nodes BEFORE the migration so no message is missed.
set rd1 [redis_deferring_client_by_addr 127.0.0.1 $nodefrom(port)]
set rd2 [redis_deferring_client_by_addr 127.0.0.1 $nodeto(port)]
set rd3 [redis_deferring_client_by_addr 127.0.0.1 $node3(port)]
assert_equal {1} [subscribe $rd1 {__cluster__:moved}]
assert_equal {1} [subscribe $rd2 {__cluster__:moved}]
assert_equal {1} [subscribe $rd3 {__cluster__:moved}]
# Manual migration handshake: mark the slot IMPORTING on the destination,
# MIGRATING on the source, then finalize ownership with SETSLOT NODE on
# both sides (order matters).
assert_equal {OK} [$nodeto(link) cluster setslot $slot importing $nodefrom(id)]
assert_equal {OK} [$nodefrom(link) cluster setslot $slot migrating $nodeto(id)]
assert_equal {OK} [$nodefrom(link) cluster setslot $slot node $nodeto(id)]
assert_equal {OK} [$nodeto(link) cluster setslot $slot node $nodeto(id)]
# Check that we got the pubsub MOVED message from all nodes.
set expect_content "MOVED $slot 127.0.0.1:$nodeto(port)"
set expect_publish "message __cluster__:moved {$expect_content}"
assert_equal $expect_publish [$rd1 read]
assert_equal $expect_publish [$rd2 read]
assert_equal $expect_publish [$rd3 read]
$rd1 close
$rd2 close
$rd3 close
}
test "slot migration is invalid from primary to replica" {
    # Attempt to hand a slot over to a replica: CLUSTER SETSLOT ... NODE
    # only accepts master targets, so the server must reject it.
    set cluster [redis_cluster 127.0.0.1:[get_instance_attrib redis 0 port]]
    set key order1
    set slot [$cluster cluster keyslot $key]
    array set nodefrom [$cluster masternode_for_slot $slot]

    # Get a replica node serving the slot.
    set replicanodeinfo [$cluster cluster replicas $nodefrom(id)]
    puts $replicanodeinfo
    set args [split $replicanodeinfo " "]
    # The replica id is the first field of the reply, stripped of the
    # leading brace the reply formatting wraps it in.
    set replicaid [lindex [split [lindex $args 0] \{] 1]
    puts $replicaid

    # NOTE: the command is NOT wrapped in brackets inside catch. The old
    # form `catch {[cmd]} err` evaluated the command substitution and then
    # tried to execute its reply as a Tcl command, so a wrongly-succeeding
    # SETSLOT would surface as "invalid command name OK" instead of a
    # clean assertion failure.
    catch {$nodefrom(link) cluster setslot $slot node $replicaid} err
    assert_match "*Target node is not a master" $err
}
proc count_bound_slots {n} {
    # Return the total number of hash slots a node reports as bound.
    #
    # n: a redis instance command handle; queried with `$n cluster slots`,
    #    which yields a list of {start end ...} slot-range entries.
    set slot_count 0
    foreach slot_range_mapping [$n cluster slots] {
        set start_slot [lindex $slot_range_mapping 0]
        set end_slot [lindex $slot_range_mapping 1]
        # Braced expr: avoids double substitution of the operand strings
        # (safer and faster than the unbraced form).
        incr slot_count [expr {$end_slot - $start_slot + 1}]
    }
    return $slot_count
}
test "slot must be unbound on the owner when it is deleted" {
    set owner [Rn 0]
    set bystander [Rn 1]
    # Both nodes start out seeing the complete slot space as bound.
    assert {[count_bound_slots $owner] eq 16384}
    assert {[count_bound_slots $bystander] eq 16384}
    set slot_to_delete 0
    # Drop the slot on the node that owns it.
    $owner CLUSTER DELSLOTS $slot_to_delete
    # The owner must record the deletion and report one slot fewer.
    wait_for_condition 1000 50 {
        [count_bound_slots $owner] == 16383
    } else {
        fail "Cluster slot deletion was not recorded on the node that owns the slot"
    }
    # Slot deletion is not propagated cluster-wide: the other node keeps
    # seeing all 16384 slots bound, which can cost clients an extra
    # redirect before they learn the slot is unbound.
    wait_for_condition 1000 50 {
        [count_bound_slots $bystander] == 16384
    } else {
        fail "Cluster slot deletion should not be propagated to all nodes in the cluster"
    }
}
if {$::tls} {
    test {CLUSTER SLOTS from non-TLS client in TLS cluster} {
        # In a TLS cluster the reply to CLUSTER SLOTS should advertise the
        # port matching the client's connection type, so a TLS client and a
        # plaintext client must see different port values.
        set tls_view [R 0 cluster slots]
        set addr [get_instance_attrib redis 0 host]
        set plain_port [get_instance_attrib redis 0 plaintext-port]
        set plain_client [redis $addr $plain_port 0 0]
        set plain_view [$plain_client cluster slots]
        $plain_client close
        # The port field of the first row must differ between the two views.
        assert_no_match [lindex $tls_view 0 3 1] [lindex $plain_view 0 3 1]
    }
}