"Fossies" - the Fresh Open Source Software Archive

Member "redis-7.0.5/tests/cluster/tests/28-cluster-shards.tcl" (21 Sep 2022, 7669 Bytes) of package /linux/misc/redis-7.0.5.tar.gz:


As a special service "Fossies" has tried to format the requested source page into HTML format using (guessed) Tcl/Tk source code syntax highlighting (style: standard) with prefixed line numbers. Alternatively, you can view or download the uninterpreted source code file here. See also the latest Fossies "Diffs" side-by-side code-changes report for "28-cluster-shards.tcl": 7.0.2_vs_7.0.3.

source "../tests/includes/init-tests.tcl"

# Initial slot distribution.
# Each list is a flat sequence of start/end pairs that is later splatted
# into `CLUSTER ADDSLOTSRANGE` (see cluster_create_with_split_slots), e.g.
# ::slot0 encodes the ranges 0-1000, 1002-5459, 5461-5461 and 10926-10926.
# Together the four lists cover the entire 0-16383 slot space.
set ::slot0 [list 0 1000 1002 5459 5461 5461 10926 10926]
set ::slot1 [list 5460 5460 5462 10922 10925 10925]
set ::slot2 [list 10923 10924 10927 16383]
set ::slot3 [list 1001 1001]
    8 
    9 proc cluster_create_with_split_slots {masters replicas} {
   10     for {set j 0} {$j < $masters} {incr j} {
   11         R $j cluster ADDSLOTSRANGE {*}[set ::slot${j}]
   12     }
   13     if {$replicas} {
   14         cluster_allocate_slaves $masters $replicas
   15     }
   16     set ::cluster_master_nodes $masters
   17     set ::cluster_replica_nodes $replicas
   18 }
   19 
   20 # Get the node info with the specific node_id from the
   21 # given reference node. Valid type options are "node" and "shard"
   22 proc get_node_info_from_shard {id reference {type node}} {
   23     set shards_response [R $reference CLUSTER SHARDS]
   24     foreach shard_response $shards_response {
   25         set nodes [dict get $shard_response nodes]
   26         foreach node $nodes {
   27             if {[dict get $node id] eq $id} {
   28                 if {$type eq "node"} {
   29                     return $node
   30                 } elseif {$type eq "shard"} {
   31                     return $shard_response
   32                 } else {
   33                     return {}
   34                 }
   35             }
   36         }
   37     }
   38     # No shard found, return nothing
   39     return {}
   40 }
   41 
test "Create a 8 nodes cluster with 4 shards" {
    # 4 masters carrying the split slot layout defined at the top of the
    # file, plus 4 replicas (one per master).
    cluster_create_with_split_slots 4 4
}
   45 
test "Cluster should start ok" {
    # All nodes must agree the cluster state is "ok" before proceeding.
    assert_cluster_state ok
}
   49 
   50 test "Set cluster hostnames and verify they are propagated" {
   51     for {set j 0} {$j < $::cluster_master_nodes + $::cluster_replica_nodes} {incr j} {
   52         R $j config set cluster-announce-hostname "host-$j.com"
   53     }
   54 
   55     # Wait for everyone to agree about the state
   56     wait_for_cluster_propagation
   57 }
   58 
   59 test "Verify information about the shards" {
   60     set ids {}
   61     for {set j 0} {$j < $::cluster_master_nodes + $::cluster_replica_nodes} {incr j} {
   62         lappend ids [R $j CLUSTER MYID]
   63     }
   64     set slots [list $::slot0 $::slot1 $::slot2 $::slot3 $::slot0 $::slot1 $::slot2 $::slot3]
   65 
   66     # Verify on each node (primary/replica), the response of the `CLUSTER SLOTS` command is consistent.
   67     for {set ref 0} {$ref < $::cluster_master_nodes + $::cluster_replica_nodes} {incr ref} {
   68         for {set i 0} {$i < $::cluster_master_nodes + $::cluster_replica_nodes} {incr i} {
   69             assert_equal [lindex $slots $i] [dict get [get_node_info_from_shard [lindex $ids $i] $ref "shard"] slots]
   70             assert_equal "host-$i.com" [dict get [get_node_info_from_shard [lindex $ids $i] $ref "node"] hostname]
   71             assert_equal "127.0.0.1"  [dict get [get_node_info_from_shard [lindex $ids $i] $ref "node"] ip]
   72             # Default value of 'cluster-preferred-endpoint-type' is ip.
   73             assert_equal "127.0.0.1"  [dict get [get_node_info_from_shard [lindex $ids $i] $ref "node"] endpoint]
   74 
   75             if {$::tls} {
   76                 assert_equal [get_instance_attrib redis $i plaintext-port] [dict get [get_node_info_from_shard [lindex $ids $i] $ref "node"] port]
   77                 assert_equal [get_instance_attrib redis $i port] [dict get [get_node_info_from_shard [lindex $ids $i] $ref "node"] tls-port]
   78             } else {
   79                 assert_equal [get_instance_attrib redis $i port] [dict get [get_node_info_from_shard [lindex $ids $i] $ref "node"] port]
   80             }
   81 
   82             if {$i < 4} {
   83                 assert_equal "master" [dict get [get_node_info_from_shard [lindex $ids $i] $ref "node"] role]
   84                 assert_equal "online" [dict get [get_node_info_from_shard [lindex $ids $i] $ref "node"] health]
   85             } else {
   86                 assert_equal "replica" [dict get [get_node_info_from_shard [lindex $ids $i] $ref "node"] role]
   87                 # Replica could be in online or loading
   88             }
   89         }
   90     }    
   91 }
   92 
test "Verify no slot shard" {
    # Node 8 has no slots assigned
    set node_8_id [R 8 CLUSTER MYID]
    # Its shard must report an empty slot list both from its own view
    # and from another node's (node 0) view of the cluster.
    assert_equal {} [dict get [get_node_info_from_shard $node_8_id 8 "shard"] slots]
    assert_equal {} [dict get [get_node_info_from_shard $node_8_id 0 "shard"] slots]
}
   99 
# Remember the id of shard #0's primary before it is killed, so its
# reported health can be queried afterwards.
set node_0_id [R 0 CLUSTER MYID]

test "Kill a node and tell the replica to immediately takeover" {
    kill_instance redis 0
    # Forced failover: the replica (node 4) promotes itself immediately
    # instead of waiting for agreement from the (dead) primary.
    R 4 cluster failover force
}
  106 
# Primary 0 node should report as fail, wait until the new primary acknowledges it.
test "Verify health as fail for killed node" {
    # Poll (50 retries, 100ms apart) the new primary's CLUSTER SHARDS
    # output until the killed node's health flips to "fail".
    wait_for_condition 50 100 {
        "fail" eq [dict get [get_node_info_from_shard $node_0_id 4 "node"] "health"]
    } else {
        fail "New primary never detected the node failed"
    }
}
  115 
# After the forced failover, node 4 is the shard's primary and the
# restarted node 0 becomes its replica.
set primary_id 4
set replica_id 0

test "Restarting primary node" {
    # Bring the killed instance (old primary, now $replica_id) back up.
    restart_instance redis $replica_id
}

test "Instance #0 gets converted into a replica" {
    # The restarted node should learn about the failover and demote
    # itself to a replica of node 4.
    wait_for_condition 1000 50 {
        [RI $replica_id role] eq {slave}
    } else {
        fail "Old primary was not converted into replica"
    }
}
  130 
test "Test the replica reports a loading state while it's loading" {
    # Test the command is good for verifying everything moves to a happy state
    set replica_cluster_id [R $replica_id CLUSTER MYID]
    wait_for_condition 50 1000 {
        [dict get [get_node_info_from_shard $replica_cluster_id $primary_id "node"] health] eq "online"
    } else {
        fail "Replica never transitioned to online"
    }

    # Set 1 MB of data, so there is something to load on full sync
    R $primary_id debug populate 1000 key 1000

    # Shrink the replication backlog so that, once the replica link is
    # killed below, a partial resync is impossible and the replica is
    # forced into a full sync.
    R $primary_id config set repl-backlog-size 100

    # Set the key load delay so that it will take at least
    # 2 seconds to fully load the data.
    R $replica_id config set key-load-delay 4000

    # Trigger event loop processing every 1024 bytes, this trigger
    # allows us to send and receive cluster messages, so we are setting
    # it low so that the cluster messages are sent more frequently.
    R $replica_id config set loading-process-events-interval-bytes 1024

    # Atomically kill the replica link and write new data, so the
    # replica reconnects to a primary whose backlog no longer covers it.
    R $primary_id multi
    R $primary_id client kill type replica
    # populate the correct data
    set num 100
    set value [string repeat A 1024]
    for {set j 0} {$j < $num} {incr j} {
        # Use hashtag valid for shard #0
        set key "{ch3}$j"
        R $primary_id set $key $value
    }
    R $primary_id exec

    # The replica should reconnect and start a full sync, it will gossip about its health to the primary.
    wait_for_condition 50 1000 {
        "loading" eq [dict get [get_node_info_from_shard $replica_cluster_id $primary_id "node"] health]
    } else {
        fail "Replica never transitioned to loading"
    }

    # Speed up the key loading and verify everything resumes
    R $replica_id config set key-load-delay 0

    wait_for_condition 50 1000 {
        "online" eq [dict get [get_node_info_from_shard $replica_cluster_id $primary_id "node"] health]
    } else {
        fail "Replica never transitioned to online"
    }

    # Final sanity, the replica agrees it is online.
    assert_equal "online" [dict get [get_node_info_from_shard $replica_cluster_id $replica_id "node"] health]
}
  186 
test "Regression test for a crash when calling SHARDS during handshake" {
    # Reset and forget a node, so we can use it to establish handshaking connections
    set id [R 19 CLUSTER MYID]
    R 19 CLUSTER RESET HARD
    for {set i 0} {$i < 19} {incr i} {
        R $i CLUSTER FORGET $id
    }
    R 19 cluster meet 127.0.0.1 [get_instance_attrib redis 0 port]
    # This line would previously crash, since all the outbound
    # connections were in handshake state.
    R 19 CLUSTER SHARDS
}
  199 
# Final sanity check: after all the failovers and resets above, the
# cluster must converge back to an ok state.
test "Cluster is up" {
    assert_cluster_state ok
}