浏览代码

Add fail no agree test.

Jing Yang 5 年之前
父节点
当前提交
397c8aad63
共有 2 个文件被更改,包括 75 次插入和 9 次删除
  1. 50 0
      tests/agreement_tests.rs
  2. 25 9
      tests/config/mod.rs

+ 50 - 0
tests/agreement_tests.rs

@@ -60,3 +60,53 @@ fn fail_agree() -> config::Result<()> {
 
     Ok(())
 }
+
#[test]
fn fail_no_agree() -> config::Result<()> {
    const SERVERS: usize = 5;
    let cfg = config::make_config(SERVERS, false);
    let _guard = cfg.deferred_cleanup();

    cfg.begin("Test (2B): no agreement if too many followers disconnect");

    // Establish a baseline entry while the full cluster is connected.
    cfg.one(10, SERVERS, false)?;

    // Partition away three of the five followers, leaving the leader
    // with only a minority (itself plus one follower).
    let leader = cfg.check_one_leader()?;
    for offset in 1..=3 {
        cfg.disconnect((leader + offset) % SERVERS);
    }

    // The leader must still accept the command locally ...
    let start_result = cfg.leader_start(leader, 20);
    assert!(start_result.is_some(), "leader rejected start()");
    let index = start_result.unwrap().1;
    assert_eq!(2, index, "expected index 2, got {}", index);

    config::sleep_election_timeouts(2);

    // ... but without a majority the entry must never commit.
    let (commit_count, _) = cfg.committed_count(index)?;
    assert_eq!(
        0, commit_count,
        "{} committed but no majority",
        commit_count
    );

    // Heal the partition.
    for offset in 1..=3 {
        cfg.connect((leader + offset) % SERVERS);
    }

    // the disconnected majority may have chosen a leader from
    // among their own ranks, forgetting index 2.
    let leader2 = cfg.check_one_leader()?;
    let retry_result = cfg.leader_start(leader2, 30);
    assert!(retry_result.is_some(), "leader2 rejected start()");
    let index = retry_result.unwrap().1;
    assert!(index == 2 || index == 3, "unexpected index {}", index);

    // With everyone reconnected, agreement must succeed again.
    cfg.one(1000, SERVERS, true)?;

    cfg.end();

    Ok(())
}

+ 25 - 9
tests/config/mod.rs

@@ -1,9 +1,15 @@
+use std::collections::HashMap;
+use std::sync::Arc;
+use std::time::Instant;
+
+pub use anyhow::Result;
 use parking_lot::Mutex;
 use rand::{thread_rng, Rng};
+use tokio::time::Duration;
+
 use ruaft::rpcs::register_server;
+use ruaft::utils::DropGuard;
 use ruaft::{Raft, RpcClient};
-use std::collections::HashMap;
-use std::sync::Arc;
 
 struct ConfigState {
     rafts: Vec<Option<Raft>>,
@@ -23,11 +29,6 @@ pub struct Config {
     log: Arc<Mutex<LogState>>,
 }
 
-pub use anyhow::Result;
-use ruaft::utils::DropGuard;
-use std::time::Instant;
-use tokio::time::Duration;
-
 impl Config {
     fn server_name(i: usize) -> String {
         format!("ruaft-server-{}", i)
@@ -226,7 +227,7 @@ impl Config {
         }
     }
 
-    pub fn start1(&mut self, index: usize) -> std::io::Result<()> {
+    pub fn start1(&mut self, index: usize) -> Result<()> {
         if self.state.lock().rafts[index].is_some() {
             self.crash1(index);
         }
@@ -249,7 +250,22 @@ impl Config {
         self.state.lock().rafts[index].replace(raft.clone());
 
         let raft = Arc::new(raft);
-        register_server(raft, Self::server_name(index), self.network.as_ref())
+        register_server(raft, Self::server_name(index), self.network.as_ref())?;
+        Ok(())
+    }
+
+    pub fn leader_start(
+        &self,
+        leader: usize,
+        cmd: i32,
+    ) -> Option<(usize, usize)> {
+        self.state.lock().rafts[leader]
+            .as_ref()
+            .map(|raft| {
+                raft.start(ruaft::Command(cmd))
+                    .map(|(term, index)| (term.0, index))
+            })
+            .unwrap()
     }
 
     pub fn end(&self) {}