//! persist_tests.rs — Raft persistence (lab 2C) integration tests:
//! crash/restart persistence, Figure 8 scenarios, unreliable networks,
//! and churn with concurrent clients.
  1. #[macro_use]
  2. extern crate anyhow;
  3. extern crate bytes;
  4. extern crate labrpc;
  5. extern crate ruaft;
  6. #[macro_use]
  7. extern crate scopeguard;
  8. use std::sync::atomic::AtomicBool;
  9. use std::sync::atomic::Ordering;
  10. use std::sync::Arc;
  11. use rand::{thread_rng, Rng};
  12. mod config;
  13. #[test]
  14. fn persist() -> config::Result<()> {
  15. const SERVERS: usize = 5;
  16. let cfg = config::make_config(SERVERS, false);
  17. defer!(cfg.cleanup());
  18. cfg.begin("Test (2C): basic persistence");
  19. cfg.one(11, SERVERS, true)?;
  20. // crash and re-start all
  21. for i in 0..SERVERS {
  22. cfg.start1(i)?;
  23. }
  24. for i in 0..SERVERS {
  25. cfg.disconnect(i);
  26. cfg.connect(i);
  27. }
  28. cfg.one(12, SERVERS, true)?;
  29. let leader1 = cfg.check_one_leader()?;
  30. cfg.disconnect(leader1);
  31. cfg.start1(leader1)?;
  32. cfg.connect(leader1);
  33. cfg.one(13, SERVERS, true)?;
  34. let leader2 = cfg.check_one_leader()?;
  35. cfg.disconnect(leader2);
  36. cfg.one(14, SERVERS - 1, true)?;
  37. cfg.start1(leader2)?;
  38. cfg.connect(leader2);
  39. // wait for leader2 to join before killing i3
  40. cfg.wait(4, SERVERS, None)?;
  41. let i3 = (cfg.check_one_leader()? + 1) % SERVERS;
  42. cfg.disconnect(i3);
  43. cfg.one(15, SERVERS - 1, true)?;
  44. cfg.start1(i3)?;
  45. cfg.connect(i3);
  46. cfg.one(16, SERVERS, true)?;
  47. cfg.end();
  48. Ok(())
  49. }
  50. #[test]
  51. fn persist2() -> config::Result<()> {
  52. const SERVERS: usize = 5;
  53. let cfg = config::make_config(SERVERS, false);
  54. defer!(cfg.cleanup());
  55. cfg.begin("Test (2C): more persistence");
  56. let mut index = 1;
  57. for _ in 0..5 {
  58. cfg.one(10 + index, SERVERS, true)?;
  59. index += 1;
  60. let leader1 = cfg.check_one_leader()?;
  61. cfg.disconnect((leader1 + 1) % SERVERS);
  62. cfg.disconnect((leader1 + 2) % SERVERS);
  63. cfg.one(10 + index, SERVERS - 2, true)?;
  64. index += 1;
  65. cfg.disconnect((leader1 + 0) % SERVERS);
  66. cfg.disconnect((leader1 + 3) % SERVERS);
  67. cfg.disconnect((leader1 + 4) % SERVERS);
  68. cfg.start1((leader1 + 1) % SERVERS)?;
  69. cfg.start1((leader1 + 2) % SERVERS)?;
  70. cfg.connect((leader1 + 1) % SERVERS);
  71. cfg.connect((leader1 + 2) % SERVERS);
  72. config::sleep_election_timeouts(1);
  73. cfg.start1((leader1 + 3) % SERVERS)?;
  74. cfg.connect((leader1 + 3) % SERVERS);
  75. cfg.one(10 + index, SERVERS - 2, true)?;
  76. index += 1;
  77. cfg.connect((leader1 + 4) % SERVERS);
  78. cfg.connect((leader1 + 0) % SERVERS);
  79. }
  80. cfg.one(1000, SERVERS, true)?;
  81. cfg.end();
  82. Ok(())
  83. }
  84. #[test]
  85. fn persist3() -> config::Result<()> {
  86. const SERVERS: usize = 3;
  87. let cfg = config::make_config(SERVERS, false);
  88. defer!(cfg.cleanup());
  89. cfg.begin(
  90. "Test (2C): partitioned leader and one follower crash, leader restarts",
  91. );
  92. cfg.one(101, 3, true)?;
  93. let leader = cfg.check_one_leader()?;
  94. cfg.disconnect((leader + 2) % SERVERS);
  95. cfg.one(102, 2, true)?;
  96. cfg.crash1((leader + 0) % SERVERS);
  97. cfg.crash1((leader + 1) % SERVERS);
  98. cfg.connect((leader + 2) % SERVERS);
  99. cfg.start1((leader + 0) % SERVERS)?;
  100. cfg.connect((leader + 0) % SERVERS);
  101. cfg.one(103, 2, true)?;
  102. cfg.start1((leader + 1) % SERVERS)?;
  103. cfg.connect((leader + 1) % SERVERS);
  104. cfg.one(104, SERVERS, true)?;
  105. cfg.end();
  106. Ok(())
  107. }
  108. #[test]
  109. fn figure8() -> config::Result<()> {
  110. const SERVERS: usize = 5;
  111. let cfg = config::make_config(SERVERS, false);
  112. defer!(cfg.cleanup());
  113. cfg.begin("Test (2C): Figure 8");
  114. cfg.one(thread_rng().gen(), 1, true)?;
  115. let mut nup = SERVERS;
  116. for _ in 0..1000 {
  117. let mut leader = None;
  118. for i in 0..SERVERS {
  119. if cfg.is_server_alive(i) {
  120. if let Some(_) = cfg.leader_start(i, thread_rng().gen()) {
  121. leader = Some(i);
  122. }
  123. }
  124. }
  125. let millis_upper = if thread_rng().gen_ratio(100, 1000) {
  126. config::LONG_ELECTION_TIMEOUT_MILLIS >> 1
  127. } else {
  128. // Magic number 13?
  129. 13
  130. };
  131. let millis = thread_rng().gen_range(0, millis_upper);
  132. config::sleep_millis(millis);
  133. if let Some(leader) = leader {
  134. cfg.crash1(leader);
  135. nup -= 1;
  136. }
  137. if nup < 3 {
  138. let index = thread_rng().gen_range(0, SERVERS);
  139. if !cfg.is_server_alive(index) {
  140. cfg.start1(index)?;
  141. cfg.connect(index);
  142. nup += 1
  143. }
  144. }
  145. }
  146. for index in 0..SERVERS {
  147. if !cfg.is_server_alive(index) {
  148. cfg.start1(index)?;
  149. cfg.connect(index);
  150. }
  151. }
  152. cfg.one(thread_rng().gen(), SERVERS, true)?;
  153. cfg.end();
  154. Ok(())
  155. }
  156. #[test]
  157. fn unreliable_agree() -> config::Result<()> {
  158. const SERVERS: usize = 5;
  159. let cfg = Arc::new(config::make_config(SERVERS, true));
  160. defer!(cfg.cleanup());
  161. cfg.begin("Test (2C): unreliable agreement");
  162. let mut handles = vec![];
  163. for iters in 1..50 {
  164. for j in 0..4 {
  165. let cfg = cfg.clone();
  166. let handle =
  167. std::thread::spawn(move || cfg.one(100 * iters + j, 1, true));
  168. handles.push(handle);
  169. }
  170. cfg.one(iters, 1, true)?;
  171. }
  172. cfg.set_unreliable(false);
  173. for handle in handles {
  174. handle.join().expect("Thread join should not fail")?;
  175. }
  176. cfg.one(100, SERVERS, true)?;
  177. cfg.end();
  178. Ok(())
  179. }
  180. #[test]
  181. fn figure8_unreliable() -> config::Result<()> {
  182. const SERVERS: usize = 5;
  183. let cfg = config::make_config(SERVERS, false);
  184. defer!(cfg.cleanup());
  185. cfg.begin("Test (2C): Figure 8 (unreliable)");
  186. cfg.one(thread_rng().gen_range(0, 10000), 1, true)?;
  187. let mut nup = SERVERS;
  188. for iters in 0..1000 {
  189. if iters == 200 {
  190. cfg.set_long_reordering(true);
  191. }
  192. let mut leader = None;
  193. for i in 0..SERVERS {
  194. if cfg.is_server_alive(i) {
  195. if let Some(_) = cfg.leader_start(i, thread_rng().gen()) {
  196. if cfg.is_connected(i) {
  197. leader = Some(i);
  198. }
  199. }
  200. }
  201. }
  202. let millis_upper = if thread_rng().gen_ratio(100, 1000) {
  203. config::LONG_ELECTION_TIMEOUT_MILLIS >> 1
  204. } else {
  205. // Magic number 13?
  206. 13
  207. };
  208. let millis = thread_rng().gen_range(0, millis_upper);
  209. config::sleep_millis(millis);
  210. if let Some(leader) = leader {
  211. if thread_rng().gen_ratio(1, 2) {
  212. cfg.disconnect(leader);
  213. nup -= 1;
  214. }
  215. }
  216. if nup < 3 {
  217. let index = thread_rng().gen_range(0, SERVERS);
  218. if !cfg.is_connected(index) {
  219. cfg.connect(index);
  220. nup += 1
  221. }
  222. }
  223. }
  224. for index in 0..SERVERS {
  225. if !cfg.is_connected(index) {
  226. cfg.connect(index);
  227. }
  228. }
  229. cfg.one(thread_rng().gen_range(0, 10000), SERVERS, true)?;
  230. cfg.end();
  231. Ok(())
  232. }
  233. fn internal_churn(unreliable: bool) -> config::Result<()> {
  234. const SERVERS: usize = 5;
  235. let cfg = Arc::new(config::make_config(SERVERS, false));
  236. defer!(cfg.cleanup());
  237. if unreliable {
  238. cfg.begin("Test (2C): unreliable churn");
  239. } else {
  240. cfg.begin("Test (2C): churn");
  241. }
  242. let stop = Arc::new(AtomicBool::new(false));
  243. let mut handles = vec![];
  244. for client_index in 0..3 {
  245. let stop = stop.clone();
  246. let cfg = cfg.clone();
  247. let handle = std::thread::spawn(move || {
  248. let mut cmds = vec![];
  249. while !stop.load(Ordering::SeqCst) {
  250. let cmd = thread_rng().gen();
  251. let mut index = None;
  252. for i in 0..SERVERS {
  253. if cfg.is_server_alive(i) {
  254. let start = cfg.leader_start(i, cmd);
  255. if start.is_some() {
  256. index = Some(i);
  257. }
  258. }
  259. }
  260. if let Some(index) = index {
  261. for millis in [10, 20, 50, 100, 200].iter() {
  262. let (cmd_index, cmd_committed) =
  263. // somehow the compiler cannot infer the error type.
  264. match cfg.committed_count(index) {
  265. Ok(t) => t,
  266. Err(e) => return Err(e),
  267. };
  268. if cmd_index > 0 {
  269. if cmd_committed == cmd {
  270. cmds.push(cmd);
  271. }
  272. // The contract we started might not get
  273. }
  274. config::sleep_millis(*millis);
  275. }
  276. } else {
  277. config::sleep_millis(79 + client_index * 17);
  278. }
  279. }
  280. Ok(cmds)
  281. });
  282. handles.push(handle);
  283. }
  284. for _ in 0..20 {
  285. if thread_rng().gen_ratio(200, 1000) {
  286. cfg.disconnect(thread_rng().gen_range(0, SERVERS));
  287. }
  288. if thread_rng().gen_ratio(500, 1000) {
  289. let server = thread_rng().gen_range(0, SERVERS);
  290. if !cfg.is_server_alive(server) {
  291. cfg.start1(server)?;
  292. }
  293. cfg.connect(server);
  294. }
  295. if thread_rng().gen_ratio(200, 1000) {
  296. let server = thread_rng().gen_range(0, SERVERS);
  297. if cfg.is_server_alive(server) {
  298. cfg.crash1(server);
  299. }
  300. }
  301. config::sleep_millis(config::LONG_ELECTION_TIMEOUT_MILLIS / 10 * 7);
  302. }
  303. config::sleep_election_timeouts(1);
  304. cfg.set_unreliable(false);
  305. for i in 0..SERVERS {
  306. if !cfg.is_server_alive(i) {
  307. cfg.start1(i)?;
  308. }
  309. cfg.connect(i);
  310. }
  311. stop.store(true, Ordering::SeqCst);
  312. let mut all_cmds = vec![];
  313. for handle in handles {
  314. let mut cmds = handle.join().expect("Client should not fail")?;
  315. all_cmds.append(&mut cmds);
  316. }
  317. config::sleep_election_timeouts(1);
  318. let last_cmd_index = cfg.one(thread_rng().gen(), SERVERS, true)?;
  319. let mut consented = vec![];
  320. for cmd_index in 1..last_cmd_index + 1 {
  321. let cmd = cfg.wait(cmd_index, SERVERS, None)?;
  322. let cmd = cmd.expect("There should always be a command");
  323. consented.push(cmd);
  324. }
  325. for cmd in all_cmds {
  326. assert!(
  327. consented.contains(&cmd),
  328. "Cmd {} not found in {:?}",
  329. cmd,
  330. consented
  331. );
  332. }
  333. cfg.end();
  334. Ok(())
  335. }
/// Churn scenario on a reliable network (see internal_churn).
#[test]
fn reliable_churn() -> config::Result<()> {
    internal_churn(false)
}
/// Churn scenario with the unreliable-network flag set (see internal_churn).
#[test]
fn unreliable_churn() -> config::Result<()> {
    internal_churn(true)
}