//! Integration tests for Raft persistence (the "2C" test suite): crash /
//! restart persistence, Figure 8 scenarios, unreliable agreement, and churn.
  1. #![allow(clippy::identity_op)]
  2. #[macro_use]
  3. extern crate anyhow;
  4. extern crate bytes;
  5. extern crate labrpc;
  6. extern crate ruaft;
  7. #[macro_use]
  8. extern crate scopeguard;
  9. use std::sync::atomic::AtomicBool;
  10. use std::sync::atomic::Ordering;
  11. use std::sync::Arc;
  12. use rand::{thread_rng, Rng};
  13. // This is to remove the annoying "unused code in config" warnings.
  14. pub mod config;
  15. #[test]
  16. fn persist1() -> config::Result<()> {
  17. const SERVERS: usize = 5;
  18. let cfg = make_config!(SERVERS, false);
  19. defer!(cfg.cleanup());
  20. cfg.begin("Test (2C): basic persistence");
  21. cfg.one(11, SERVERS, true)?;
  22. // crash and re-start all
  23. for i in 0..SERVERS {
  24. cfg.start1(i)?;
  25. }
  26. for i in 0..SERVERS {
  27. cfg.disconnect(i);
  28. cfg.connect(i);
  29. }
  30. cfg.one(12, SERVERS, true)?;
  31. let leader1 = cfg.check_one_leader()?;
  32. cfg.disconnect(leader1);
  33. cfg.start1(leader1)?;
  34. cfg.connect(leader1);
  35. cfg.one(13, SERVERS, true)?;
  36. let leader2 = cfg.check_one_leader()?;
  37. cfg.disconnect(leader2);
  38. cfg.one(14, SERVERS - 1, true)?;
  39. cfg.start1(leader2)?;
  40. cfg.connect(leader2);
  41. // wait for leader2 to join before killing i3
  42. cfg.wait(4, SERVERS, None)?;
  43. let i3 = (cfg.check_one_leader()? + 1) % SERVERS;
  44. cfg.disconnect(i3);
  45. cfg.one(15, SERVERS - 1, true)?;
  46. cfg.start1(i3)?;
  47. cfg.connect(i3);
  48. cfg.one(16, SERVERS, true)?;
  49. cfg.end();
  50. Ok(())
  51. }
  52. #[test]
  53. fn persist2() -> config::Result<()> {
  54. const SERVERS: usize = 5;
  55. let cfg = make_config!(SERVERS, false);
  56. defer!(cfg.cleanup());
  57. cfg.begin("Test (2C): more persistence");
  58. let mut index = 1;
  59. for _ in 0..5 {
  60. cfg.one(10 + index, SERVERS, true)?;
  61. index += 1;
  62. let leader1 = cfg.check_one_leader()?;
  63. cfg.disconnect((leader1 + 1) % SERVERS);
  64. cfg.disconnect((leader1 + 2) % SERVERS);
  65. cfg.one(10 + index, SERVERS - 2, true)?;
  66. index += 1;
  67. cfg.disconnect((leader1 + 0) % SERVERS);
  68. cfg.disconnect((leader1 + 3) % SERVERS);
  69. cfg.disconnect((leader1 + 4) % SERVERS);
  70. cfg.start1((leader1 + 1) % SERVERS)?;
  71. cfg.start1((leader1 + 2) % SERVERS)?;
  72. cfg.connect((leader1 + 1) % SERVERS);
  73. cfg.connect((leader1 + 2) % SERVERS);
  74. config::sleep_election_timeouts(1);
  75. cfg.start1((leader1 + 3) % SERVERS)?;
  76. cfg.connect((leader1 + 3) % SERVERS);
  77. cfg.one(10 + index, SERVERS - 2, true)?;
  78. index += 1;
  79. cfg.connect((leader1 + 4) % SERVERS);
  80. cfg.connect((leader1 + 0) % SERVERS);
  81. }
  82. cfg.one(1000, SERVERS, true)?;
  83. cfg.end();
  84. Ok(())
  85. }
  86. #[test]
  87. fn persist3() -> config::Result<()> {
  88. const SERVERS: usize = 3;
  89. let cfg = make_config!(SERVERS, false);
  90. defer!(cfg.cleanup());
  91. cfg.begin(
  92. "Test (2C): partitioned leader and one follower crash, leader restarts",
  93. );
  94. cfg.one(101, 3, true)?;
  95. let leader = cfg.check_one_leader()?;
  96. cfg.disconnect((leader + 2) % SERVERS);
  97. cfg.one(102, 2, true)?;
  98. cfg.crash1((leader + 0) % SERVERS);
  99. cfg.crash1((leader + 1) % SERVERS);
  100. cfg.connect((leader + 2) % SERVERS);
  101. cfg.start1((leader + 0) % SERVERS)?;
  102. cfg.connect((leader + 0) % SERVERS);
  103. cfg.one(103, 2, true)?;
  104. cfg.start1((leader + 1) % SERVERS)?;
  105. cfg.connect((leader + 1) % SERVERS);
  106. cfg.one(104, SERVERS, true)?;
  107. cfg.end();
  108. Ok(())
  109. }
  110. #[test]
  111. fn figure8() -> config::Result<()> {
  112. const SERVERS: usize = 5;
  113. let cfg = make_config!(SERVERS, false);
  114. defer!(cfg.cleanup());
  115. cfg.begin("Test (2C): Figure 8");
  116. cfg.one(thread_rng().gen(), 1, true)?;
  117. let mut nup = SERVERS;
  118. for _ in 0..1000 {
  119. let mut leader = None;
  120. for i in 0..SERVERS {
  121. if cfg.is_server_alive(i)
  122. && cfg.leader_start(i, thread_rng().gen()).is_some()
  123. {
  124. leader = Some(i);
  125. }
  126. }
  127. let millis_upper = if thread_rng().gen_ratio(100, 1000) {
  128. config::LONG_ELECTION_TIMEOUT_MILLIS >> 1
  129. } else {
  130. // Magic number 13?
  131. 13
  132. };
  133. let millis = thread_rng().gen_range(0..millis_upper);
  134. config::sleep_millis(millis);
  135. if let Some(leader) = leader {
  136. cfg.crash1(leader);
  137. nup -= 1;
  138. }
  139. if nup < 3 {
  140. let index = thread_rng().gen_range(0..SERVERS);
  141. if !cfg.is_server_alive(index) {
  142. cfg.start1(index)?;
  143. cfg.connect(index);
  144. nup += 1
  145. }
  146. }
  147. }
  148. for index in 0..SERVERS {
  149. if !cfg.is_server_alive(index) {
  150. cfg.start1(index)?;
  151. cfg.connect(index);
  152. }
  153. }
  154. cfg.one(thread_rng().gen(), SERVERS, true)?;
  155. cfg.end();
  156. Ok(())
  157. }
  158. #[test]
  159. fn unreliable_agree() -> config::Result<()> {
  160. const SERVERS: usize = 5;
  161. let cfg = Arc::new(make_config!(SERVERS, true));
  162. defer!(cfg.cleanup());
  163. cfg.begin("Test (2C): unreliable agreement");
  164. let mut handles = vec![];
  165. for iters in 1..50 {
  166. for j in 0..4 {
  167. let cfg = cfg.clone();
  168. let logger = test_utils::thread_local_logger::get();
  169. let handle = std::thread::spawn(move || {
  170. test_utils::thread_local_logger::set(logger);
  171. cfg.one(100 * iters + j, 1, true)
  172. });
  173. handles.push(handle);
  174. }
  175. cfg.one(iters, 1, true)?;
  176. }
  177. cfg.set_unreliable(false);
  178. for handle in handles {
  179. handle.join().expect("Thread join should not fail")?;
  180. }
  181. cfg.one(100, SERVERS, true)?;
  182. cfg.end();
  183. Ok(())
  184. }
  185. #[test]
  186. fn figure8_unreliable() -> config::Result<()> {
  187. const SERVERS: usize = 5;
  188. let cfg = make_config!(SERVERS, false);
  189. defer!(cfg.cleanup());
  190. cfg.begin("Test (2C): Figure 8 (unreliable)");
  191. cfg.one(thread_rng().gen_range(0..10000), 1, true)?;
  192. let mut nup = SERVERS;
  193. for iters in 0..1000 {
  194. if iters == 200 {
  195. cfg.set_long_reordering(true);
  196. }
  197. let mut leader = None;
  198. for i in 0..SERVERS {
  199. if cfg.is_server_alive(i)
  200. && cfg.leader_start(i, thread_rng().gen()).is_some()
  201. && cfg.is_connected(i)
  202. {
  203. leader = Some(i);
  204. }
  205. }
  206. let millis_upper = if thread_rng().gen_ratio(100, 1000) {
  207. config::LONG_ELECTION_TIMEOUT_MILLIS >> 1
  208. } else {
  209. // Magic number 13?
  210. 13
  211. };
  212. let millis = thread_rng().gen_range(0..millis_upper);
  213. config::sleep_millis(millis);
  214. if let Some(leader) = leader {
  215. if thread_rng().gen_ratio(1, 2) {
  216. cfg.disconnect(leader);
  217. nup -= 1;
  218. }
  219. }
  220. if nup < 3 {
  221. let index = thread_rng().gen_range(0..SERVERS);
  222. if !cfg.is_connected(index) {
  223. cfg.connect(index);
  224. nup += 1
  225. }
  226. }
  227. }
  228. for index in 0..SERVERS {
  229. if !cfg.is_connected(index) {
  230. cfg.connect(index);
  231. }
  232. }
  233. cfg.one(thread_rng().gen_range(0..10000), SERVERS, true)?;
  234. cfg.end();
  235. Ok(())
  236. }
/// Shared body of the churn tests: three client threads continuously submit
/// random commands while the main thread randomly disconnects, crashes, and
/// restarts servers. At the end, every command a client observed as
/// committed must appear in the final agreed-upon log.
fn internal_churn(unreliable: bool) -> config::Result<()> {
    const SERVERS: usize = 5;
    let cfg = Arc::new(make_config!(SERVERS, false));
    defer!(cfg.cleanup());
    if unreliable {
        cfg.begin("Test (2C): unreliable churn");
    } else {
        cfg.begin("Test (2C): churn");
    }
    // Flag telling the client threads to stop submitting.
    let stop = Arc::new(AtomicBool::new(false));
    let mut handles = vec![];
    // Spawn three client threads; each returns the list of its own commands
    // it saw committed.
    for client_index in 0..3 {
        let stop = stop.clone();
        let cfg = cfg.clone();
        let logger = test_utils::thread_local_logger::get();
        let handle = std::thread::spawn(move || {
            test_utils::thread_local_logger::set(logger);
            let mut cmds = vec![];
            while !stop.load(Ordering::SeqCst) {
                let cmd = thread_rng().gen();
                let mut index = None;
                // Try starting `cmd` on every live server; remember the last
                // server that accepted it.
                // NOTE(review): `index` holds a *server* index, while the
                // value returned by `leader_start` is discarded; it is then
                // fed to `committed_count` below. Confirm `committed_count`
                // expects a server index rather than a log index.
                for i in 0..SERVERS {
                    if cfg.is_server_alive(i) {
                        let start = cfg.leader_start(i, cmd);
                        if start.is_some() {
                            index = Some(i);
                        }
                    }
                }
                if let Some(index) = index {
                    // Poll with increasing delays for the command to commit.
                    for millis in [10, 20, 50, 100, 200].iter() {
                        let (cmd_index, cmd_committed) =
                            // somehow the compiler cannot infer the error type.
                            match cfg.committed_count(index) {
                                Ok(t) => t,
                                Err(e) => return Err(e),
                            };
                        #[allow(clippy::collapsible_if)]
                        if cmd_index > 0 {
                            if cmd_committed == cmd {
                                // Our command committed; remember it.
                                cmds.push(cmd);
                            }
                            // The command we started might not get committed;
                            // that is acceptable here.
                        }
                        config::sleep_millis(*millis);
                    }
                } else {
                    // No server accepted the command; back off, staggered per
                    // client to avoid lock-step retries.
                    config::sleep_millis(79 + client_index * 17);
                }
            }
            Ok(cmds)
        });
        handles.push(handle);
    }
    // Churn the cluster for 20 rounds.
    for _ in 0..20 {
        // 20% chance: disconnect a random server.
        if thread_rng().gen_ratio(200, 1000) {
            cfg.disconnect(thread_rng().gen_range(0..SERVERS));
        }
        // 50% chance: ensure a random server is running and connected.
        if thread_rng().gen_ratio(500, 1000) {
            let server = thread_rng().gen_range(0..SERVERS);
            if !cfg.is_server_alive(server) {
                cfg.start1(server)?;
            }
            cfg.connect(server);
        }
        // 20% chance: crash a random live server.
        if thread_rng().gen_ratio(200, 1000) {
            let server = thread_rng().gen_range(0..SERVERS);
            if cfg.is_server_alive(server) {
                cfg.crash1(server);
            }
        }
        config::sleep_millis(config::LONG_ELECTION_TIMEOUT_MILLIS / 10 * 7);
    }
    config::sleep_election_timeouts(1);
    cfg.set_unreliable(false);
    // Revive and reconnect everything before the final agreement check.
    for i in 0..SERVERS {
        if !cfg.is_server_alive(i) {
            cfg.start1(i)?;
        }
        cfg.connect(i);
    }
    // Stop the clients and gather the commands they saw committed.
    stop.store(true, Ordering::SeqCst);
    let mut all_cmds = vec![];
    for handle in handles {
        let mut cmds = handle.join().expect("Client should not fail")?;
        all_cmds.append(&mut cmds);
    }
    config::sleep_election_timeouts(1);
    // Commit one final entry, then read back the entire committed log.
    let last_cmd_index = cfg.one(thread_rng().gen(), SERVERS, true)?;
    let mut consented = vec![];
    for cmd_index in 1..last_cmd_index + 1 {
        let cmd = cfg.wait(cmd_index, SERVERS, None)?;
        let cmd = cmd.expect("There should always be a command");
        consented.push(cmd);
    }
    // Every command a client observed as committed must be in the log.
    for cmd in all_cmds {
        assert!(
            consented.contains(&cmd),
            "Cmd {} not found in {:?}",
            cmd,
            consented
        );
    }
    cfg.end();
    Ok(())
}
  343. #[test]
  344. fn reliable_churn() -> config::Result<()> {
  345. internal_churn(false)
  346. }
  347. #[test]
  348. fn unreliable_churn() -> config::Result<()> {
  349. internal_churn(true)
  350. }