lib.rs

extern crate bincode;
extern crate futures;
extern crate labrpc;
extern crate rand;
#[macro_use]
extern crate serde_derive;
extern crate tokio;

use std::convert::TryFrom;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::time::{Duration, Instant};

use crossbeam_utils::sync::WaitGroup;
use parking_lot::{Condvar, Mutex};
use rand::{thread_rng, Rng};

use crate::persister::PersistedRaftState;
pub use crate::persister::Persister;
pub use crate::rpcs::RpcClient;
use crate::utils::retry_rpc;

mod persister;
pub mod rpcs;
pub mod utils;
#[derive(Debug, Eq, PartialEq)]
enum State {
    Follower,
    Candidate,
    // TODO: add PreVote
    Leader,
}

#[derive(
    Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Serialize, Deserialize,
)]
pub struct Term(pub usize);

#[derive(Clone, Copy, Debug, Eq, PartialEq, Serialize, Deserialize)]
struct Peer(usize);

pub type Index = usize;

#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct Command(pub i32);

#[derive(Clone, Debug, Serialize, Deserialize)]
struct LogEntry {
    term: Term,
    index: Index,
    // TODO: Allow sending of arbitrary information.
    command: Command,
}
struct RaftState {
    current_term: Term,
    voted_for: Option<Peer>,
    log: Vec<LogEntry>,
    commit_index: Index,
    last_applied: Index,
    next_index: Vec<Index>,
    match_index: Vec<Index>,
    current_step: Vec<i64>,
    state: State,
    leader_id: Peer,
}
struct ElectionState {
    // The timer is removed upon shutdown, or once this peer is elected leader.
    timer: Mutex<(usize, Option<Instant>)>,
    // Wakes up the timer thread when the timer is reset or cancelled.
    signal: Condvar,
}
#[derive(Clone)]
pub struct Raft {
    inner_state: Arc<Mutex<RaftState>>,
    peers: Vec<RpcClient>,
    me: Peer,
    persister: Arc<dyn Persister>,
    new_log_entry: Option<std::sync::mpsc::Sender<Option<Peer>>>,
    apply_command_signal: Arc<Condvar>,
    keep_running: Arc<AtomicBool>,
    election: Arc<ElectionState>,
    thread_pool: Arc<tokio::runtime::Runtime>,
    stop_wait_group: WaitGroup,
}
#[derive(Clone, Debug, Serialize, Deserialize)]
struct RequestVoteArgs {
    term: Term,
    candidate_id: Peer,
    last_log_index: Index,
    last_log_term: Term,
}

#[derive(Clone, Debug, Serialize, Deserialize)]
struct RequestVoteReply {
    term: Term,
    vote_granted: bool,
}

#[derive(Clone, Debug, Serialize, Deserialize)]
struct AppendEntriesArgs {
    term: Term,
    leader_id: Peer,
    prev_log_index: Index,
    prev_log_term: Term,
    entries: Vec<LogEntry>,
    leader_commit: Index,
}

#[derive(Clone, Debug, Serialize, Deserialize)]
struct AppendEntriesReply {
    term: Term,
    success: bool,
}
impl Raft {
    /// Creates a Raft instance, restoring any persisted state, and starts the
    /// background daemons that drive elections, log replication and command
    /// application.
    pub fn new<Func>(
        peers: Vec<RpcClient>,
        me: usize,
        persister: Arc<dyn Persister>,
        apply_command: Func,
    ) -> Self
    where
        Func: 'static + Send + FnMut(Index, Command),
    {
        let peer_size = peers.len();
        let mut state = RaftState {
            current_term: Term(0),
            voted_for: None,
            log: vec![LogEntry {
                term: Term(0),
                index: 0,
                command: Command(0),
            }],
            commit_index: 0,
            last_applied: 0,
            next_index: vec![1; peer_size],
            match_index: vec![0; peer_size],
            current_step: vec![0; peer_size],
            state: State::Follower,
            leader_id: Peer(me),
        };
        if let Ok(persisted_state) =
            PersistedRaftState::try_from(persister.read_state())
        {
            state.current_term = persisted_state.current_term;
            state.voted_for = persisted_state.voted_for;
            state.log = persisted_state.log;
        }

        let election = ElectionState {
            timer: Mutex::new((0, None)),
            signal: Condvar::new(),
        };
        election.reset_election_timer();

        let thread_pool = tokio::runtime::Builder::new()
            .threaded_scheduler()
            .enable_time()
            .thread_name(format!("raft-instance-{}", me))
            .core_threads(peer_size)
            .max_threads(peer_size * 2)
            .build()
            .expect("Creating thread pool should not fail");

        let mut this = Raft {
            inner_state: Arc::new(Mutex::new(state)),
            peers,
            me: Peer(me),
            persister,
            new_log_entry: None,
            apply_command_signal: Arc::new(Default::default()),
            keep_running: Arc::new(Default::default()),
            election: Arc::new(election),
            thread_pool: Arc::new(thread_pool),
            stop_wait_group: WaitGroup::new(),
        };

        this.keep_running.store(true, Ordering::SeqCst);
        // Runs in a standalone thread.
        this.run_log_entry_daemon();
        // Runs in a standalone thread.
        this.run_apply_command_daemon(apply_command);
        // One-off function that schedules many little tasks, running on the
        // internal thread pool.
        this.schedule_heartbeats(Duration::from_millis(
            HEARTBEAT_INTERVAL_MILLIS,
        ));
        // The last step is to start the election timer.
        this.run_election_timer();

        this
    }
    pub(crate) fn process_request_vote(
        &self,
        args: RequestVoteArgs,
    ) -> RequestVoteReply {
        let mut rf = self.inner_state.lock();

        let term = rf.current_term;
        #[allow(clippy::comparison_chain)]
        if args.term < term {
            return RequestVoteReply {
                term,
                vote_granted: false,
            };
        } else if args.term > term {
            rf.current_term = args.term;
            rf.voted_for = None;
            rf.state = State::Follower;
            self.election.reset_election_timer();
            self.persister.save_state(rf.persisted_state().into());
        }

        let voted_for = rf.voted_for;
        let (last_log_index, last_log_term) = rf.last_log_index_and_term();
        if (voted_for.is_none() || voted_for == Some(args.candidate_id))
            && (args.last_log_term > last_log_term
                || (args.last_log_term == last_log_term
                    && args.last_log_index >= last_log_index))
        {
            rf.voted_for = Some(args.candidate_id);
            // It is possible that we have already reset the timer above when
            // updating the current term. It does not hurt to update the timer
            // again. We do need to persist, though.
            self.election.reset_election_timer();
            self.persister.save_state(rf.persisted_state().into());

            RequestVoteReply {
                term: args.term,
                vote_granted: true,
            }
        } else {
            RequestVoteReply {
                term: args.term,
                vote_granted: false,
            }
        }
    }
    pub(crate) fn process_append_entries(
        &self,
        args: AppendEntriesArgs,
    ) -> AppendEntriesReply {
        let mut rf = self.inner_state.lock();
        if rf.current_term > args.term {
            return AppendEntriesReply {
                term: rf.current_term,
                success: false,
            };
        }

        if rf.current_term < args.term {
            rf.current_term = args.term;
            rf.voted_for = None;
            self.persister.save_state(rf.persisted_state().into());
        }

        rf.state = State::Follower;
        rf.leader_id = args.leader_id;
        self.election.reset_election_timer();

        if rf.log.len() <= args.prev_log_index
            || rf.log[args.prev_log_index].term != args.prev_log_term
        {
            return AppendEntriesReply {
                term: args.term,
                success: false,
            };
        }

        for (i, entry) in args.entries.iter().enumerate() {
            let index = i + args.prev_log_index + 1;
            if rf.log.len() > index {
                if rf.log[index].term != entry.term {
                    rf.log.truncate(index);
                    rf.log.push(entry.clone());
                }
            } else {
                rf.log.push(entry.clone());
            }
        }
        self.persister.save_state(rf.persisted_state().into());

        if args.leader_commit > rf.commit_index {
            rf.commit_index = if args.leader_commit < rf.log.len() {
                args.leader_commit
            } else {
                rf.log.len() - 1
            };
            self.apply_command_signal.notify_one();
        }

        AppendEntriesReply {
            term: args.term,
            success: true,
        }
    }
    fn run_election_timer(&self) -> std::thread::JoinHandle<()> {
        let this = self.clone();
        std::thread::spawn(move || {
            let election = this.election.clone();
            let mut should_run = None;
            while this.keep_running.load(Ordering::SeqCst) {
                let mut cancel_handle = should_run
                    .map(|last_timer_count| this.run_election(last_timer_count))
                    .flatten();
                let mut guard = election.timer.lock();
                let (timer_count, deadline) = *guard;
                if let Some(last_timer_count) = should_run {
                    // If the timer was changed more than once, we know the
                    // last scheduled election should have been cancelled.
                    if timer_count > last_timer_count + 1 {
                        cancel_handle.take().map(|c| c.send(()));
                    }
                }
                // Check the running signal before sleeping. We are holding the
                // timer lock, so no one can change it. The kill() method will
                // not be able to notify this thread before `wait` is called.
                if !this.keep_running.load(Ordering::SeqCst) {
                    break;
                }
                should_run = match deadline {
                    Some(timeout) => loop {
                        let ret =
                            election.signal.wait_until(&mut guard, timeout);
                        let fired = ret.timed_out() && Instant::now() > timeout;
                        // If the timer has been updated, do not schedule;
                        // break so that we could cancel.
                        if timer_count != guard.0 {
                            // The timer has been updated; cancel the current
                            // election and block on the timeout again.
                            break None;
                        } else if fired {
                            // The timer has fired; remove the timer and allow
                            // running the next election at timer_count.
                            // If the next election is cancelled before we
                            // are back on wait, timer_count will be set to
                            // a different value.
                            guard.0 += 1;
                            guard.1.take();
                            break Some(guard.0);
                        }
                    },
                    None => {
                        election.signal.wait(&mut guard);
                        // The timeout has changed, check again.
                        None
                    }
                };
                drop(guard);
                // Whenever woken up, cancel the currently running election.
                // There are 3 cases in which we could reach here:
                // 1. We received an AppendEntries, or decided to vote for
                // a peer, and thus turned into a follower. In this case we'll
                // be notified by the election signal.
                // 2. We are a follower but didn't receive a heartbeat on time,
                // or we are a candidate but didn't collect enough votes in
                // time. In this case we'll have a timeout.
                // 3. We became the leader, or are shutting down. In this case
                // we'll be notified by the election signal.
                cancel_handle.map(|c| c.send(()));
            }

            let stop_wait_group = this.stop_wait_group.clone();
            // Making sure the rest of `this` is dropped before the wait group.
            drop(this);
            drop(stop_wait_group);
        })
    }
    fn run_election(
        &self,
        timer_count: usize,
    ) -> Option<futures::channel::oneshot::Sender<()>> {
        let me = self.me;
        let (term, args) = {
            let mut rf = self.inner_state.lock();
            // Someone else reached the critical section before us and reset
            // the timer, either by scheduling a newer election or by bumping
            // the term. We should stop and not run this election.
            if !self.election.try_reset_election_timer(timer_count) {
                return None;
            }

            rf.current_term.0 += 1;
            rf.voted_for = Some(me);
            rf.state = State::Candidate;
            self.persister.save_state(rf.persisted_state().into());

            let term = rf.current_term;
            let (last_log_index, last_log_term) = rf.last_log_index_and_term();
            (
                term,
                RequestVoteArgs {
                    term,
                    candidate_id: me,
                    last_log_index,
                    last_log_term,
                },
            )
        };

        let mut votes = vec![];
        for (index, rpc_client) in self.peers.iter().enumerate() {
            if index != self.me.0 {
                // The RpcClient is cloned so that the spawned task owns it
                // instead of borrowing from `self`.
                let rpc_client = rpc_client.clone();
                // RPCs are started right away.
                let one_vote = self
                    .thread_pool
                    .spawn(Self::request_vote(rpc_client, args.clone()));
                // The returned JoinHandles are Unpin, as required by
                // futures::future::select_all below.
                votes.push(one_vote);
            }
        }

        let (tx, rx) = futures::channel::oneshot::channel();
        self.thread_pool.spawn(Self::count_vote_until_cancelled(
            me,
            term,
            self.inner_state.clone(),
            self.election.clone(),
            votes,
            self.peers.len() / 2,
            rx,
            self.new_log_entry.clone().unwrap(),
        ));
        Some(tx)
    }
    const REQUEST_VOTE_RETRY: usize = 4;
    async fn request_vote(
        rpc_client: RpcClient,
        args: RequestVoteArgs,
    ) -> Option<bool> {
        let term = args.term;
        let reply = retry_rpc(Self::REQUEST_VOTE_RETRY, move |_round| {
            rpc_client.clone().call_request_vote(args.clone())
        })
        .await;
        if let Ok(reply) = reply {
            return Some(reply.vote_granted && reply.term == term);
        }
        None
    }
    async fn count_vote_until_cancelled(
        me: Peer,
        term: Term,
        rf: Arc<Mutex<RaftState>>,
        election: Arc<ElectionState>,
        votes: Vec<tokio::task::JoinHandle<Option<bool>>>,
        majority: usize,
        cancel_token: futures::channel::oneshot::Receiver<()>,
        new_log_entry: std::sync::mpsc::Sender<Option<Peer>>,
    ) {
        let mut vote_count = 0;
        let mut against_count = 0;
        let mut cancel_token = cancel_token;
        let mut futures_vec = votes;
        // Stop early if we run out of replies: select_all panics on an empty
        // list of futures.
        while vote_count < majority
            && against_count <= majority
            && !futures_vec.is_empty()
        {
            // Mixing tokio futures with futures-rs ones. Fingers crossed.
            let selected = futures::future::select(
                cancel_token,
                futures::future::select_all(futures_vec),
            )
            .await;
            let ((one_vote, _, rest), new_token) = match selected {
                futures::future::Either::Left(_) => break,
                futures::future::Either::Right(tuple) => tuple,
            };
            futures_vec = rest;
            cancel_token = new_token;

            if let Ok(Some(vote)) = one_vote {
                if vote {
                    vote_count += 1
                } else {
                    against_count += 1
                }
            }
        }

        if vote_count < majority {
            return;
        }

        let mut rf = rf.lock();
        if rf.current_term == term && rf.state == State::Candidate {
            // We are the leader now. The election timer can be stopped.
            election.stop_election_timer();

            rf.state = State::Leader;
            rf.leader_id = me;
            let log_len = rf.log.len();
            for item in rf.next_index.iter_mut() {
                *item = log_len;
            }
            for item in rf.match_index.iter_mut() {
                *item = 0;
            }
            for item in rf.current_step.iter_mut() {
                *item = 0;
            }
            // Sync all logs now.
            new_log_entry
                .send(None)
                .expect("Triggering log entry syncing should not fail");
        }
    }
    fn schedule_heartbeats(&self, interval: Duration) {
        for (peer_index, rpc_client) in self.peers.iter().enumerate() {
            if peer_index != self.me.0 {
                // rf is moved into and owned by the spawned async task.
                let rf = self.inner_state.clone();
                // The RPC client must be cloned into the async task, too.
                let rpc_client = rpc_client.clone();
                // Shutdown signal.
                let keep_running = self.keep_running.clone();
                self.thread_pool.spawn(async move {
                    let mut interval = tokio::time::interval(interval);
                    while keep_running.load(Ordering::SeqCst) {
                        interval.tick().await;
                        if let Some(args) = Self::build_heartbeat(&rf) {
                            tokio::spawn(Self::send_heartbeat(
                                rpc_client.clone(),
                                args,
                            ));
                        }
                    }
                });
            }
        }
    }
    fn build_heartbeat(
        rf: &Arc<Mutex<RaftState>>,
    ) -> Option<AppendEntriesArgs> {
        let rf = rf.lock();
        if !rf.is_leader() {
            return None;
        }

        let (last_log_index, last_log_term) = rf.last_log_index_and_term();
        let args = AppendEntriesArgs {
            term: rf.current_term,
            leader_id: rf.leader_id,
            prev_log_index: last_log_index,
            prev_log_term: last_log_term,
            entries: vec![],
            leader_commit: rf.commit_index,
        };
        Some(args)
    }
    const HEARTBEAT_RETRY: usize = 3;
    async fn send_heartbeat(
        rpc_client: RpcClient,
        args: AppendEntriesArgs,
    ) -> std::io::Result<()> {
        retry_rpc(Self::HEARTBEAT_RETRY, move |_round| {
            rpc_client.clone().call_append_entries(args.clone())
        })
        .await?;
        Ok(())
    }
    fn run_log_entry_daemon(&mut self) -> std::thread::JoinHandle<()> {
        let (tx, rx) = std::sync::mpsc::channel::<Option<Peer>>();
        self.new_log_entry.replace(tx);

        // Clone everything that the thread needs.
        let this = self.clone();
        std::thread::spawn(move || {
            while let Ok(peer) = rx.recv() {
                if !this.keep_running.load(Ordering::SeqCst) {
                    break;
                }
                if !this.inner_state.lock().is_leader() {
                    continue;
                }
                for (i, rpc_client) in this.peers.iter().enumerate() {
                    if i != this.me.0
                        && peer.map(|p| p.0 == i).unwrap_or(true)
                    {
                        this.thread_pool.spawn(Self::sync_log_entry(
                            this.inner_state.clone(),
                            rpc_client.clone(),
                            i,
                            this.new_log_entry.clone().unwrap(),
                            this.apply_command_signal.clone(),
                        ));
                    }
                }
            }

            let stop_wait_group = this.stop_wait_group.clone();
            // Making sure the rest of `this` is dropped before the wait group.
            drop(this);
            drop(stop_wait_group);
        })
    }
    async fn sync_log_entry(
        rf: Arc<Mutex<RaftState>>,
        rpc_client: RpcClient,
        peer_index: usize,
        rerun: std::sync::mpsc::Sender<Option<Peer>>,
        apply_command_signal: Arc<Condvar>,
    ) {
        // TODO: cancel in flight changes?
        let args = match Self::build_append_entries(&rf, peer_index) {
            Some(args) => args,
            None => return,
        };
        let term = args.term;
        let match_index = args.prev_log_index + args.entries.len();

        let result = tokio::time::timeout(
            Duration::from_millis(HEARTBEAT_INTERVAL_MILLIS),
            Self::append_entries(rpc_client, args),
        )
        .await;
        let succeeded = match result {
            Ok(succeeded) => succeeded,
            Err(_) => {
                // The RPC timed out. Schedule another attempt for this peer.
                let _ = rerun.send(Some(Peer(peer_index)));
                return;
            }
        };

        match succeeded {
            Ok(Some(succeeded)) => {
                if succeeded {
                    let mut rf = rf.lock();
                    rf.next_index[peer_index] = match_index + 1;
                    rf.current_step[peer_index] = 0;
                    if match_index > rf.match_index[peer_index] {
                        rf.match_index[peer_index] = match_index;
                        if rf.is_leader() && rf.current_term == term {
                            // The leader's own slot in match_index stays at 0.
                            // Counting the leader itself, matched[mid] is an
                            // index known to be replicated on a majority.
                            let mut matched = rf.match_index.to_vec();
                            let mid = matched.len() / 2 + 1;
                            matched.sort();
                            let new_commit_index = matched[mid];
                            if new_commit_index > rf.commit_index
                                && rf.log[new_commit_index].term
                                    == rf.current_term
                            {
                                rf.commit_index = new_commit_index;
                                apply_command_signal.notify_one();
                            }
                        }
                    }
                } else {
                    // The follower rejected prev_log_index. Back off
                    // exponentially (512, 1024, ... up to 8192 entries at a
                    // time) and retry.
                    let mut rf = rf.lock();
                    let step = &mut rf.current_step[peer_index];
                    if *step < 5 {
                        *step += 1;
                    }
                    let diff = (1 << 8) << *step;
                    let next_index = &mut rf.next_index[peer_index];
                    if diff >= *next_index {
                        *next_index = 1usize;
                    } else {
                        *next_index -= diff;
                    }
                    rerun
                        .send(Some(Peer(peer_index)))
                        .expect("Triggering log entry syncing should not fail");
                }
            }
            // Do nothing, not our term anymore.
            Ok(None) => {}
            Err(_) => {
                // The RPC failed. Wait for one heartbeat interval and retry.
                tokio::time::delay_for(Duration::from_millis(
                    HEARTBEAT_INTERVAL_MILLIS,
                ))
                .await;
                rerun
                    .send(Some(Peer(peer_index)))
                    .expect("Triggering log entry syncing should not fail");
            }
        };
    }
    fn build_append_entries(
        rf: &Arc<Mutex<RaftState>>,
        peer_index: usize,
    ) -> Option<AppendEntriesArgs> {
        let rf = rf.lock();
        if !rf.is_leader() {
            return None;
        }

        let prev_log_index = rf.next_index[peer_index] - 1;
        let prev_log_term = rf.log[prev_log_index].term;
        Some(AppendEntriesArgs {
            term: rf.current_term,
            leader_id: rf.leader_id,
            prev_log_index,
            prev_log_term,
            entries: rf.log[rf.next_index[peer_index]..].to_vec(),
            leader_commit: rf.commit_index,
        })
    }
    const APPEND_ENTRIES_RETRY: usize = 3;
    async fn append_entries(
        rpc_client: RpcClient,
        args: AppendEntriesArgs,
    ) -> std::io::Result<Option<bool>> {
        let term = args.term;
        let reply = retry_rpc(Self::APPEND_ENTRIES_RETRY, move |_round| {
            rpc_client.clone().call_append_entries(args.clone())
        })
        .await?;
        Ok(if reply.term == term {
            Some(reply.success)
        } else {
            None
        })
    }
    fn run_apply_command_daemon<Func>(
        &self,
        mut apply_command: Func,
    ) -> std::thread::JoinHandle<()>
    where
        Func: 'static + Send + FnMut(Index, Command),
    {
        let keep_running = self.keep_running.clone();
        let rf = self.inner_state.clone();
        let condvar = self.apply_command_signal.clone();
        let stop_wait_group = self.stop_wait_group.clone();
        std::thread::spawn(move || {
            while keep_running.load(Ordering::SeqCst) {
                let (mut index, commands) = {
                    let mut rf = rf.lock();
                    if rf.last_applied >= rf.commit_index {
                        condvar.wait_for(
                            &mut rf,
                            Duration::from_millis(HEARTBEAT_INTERVAL_MILLIS),
                        );
                    }
                    if rf.last_applied < rf.commit_index {
                        let index = rf.last_applied + 1;
                        let last_one = rf.commit_index + 1;
                        let commands: Vec<Command> = rf.log[index..last_one]
                            .iter()
                            .map(|entry| entry.command.clone())
                            .collect();
                        rf.last_applied = rf.commit_index;
                        (index, commands)
                    } else {
                        continue;
                    }
                };
                // The lock is released while calling external functions.
                for command in commands {
                    apply_command(index, command);
                    index += 1;
                }
            }
            drop(stop_wait_group);
        })
    }
    /// Proposes a new command to be replicated. Returns the term and log
    /// index the command was appended at, or `None` if this peer is not the
    /// leader.
    pub fn start(&self, command: Command) -> Option<(Term, Index)> {
        let mut rf = self.inner_state.lock();

        let term = rf.current_term;
        if !rf.is_leader() {
            return None;
        }

        let index = rf.log.len();
        rf.log.push(LogEntry {
            term,
            index,
            command,
        });
        self.persister.save_state(rf.persisted_state().into());

        self.new_log_entry
            .clone()
            .unwrap()
            .send(None)
            .expect("Sending to new log entry queue should never fail.");

        Some((term, index))
    }
    /// Shuts down this Raft instance: stops all daemons, waits for them to
    /// exit, and tears down the internal thread pool.
    pub fn kill(mut self) {
        self.keep_running.store(false, Ordering::SeqCst);
        self.election.stop_election_timer();
        self.new_log_entry.take().map(|n| n.send(None));
        self.apply_command_signal.notify_all();

        self.stop_wait_group.wait();

        std::sync::Arc::try_unwrap(self.thread_pool)
            .expect(
                "All references to the thread pool should have been dropped.",
            )
            .shutdown_timeout(Duration::from_millis(
                HEARTBEAT_INTERVAL_MILLIS * 2,
            ));
    }

    /// Returns the current term and whether this peer believes it is the
    /// leader.
    pub fn get_state(&self) -> (Term, bool) {
        let state = self.inner_state.lock();
        (state.current_term, state.is_leader())
    }
}
impl RaftState {
    fn persisted_state(&self) -> PersistedRaftState {
        self.into()
    }

    fn last_log_index_and_term(&self) -> (Index, Term) {
        let len = self.log.len();
        assert!(len > 0, "There should always be at least one entry in log");
        (len - 1, self.log.last().unwrap().term)
    }

    fn is_leader(&self) -> bool {
        self.state == State::Leader
    }
}
const HEARTBEAT_INTERVAL_MILLIS: u64 = 150;
const ELECTION_TIMEOUT_BASE_MILLIS: u64 = 150;
const ELECTION_TIMEOUT_VAR_MILLIS: u64 = 250;
impl ElectionState {
    fn reset_election_timer(&self) {
        let mut guard = self.timer.lock();
        guard.0 += 1;
        guard.1.replace(Self::election_timeout());
        self.signal.notify_one();
    }

    fn try_reset_election_timer(&self, timer_count: usize) -> bool {
        let mut guard = self.timer.lock();
        if guard.0 != timer_count {
            return false;
        }
        guard.0 += 1;
        guard.1.replace(Self::election_timeout());
        self.signal.notify_one();
        true
    }

    fn election_timeout() -> Instant {
        Instant::now()
            + Duration::from_millis(
                ELECTION_TIMEOUT_BASE_MILLIS
                    + thread_rng().gen_range(0, ELECTION_TIMEOUT_VAR_MILLIS),
            )
    }

    fn stop_election_timer(&self) {
        let mut guard = self.timer.lock();
        guard.0 += 1;
        guard.1.take();
        self.signal.notify_one();
    }
}
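
// A small self-contained test sketch, not part of the original file: it
// exercises the timer bookkeeping in `ElectionState` (each reset bumps the
// counter and installs a new deadline, `try_reset_election_timer` only
// succeeds when the caller's counter is current, `stop_election_timer`
// clears the deadline) and the `RaftState` helpers used throughout the file.
// Only items defined above are used; nothing external is assumed.
#[cfg(test)]
mod sanity_tests {
    use super::*;

    #[test]
    fn election_timer_counter_and_deadline_bookkeeping() {
        let election = ElectionState {
            timer: Mutex::new((0, None)),
            signal: Condvar::new(),
        };

        election.reset_election_timer();
        {
            let guard = election.timer.lock();
            assert_eq!(guard.0, 1);
            assert!(guard.1.is_some());
        }

        // A stale counter must not reset the timer again.
        assert!(!election.try_reset_election_timer(0));
        // The current counter may.
        assert!(election.try_reset_election_timer(1));

        election.stop_election_timer();
        let guard = election.timer.lock();
        assert_eq!(guard.0, 3);
        assert!(guard.1.is_none());
    }

    #[test]
    fn last_log_index_and_term_reports_the_tail_entry() {
        let state = RaftState {
            current_term: Term(2),
            voted_for: None,
            log: vec![
                LogEntry {
                    term: Term(0),
                    index: 0,
                    command: Command(0),
                },
                LogEntry {
                    term: Term(2),
                    index: 1,
                    command: Command(7),
                },
            ],
            commit_index: 0,
            last_applied: 0,
            next_index: vec![],
            match_index: vec![],
            current_step: vec![],
            state: State::Leader,
            leader_id: Peer(0),
        };
        assert_eq!(state.last_log_index_and_term(), (1, Term(2)));
        assert!(state.is_leader());
    }
}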