Преглед изворног кода

Merge branch 'remote_context_impl'

Put the RPC client and other information in thread local storage.

We can save a ton of cloning and reduce CPU cache contention.
Jing Yang пре 3 године
родитељ
комит
e4ec9a63ee
9 измењених фајлова са 282 додато и 102 уклоњено
  1. 11 21
      src/election.rs
  2. 10 31
      src/heartbeats.rs
  3. 2 0
      src/lib.rs
  4. 39 11
      src/raft.rs
  5. 163 0
      src/remote_context.rs
  6. 27 0
      src/remote_peer.rs
  7. 16 23
      src/sync_log_entries.rs
  8. 14 12
      src/term_marker.rs
  9. 0 4
      src/verify_authority.rs

+ 11 - 21
src/election.rs

@@ -5,13 +5,13 @@ use std::time::{Duration, Instant};
 use parking_lot::{Condvar, Mutex};
 use rand::{thread_rng, Rng};
 
+use crate::remote_context::RemoteContext;
 use crate::sync_log_entries::SyncLogEntriesComms;
-use crate::term_marker::TermMarker;
 use crate::utils::{retry_rpc, RPC_DEADLINE};
 use crate::verify_authority::VerifyAuthorityDaemon;
 use crate::{
-    Peer, Persister, Raft, RaftState, RemoteRaft, ReplicableCommand,
-    RequestVoteArgs, State, Term,
+    Peer, Persister, Raft, RaftState, ReplicableCommand, RequestVoteArgs,
+    State, Term,
 };
 
 struct VersionedDeadline {
@@ -253,17 +253,11 @@ impl<Command: ReplicableCommand> Raft<Command> {
         };
 
         let mut votes = vec![];
-        let term_marker = self.term_marker();
-        for (index, rpc_client) in self.peers.iter().enumerate() {
-            if index != self.me.0 {
-                // RpcClient must be cloned so that it lives long enough for
-                // spawn(), which requires static life time.
-                // RPCs are started right away.
-                let one_vote = self.thread_pool.spawn(Self::request_vote(
-                    rpc_client.clone(),
-                    args.clone(),
-                    term_marker.clone(),
-                ));
+        for peer in self.peers.clone().into_iter() {
+            if peer != self.me {
+                let one_vote = self
+                    .thread_pool
+                    .spawn(Self::request_vote(peer, args.clone()));
                 votes.push(one_vote);
             }
         }
@@ -284,21 +278,17 @@ impl<Command: ReplicableCommand> Raft<Command> {
     }
 
     const REQUEST_VOTE_RETRY: usize = 1;
-    async fn request_vote(
-        rpc_client: impl RemoteRaft<Command>,
-        args: RequestVoteArgs,
-        term_marker: TermMarker<Command>,
-    ) -> Option<bool> {
+    async fn request_vote(peer: Peer, args: RequestVoteArgs) -> Option<bool> {
         let term = args.term;
         // See the comment in send_heartbeat() for this override.
-        let rpc_client = &rpc_client;
+        let rpc_client = RemoteContext::<Command>::rpc_client(peer);
         let reply =
             retry_rpc(Self::REQUEST_VOTE_RETRY, RPC_DEADLINE, move |_round| {
                 rpc_client.request_vote(args.clone())
             })
             .await;
         if let Ok(reply) = reply {
-            term_marker.mark(reply.term);
+            RemoteContext::<Command>::term_marker().mark(reply.term);
             return Some(reply.vote_granted && reply.term == term);
         }
         None

+ 10 - 31
src/heartbeats.rs

@@ -4,12 +4,9 @@ use std::time::{Duration, Instant};
 
 use parking_lot::Mutex;
 
-use crate::term_marker::TermMarker;
+use crate::remote_context::RemoteContext;
 use crate::utils::{retry_rpc, RPC_DEADLINE};
-use crate::verify_authority::DaemonBeatTicker;
-use crate::{
-    AppendEntriesArgs, Raft, RaftState, RemoteRaft, ReplicableCommand,
-};
+use crate::{AppendEntriesArgs, Peer, Raft, RaftState, ReplicableCommand};
 
 pub(crate) const HEARTBEAT_INTERVAL: Duration = Duration::from_millis(150);
 
@@ -70,19 +67,12 @@ impl<Command: ReplicableCommand> Raft<Command> {
     /// The request message is a stripped down version of `AppendEntries`. The
     /// response from the peer is ignored.
     pub(crate) fn schedule_heartbeats(&self, interval: Duration) {
-        for (peer_index, rpc_client) in self.peers.iter().enumerate() {
-            if peer_index != self.me.0 {
+        for peer in self.peers.clone().into_iter() {
+            if peer != self.me {
                 // rf is now owned by the outer async function.
                 let rf = self.inner_state.clone();
-                // A function that updates term with responses to heartbeats.
-                let term_marker = self.term_marker();
-                // A function that casts an "authoritative" vote with Ok()
-                // responses to heartbeats.
-                let beat_ticker = self.beat_ticker(peer_index);
                 // A on-demand trigger to sending a heartbeat.
                 let mut trigger = self.heartbeats_daemon.sender.subscribe();
-                // RPC client must be cloned into the outer async function.
-                let rpc_client = rpc_client.clone();
                 // Shutdown signal.
                 let keep_running = self.keep_running.clone();
                 self.thread_pool.spawn(async move {
@@ -94,12 +84,7 @@ impl<Command: ReplicableCommand> Raft<Command> {
                         let _ =
                             futures_util::future::select(tick, trigger).await;
                         if let Some(args) = Self::build_heartbeat(&rf) {
-                            tokio::spawn(Self::send_heartbeat(
-                                rpc_client.clone(),
-                                args,
-                                term_marker.clone(),
-                                beat_ticker.clone(),
-                            ));
+                            tokio::spawn(Self::send_heartbeat(peer, args));
                         }
                     }
                 });
@@ -130,18 +115,12 @@ impl<Command: ReplicableCommand> Raft<Command> {
 
     const HEARTBEAT_RETRY: usize = 1;
     async fn send_heartbeat(
-        // Here rpc_client must be owned by the returned future. The returned
-        // future is scheduled to run on a thread pool. We do not control when
-        // the future will be run, or when it will be done with the RPC client.
-        // If a reference is passed in, the reference essentially has to be a
-        // static one, i.e. lives forever. Thus we chose to let the future own
-        // the RPC client.
-        rpc_client: impl RemoteRaft<Command>,
+        peer: Peer,
         args: AppendEntriesArgs<Command>,
-        term_watermark: TermMarker<Command>,
-        beat_ticker: DaemonBeatTicker,
     ) -> std::io::Result<()> {
         let term = args.term;
+        let beat_ticker = RemoteContext::<Command>::beat_ticker(peer);
+
         let beat = beat_ticker.next_beat();
         // Passing a reference that is moved to the following closure.
         //
@@ -157,7 +136,7 @@ impl<Command: ReplicableCommand> Raft<Command> {
         // Another option is to use non-move closures, in which case rpc_client
         // of type Arc can be passed-in directly. However that requires args to
         // be sync because they can be shared by more than one futures.
-        let rpc_client = &rpc_client;
+        let rpc_client = RemoteContext::<Command>::rpc_client(peer);
         let response =
             retry_rpc(Self::HEARTBEAT_RETRY, RPC_DEADLINE, move |_round| {
                 rpc_client.append_entries(args.clone())
@@ -166,7 +145,7 @@ impl<Command: ReplicableCommand> Raft<Command> {
         if term == response.term {
             beat_ticker.tick(beat);
         } else {
-            term_watermark.mark(response.term);
+            RemoteContext::<Command>::term_marker().mark(response.term);
         }
         Ok(())
     }

+ 2 - 0
src/lib.rs

@@ -29,6 +29,8 @@ mod process_install_snapshot;
 mod process_request_vote;
 mod raft;
 mod raft_state;
+mod remote_context;
+mod remote_peer;
 mod remote_raft;
 mod replicable_command;
 mod snapshot;

+ 39 - 11
src/raft.rs

@@ -12,8 +12,11 @@ use crate::daemon_watch::{Daemon, DaemonWatch};
 use crate::election::ElectionState;
 use crate::heartbeats::{HeartbeatsDaemon, HEARTBEAT_INTERVAL};
 use crate::persister::PersistedRaftState;
+use crate::remote_context::RemoteContext;
+use crate::remote_peer::RemotePeer;
 use crate::snapshot::{RequestSnapshotFnMut, SnapshotDaemon};
 use crate::sync_log_entries::SyncLogEntriesComms;
+use crate::term_marker::TermMarker;
 use crate::verify_authority::VerifyAuthorityDaemon;
 use crate::{IndexTerm, Persister, RaftState, RemoteRaft, ReplicableCommand};
 
@@ -28,7 +31,7 @@ pub struct Peer(pub usize);
 #[derive(Clone)]
 pub struct Raft<Command> {
     pub(crate) inner_state: Arc<Mutex<RaftState<Command>>>,
-    pub(crate) peers: Vec<Arc<dyn RemoteRaft<Command>>>,
+    pub(crate) peers: Vec<Peer>,
 
     pub(crate) me: Peer,
 
@@ -88,9 +91,31 @@ impl<Command: ReplicableCommand> Raft<Command> {
                 .expect("Persisted log should not contain error");
         }
 
-        let election = ElectionState::create();
+        let inner_state = Arc::new(Mutex::new(state));
+        let election = Arc::new(ElectionState::create());
         election.reset_election_timer();
 
+        let term_marker = TermMarker::create(
+            inner_state.clone(),
+            election.clone(),
+            persister.clone(),
+        );
+
+        let verify_authority_daemon = VerifyAuthorityDaemon::create(peer_size);
+        let remote_peers = peers
+            .into_iter()
+            .enumerate()
+            .map(|(index, remote_raft)| {
+                RemotePeer::create(
+                    Peer(index),
+                    remote_raft,
+                    verify_authority_daemon.beat_ticker(index),
+                )
+            })
+            .collect();
+
+        let context = RemoteContext::create(term_marker, remote_peers);
+
         let daemon_env = DaemonEnv::create();
         let thread_env = daemon_env.for_thread();
         let thread_pool = tokio::runtime::Builder::new_multi_thread()
@@ -98,28 +123,31 @@ impl<Command: ReplicableCommand> Raft<Command> {
             .enable_io()
             .thread_name(format!("raft-instance-{}", me))
             .worker_threads(peer_size)
-            .on_thread_start(move || thread_env.clone().attach())
-            .on_thread_stop(ThreadEnv::detach)
+            .on_thread_start(move || {
+                context.clone().attach();
+                thread_env.clone().attach();
+            })
+            .on_thread_stop(move || {
+                RemoteContext::<Command>::detach();
+                ThreadEnv::detach();
+            })
             .build()
             .expect("Creating thread pool should not fail");
-        let peers = peers
-            .into_iter()
-            .map(|r| Arc::new(r) as Arc<dyn RemoteRaft<Command>>)
-            .collect();
+        let peers = (0..peer_size).map(Peer).collect();
         let (sync_log_entries_comms, sync_log_entries_daemon) =
             crate::sync_log_entries::create(peer_size);
 
         let mut this = Raft {
-            inner_state: Arc::new(Mutex::new(state)),
+            inner_state,
             peers,
             me: Peer(me),
             persister,
             sync_log_entries_comms,
             apply_command_signal: Arc::new(Condvar::new()),
             keep_running: Arc::new(AtomicBool::new(true)),
-            election: Arc::new(election),
+            election,
             snapshot_daemon: SnapshotDaemon::create(),
-            verify_authority_daemon: VerifyAuthorityDaemon::create(peer_size),
+            verify_authority_daemon,
             heartbeats_daemon: HeartbeatsDaemon::create(),
             thread_pool: thread_pool.handle().clone(),
             stop_wait_group: WaitGroup::new(),

+ 163 - 0
src/remote_context.rs

@@ -0,0 +1,163 @@
+use std::any::Any;
+use std::cell::RefCell;
+
+use crate::remote_peer::RemotePeer;
+use crate::term_marker::TermMarker;
+use crate::verify_authority::DaemonBeatTicker;
+use crate::{Peer, RemoteRaft};
+
+#[derive(Clone)]
+pub(crate) struct RemoteContext<Command> {
+    term_marker: TermMarker<Command>,
+    remote_peers: Vec<RemotePeer<Command, Peer>>,
+}
+
+impl<Command: 'static> RemoteContext<Command> {
+    pub fn create(
+        term_marker: TermMarker<Command>,
+        remote_peers: Vec<RemotePeer<Command, Peer>>,
+    ) -> Self {
+        Self {
+            term_marker,
+            remote_peers,
+        }
+    }
+
+    pub fn term_marker() -> &'static TermMarker<Command> {
+        &Self::fetch_context().term_marker
+    }
+
+    pub fn remote_peer(peer: Peer) -> &'static RemotePeer<Command, Peer> {
+        &Self::fetch_context().remote_peers[peer.0]
+    }
+
+    pub fn rpc_client(peer: Peer) -> &'static dyn RemoteRaft<Command> {
+        Self::remote_peer(peer).rpc_client.as_ref()
+    }
+
+    pub fn beat_ticker(peer: Peer) -> &'static DaemonBeatTicker {
+        &Self::remote_peer(peer).beat_ticker
+    }
+
+    thread_local! {
+        // Using Any to mask the fact that we are storing a generic struct.
+        static REMOTE_CONTEXT: RefCell<Option<&'static dyn Any>> = RefCell::new(None);
+    }
+
+    pub fn attach(self) {
+        Self::set_context(Box::new(self))
+    }
+
+    pub fn detach() -> Box<Self> {
+        let static_context = Self::fetch_context();
+        unsafe { Box::from_raw((static_context as *const Self) as *mut Self) }
+    }
+
+    fn set_context(context: Box<Self>) {
+        let context_ref = Box::leak(context);
+        let any_ref: &'static mut dyn Any = context_ref;
+        Self::REMOTE_CONTEXT
+            .with(|context| *context.borrow_mut() = Some(any_ref));
+    }
+
+    fn fetch_context() -> &'static Self {
+        let any_ref = Self::REMOTE_CONTEXT.with(|context| *context.borrow());
+        if let Some(any_ref) = any_ref {
+            any_ref
+                .downcast_ref::<Self>()
+                .expect("Context is set to the wrong type.")
+        } else {
+            panic!("Context is not set");
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use std::sync::Arc;
+
+    use async_trait::async_trait;
+    use bytes::Bytes;
+    use parking_lot::Mutex;
+
+    use crate::election::ElectionState;
+    use crate::remote_peer::RemotePeer;
+    use crate::term_marker::TermMarker;
+    use crate::verify_authority::VerifyAuthorityDaemon;
+    use crate::{
+        AppendEntriesArgs, AppendEntriesReply, InstallSnapshotArgs,
+        InstallSnapshotReply, Peer, Persister, RaftState, RemoteRaft,
+        RequestVoteArgs, RequestVoteReply,
+    };
+
+    use super::RemoteContext;
+
+    struct DoNothingPersister;
+    impl Persister for DoNothingPersister {
+        fn read_state(&self) -> Bytes {
+            Bytes::new()
+        }
+
+        fn save_state(&self, _bytes: Bytes) {}
+
+        fn state_size(&self) -> usize {
+            0
+        }
+
+        fn save_snapshot_and_state(&self, _: Bytes, _: &[u8]) {}
+    }
+
+    struct DoNothingRemoteRaft;
+    #[async_trait]
+    impl<Command: 'static + Send> RemoteRaft<Command> for DoNothingRemoteRaft {
+        async fn request_vote(
+            &self,
+            _args: RequestVoteArgs,
+        ) -> std::io::Result<RequestVoteReply> {
+            unimplemented!()
+        }
+
+        async fn append_entries(
+            &self,
+            _args: AppendEntriesArgs<Command>,
+        ) -> std::io::Result<AppendEntriesReply> {
+            unimplemented!()
+        }
+
+        async fn install_snapshot(
+            &self,
+            _args: InstallSnapshotArgs,
+        ) -> std::io::Result<InstallSnapshotReply> {
+            unimplemented!()
+        }
+    }
+
+    #[test]
+    fn test_context_api() {
+        let rf = Arc::new(Mutex::new(RaftState::<i32>::create(1, Peer(0))));
+        let election = Arc::new(ElectionState::create());
+        let verify_authority_daemon = VerifyAuthorityDaemon::create(1);
+        let term_marker =
+            TermMarker::create(rf, election, Arc::new(DoNothingPersister));
+        let remote_peer = RemotePeer::create(
+            Peer(0),
+            DoNothingRemoteRaft,
+            verify_authority_daemon.beat_ticker(0),
+        );
+
+        let context =
+            Box::new(RemoteContext::create(term_marker, vec![remote_peer]));
+        let context_ptr: *const RemoteContext<i32> = &*context;
+
+        RemoteContext::set_context(context);
+
+        let fetched_context = RemoteContext::fetch_context();
+        let fetched_context_ptr: *const RemoteContext<i32> = fetched_context;
+        assert_eq!(context_ptr, fetched_context_ptr);
+
+        let detached_context = RemoteContext::detach();
+        let detached_context_ptr: *const RemoteContext<i32> =
+            &*detached_context;
+        assert_eq!(context_ptr, detached_context_ptr);
+    }
+}

+ 27 - 0
src/remote_peer.rs

@@ -0,0 +1,27 @@
+use std::sync::Arc;
+
+use crate::verify_authority::DaemonBeatTicker;
+use crate::RemoteRaft;
+
+#[derive(Clone)]
+pub(crate) struct RemotePeer<Command, UniqueId> {
+    #[allow(dead_code)]
+    pub unique_id: UniqueId,
+    pub rpc_client: Arc<dyn RemoteRaft<Command>>,
+    pub beat_ticker: DaemonBeatTicker,
+}
+
+impl<Command, UniqueId> RemotePeer<Command, UniqueId> {
+    pub fn create<RpcClient: 'static + RemoteRaft<Command>>(
+        unique_id: UniqueId,
+        rpc_client: RpcClient,
+        beat_ticker: DaemonBeatTicker,
+    ) -> Self {
+        let rpc_client = Arc::new(rpc_client);
+        RemotePeer {
+            unique_id,
+            rpc_client,
+            beat_ticker,
+        }
+    }
+}

+ 16 - 23
src/sync_log_entries.rs

@@ -6,12 +6,11 @@ use parking_lot::{Condvar, Mutex};
 use crate::daemon_env::ErrorKind;
 use crate::heartbeats::HEARTBEAT_INTERVAL;
 use crate::peer_progress::PeerProgress;
-use crate::term_marker::TermMarker;
+use crate::remote_context::RemoteContext;
 use crate::utils::{retry_rpc, SharedSender, RPC_DEADLINE};
-use crate::verify_authority::DaemonBeatTicker;
 use crate::{
     check_or_record, AppendEntriesArgs, Index, IndexTerm, InstallSnapshotArgs,
-    Peer, Raft, RaftState, RemoteRaft, ReplicableCommand, Term,
+    Peer, Raft, RaftState, ReplicableCommand, Term,
 };
 
 #[derive(Eq, PartialEq)]
@@ -132,9 +131,9 @@ impl<Command: ReplicableCommand> Raft<Command> {
                 if !this.inner_state.lock().is_leader() {
                     continue;
                 }
-                for (i, rpc_client) in this.peers.iter().enumerate() {
-                    if i != this.me.0 && event.should_schedule(Peer(i)) {
-                        let progress = &peer_progress[i];
+                for peer in this.peers.clone().into_iter() {
+                    if peer != this.me && event.should_schedule(peer) {
+                        let progress = &peer_progress[peer.0];
                         if let Event::NewTerm(_term, index) = event {
                             progress.reset_progress(index);
                         }
@@ -144,12 +143,9 @@ impl<Command: ReplicableCommand> Raft<Command> {
                             task_number += 1;
                             this.thread_pool.spawn(Self::sync_log_entries(
                                 this.inner_state.clone(),
-                                rpc_client.clone(),
                                 this.sync_log_entries_comms.clone(),
                                 progress.clone(),
                                 this.apply_command_signal.clone(),
-                                this.term_marker(),
-                                this.beat_ticker(i),
                                 TaskNumber(task_number),
                             ));
                         }
@@ -201,15 +197,11 @@ impl<Command: ReplicableCommand> Raft<Command> {
     /// failure of the last case, we will never hit the other failure again,
     /// since in the last case we always sync log entry at a committed index,
     /// and a committed log entry can never diverge.
-    #[allow(clippy::too_many_arguments)]
     async fn sync_log_entries(
         rf: Arc<Mutex<RaftState<Command>>>,
-        rpc_client: impl RemoteRaft<Command>,
         comms: SyncLogEntriesComms,
         progress: PeerProgress,
         apply_command_signal: Arc<Condvar>,
-        term_marker: TermMarker<Command>,
-        beat_ticker: DaemonBeatTicker,
         task_number: TaskNumber,
     ) {
         if !progress.take_task() {
@@ -224,8 +216,7 @@ impl<Command: ReplicableCommand> Raft<Command> {
                 let term = args.term;
                 let prev_log_index = args.prev_log_index;
                 let match_index = args.prev_log_index + args.entries.len();
-                let succeeded =
-                    Self::append_entries(&rpc_client, args, beat_ticker).await;
+                let succeeded = Self::append_entries(peer, args).await;
 
                 (term, prev_log_index, match_index, succeeded)
             }
@@ -233,9 +224,7 @@ impl<Command: ReplicableCommand> Raft<Command> {
                 let term = args.term;
                 let prev_log_index = args.last_included_index;
                 let match_index = args.last_included_index;
-                let succeeded =
-                    Self::install_snapshot(&rpc_client, args, beat_ticker)
-                        .await;
+                let succeeded = Self::install_snapshot(peer, args).await;
 
                 (term, prev_log_index, match_index, succeeded)
             }
@@ -367,7 +356,7 @@ impl<Command: ReplicableCommand> Raft<Command> {
             }
             // Do nothing, not our term anymore.
             Ok(SyncLogEntriesResult::TermElapsed(term)) => {
-                term_marker.mark(term);
+                RemoteContext::<Command>::term_marker().mark(term);
             }
             Err(_) => {
                 tokio::time::sleep(HEARTBEAT_INTERVAL).await;
@@ -479,10 +468,12 @@ impl<Command: ReplicableCommand> Raft<Command> {
 
     const APPEND_ENTRIES_RETRY: usize = 1;
     async fn append_entries(
-        rpc_client: &dyn RemoteRaft<Command>,
+        peer: Peer,
         args: AppendEntriesArgs<Command>,
-        beat_ticker: DaemonBeatTicker,
     ) -> std::io::Result<SyncLogEntriesResult> {
+        let beat_ticker = RemoteContext::<Command>::beat_ticker(peer);
+        let rpc_client = RemoteContext::<Command>::rpc_client(peer);
+
         let term = args.term;
         let beat = beat_ticker.next_beat();
         let reply = retry_rpc(
@@ -522,10 +513,12 @@ impl<Command: ReplicableCommand> Raft<Command> {
 
     const INSTALL_SNAPSHOT_RETRY: usize = 1;
     async fn install_snapshot(
-        rpc_client: &dyn RemoteRaft<Command>,
+        peer: Peer,
         args: InstallSnapshotArgs,
-        beat_ticker: DaemonBeatTicker,
     ) -> std::io::Result<SyncLogEntriesResult> {
+        let beat_ticker = RemoteContext::<Command>::beat_ticker(peer);
+        let rpc_client = RemoteContext::<Command>::rpc_client(peer);
+
         let term = args.term;
         let beat = beat_ticker.next_beat();
         let reply = retry_rpc(

+ 14 - 12
src/term_marker.rs

@@ -4,7 +4,7 @@ use parking_lot::Mutex;
 use serde::Serialize;
 
 use crate::election::ElectionState;
-use crate::{Persister, Raft, RaftState, State, Term};
+use crate::{Persister, RaftState, State, Term};
 
 /// A closure that updates the `Term` of the `RaftState`.
 #[derive(Clone)]
@@ -15,23 +15,25 @@ pub(crate) struct TermMarker<Command> {
 }
 
 impl<Command: Clone + Serialize> TermMarker<Command> {
+    /// Create a `TermMarker` that can be passed to async tasks.
+    pub fn create(
+        rf: Arc<Mutex<RaftState<Command>>>,
+        election: Arc<ElectionState>,
+        persister: Arc<dyn Persister>,
+    ) -> Self {
+        Self {
+            rf,
+            election,
+            persister,
+        }
+    }
+
     pub fn mark(&self, term: Term) {
         let mut rf = self.rf.lock();
         mark_term(&mut rf, &self.election, self.persister.as_ref(), term)
     }
 }
 
-impl<Command: Clone + Serialize> Raft<Command> {
-    /// Create a `TermMarker` that can be passed to tasks.
-    pub(crate) fn term_marker(&self) -> TermMarker<Command> {
-        TermMarker {
-            rf: self.inner_state.clone(),
-            election: self.election.clone(),
-            persister: self.persister.clone(),
-        }
-    }
-}
-
 /// Update the term of the `RaftState`.
 pub(crate) fn mark_term<Command: Clone + Serialize>(
     rf: &mut RaftState<Command>,

+ 0 - 4
src/verify_authority.rs

@@ -400,10 +400,6 @@ impl<Command: 'static + Send> Raft<Command> {
                 .expect("Verify authority daemon never drops senders")
         })
     }
-
-    pub(crate) fn beat_ticker(&self, peer_index: usize) -> DaemonBeatTicker {
-        self.verify_authority_daemon.beat_ticker(peer_index)
-    }
 }
 
 #[cfg(test)]