@@ -1,20 +1,81 @@
-use std::sync::atomic::{AtomicUsize, Ordering};
+use std::sync::atomic::Ordering;
 use std::sync::Arc;
 
 use parking_lot::{Condvar, Mutex};
 
 use crate::daemon_env::{Daemon, ErrorKind};
 use crate::heartbeats::HEARTBEAT_INTERVAL;
+use crate::peer_progress::PeerProgress;
 use crate::term_marker::TermMarker;
 use crate::utils::{retry_rpc, SharedSender, RPC_DEADLINE};
 use crate::verify_authority::DaemonBeatTicker;
 use crate::{
-    check_or_record, AppendEntriesArgs, IndexTerm, InstallSnapshotArgs, Peer,
-    Raft, RaftState, RemoteRaft, ReplicableCommand, Term,
+    check_or_record, AppendEntriesArgs, Index, IndexTerm, InstallSnapshotArgs,
+    Peer, Raft, RaftState, RemoteRaft, ReplicableCommand, Term,
 };
 
-#[repr(align(64))]
-struct Opening(Arc<AtomicUsize>);
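+/// An event that wakes up the log syncing daemon.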
+#[derive(Clone, Eq, PartialEq)]
+enum Event {
+    NewTerm(Term, Index),
+    NewLogEntry(Index),
+    Rerun(Peer),
+    Shutdown,
+}
+
+impl Event {
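+    /// Whether this event should trigger a sync task for the given peer.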
+    fn should_schedule(&self, peer: Peer) -> bool {
+        match self {
+            Event::NewTerm(..) => true,
+            Event::NewLogEntry(_index) => true,
+            Event::Rerun(p) => p == &peer,
+            Event::Shutdown => false,
+        }
+    }
+}
+
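+/// A cloneable handle for sending events to the log syncing daemon.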
+#[derive(Clone)]
+pub(crate) struct SyncLogEntriesComms {
+    tx: crate::utils::SharedSender<Event>,
+}
+
+impl SyncLogEntriesComms {
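+    /// Notifies the daemon that log entries up to `index` should be
+    /// replicated to followers.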
+    pub fn update_followers(&self, index: Index) {
+        // Ignore the error. The log syncing thread must have died.
+        let _ = self.tx.send(Event::NewLogEntry(index));
+    }
+
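+    /// Notifies the daemon that a new term started, resetting all peer
+    /// progress to `index`.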
+    pub fn reset_progress(&self, term: Term, index: Index) {
+        let _ = self.tx.send(Event::NewTerm(term, index));
+    }
+
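+    /// Shuts the daemon down. Panics if the daemon has already exited.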
+    pub fn kill(&self) {
+        self.tx
+            .send(Event::Shutdown)
+            .expect("The sync log entries daemon should still be alive");
+    }
+
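+    /// Schedules another sync round for `peer`, used to retry after
+    /// failures or partial progress.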
+    fn rerun(&self, peer: Peer) {
+        // Ignore the error. The log syncing thread must have died.
+        let _ = self.tx.send(Event::Rerun(peer));
+    }
+}
+
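+/// The daemon state: the event queue plus per-peer replication progress.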
+pub(crate) struct SyncLogEntriesDaemon {
+    rx: std::sync::mpsc::Receiver<Event>,
+    peer_progress: Vec<PeerProgress>,
+}
+
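+/// Creates a connected comms / daemon pair with one progress tracker per peer.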
+pub(crate) fn create(
+    peer_size: usize,
+) -> (SyncLogEntriesComms, SyncLogEntriesDaemon) {
+    let (tx, rx) = std::sync::mpsc::channel();
+    let tx = SharedSender::new(tx);
+    let peer_progress = (0..peer_size).map(PeerProgress::create).collect();
+    (
+        SyncLogEntriesComms { tx },
+        SyncLogEntriesDaemon { rx, peer_progress },
+    )
+}
 
 enum SyncLogEntriesOperation<Command> {
     AppendEntries(AppendEntriesArgs<Command>),
@@ -54,24 +115,17 @@ impl<Command: ReplicableCommand> Raft<Command> {
     ///
     /// See comments on [`Raft::sync_log_entries`] to learn about the syncing
     /// and backoff strategy.
-    pub(crate) fn run_log_entry_daemon(&mut self) {
-        let (tx, rx) = std::sync::mpsc::channel::<Option<Peer>>();
-        let tx = SharedSender::new(tx);
-        self.new_log_entry.replace(tx);
-
+    pub(crate) fn run_log_entry_daemon(
+        &self,
+        SyncLogEntriesDaemon { rx, peer_progress }: SyncLogEntriesDaemon,
+    ) {
         // Clone everything that the thread needs.
         let this = self.clone();
         let sync_log_entry_daemon = move || {
             log::info!("{:?} sync log entries daemon running ...", this.me);
 
-            let mut openings = vec![];
-            openings.resize_with(this.peers.len(), || {
-                Opening(Arc::new(AtomicUsize::new(0)))
-            });
-            let openings = openings; // Not mutable beyond this point.
-
             let mut task_number = 0;
-            while let Ok(peer) = rx.recv() {
+            while let Ok(event) = rx.recv() {
                 if !this.keep_running.load(Ordering::Relaxed) {
                     break;
                 }
@@ -79,18 +133,20 @@ impl<Command: ReplicableCommand> Raft<Command> {
                     continue;
                 }
                 for (i, rpc_client) in this.peers.iter().enumerate() {
-                    if i != this.me.0 && peer.map(|p| p.0 == i).unwrap_or(true)
-                    {
+                    if i != this.me.0 && event.should_schedule(Peer(i)) {
+                        let progress = &peer_progress[i];
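+                        // On a new term, reset this peer's replication
+                        // progress before scheduling any work.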
+                        if let Event::NewTerm(_term, index) = event {
+                            progress.reset_progress(index);
+                        }
                         // Only schedule a new task if the last task has cleared
                         // the queue of RPC requests.
-                        if openings[i].0.fetch_add(1, Ordering::AcqRel) == 0 {
+                        if progress.should_schedule() {
                             task_number += 1;
                             this.thread_pool.spawn(Self::sync_log_entries(
                                 this.inner_state.clone(),
                                 rpc_client.clone(),
-                                i,
-                                this.new_log_entry.clone().unwrap(),
-                                openings[i].0.clone(),
+                                this.sync_log_entries_comms.clone(),
+                                progress.clone(),
                                 this.apply_command_signal.clone(),
                                 this.term_marker(),
                                 this.beat_ticker(i),
@@ -151,20 +207,20 @@ impl<Command: ReplicableCommand> Raft<Command> {
     async fn sync_log_entries(
         rf: Arc<Mutex<RaftState<Command>>>,
         rpc_client: impl RemoteRaft<Command>,
-        peer_index: usize,
-        rerun: SharedSender<Option<Peer>>,
-        opening: Arc<AtomicUsize>,
+        comms: SyncLogEntriesComms,
+        progress: PeerProgress,
         apply_command_signal: Arc<Condvar>,
         term_marker: TermMarker<Command>,
         beat_ticker: DaemonBeatTicker,
         task_number: TaskNumber,
     ) {
-        if opening.swap(0, Ordering::AcqRel) == 0 {
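+        // Claim the queued sync request; bail out if another task took it.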
+        if !progress.take_task() {
             return;
         }
 
+        let peer = progress.peer;
         let operation =
-            Self::build_sync_log_entries(&rf, peer_index, task_number);
+            Self::build_sync_log_entries(&rf, &progress, task_number);
         let (term, prev_log_index, match_index, succeeded) = match operation {
             SyncLogEntriesOperation::AppendEntries(args) => {
                 let term = args.term;
@@ -188,7 +244,6 @@ impl<Command: ReplicableCommand> Raft<Command> {
             SyncLogEntriesOperation::None => return,
         };
 
-        let peer = Peer(peer_index);
         match succeeded {
             Ok(SyncLogEntriesResult::Success) => {
                 let mut rf = rf.lock();
@@ -208,8 +263,8 @@ impl<Command: ReplicableCommand> Raft<Command> {
                     &rf
                 );
 
-                rf.next_index[peer_index] = match_index + 1;
-                rf.current_step[peer_index] = 0;
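+                // record_success() takes over the per-peer progress updates
+                // removed above.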
+                progress.record_success(match_index);
+                let peer_index = peer.0;
                 if match_index > rf.match_index[peer_index] {
                     rf.match_index[peer_index] = match_index;
                     let mut matched = rf.match_index.to_vec();
@@ -260,11 +315,11 @@ impl<Command: ReplicableCommand> Raft<Command> {
                 if prev_log_index == match_index {
                     // If we did not make any progress this time, try again.
                     // This can only happen when installing snapshots.
-                    let _ = rerun.send(Some(Peer(peer_index)));
+                    comms.rerun(peer);
                 }
             }
             Ok(SyncLogEntriesResult::Archived(committed)) => {
-                let mut rf = rf.lock();
+                let rf = rf.lock();
 
                 check_or_record!(
                     prev_log_index < committed.index,
@@ -273,26 +328,25 @@ impl<Command: ReplicableCommand> Raft<Command> {
                         committed.index
                     ),
                     format!(
-                        "Peer {} misbehaves: claimed log index {} is archived, \
+                        "{:?} misbehaves: claimed log index {} is archived, \
                         but commit index is at {:?}) which is before that",
-                        peer_index, prev_log_index, committed
+                        peer, prev_log_index, committed
                     ),
                     &rf
                 );
 
                 Self::check_committed(&rf, peer, committed.clone());
 
-                rf.current_step[peer_index] = 0;
                 // Next index moves towards the log end. This is the only place
                 // where that happens. committed.index should be between log
                 // start and end, guaranteed by check_committed() above.
-                rf.next_index[peer_index] = committed.index + 1;
+                progress.record_success(committed.index + 1);
 
                 // Ignore the error. The log syncing thread must have died.
-                let _ = rerun.send(Some(Peer(peer_index)));
+                comms.rerun(peer);
             }
             Ok(SyncLogEntriesResult::Diverged(committed)) => {
-                let mut rf = rf.lock();
+                let rf = rf.lock();
                 check_or_record!(
                     prev_log_index > committed.index,
                     ErrorKind::DivergedBeforeCommitted(
@@ -300,33 +354,18 @@ impl<Command: ReplicableCommand> Raft<Command> {
                         committed.index
                     ),
                     format!(
-                        "Peer {} claimed log index {} does not match, \
+                        "{:?} claimed log index {} does not match, \
                         but commit index is at {:?}) which is after that.",
-                        peer_index, prev_log_index, committed
+                        peer, prev_log_index, committed
                     ),
                     &rf
                 );
                 Self::check_committed(&rf, peer, committed.clone());
 
-                let step = &mut rf.current_step[peer_index];
-                if *step < 5 {
-                    *step += 1;
-                }
-                let diff = 4 << *step;
-
-                let next_index = &mut rf.next_index[peer_index];
-                if diff >= *next_index {
-                    *next_index = 1usize;
-                } else {
-                    *next_index -= diff;
-                }
-
-                if *next_index < committed.index {
-                    *next_index = committed.index;
-                }
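+                // record_failure() takes over the exponential backoff removed
+                // above, never moving next_index below committed.index.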
+                progress.record_failure(committed.index);
 
                 // Ignore the error. The log syncing thread must have died.
-                let _ = rerun.send(Some(Peer(peer_index)));
+                comms.rerun(peer);
             }
             // Do nothing, not our term anymore.
             Ok(SyncLogEntriesResult::TermElapsed(term)) => {
@@ -335,7 +374,7 @@ impl<Command: ReplicableCommand> Raft<Command> {
             Err(_) => {
                 tokio::time::sleep(HEARTBEAT_INTERVAL).await;
                 // Ignore the error. The log syncing thread must have died.
-                let _ = rerun.send(Some(Peer(peer_index)));
+                comms.rerun(peer);
             }
         };
     }
@@ -374,7 +413,7 @@ impl<Command: ReplicableCommand> Raft<Command> {
 
     fn build_sync_log_entries(
         rf: &Mutex<RaftState<Command>>,
-        peer_index: usize,
+        progress: &PeerProgress,
         task_number: TaskNumber,
     ) -> SyncLogEntriesOperation<Command> {
         let rf = rf.lock();
@@ -382,27 +421,30 @@ impl<Command: ReplicableCommand> Raft<Command> {
             return SyncLogEntriesOperation::None;
         }
 
+        let peer = progress.peer;
+
         // To send AppendEntries request, next_index must be strictly larger
        // than start(). Otherwise we won't be able to know the log term of the
         // entry right before next_index.
-        if rf.next_index[peer_index] > rf.log.start() {
-            if rf.next_index[peer_index] < rf.log.end() {
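+        // Read the peer's next_index once and use it for the whole build.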
+        let next_index = progress.next_index();
+        if next_index > rf.log.start() {
+            if next_index < rf.log.end() {
                 log::debug!(
                     "{:?} building append entries {:?} from {} to {:?}",
                     rf.leader_id,
                     task_number,
-                    rf.next_index[peer_index] - 1,
-                    Peer(peer_index)
+                    next_index - 1,
+                    peer
                 );
                 SyncLogEntriesOperation::AppendEntries(
-                    Self::build_append_entries(&rf, peer_index),
+                    Self::build_append_entries(&rf, next_index),
                 )
             } else {
                 log::debug!(
                     "{:?} nothing in append entries {:?} to {:?}",
                     rf.leader_id,
                     task_number,
-                    Peer(peer_index)
+                    peer
                 );
                 SyncLogEntriesOperation::None
             }
@@ -412,7 +454,7 @@ impl<Command: ReplicableCommand> Raft<Command> {
             rf.leader_id,
             task_number,
             rf.log.first_index_term().index,
-            Peer(peer_index)
+            peer,
         );
         SyncLogEntriesOperation::InstallSnapshot(
             Self::build_install_snapshot(&rf),
@@ -422,17 +464,17 @@ impl<Command: ReplicableCommand> Raft<Command> {
 
     fn build_append_entries(
         rf: &RaftState<Command>,
-        peer_index: usize,
+        next_index: Index,
     ) -> AppendEntriesArgs<Command> {
         // It is guaranteed that next_index <= rf.log.end(). Panic otherwise.
-        let prev_log_index = rf.next_index[peer_index] - 1;
+        let prev_log_index = next_index - 1;
         let prev_log_term = rf.log.at(prev_log_index).term;
         AppendEntriesArgs {
             term: rf.current_term,
             leader_id: rf.leader_id,
             prev_log_index,
             prev_log_term,
-            entries: rf.log.after(rf.next_index[peer_index]).to_vec(),
+            entries: rf.log.after(next_index).to_vec(),
             leader_commit: rf.commit_index,
         }
     }