feat: support for syncing pre-genesis blocks #203

Merged · 21 commits · Oct 9, 2024 · Changes from 16 commits

2 changes: 1 addition & 1 deletion .github/workflows/protobuf.yaml
@@ -72,4 +72,4 @@ jobs:
with:
github_token: ${{ github.token }}
- name: buf breaking
- run: buf breaking './after.binpb' --against './before.binpb' --config '{"version":"v1","breaking":{"use":["WIRE_JSON","WIRE"]}}' --error-format 'github-actions'
+ run: buf breaking './after.binpb' --against './before.binpb' --config '{"version":"v1","breaking":{"use":["WIRE_JSON"]}}' --error-format 'github-actions'
7 changes: 7 additions & 0 deletions node/actors/bft/src/lib.rs
@@ -70,6 +70,13 @@ impl Config {
anyhow::ensure!(genesis.protocol_version == validator::ProtocolVersion::CURRENT);
genesis.verify().context("genesis().verify()")?;

+ if let Some(prev) = genesis.first_block.prev() {
+     tracing::info!("Waiting for the pre-genesis blocks to be persisted");
+     if let Err(ctx::Canceled) = self.block_store.wait_until_persisted(ctx, prev).await {
+         return Ok(());
+     }
+ }
+
let cfg = Arc::new(self);
let (leader, leader_send) = leader::StateMachine::new(ctx, cfg.clone(), pipe.send.clone());
let (replica, replica_send) =
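
Note: this guard is the core of the sync-pre-genesis feature. When the chain is configured to start consensus above block zero, `genesis.first_block.prev()` is the last pre-genesis block number (presumably `None` when the chain starts at block zero, in which case the wait is skipped), and the BFT actor does not start its leader/replica state machines until that block is persisted; cancellation is treated as a clean shutdown, hence `return Ok(())`. A minimal sketch of the waiting primitive this relies on, assuming a watch-channel persistence cursor in the spirit of `zksync_consensus_storage` (the type and method below are illustrative, not the crate's actual API):

```rust
use zksync_concurrency::{ctx, sync};

/// Illustrative stand-in for the block store's persistence watermark.
struct PersistedCursor(sync::watch::Sender<u64>);

impl PersistedCursor {
    /// Resolves once the persisted watermark reaches `number`.
    /// Returns Err(ctx::Canceled) if the context is canceled first,
    /// which the caller above maps to a clean Ok(()) shutdown.
    async fn wait_until_persisted(&self, ctx: &ctx::Ctx, number: u64) -> ctx::OrCanceled<()> {
        sync::wait_for(ctx, &mut self.0.subscribe(), |last| *last >= number).await?;
        Ok(())
    }
}
```
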
2 changes: 1 addition & 1 deletion node/actors/bft/src/replica/block.rs
@@ -44,7 +44,7 @@ impl StateMachine {
);
self.config
.block_store
- .queue_block(ctx, block.clone())
+ .queue_block(ctx, block.clone().into())
.await?;
// For availability, replica should not proceed until it stores the block persistently.
self.config
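
Note: the only change here is the added `.into()`: `queue_block` now takes a broader block type that can also represent pre-genesis blocks, so the freshly committed `FinalBlock` is wrapped via a `From` conversion. A self-contained sketch of what such a type could look like; the enum and struct names below are assumptions for illustration, not the actual `validator` types:

```rust
/// Placeholder for a block justified by a CommitQC (post-genesis).
struct FinalBlock;
/// Placeholder for a block predating this chain's genesis.
struct PreGenesisBlock;

/// A block the store can queue, whether consensus justified it or not.
enum Block {
    Final(FinalBlock),
    PreGenesis(PreGenesisBlock),
}

impl From<FinalBlock> for Block {
    fn from(b: FinalBlock) -> Self {
        Block::Final(b)
    }
}
```
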
2 changes: 1 addition & 1 deletion node/actors/bft/src/replica/tests.rs
@@ -224,7 +224,7 @@ async fn leader_prepare_invalid_payload() {
util.replica
.config
.block_store
- .queue_block(ctx, block)
+ .queue_block(ctx, block.into())
.await
.unwrap();

1 change: 0 additions & 1 deletion node/actors/bft/src/testonly/node.rs
@@ -34,7 +34,6 @@ pub(super) struct Node {
pub(crate) net: network::Config,
pub(crate) behavior: Behavior,
pub(crate) block_store: Arc<storage::BlockStore>,
- pub(crate) batch_store: Arc<storage::BatchStore>,
}

impl Node {
25 changes: 9 additions & 16 deletions node/actors/bft/src/testonly/run.rs
@@ -15,7 +15,7 @@ use zksync_concurrency::{
oneshot, scope,
};
use zksync_consensus_network::{self as network};
- use zksync_consensus_roles::validator;
+ use zksync_consensus_roles::{validator, validator::testonly::Setup};
use zksync_consensus_storage::{testonly::TestMemoryStorage, BlockStore};
use zksync_consensus_utils::pipe;

@@ -114,26 +114,23 @@ impl Test {
/// Run a test with the given parameters and a random network setup.
pub(crate) async fn run(&self, ctx: &ctx::Ctx) -> Result<(), TestError> {
let rng = &mut ctx.rng();
- let setup = validator::testonly::Setup::new_with_weights(
- rng,
- self.nodes.iter().map(|(_, w)| *w).collect(),
- );
+ let setup = Setup::new_with_weights(rng, self.nodes.iter().map(|(_, w)| *w).collect());
let nets: Vec<_> = network::testonly::new_configs(rng, &setup, 1);
- self.run_with_config(ctx, nets, &setup.genesis).await
+ self.run_with_config(ctx, nets, &setup).await
}

/// Run a test with the given parameters and network configuration.
pub(crate) async fn run_with_config(
&self,
ctx: &ctx::Ctx,
nets: Vec<Config>,
- genesis: &validator::Genesis,
+ setup: &Setup,
) -> Result<(), TestError> {
let mut nodes = vec![];
let mut honest = vec![];
scope::run!(ctx, |ctx, s| async {
for (i, net) in nets.into_iter().enumerate() {
- let store = TestMemoryStorage::new(ctx, genesis).await;
+ let store = TestMemoryStorage::new(ctx, setup).await;
s.spawn_bg(async { Ok(store.runner.run(ctx).await?) });

if self.nodes[i].0 == Behavior::Honest {
@@ -144,15 +141,14 @@
net,
behavior: self.nodes[i].0,
block_store: store.blocks,
- batch_store: store.batches,
});
}
assert!(!honest.is_empty());
s.spawn_bg(async { Ok(run_nodes(ctx, &self.network, &nodes).await?) });

// Run the nodes until all honest nodes store enough finalized blocks.
assert!(self.blocks_to_finalize > 0);
- let first = genesis.first_block;
+ let first = setup.genesis.first_block;
let last = first + (self.blocks_to_finalize as u64 - 1);
for store in &honest {
store.wait_until_queued(ctx, last).await?;
@@ -165,7 +161,7 @@
let want = honest[0].block(ctx, i).await?.context("missing block")?;
for store in &honest[1..] {
let got = store.block(ctx, i).await?.context("missing block")?;
- if want.payload != got.payload {
+ if want.payload() != got.payload() {
return Err(TestError::BlockConflict);
}
}
@@ -189,11 +185,8 @@ async fn run_nodes_real(ctx: &ctx::Ctx, specs: &[Node]) -> anyhow::Result<()> {
scope::run!(ctx, |ctx, s| async {
let mut nodes = vec![];
for (i, spec) in specs.iter().enumerate() {
- let (node, runner) = network::testonly::Instance::new(
- spec.net.clone(),
- spec.block_store.clone(),
- spec.batch_store.clone(),
- );
+ let (node, runner) =
+ network::testonly::Instance::new(spec.net.clone(), spec.block_store.clone());
s.spawn_bg(runner.run(ctx).instrument(tracing::info_span!("node", i)));
nodes.push(node);
}
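
Note: two related changes in this file: the test store is now built from the whole `Setup` (which can carry pre-genesis blocks) rather than just the `Genesis`, and block payloads are compared through a `payload()` accessor instead of a `payload` field, consistent with blocks becoming an enum. A compilable sketch of such an accessor, with all type shapes assumed for illustration:

```rust
#[derive(Clone, PartialEq)]
struct Payload(Vec<u8>);

struct FinalBlock {
    payload: Payload,
}
struct PreGenesisBlock {
    payload: Payload,
}

enum Block {
    Final(FinalBlock),
    PreGenesis(PreGenesisBlock),
}

impl Block {
    /// A single accessor covering both block kinds, which is why the
    /// comparison above calls payload() on both `want` and `got`.
    fn payload(&self) -> &Payload {
        match self {
            Block::Final(b) => &b.payload,
            Block::PreGenesis(b) => &b.payload,
        }
    }
}
```
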
89 changes: 46 additions & 43 deletions node/actors/bft/src/testonly/ut_harness.rs
@@ -10,11 +10,11 @@ use assert_matches::assert_matches;
use std::sync::Arc;
use zksync_concurrency::{ctx, sync::prunable_mpsc};
use zksync_consensus_network as network;
- use zksync_consensus_roles::validator::{
- self, CommitQC, LeaderCommit, LeaderPrepare, Phase, PrepareQC, ReplicaCommit, ReplicaPrepare,
- SecretKey, Signed, ViewNumber,
+ use zksync_consensus_roles::validator;
+ use zksync_consensus_storage::{
+ testonly::{in_memory, TestMemoryStorage},
+ BlockStoreRunner,
};
- use zksync_consensus_storage::testonly::{in_memory, TestMemoryStorage, TestMemoryStorageRunner};
use zksync_consensus_utils::enum_util::Variant;

pub(crate) const MAX_PAYLOAD_SIZE: usize = 1000;
@@ -28,7 +28,7 @@ pub(crate) const MAX_PAYLOAD_SIZE: usize = 1000;
pub(crate) struct UTHarness {
pub(crate) leader: leader::StateMachine,
pub(crate) replica: replica::StateMachine,
- pub(crate) keys: Vec<SecretKey>,
+ pub(crate) keys: Vec<validator::SecretKey>,
pub(crate) leader_send: prunable_mpsc::Sender<network::io::ConsensusReq>,
pipe: ctx::channel::UnboundedReceiver<OutputMessage>,
}
@@ -38,7 +38,7 @@ impl UTHarness {
pub(crate) async fn new(
ctx: &ctx::Ctx,
num_validators: usize,
- ) -> (UTHarness, TestMemoryStorageRunner) {
+ ) -> (UTHarness, BlockStoreRunner) {
Self::new_with_payload(
ctx,
num_validators,
@@ -51,10 +51,10 @@
ctx: &ctx::Ctx,
num_validators: usize,
payload_manager: Box<dyn PayloadManager>,
- ) -> (UTHarness, TestMemoryStorageRunner) {
+ ) -> (UTHarness, BlockStoreRunner) {
let rng = &mut ctx.rng();
let setup = validator::testonly::Setup::new(rng, num_validators);
- let store = TestMemoryStorage::new(ctx, &setup.genesis).await;
+ let store = TestMemoryStorage::new(ctx, &setup).await;
let (send, recv) = ctx::channel::unbounded();

let cfg = Arc::new(Config {
@@ -75,23 +75,23 @@
keys: setup.validator_keys.clone(),
leader_send,
};
- let _: Signed<ReplicaPrepare> = this.try_recv().unwrap();
+ let _: validator::Signed<validator::ReplicaPrepare> = this.try_recv().unwrap();
(this, store.runner)
}

/// Creates a new `UTHarness` with minimally-significant validator set size.
- pub(crate) async fn new_many(ctx: &ctx::Ctx) -> (UTHarness, TestMemoryStorageRunner) {
+ pub(crate) async fn new_many(ctx: &ctx::Ctx) -> (UTHarness, BlockStoreRunner) {
let num_validators = 6;
let (util, runner) = UTHarness::new(ctx, num_validators).await;
assert!(util.genesis().validators.max_faulty_weight() > 0);
(util, runner)
}

- /// Triggers replica timeout, validates the new ReplicaPrepare
+ /// Triggers replica timeout, validates the new validator::ReplicaPrepare
/// then executes the whole new view to make sure that the consensus
/// recovers after a timeout.
pub(crate) async fn produce_block_after_timeout(&mut self, ctx: &ctx::Ctx) {
- let want = ReplicaPrepare {
+ let want = validator::ReplicaPrepare {
view: validator::View {
genesis: self.genesis().hash(),
number: self.replica.view.next(),
@@ -112,11 +112,11 @@
.unwrap();
}

- pub(crate) fn owner_key(&self) -> &SecretKey {
+ pub(crate) fn owner_key(&self) -> &validator::SecretKey {
&self.replica.config.secret_key
}

- pub(crate) fn sign<V: Variant<validator::Msg>>(&self, msg: V) -> Signed<V> {
+ pub(crate) fn sign<V: Variant<validator::Msg>>(&self, msg: V) -> validator::Signed<V> {
self.replica.config.secret_key.sign_msg(msg)
}

@@ -135,54 +135,54 @@
}
}

- pub(crate) fn new_replica_prepare(&mut self) -> ReplicaPrepare {
+ pub(crate) fn new_replica_prepare(&mut self) -> validator::ReplicaPrepare {
self.set_owner_as_view_leader();
- ReplicaPrepare {
+ validator::ReplicaPrepare {
view: self.replica_view(),
high_vote: self.replica.high_vote.clone(),
high_qc: self.replica.high_qc.clone(),
}
}

- pub(crate) fn new_current_replica_commit(&self) -> ReplicaCommit {
- ReplicaCommit {
+ pub(crate) fn new_current_replica_commit(&self) -> validator::ReplicaCommit {
+ validator::ReplicaCommit {
view: self.replica_view(),
proposal: self.replica.high_qc.as_ref().unwrap().message.proposal,
}
}

- pub(crate) async fn new_leader_prepare(&mut self, ctx: &ctx::Ctx) -> LeaderPrepare {
+ pub(crate) async fn new_leader_prepare(&mut self, ctx: &ctx::Ctx) -> validator::LeaderPrepare {
let msg = self.new_replica_prepare();
self.process_replica_prepare_all(ctx, msg).await.msg
}

- pub(crate) async fn new_replica_commit(&mut self, ctx: &ctx::Ctx) -> ReplicaCommit {
+ pub(crate) async fn new_replica_commit(&mut self, ctx: &ctx::Ctx) -> validator::ReplicaCommit {
let msg = self.new_leader_prepare(ctx).await;
self.process_leader_prepare(ctx, self.sign(msg))
.await
.unwrap()
.msg
}

- pub(crate) async fn new_leader_commit(&mut self, ctx: &ctx::Ctx) -> LeaderCommit {
+ pub(crate) async fn new_leader_commit(&mut self, ctx: &ctx::Ctx) -> validator::LeaderCommit {
let msg = self.new_replica_commit(ctx).await;
self.process_replica_commit_all(ctx, msg).await.msg
}

pub(crate) async fn process_leader_prepare(
&mut self,
ctx: &ctx::Ctx,
- msg: Signed<LeaderPrepare>,
- ) -> Result<Signed<ReplicaCommit>, leader_prepare::Error> {
+ msg: validator::Signed<validator::LeaderPrepare>,
+ ) -> Result<validator::Signed<validator::ReplicaCommit>, leader_prepare::Error> {
self.replica.process_leader_prepare(ctx, msg).await?;
Ok(self.try_recv().unwrap())
}

pub(crate) async fn process_leader_commit(
&mut self,
ctx: &ctx::Ctx,
- msg: Signed<LeaderCommit>,
- ) -> Result<Signed<ReplicaPrepare>, leader_commit::Error> {
+ msg: validator::Signed<validator::LeaderCommit>,
+ ) -> Result<validator::Signed<validator::ReplicaPrepare>, leader_commit::Error> {
self.replica.process_leader_commit(ctx, msg).await?;
Ok(self.try_recv().unwrap())
}
@@ -191,8 +191,8 @@
pub(crate) async fn process_replica_prepare(
&mut self,
ctx: &ctx::Ctx,
- msg: Signed<ReplicaPrepare>,
- ) -> Result<Option<Signed<LeaderPrepare>>, replica_prepare::Error> {
+ msg: validator::Signed<validator::ReplicaPrepare>,
+ ) -> Result<Option<validator::Signed<validator::LeaderPrepare>>, replica_prepare::Error> {
let prepare_qc = self.leader.prepare_qc.subscribe();
self.leader.process_replica_prepare(ctx, msg).await?;
if prepare_qc.has_changed().unwrap() {
@@ -212,8 +212,8 @@
pub(crate) async fn process_replica_prepare_all(
&mut self,
ctx: &ctx::Ctx,
- msg: ReplicaPrepare,
- ) -> Signed<LeaderPrepare> {
+ msg: validator::ReplicaPrepare,
+ ) -> validator::Signed<validator::LeaderPrepare> {
let mut leader_prepare = None;
let msgs: Vec<_> = self.keys.iter().map(|k| k.sign_msg(msg.clone())).collect();
let mut first_match = true;
@@ -238,17 +238,17 @@
pub(crate) async fn process_replica_commit(
&mut self,
ctx: &ctx::Ctx,
- msg: Signed<ReplicaCommit>,
- ) -> Result<Option<Signed<LeaderCommit>>, replica_commit::Error> {
+ msg: validator::Signed<validator::ReplicaCommit>,
+ ) -> Result<Option<validator::Signed<validator::LeaderCommit>>, replica_commit::Error> {
self.leader.process_replica_commit(ctx, msg)?;
Ok(self.try_recv())
}

async fn process_replica_commit_all(
&mut self,
ctx: &ctx::Ctx,
- msg: ReplicaCommit,
- ) -> Signed<LeaderCommit> {
+ msg: validator::ReplicaCommit,
+ ) -> validator::Signed<validator::LeaderCommit> {
let mut first_match = true;
for (i, key) in self.keys.iter().enumerate() {
let res = self
@@ -270,7 +270,7 @@
self.try_recv().unwrap()
}

- fn try_recv<V: Variant<validator::Msg>>(&mut self) -> Option<Signed<V>> {
+ fn try_recv<V: Variant<validator::Msg>>(&mut self) -> Option<validator::Signed<V>> {
self.pipe.try_recv().map(|message| match message {
OutputMessage::Network(network::io::ConsensusInputMessage { message, .. }) => {
message.cast().unwrap()
@@ -281,27 +281,30 @@
pub(crate) async fn process_replica_timeout(
&mut self,
ctx: &ctx::Ctx,
- ) -> Signed<ReplicaPrepare> {
+ ) -> validator::Signed<validator::ReplicaPrepare> {
self.replica.start_new_view(ctx).await.unwrap();
self.try_recv().unwrap()
}

- pub(crate) fn leader_phase(&self) -> Phase {
+ pub(crate) fn leader_phase(&self) -> validator::Phase {
self.leader.phase
}

- pub(crate) fn view_leader(&self, view: ViewNumber) -> validator::PublicKey {
+ pub(crate) fn view_leader(&self, view: validator::ViewNumber) -> validator::PublicKey {
self.genesis().view_leader(view)
}

pub(crate) fn genesis(&self) -> &validator::Genesis {
self.replica.config.genesis()
}

- pub(crate) fn new_commit_qc(&self, mutate_fn: impl FnOnce(&mut ReplicaCommit)) -> CommitQC {
+ pub(crate) fn new_commit_qc(
+ &self,
+ mutate_fn: impl FnOnce(&mut validator::ReplicaCommit),
+ ) -> validator::CommitQC {
let mut msg = self.new_current_replica_commit();
mutate_fn(&mut msg);
- let mut qc = CommitQC::new(msg, self.genesis());
+ let mut qc = validator::CommitQC::new(msg, self.genesis());
for key in &self.keys {
qc.add(&key.sign_msg(qc.message.clone()), self.genesis())
.unwrap();
@@ -311,18 +314,18 @@

pub(crate) fn new_prepare_qc(
&mut self,
- mutate_fn: impl FnOnce(&mut ReplicaPrepare),
- ) -> PrepareQC {
+ mutate_fn: impl FnOnce(&mut validator::ReplicaPrepare),
+ ) -> validator::PrepareQC {
let mut msg = self.new_replica_prepare();
mutate_fn(&mut msg);
- let mut qc = PrepareQC::new(msg.view.clone());
+ let mut qc = validator::PrepareQC::new(msg.view.clone());
for key in &self.keys {
qc.add(&key.sign_msg(msg.clone()), self.genesis()).unwrap();
}
qc
}

- pub(crate) fn leader_send(&self, msg: Signed<validator::ConsensusMsg>) {
+ pub(crate) fn leader_send(&self, msg: validator::Signed<validator::ConsensusMsg>) {
self.leader_send.send(network::io::ConsensusReq {
msg,
ack: zksync_concurrency::oneshot::channel().0,
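
Note: the harness changes above are otherwise mechanical: the per-name imports (`SecretKey`, `Signed`, `ReplicaPrepare`, ...) are replaced by a single `use zksync_consensus_roles::validator;` with fully qualified paths, and the runner handed back to tests becomes `BlockStoreRunner` (the `TestMemoryStorageRunner` alias is gone). A hedged sketch of how a test consumes the new return value, mirroring the background-spawn pattern used in run.rs above; the test body is illustrative:

```rust
use zksync_concurrency::{ctx, scope};

async fn produce_block_with_runner(ctx: &ctx::Ctx) -> anyhow::Result<()> {
    scope::run!(ctx, |ctx, s| async {
        // new_many() now returns a BlockStoreRunner, which must keep
        // running in the background so queued blocks get persisted.
        let (mut util, runner) = UTHarness::new_many(ctx).await;
        s.spawn_bg(runner.run(ctx));
        util.produce_block_after_timeout(ctx).await;
        Ok(())
    })
    .await
}
```
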