Skip to content

Commit

Permalink
Merge #1671 #1673 #1683
Browse files Browse the repository at this point in the history
1671: test: disconnect check by all part r=u2,keroro520,quake a=driftluo

Disconnect check performed by all parts

1673: docs: Update PoW mining algorithm description r=u2,quake a=ashchan

Now that this has been decided, the docs should reflect it before the next round of mining tests.

1683: fix: Remove descendants of committed txs from pending pool r=quake,u2 a=keroro520

* fix(tx-pool): Remove descendants of committed txs from pending-pool
* test(tx_pool): Add case of handling propose

Co-authored-by: driftluo <driftluo@foxmail.com>
Co-authored-by: James Chen <james@ashchan.com>
Co-authored-by: keroro520 <keroroxx520@gmail.com>
  • Loading branch information
4 people authored Oct 10, 2019
4 parents 186539b + 1498d48 + 1946d89 + 1c08862 commit d274a37
Show file tree
Hide file tree
Showing 18 changed files with 408 additions and 61 deletions.
2 changes: 1 addition & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,7 @@ TBD.

## Mining

Mining on testnet Rylai is for testing purpose only, the mining algorithm **WILL BE CHANGED SOON**.
Testnet Rylai uses the [Eaglesong](https://github.com/nervosnetwork/rfcs/blob/master/rfcs/0010-eaglesong/0010-eaglesong.md) mining algorithm, which the mainnet is going to use too. Mining on testnet is for testing purposes only.

At this time Rylai will be **RESET** every two weeks.

Expand Down
6 changes: 6 additions & 0 deletions test/src/main.rs
Original file line number Diff line number Diff line change
Expand Up @@ -288,6 +288,12 @@ fn all_specs() -> SpecMap {
Box::new(ProposeButNotCommit),
Box::new(ProposeDuplicated),
Box::new(ForkedTransaction),
Box::new(HandlingDescendantsOfProposed),
Box::new(HandlingDescendantsOfCommitted),
Box::new(ProposeOutOfOrder),
Box::new(SubmitTransactionWhenItsParentInGap),
Box::new(SubmitTransactionWhenItsParentInProposed),
Box::new(ProposeTransactionButParentNot),
];
specs.into_iter().map(|spec| (spec.name(), spec)).collect()
}
Expand Down
2 changes: 1 addition & 1 deletion test/src/net.rs
Original file line number Diff line number Diff line change
Expand Up @@ -179,7 +179,7 @@ impl Net {
pub fn exit_ibd_mode(&self) -> BlockView {
let block = self.nodes[0].new_block(None, None, None);
self.nodes.iter().for_each(|node| {
node.submit_block(&block.data());
node.submit_block(&block);
});
block
}
Expand Down
6 changes: 3 additions & 3 deletions test/src/node.rs
Original file line number Diff line number Diff line change
Expand Up @@ -253,9 +253,9 @@ impl Node {
&self.rpc_client
}

pub fn submit_block(&self, block: &Block) -> Byte32 {
pub fn submit_block(&self, block: &BlockView) -> Byte32 {
self.rpc_client()
.submit_block("".to_owned(), block.clone().into())
.submit_block("".to_owned(), block.data().into())
.expect("submit_block failed")
}

Expand All @@ -271,7 +271,7 @@ impl Node {

// generate a new block and submit it through rpc.
pub fn generate_block(&self) -> Byte32 {
self.submit_block(&self.new_block(None, None, None).data())
self.submit_block(&self.new_block(None, None, None))
}

// Convenient way to construct an uncle block
Expand Down
4 changes: 2 additions & 2 deletions test/src/specs/mining/basic.rs
Original file line number Diff line number Diff line change
Expand Up @@ -64,7 +64,7 @@ impl MiningBasic {
// According to the first-received policy,
// the first block is always the best block
let rpc_client = node.rpc_client();
assert_eq!(block1.hash(), node.submit_block(&block1.data()));
assert_eq!(block1.hash(), node.submit_block(&block1));
assert_eq!(block1.hash(), rpc_client.get_tip_header().hash.pack());

let template1 = rpc_client.get_block_template(None, None, None);
Expand All @@ -76,7 +76,7 @@ impl MiningBasic {
"templates keep same since block template cache",
);

assert_eq!(block2.hash(), node.submit_block(&block2.data()));
assert_eq!(block2.hash(), node.submit_block(&block2));
assert_eq!(block1.hash(), rpc_client.get_tip_header().hash.pack());
let template3 = rpc_client.get_block_template(None, None, None);
assert_eq!(block1.hash(), template3.parent_hash.pack());
Expand Down
6 changes: 3 additions & 3 deletions test/src/specs/mining/fee.rs
Original file line number Diff line number Diff line change
Expand Up @@ -94,7 +94,7 @@ impl Spec for FeeOfMultipleMaxBlockProposalsLimit {

(0..multiple).for_each(|_| {
let block = node.new_block(None, None, None);
node.submit_block(&block.data());
node.submit_block(&block);
assert_eq!(
max_block_proposals_limit as usize,
block.union_proposal_ids_iter().count(),
Expand Down Expand Up @@ -136,7 +136,7 @@ impl Spec for ProposeButNotCommit {

// `target_node` propose `tx`
feed_blocks.iter().for_each(|block| {
target_node.submit_block(&block.data());
target_node.submit_block(&block);
});

// `target_node` keeps growing, but it will never commit `tx` since its transactions_pool
Expand Down Expand Up @@ -185,7 +185,7 @@ impl Spec for ProposeDuplicated {
.uncle(uncle2)
.build();
node.submit_transaction(tx);
node.submit_block(&block.data());
node.submit_block(&block);

let finalization_delay_length = node.consensus().finalization_delay_length();
node.generate_blocks(2 * finalization_delay_length as usize);
Expand Down
2 changes: 1 addition & 1 deletion test/src/specs/mining/size_limit.rs
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@ impl Spec for TemplateSizeLimit {

info!("Generate 1 block");
let blank_block = node.new_block(None, None, None);
node.submit_block(&blank_block.data());
node.submit_block(&blank_block);
let blank_block_size = blank_block.data().serialized_size_without_uncle_proposals();

info!("Generate 6 txs");
Expand Down
20 changes: 10 additions & 10 deletions test/src/specs/mining/uncle.rs
Original file line number Diff line number Diff line change
Expand Up @@ -29,11 +29,11 @@ impl Spec for UncleInheritFromForkBlock {
info!("(2) Force reorg, so that the parent of `uncle` become fork-block");
let longer_fork = (0..=target_node.get_tip_block_number()).map(|_| {
let block = feed_node.new_block(None, None, None);
feed_node.submit_block(&block.data());
feed_node.submit_block(&block);
block
});
longer_fork.for_each(|block| {
target_node.submit_block(&block.data());
target_node.submit_block(&block);
});

info!(
Expand All @@ -56,7 +56,7 @@ impl Spec for UncleInheritFromForkBlock {
.new_block_builder(None, None, None)
.set_uncles(vec![uncle.as_uncle()])
.build();
target_node.submit_block(&block.data());
target_node.submit_block(&block);
}
}

Expand All @@ -77,7 +77,7 @@ impl Spec for UncleInheritFromForkUncle {

info!("(1) Build a chain which embedded `uncle_parent` as an uncle");
let uncle_parent = construct_uncle(target_node);
target_node.submit_block(&uncle_parent.data());
target_node.submit_block(&uncle_parent);

let uncle_child = uncle_parent
.as_advanced_builder()
Expand All @@ -99,11 +99,11 @@ impl Spec for UncleInheritFromForkUncle {
info!("(2) Force reorg, so that `uncle_parent` become a fork-uncle");
let longer_fork = (0..=target_node.get_tip_block_number()).map(|_| {
let block = feed_node.new_block(None, None, None);
feed_node.submit_block(&block.data());
feed_node.submit_block(&block);
block
});
longer_fork.for_each(|block| {
target_node.submit_block(&block.data());
target_node.submit_block(&block);
});

info!("(3) Submit block with `uncle`, which is inherited from fork-uncle `uncle_parent`, should be failed");
Expand All @@ -124,7 +124,7 @@ impl Spec for UncleInheritFromForkUncle {
.new_block_builder(None, None, None)
.set_uncles(vec![uncle_child.as_uncle()])
.build();
target_node.submit_block(&block.data());
target_node.submit_block(&block);
}
}

Expand All @@ -151,7 +151,7 @@ impl Spec for PackUnclesIntoEpochStarting {
assert_eq!(current_epoch_end - 1, node.get_tip_block_number());

info!("(2) Submit the target uncle");
node.submit_block(&uncle.data());
node.submit_block(&uncle);

info!("(3) Expect the next mining block(CURRENT_EPOCH_END) contains the target uncle");
let block = node.new_block(None, None, None);
Expand All @@ -160,7 +160,7 @@ impl Spec for PackUnclesIntoEpochStarting {
// Clear the uncles in the next block, we don't want to pack the target `uncle` now.
info!("(4) Submit the next block with empty uncles");
let block_with_empty_uncles = block.as_advanced_builder().set_uncles(vec![]).build();
node.submit_block(&block_with_empty_uncles.data());
node.submit_block(&block_with_empty_uncles);

info!("(5) Expect the next mining block(NEXT_EPOCH_START) not contains the target uncle");
let block = node.new_block(None, None, None);
Expand All @@ -184,6 +184,6 @@ fn until_no_uncles_left(node: &Node) {
if block.uncles().into_iter().count() == 0 {
break;
}
node.submit_block(&block.data());
node.submit_block(&block);
}
}
17 changes: 6 additions & 11 deletions test/src/specs/relay/compact_block.rs
Original file line number Diff line number Diff line change
Expand Up @@ -110,8 +110,7 @@ impl Spec for CompactBlockPrefilled {
&node
.new_block_builder(None, None, None)
.proposal(new_tx.proposal_short_id())
.build()
.data(),
.build(),
);
node.generate_blocks(3);

Expand Down Expand Up @@ -155,8 +154,7 @@ impl Spec for CompactBlockMissingFreshTxs {
&node
.new_block_builder(None, None, None)
.proposal(new_tx.proposal_short_id())
.build()
.data(),
.build(),
);
node.generate_blocks(3);

Expand Down Expand Up @@ -214,8 +212,7 @@ impl Spec for CompactBlockMissingNotFreshTxs {
&node
.new_block_builder(None, None, None)
.proposal(new_tx.proposal_short_id())
.build()
.data(),
.build(),
);
node.generate_blocks(3);

Expand Down Expand Up @@ -266,8 +263,7 @@ impl Spec for CompactBlockLoseGetBlockTransactions {
&node0
.new_block_builder(None, None, None)
.proposal(new_tx.proposal_short_id())
.build()
.data(),
.build(),
);
// Proposal a tx, and grow up into proposal window
node0.generate_blocks(6);
Expand Down Expand Up @@ -304,7 +300,7 @@ impl Spec for CompactBlockLoseGetBlockTransactions {
);

// Submit the new block to node1. We expect node1 will relay the new block to node0.
node1.submit_block(&block.data());
node1.submit_block(&block);
node1.waiting_for_sync(node0, node1.get_tip_block().header().number());
}
}
Expand Down Expand Up @@ -333,8 +329,7 @@ impl Spec for CompactBlockRelayParentOfOrphanBlock {
&node
.new_block_builder(None, None, None)
.proposal(new_tx.proposal_short_id())
.build()
.data(),
.build(),
);
node.generate_blocks(6);

Expand Down
25 changes: 9 additions & 16 deletions test/src/specs/sync/block_sync.rs
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@ use crate::{Net, Node, Spec, TestProtocol};
use ckb_jsonrpc_types::ChainInfo;
use ckb_network::PeerIndex;
use ckb_sync::NetworkProtocol;
use ckb_types::packed::{Block, Byte32};
use ckb_types::packed::Byte32;
use ckb_types::{
core::BlockView,
packed::{self, SyncMessage},
Expand Down Expand Up @@ -72,8 +72,8 @@ impl Spec for BlockSyncWithUncle {
let new_block1 = new_builder.clone().nonce(0.pack()).build();
let new_block2 = new_builder.clone().nonce(1.pack()).build();

node1.submit_block(&new_block1.data());
node1.submit_block(&new_block2.data());
node1.submit_block(&new_block1);
node1.submit_block(&new_block2);

let uncle = if node1.get_tip_block() == new_block1 {
new_block2.as_uncle()
Expand All @@ -87,8 +87,7 @@ impl Spec for BlockSyncWithUncle {
&block_builder
.clone()
.set_uncles(vec![uncle.clone()])
.build()
.data(),
.build(),
);

target.connect(node1);
Expand Down Expand Up @@ -269,7 +268,7 @@ impl Spec for BlockSyncOrphanBlocks {
let mut blocks: Vec<BlockView> = (1..=5)
.map(|_| {
let block = node1.new_block(None, None, None);
node1.submit_block(&block.data());
node1.submit_block(&block);
block
})
.collect();
Expand Down Expand Up @@ -327,7 +326,7 @@ impl Spec for BlockSyncNonAncestorBestBlocks {
.timestamp((a.timestamp() + 1).pack())
.build();
assert_ne!(a.hash(), b.hash());
node1.submit_block(&b.data());
node1.submit_block(&b);

net.connect(node0);
let (peer_id, _, _) = net
Expand Down Expand Up @@ -381,14 +380,8 @@ impl Spec for RequestUnverifiedBlocks {
fork_chain.iter().for_each(|block| {
target_node.submit_block(block);
});
let main_hashes: Vec<_> = main_chain
.iter()
.map(|block| block.calc_header_hash())
.collect();
let fork_hashes: Vec<_> = fork_chain
.iter()
.map(|block| block.calc_header_hash())
.collect();
let main_hashes: Vec<_> = main_chain.iter().map(|block| block.hash()).collect();
let fork_hashes: Vec<_> = fork_chain.iter().map(|block| block.hash()).collect();

// Request for the blocks on `main_chain` and `fork_chain`. We should only receive the
// `main_chain` blocks
Expand Down Expand Up @@ -422,7 +415,7 @@ impl Spec for RequestUnverifiedBlocks {
}
}

fn build_forks(node: &Node, offsets: &[u64]) -> Vec<Block> {
fn build_forks(node: &Node, offsets: &[u64]) -> Vec<BlockView> {
let rpc_client = node.rpc_client();
let mut blocks = Vec::with_capacity(offsets.len());
for offset in offsets.iter() {
Expand Down
18 changes: 9 additions & 9 deletions test/src/specs/sync/chain_forks.rs
Original file line number Diff line number Diff line change
Expand Up @@ -295,7 +295,7 @@ impl Spec for ChainFork5 {
.build()
}
};
node1.submit_block(&block.data());
node1.submit_block(&block);
assert_eq!(15, node1.rpc_client().get_tip_block_number());
info!("Generate 1 blocks (F) with spent transaction on node1");
let block = node1.new_block(None, None, None);
Expand Down Expand Up @@ -589,8 +589,8 @@ impl Spec for ForksContainSameUncle {
node_b.generate_block();

info!("(2) Add `uncle` into different forks in node_a and node_b");
node_a.submit_block(&uncle.data());
node_b.submit_block(&uncle.data());
node_a.submit_block(&uncle);
node_b.submit_block(&uncle);
let block_a = node_a
.new_block_builder(None, None, None)
.set_uncles(vec![uncle.as_uncle()])
Expand All @@ -600,8 +600,8 @@ impl Spec for ForksContainSameUncle {
.set_uncles(vec![uncle.as_uncle()])
.timestamp((block_a.timestamp() + 2).pack())
.build();
node_a.submit_block(&block_a.data());
node_b.submit_block(&block_b.data());
node_a.submit_block(&block_a);
node_b.submit_block(&block_b);

info!("(3) Make node_b's fork longer(to help check whether is synchronized)");
node_b.generate_block();
Expand All @@ -626,8 +626,8 @@ impl Spec for ForkedTransaction {
let finalization_delay_length = node0.consensus().finalization_delay_length();
(0..=finalization_delay_length).for_each(|_| {
let block = node0.new_block(None, None, None);
node0.submit_block(&block.data());
node1.submit_block(&block.data());
node0.submit_block(&block);
node1.submit_block(&block);
});

net.exit_ibd_mode();
Expand Down Expand Up @@ -656,7 +656,7 @@ impl Spec for ForkedTransaction {
{
(fixed_point..=node1.get_tip_block_number()).for_each(|number| {
let block = node1.get_block_by_number(number);
node0.submit_block(&block.data());
node0.submit_block(&block);
});
let tx_status = node0.rpc_client().get_transaction(tx.hash());
assert!(tx_status.is_none(), "node0 maintains tx in unverified fork");
Expand All @@ -670,7 +670,7 @@ impl Spec for ForkedTransaction {
{
(fixed_point..=node0.get_tip_block_number()).for_each(|number| {
let block = node0.get_block_by_number(number);
node1.submit_block(&block.data());
node1.submit_block(&block);
});

let is_pending = |tx_status: &TransactionWithStatus| {
Expand Down
Loading

0 comments on commit d274a37

Please sign in to comment.