diff --git a/Cargo.lock b/Cargo.lock
index 781dba880cbe..0112cc51188e 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -16176,6 +16176,7 @@ dependencies = [
  "sp-consensus-beefy",
  "sp-consensus-grandpa",
  "sp-core",
+ "sp-debug-derive 14.0.0",
  "sp-genesis-builder",
  "sp-inherents",
  "sp-io",
diff --git a/cumulus/zombienet/tests/0009-ct-shared-core.toml b/cumulus/zombienet/tests/0009-ct-shared-core.toml
new file mode 100644
index 000000000000..2a1383d16d7b
--- /dev/null
+++ b/cumulus/zombienet/tests/0009-ct-shared-core.toml
@@ -0,0 +1,48 @@
+[settings]
+timeout = 1000
+
+[relaychain.genesis.runtimeGenesis.patch.configuration.config.async_backing_params]
+  max_candidate_depth = 2
+  allowed_ancestry_len = 2
+
+[relaychain.genesis.runtimeGenesis.patch.configuration.config.scheduler_params]
+  max_validators_per_core = 1
+  lookahead = 2
+  num_cores = 4
+
+[relaychain.genesis.runtimeGenesis.patch.configuration.config.approval_voting_params]
+  needed_approvals = 3
+  max_approval_coalesce_count = 5
+
+[relaychain]
+default_image = "{{ZOMBIENET_INTEGRATION_TEST_IMAGE}}"
+chain = "rococo-local"
+command = "polkadot"
+
+  [[relaychain.nodes]]
+    name = "alice"
+    args = ["-lruntime=debug,parachain=trace,runtime::parachains::scheduler=trace,runtime::inclusion-inherent=trace,runtime::inclusion=trace,parachain::collator-protocol=trace"]
+
+  [[relaychain.node_groups]]
+    name = "validator"
+    args = ["-lruntime=debug,parachain=trace,runtime::parachains::scheduler=trace,runtime::inclusion-inherent=trace,runtime::inclusion=trace,parachain::collator-protocol=trace"]
+    count = 4
+
+{% for id in range(2000,2003) %}
+[[parachains]]
+id = {{id}}
+addToGenesis = true
+cumulus_based = true
+chain = "glutton-westend-local-{{id}}"
+  [parachains.genesis.runtimeGenesis.patch.glutton]
+    compute = "50000000"
+    storage = "2500000000"
+    trashDataCount = 5120
+
+  [parachains.collator]
+    name = "collator"
+    image = "{{CUMULUS_IMAGE}}"
+    command = "polkadot-parachain"
+    args = ["-lparachain=debug,aura::cumulus=trace,parachain::collator-protocol=trace"]
+
+{% endfor %}
\ No newline at end of file
diff --git a/cumulus/zombienet/tests/0009-ct-shared-core.zndsl b/cumulus/zombienet/tests/0009-ct-shared-core.zndsl
new file mode 100644
index 000000000000..b626eaea0366
--- /dev/null
+++ b/cumulus/zombienet/tests/0009-ct-shared-core.zndsl
@@ -0,0 +1,23 @@
+Description: CT shared core test
+Network: ./0009-ct-shared-core.toml
+Creds: config
+
+alice: is up
+collator: is up
+collator-1: is up
+
+# configure relay chain
+alice: js-script ./assign-core.js return is 0 within 600 seconds
+
+#collator-single-core: reports block height is at least 20 within 225 seconds
+#collator-elastic: reports block height is at least 20 within 225 seconds
+
+#collator-elastic: restart after 30 seconds
+#sleep 10 seconds
+#collator-single-core: restart after 30 seconds
+
+#collator-single-core: reports block height is at least 40 within 225 seconds
+#collator-elastic: reports block height is at least 80 within 225 seconds
+
+
+sleep 43200 seconds
\ No newline at end of file
diff --git a/cumulus/zombienet/tests/assign-core-parts.js b/cumulus/zombienet/tests/assign-core-parts.js
new file mode 100644
index 000000000000..a1653613698f
--- /dev/null
+++ b/cumulus/zombienet/tests/assign-core-parts.js
@@ -0,0 +1,49 @@
+// Assign a parachain to a core.
+//
+// First argument should be the parachain id.
+// Second argument should be the core.
+// Third argument should be PartsOf57600 assigned for the parachain id.
+async function run(nodeName, networkInfo, args) {
+  const { wsUri, userDefinedTypes } = networkInfo.nodesByName[nodeName];
+  const api = await zombie.connect(wsUri, userDefinedTypes);
+
+  let para = Number(args[0]);
+  let core = Number(args[1]);
+  let parts_of_57600 = Number(args[2]);
+
+  console.log(`Assigning para ${para} to core ${core} parts_of_57600 ${parts_of_57600}`);
+
+  await zombie.util.cryptoWaitReady();
+
+  // Submit transaction with the Alice account
+  const keyring = new zombie.Keyring({ type: "sr25519" });
+  const alice = keyring.addFromUri("//Alice");
+
+  // Wait for this transaction to be finalized in a block.
+  await new Promise(async (resolve, reject) => {
+    const unsub = await api.tx.sudo
+      .sudo(api.tx.coretime.assignCore(core, 0, [[{ task: para }, parts_of_57600]], null))
+      .signAndSend(alice, ({ status, isError }) => {
+        if (status.isInBlock) {
+          console.log(
+            `Transaction included at blockhash ${status.asInBlock}`,
+          );
+        } else if (status.isFinalized) {
+          console.log(
+            `Transaction finalized at blockHash ${status.asFinalized}`,
+          );
+          unsub();
+          return resolve();
+        } else if (isError) {
+          console.log(`Transaction error`);
+          reject(`Transaction error`);
+        }
+      });
+  });
+
+
+
+  return 0;
+}
+
+module.exports = { run };
diff --git a/cumulus/zombienet/tests/assign-core.js b/cumulus/zombienet/tests/assign-core.js
new file mode 100644
index 000000000000..d32ca13f90f0
--- /dev/null
+++ b/cumulus/zombienet/tests/assign-core.js
@@ -0,0 +1,59 @@
+// Assign a parachain to a core.
+async function run(nodeName, networkInfo, args) {
+  const { wsUri, userDefinedTypes } = networkInfo.nodesByName[nodeName];
+  const api = await zombie.connect(wsUri, userDefinedTypes);
+
+  await zombie.util.cryptoWaitReady();
+
+  // Submit transaction with the Alice account
+  const keyring = new zombie.Keyring({ type: "sr25519" });
+  const alice = keyring.addFromUri("//Alice");
+
+  // Wait for this transaction to be finalized in a block.
+  await new Promise(async (resolve, reject) => {
+    const unsub = await api.tx.sudo
+      .sudo(api.tx.system.killPrefix("0x638595eebaa445ce03a13547bece90e704e6ac775a3245623103ffec2cb2c92f", 10))
+      .signAndSend(alice, ({ status, isError }) => {
+        if (status.isInBlock) {
+          console.log(
+            `killPrefix transaction included at blockhash ${status.asInBlock}`,
+          );
+        } else if (status.isFinalized) {
+          console.log(
+            `killPrefix transaction finalized at blockHash ${status.asFinalized}`,
+          );
+          unsub();
+          return resolve();
+        } else if (isError) {
+          console.log(`killPrefix error`);
+          reject(`killPrefix error`);
+        }
+      });
+  });
+
+  // Wait for this transaction to be finalized in a block.
+  await new Promise(async (resolve, reject) => {
+    const unsub = await api.tx.sudo
+      .sudo(api.tx.coretime.assignCore(0, 0, [[{ task: 2000 }, 19200], [{ task: 2001 }, 19200], [{ task: 2002 }, 19200]], null))
+      .signAndSend(alice, ({ status, isError }) => {
+        if (status.isInBlock) {
+          console.log(
+            `assignCore transaction included at blockhash ${status.asInBlock}`,
+          );
+        } else if (status.isFinalized) {
+          console.log(
+            `assignCore transaction finalized at blockHash ${status.asFinalized}`,
+          );
+          unsub();
+          return resolve();
+        } else if (isError) {
+          console.log(`assignCore error`);
+          reject(`assignCore error`);
+        }
+      });
+  });
+
+  return 0;
+}
+
+module.exports = { run };
diff --git a/polkadot/node/core/prospective-parachains/src/lib.rs b/polkadot/node/core/prospective-parachains/src/lib.rs
index d5bb5ff76ba8..57aa2b84aefa 100644
--- a/polkadot/node/core/prospective-parachains/src/lib.rs
+++ b/polkadot/node/core/prospective-parachains/src/lib.rs
@@ -886,6 +886,7 @@ async fn fetch_upcoming_paras(
 	for core in cores {
 		match core {
 			CoreState::Occupied(occupied) => {
+				upcoming.insert(occupied.para_id());
 				if let Some(next_up_on_available) = occupied.next_up_on_available {
 					upcoming.insert(next_up_on_available.para_id);
 				}
diff --git a/polkadot/node/network/collator-protocol/src/collator_side/mod.rs b/polkadot/node/network/collator-protocol/src/collator_side/mod.rs
index 88375d583090..f2799293d7a1 100644
--- a/polkadot/node/network/collator-protocol/src/collator_side/mod.rs
+++ b/polkadot/node/network/collator-protocol/src/collator_side/mod.rs
@@ -587,7 +587,7 @@ async fn determine_cores(
 					if relay_parent_mode.is_enabled() {
 						// With async backing we don't care about the core state,
 						// it is only needed for figuring our validators group.
-						Some(occupied.candidate_descriptor.para_id)
+						occupied.next_up_on_available.as_ref().map(|c| c.para_id)
 					} else {
 						None
 					},
diff --git a/polkadot/node/network/collator-protocol/src/validator_side/mod.rs b/polkadot/node/network/collator-protocol/src/validator_side/mod.rs
index 9f037a983e51..e3720e89f0c4 100644
--- a/polkadot/node/network/collator-protocol/src/validator_side/mod.rs
+++ b/polkadot/node/network/collator-protocol/src/validator_side/mod.rs
@@ -498,7 +498,8 @@ where
 	let core_now = rotation_info.core_for_group(group, cores.len());
 
 	cores.get(core_now.0 as usize).and_then(|c| match c {
-		CoreState::Occupied(core) if relay_parent_mode.is_enabled() => Some(core.para_id()),
+		CoreState::Occupied(core) if relay_parent_mode.is_enabled() =>
+			core.next_up_on_available.as_ref().map(|c| c.para_id),
 		CoreState::Scheduled(core) => Some(core.para_id),
 		CoreState::Occupied(_) | CoreState::Free => None,
 	})
@@ -1709,6 +1710,7 @@ async fn run_inner(
 					target: LOG_TARGET,
 					?relay_parent,
 					?collator_id,
+					?maybe_candidate_hash,
 					"Timeout hit - already seconded?"
 				);
 				dequeue_next_collation_and_fetch(
diff --git a/polkadot/runtime/parachains/src/inclusion/mod.rs b/polkadot/runtime/parachains/src/inclusion/mod.rs
index 0c7274984085..6f8ddc2fbe21 100644
--- a/polkadot/runtime/parachains/src/inclusion/mod.rs
+++ b/polkadot/runtime/parachains/src/inclusion/mod.rs
@@ -615,6 +615,8 @@ impl<T: Config> Pallet<T> {
 	where
 		GV: Fn(GroupIndex) -> Option<Vec<ValidatorIndex>>,
 	{
+		log::trace!(target: LOG_TARGET, "Processing {} candidates {:?}", candidates.len(), candidates);
+
 		if candidates.is_empty() {
 			return Ok(ProcessedCandidates::default())
 		}
diff --git a/polkadot/runtime/parachains/src/paras_inherent/mod.rs b/polkadot/runtime/parachains/src/paras_inherent/mod.rs
index ac4cf5dc8d41..c9fd8ac23230 100644
--- a/polkadot/runtime/parachains/src/paras_inherent/mod.rs
+++ b/polkadot/runtime/parachains/src/paras_inherent/mod.rs
@@ -612,6 +612,9 @@ impl<T: Config> Pallet<T> {
 				scheduler::Pallet::<T>::group_validators,
 				core_index_enabled,
 			)?;
+
+		log::trace!(target: LOG_TARGET, "Processed candidates {:?}", candidate_receipt_with_backing_validator_indices.iter().map(|c| (c.0.descriptor.para_id, c.0.descriptor.relay_parent, c.0.descriptor.para_head)).collect::<Vec<_>>());
+
 		// Note which of the scheduled cores were actually occupied by a backed candidate.
 		scheduler::Pallet::<T>::occupied(occupied.into_iter().map(|e| (e.0, e.1)).collect());
 
diff --git a/polkadot/runtime/parachains/src/scheduler.rs b/polkadot/runtime/parachains/src/scheduler.rs
index baeec49839df..2487f68bc05b 100644
--- a/polkadot/runtime/parachains/src/scheduler.rs
+++ b/polkadot/runtime/parachains/src/scheduler.rs
@@ -350,6 +350,11 @@ impl<T: Config> Pallet<T> {
 	/// `AssignmentProvider`. A claim is considered expired if it's `ttl` field is lower than the
 	/// current block height.
 	fn drop_expired_claims_from_claimqueue() {
+		log::trace!(
+			target: LOG_TARGET,
+			"[drop_expired_claims_from_claimqueue] enter {:?}",
+			ClaimQueue::<T>::get(),
+		);
 		let now = frame_system::Pallet::<T>::block_number();
 		let availability_cores = AvailabilityCores::<T>::get();
 		let ttl = configuration::ActiveConfig::<T>::get().scheduler_params.ttl;
@@ -363,6 +368,12 @@ impl<T: Config> Pallet<T> {
 					while i < core_claimqueue.len() {
 						let maybe_dropped = if let Some(entry) = core_claimqueue.get(i) {
 							if entry.ttl < now {
+								log::trace!(
+									target: LOG_TARGET,
+									"[drop_expired_claims_from_claimqueue] Dropping expired claim from claimqueue; idx={} para_id={:?}",
+									i,
+									entry.para_id()
+								);
 								core_claimqueue.remove(i)
 							} else {
 								None
@@ -379,17 +390,40 @@ impl<T: Config> Pallet<T> {
 						}
 					}
 
+					log::trace!(
+						target: LOG_TARGET,
+						"[drop_expired_claims_from_claimqueue] After iterating the claimqueue for core_idx={:?}, num_dropped={:?}",
+						core_idx,
+						num_dropped
+					);
+
 					for _ in 0..num_dropped {
+						log::trace!(
+							target: LOG_TARGET,
+							"[drop_expired_claims_from_claimqueue] Attempting to pop a new entry for core_idx={:?}",
+							core_idx,
+						);
 						// For all claims dropped due to TTL, attempt to pop a new entry to
 						// the back of the claimqueue.
 						if let Some(assignment) =
 							T::AssignmentProvider::pop_assignment_for_core(core_idx)
 						{
+							log::trace!(
+								target: LOG_TARGET,
+								"[drop_expired_claims_from_claimqueue] Popped a new entry for core_idx={:?}",
+								core_idx,
+							);
 							core_claimqueue.push_back(ParasEntry::new(assignment, now + ttl));
 						}
 					}
 				}
 			}
+
+			log::trace!(
+				target: LOG_TARGET,
+				"[drop_expired_claims_from_claimqueue] before return {:?}",
+				ClaimQueue::<T>::get(),
+			);
 		});
 	}
 
@@ -537,6 +571,7 @@ impl<T: Config> Pallet<T> {
 
 	// on new session
 	fn push_claimqueue_items_to_assignment_provider() {
+		log::debug!(target: LOG_TARGET, "[push_claimqueue_items_to_assignment_provider] enter");
 		for (_, claim_queue) in ClaimQueue::<T>::take() {
 			// Push back in reverse order so that when we pop from the provider again,
 			// the entries in the claimqueue are in the same order as they are right now.
@@ -550,6 +585,7 @@ impl<T: Config> Pallet<T> {
 	/// timed out on availability before.
 	fn maybe_push_assignment(pe: ParasEntryType<T>) {
 		if pe.availability_timeouts == 0 {
+			log::debug!(target: LOG_TARGET, "[maybe_push_assignment] pushing {:?}", pe);
 			T::AssignmentProvider::push_back_assignment(pe.assignment);
 		}
 	}
@@ -566,6 +602,8 @@ impl<T: Config> Pallet<T> {
 		just_freed_cores: impl IntoIterator<Item = (CoreIndex, FreedReason)>,
 		now: BlockNumberFor<T>,
 	) {
+		log::debug!(target: LOG_TARGET, "[free_cores_and_fill_claimqueue] enter now: {:?}, cq: {:?}", now, ClaimQueue::<T>::get());
+
 		let (mut concluded_paras, mut timedout_paras) = Self::free_cores(just_freed_cores);
 
 		// This can only happen on new sessions at which we move all assignments back to the
@@ -586,6 +624,8 @@ impl<T: Config> Pallet<T> {
 
 			// add previously timedout paras back into the queue
 			if let Some(mut entry) = timedout_paras.remove(&core_idx) {
+				log::debug!(target: LOG_TARGET, "[free_cores_and_fill_claimqueue] timedout para: entry: {:?} core: {:?}", entry, core_idx);
+
 				if entry.availability_timeouts < max_availability_timeouts {
 					// Increment the timeout counter.
 					entry.availability_timeouts += 1;
@@ -594,6 +634,8 @@ impl<T: Config> Pallet<T> {
 					Self::add_to_claimqueue(core_idx, entry);
 					// The claim has been added back into the claimqueue.
 					// Do not pop another assignment for the core.
+					log::debug!(target: LOG_TARGET, "[free_cores_and_fill_claimqueue] claim has been added back into the claimqueue");
+
 					continue
 				} else {
 					// Consider timed out assignments for on demand parachains as concluded for
@@ -611,6 +653,8 @@ impl<T: Config> Pallet<T> {
 				if Self::is_core_occupied(core_idx) { 1 } else { 0 };
 			for _ in n_lookahead_used..n_lookahead {
 				if let Some(assignment) = T::AssignmentProvider::pop_assignment_for_core(core_idx) {
+					log::debug!(target: LOG_TARGET, "[free_cores_and_fill_claimqueue] will add to claim queue for core {:?}", core_idx);
+
 					Self::add_to_claimqueue(core_idx, ParasEntry::new(assignment, now + ttl));
 				}
 			}
@@ -618,6 +662,7 @@ impl<T: Config> Pallet<T> {
 
 		debug_assert!(timedout_paras.is_empty());
 		debug_assert!(concluded_paras.is_empty());
+		log::debug!(target: LOG_TARGET, "[free_cores_and_fill_claimqueue] before return now: {:?}, cq: {:?}", now, ClaimQueue::<T>::get());
 	}
 
 	fn is_core_occupied(core_idx: CoreIndex) -> bool {
@@ -628,6 +673,8 @@ impl<T: Config> Pallet<T> {
 	}
 
 	fn add_to_claimqueue(core_idx: CoreIndex, pe: ParasEntryType<T>) {
+		log::debug!(target: LOG_TARGET, "[add_to_claimqueue] core_idx {:?} pe {:?}", core_idx, pe);
+
 		ClaimQueue::<T>::mutate(|la| {
 			la.entry(core_idx).or_default().push_back(pe);
 		});
@@ -638,6 +685,7 @@ impl<T: Config> Pallet<T> {
 		core_idx: CoreIndex,
 		para_id: ParaId,
 	) -> Result<(PositionInClaimqueue, ParasEntryType<T>), &'static str> {
+		log::debug!(target: LOG_TARGET, "[remove_from_claimqueue] enter");
 		ClaimQueue::<T>::mutate(|cq| {
 			let core_claims = cq.get_mut(&core_idx).ok_or("core_idx not found in lookahead")?;
 
@@ -648,6 +696,8 @@ impl<T: Config> Pallet<T> {
 
 			let pe = core_claims.remove(pos).ok_or("remove returned None")?;
 
+			log::debug!(target: LOG_TARGET, "[remove_from_claimqueue] removed pos {}", pos);
+
 			Ok((pos as u32, pe))
 		})
 	}
diff --git a/polkadot/runtime/rococo/Cargo.toml b/polkadot/runtime/rococo/Cargo.toml
index c78f3e668b9c..544b69dfe700 100644
--- a/polkadot/runtime/rococo/Cargo.toml
+++ b/polkadot/runtime/rococo/Cargo.toml
@@ -110,6 +110,8 @@ xcm-executor = { package = "staging-xcm-executor", path = "../../xcm/xcm-executo
 xcm-builder = { package = "staging-xcm-builder", path = "../../xcm/xcm-builder", default-features = false }
 xcm-fee-payment-runtime-api = { path = "../../xcm/xcm-fee-payment-runtime-api", default-features = false }
 
+sp-debug-derive = { path = "../../../substrate/primitives/debug-derive", features = ["force-debug"] }
+
 [dev-dependencies]
 tiny-keccak = { version = "2.0.2", features = ["keccak"] }
 keyring = { package = "sp-keyring", path = "../../../substrate/primitives/keyring" }