ideal format for scheduling info
rphmeier committed Feb 17, 2022
1 parent fbcf9cc commit 1ce0981
Showing 2 changed files with 48 additions and 24 deletions.
3 changes: 1 addition & 2 deletions node/core/prospective-parachains/src/error.rs
@@ -106,8 +106,7 @@ impl NonFatal {
 	pub fn log(self) {
 		match self {
 			// don't spam the log with spurious errors
-			Self::RuntimeApi(_) =>
-				tracing::debug!(target: LOG_TARGET, error = ?self),
+			Self::RuntimeApi(_) => tracing::debug!(target: LOG_TARGET, error = ?self),
 			// it's worth reporting otherwise
 			_ => tracing::warn!(target: LOG_TARGET, error = ?self),
 		}
69 changes: 47 additions & 22 deletions node/core/prospective-parachains/src/lib.rs
@@ -53,8 +53,7 @@
 #![allow(unused)]
 
 use std::{
-	collections::{HashMap, HashSet},
-	collections::hash_map::Entry as HEntry,
+	collections::{hash_map::Entry as HEntry, HashMap, HashSet},
 	sync::Arc,
 };

@@ -70,9 +69,12 @@ use polkadot_node_subsystem_util::{
 	},
 	metrics::{self, prometheus},
 };
-use polkadot_primitives::vstaging::{Block, BlockId, CandidateHash, Hash, Header, Id as ParaId};
+use polkadot_primitives::vstaging::{
+	Block, BlockId, BlockNumber, CandidateHash, GroupIndex, GroupRotationInfo, Hash, Header,
+	Id as ParaId, SessionIndex, ValidatorIndex,
+};
 
-use crate::error::{Error, FatalResult, NonFatal, Result, NonFatalResult};
+use crate::error::{Error, FatalResult, NonFatal, NonFatalResult, Result};
 
 mod error;

@@ -99,10 +101,7 @@ impl FragmentTrees {
 		self.nodes.is_empty()
 	}
 
-	fn determine_relevant_fragments(
-		&self,
-		constraints: &Constraints,
-	) -> Vec<Hash> {
+	fn determine_relevant_fragments(&self, constraints: &Constraints) -> Vec<Hash> {
 		unimplemented!()
 	}

@@ -115,14 +114,13 @@ impl FragmentTrees {
 	fn remove_refcount(&mut self, fragment_hash: Hash) {
 		let node = match self.nodes.entry(fragment_hash) {
 			HEntry::Vacant(_) => return,
-			HEntry::Occupied(mut entry) => {
+			HEntry::Occupied(mut entry) =>
 				if entry.get().1 == 1 {
 					entry.remove().0
 				} else {
 					entry.get_mut().1 -= 1;
-					return;
-				}
-			}
+					return
+				},
 		};
 
 		if self.roots.remove(&fragment_hash) {
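
The restructured `HEntry::Occupied` arm above is behavior-preserving: the node is removed from the map only when its reference count drops from one, and the count is merely decremented otherwise. For reference, the same Entry-based refcount pattern as a small standalone sketch (hypothetical names, not the subsystem code):

use std::collections::{hash_map::Entry, HashMap};

// Decrement a reference count, removing the entry once the last reference
// is released. Returns the stored value only on the final release.
fn release<K: std::hash::Hash + Eq, V>(map: &mut HashMap<K, (V, usize)>, key: K) -> Option<V> {
	match map.entry(key) {
		Entry::Vacant(_) => None,
		Entry::Occupied(mut entry) =>
			if entry.get().1 == 1 {
				Some(entry.remove().0)
			} else {
				entry.get_mut().1 -= 1;
				None
			},
	}
}

fn main() {
	let mut map = HashMap::new();
	map.insert("fragment", ("node-data", 2usize));
	assert!(release(&mut map, "fragment").is_none()); // refcount 2 -> 1
	assert_eq!(release(&mut map, "fragment"), Some("node-data")); // last ref: entry removed
}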
@@ -142,16 +140,20 @@ struct FragmentNode {
 	fragment: Fragment,
 }
 
-// TODO [now] rename maybe
 impl FragmentNode {
 	fn relay_parent(&self) -> Hash {
 		self.fragment.relay_parent().hash
 	}
 }
 
+struct RelevantParaFragments {
+	para: ParaId,
+	base_constraints: Constraints,
+	relevant: HashSet<Hash>,
+}
+
 struct RelayBlockViewData {
-	// Relevant fragments for each parachain that is scheduled.
-	relevant_fragments: HashMap<ParaId, RelevantParaFragments>,
+	scheduling: HashMap<ParaId, RelevantParaFragments>,
 	block_info: RelayChainBlockInfo,
 	// TODO [now]: other stuff
 }
@@ -255,24 +257,28 @@
 				storage_root: new_header.state_root,
 			};
 
-			let all_parachains = get_all_parachains(ctx, *new_hash).await?;
+			let scheduling_info = get_scheduling_info(ctx, *new_hash).await?;
 
 			let mut relevant_fragments = HashMap::new();
-			for p in all_parachains {
-				let constraints = get_base_constraints(ctx, *new_hash, p).await?;
+			for core_info in scheduling_info.cores {
+				let constraints = get_base_constraints(ctx, *new_hash, core_info.para_id).await?;
 
 				// TODO [now]: determine relevant fragments according to constraints.
 				// TODO [now]: update ref counts in fragment trees
 			}
 
-			view.active_or_recent
-				.insert(*new_hash, RelayBlockViewData { relevant_fragments, block_info });
+			view.active_or_recent.insert(
+				*new_hash,
+				RelayBlockViewData { scheduling: relevant_fragments, block_info },
+			);
 		}
 	}
 
 	unimplemented!()
 }
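
The two `TODO [now]` markers inside the loop above are where the per-para work will land. A rough sketch of one possible shape for that loop body, assuming a hypothetical `fragment_trees: HashMap<ParaId, FragmentTrees>` field on the view (no such field exists in this commit, and the refcount update is elided):

for core_info in scheduling_info.cores {
	let constraints = get_base_constraints(ctx, *new_hash, core_info.para_id).await?;

	// Hypothetical: ask the para's fragment trees which fragments remain
	// valid under the new base constraints.
	let relevant: HashSet<Hash> = view
		.fragment_trees
		.get(&core_info.para_id)
		.map(|trees| trees.determine_relevant_fragments(&constraints).into_iter().collect())
		.unwrap_or_default();

	relevant_fragments.insert(
		core_info.para_id,
		RelevantParaFragments { para: core_info.para_id, base_constraints: constraints, relevant },
	);
}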

+// TODO [now]: don't accept too many fragments per para per relay-parent
+
 async fn get_base_constraints<Context>(
 	ctx: &mut Context,
 	relay_block: Hash,
@@ -285,10 +291,29 @@ where
 	unimplemented!()
 }
 
-async fn get_all_parachains<Context>(
+// Scheduling info:
+// - validator groups and group rotation info
+// - information about parachains that are predictably going to be assigned
+//   to each core. For now that's just parachains, but it's worth noting that
+//   parathread claims are anchored to a specific core.
+struct SchedulingInfo {
+	validator_groups: Vec<Vec<ValidatorIndex>>,
+	group_rotation_info: GroupRotationInfo,
+	// One core per parachain. This should have the same length as `validator_groups`.
+	cores: Vec<CoreInfo>,
+}
+
+struct CoreInfo {
+	para_id: ParaId,
+
+	// (candidate hash, hash, timeout_at), if any.
+	pending_availability: Option<(CandidateHash, Hash, BlockNumber)>,
+}
+
+async fn get_scheduling_info<Context>(
 	ctx: &mut Context,
 	relay_block: Hash,
-) -> NonFatalResult<Vec<ParaId>>
+) -> NonFatalResult<SchedulingInfo>
 where
 	Context: SubsystemContext<Message = ProspectiveParachainsMessage>,
 	Context: overseer::SubsystemContext<Message = ProspectiveParachainsMessage>,
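One property of the new SchedulingInfo shape: the validator group assigned to each core falls out directly from the group_rotation_info field. A minimal sketch of that lookup (not part of this commit), assuming CoreIndex is available from the same vstaging module and that GroupRotationInfo's group_for_core helper has its usual (core_index, n_cores) signature:

use polkadot_primitives::vstaging::{CoreIndex, GroupIndex, Id as ParaId};

// Pair each scheduled core with the validator group currently assigned to
// it. `group_for_core` applies the rotation, so the result changes as the
// rotation advances.
fn groups_per_core(info: &SchedulingInfo) -> Vec<(ParaId, GroupIndex)> {
	let n_cores = info.cores.len();
	info.cores
		.iter()
		.enumerate()
		.map(|(i, core)| {
			let group = info.group_rotation_info.group_for_core(CoreIndex(i as u32), n_cores);
			(core.para_id, group)
		})
		.collect()
}

This is presumably also why the comment on `cores` notes it should match `validator_groups` in length: the rotation permutes group indices across the cores.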
