Skip to content
This repository has been archived by the owner on Nov 15, 2023. It is now read-only.

Runtime Task Executor + Example for staking slashing spans #8197

Closed
wants to merge 17 commits into from
Closed
4 changes: 2 additions & 2 deletions frame/staking/src/benchmarking.rs
Original file line number Diff line number Diff line change
Expand Up @@ -243,7 +243,7 @@ benchmarks! {

// all nominators now should be nominating our validator...
for n in nominator_stashes.iter() {
assert!(Nominators::<T>::get(n).unwrap().targets.contains(&stash));
assert!(Nominators::<T>::get(n).unwrap().targets.iter().map(|(t, _)| t).collect::<Vec<_>>().contains(&&stash));
kianenigma marked this conversation as resolved.
Show resolved Hide resolved
}

// we need the unlookuped version of the nominator stash for the kick.
Expand All @@ -256,7 +256,7 @@ benchmarks! {
verify {
// all nominators now should *not* be nominating our validator...
for n in nominator_stashes.iter() {
assert!(!Nominators::<T>::get(n).unwrap().targets.contains(&stash));
assert!(!Nominators::<T>::get(n).unwrap().targets.iter().map(|(t, _)| t).collect::<Vec<_>>().contains(&&stash));
kianenigma marked this conversation as resolved.
Show resolved Hide resolved
}
}

Expand Down
136 changes: 116 additions & 20 deletions frame/staking/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -282,6 +282,8 @@ pub mod offchain_election;
pub mod inflation;
pub mod weights;

mod scheduler;

use sp_std::{
result,
prelude::*,
Expand Down Expand Up @@ -613,7 +615,7 @@ impl<AccountId, Balance> StakingLedger<AccountId, Balance> where
#[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug)]
pub struct Nominations<AccountId> {
/// The targets of nomination.
pub targets: Vec<AccountId>,
pub targets: Vec<(AccountId, bool)>,
/// The era the nominations were submitted.
///
/// Except for initial nominations which are considered submitted at era 0.
Expand Down Expand Up @@ -910,6 +912,67 @@ impl Default for Releases {
}
}

use frame_support::{CloneNoBound, PartialEqNoBound, EqNoBound, RuntimeDebugNoBound};
/// A resumable runtime task that marks nominations targeting a slashed
/// validator as inactive.
///
/// The task stores a raw-storage cursor so the scan over nominators can be
/// chunked across blocks within a bounded weight budget.
#[derive(Encode, Decode, CloneNoBound, PartialEqNoBound, EqNoBound, RuntimeDebugNoBound)]
pub struct ChillTaskFull<T: Config> {
	// The slashed validator stash; matching nomination targets get deactivated.
	slashed: T::AccountId,
	// Raw storage key of the last processed entry — the resume cursor.
	// NOTE(review): iteration uses `sp_io::storage::next_key`, which walks the
	// whole trie rather than only the `Nominators` prefix — confirm how the
	// initial cursor is seeded and bounded.
	last_key: Vec<u8>,
}

impl<T: Config> Default for ChillTaskFull<T> where T::AccountId: Default {
fn default() -> Self {
Self {
slashed: Default::default(),
last_key: vec![],
}
}
}

impl<T: Config> scheduler::RuntimeTask for ChillTaskFull<T> {
	/// Perform up to `max_weight` worth of work, deactivating any nomination
	/// whose target equals `self.slashed`.
	///
	/// Returns the leftover task (when the scan is unfinished, carrying the
	/// updated cursor) together with the weight actually consumed.
	fn execute(mut self, max_weight: Weight) -> (Option<Self>, Weight) {
		// One storage read plus one write per processed nominator entry.
		let weight_per_iteration = <T as frame_system::Config>::DbWeight::get().reads_writes(1, 1);
		// Guard the divisor so a (misconfigured) zero db-weight cannot panic here.
		let max_iterations = max_weight / weight_per_iteration.max(1);
		let mut iterated: Weight = 0;

		while let Some(next_key) = sp_io::storage::next_key(self.last_key.as_ref()) {
			// `>=` keeps us within the budget; the previous `>` comparison allowed
			// one extra iteration beyond `max_iterations`.
			if iterated >= max_iterations {
				break;
			}

			// TODO(review): `next_key` walks the *entire* trie, not just the
			// `Nominators` prefix, so this decode can fail (and panic) once the
			// cursor leaves the map. The initial `last_key` and a prefix bound
			// need to be confirmed before this can ship.
			let mut next_nominations: Nominations<T::AccountId> =
				frame_support::storage::unhashed::get(&next_key).unwrap();
			// Cursor edge cases to consider: 1- a key is added before the current
			// cursor: don't care. 2- a key is removed before the current cursor:
			// don't care. 3- a key is added after the current cursor: worth having
			// a test, but no biggie. 4- a key is removed after the current cursor:
			// worth having a test, but no biggie. 5- the current key is deleted:
			// then the next_key must be updated.

			// Deactivate (rather than remove) nominations of the slashed account,
			// so they can be re-activated by a fresh `nominate` call.
			next_nominations.targets.iter_mut().for_each(|(target, active)| {
				if target == &self.slashed {
					*active = false;
				}
			});

			frame_support::storage::unhashed::put(&next_key, &next_nominations);

			iterated = iterated.saturating_add(1);
			self.last_key = next_key;
		}

		let consumed_weight = weight_per_iteration.saturating_mul(iterated);
		// To avoid edge cases, simply check one last time whether any further key
		// remains; if so, hand the task (with its cursor) back to the scheduler.
		let leftover_task = if sp_io::storage::next_key(self.last_key.as_ref()).is_some() {
			Some(self)
		} else {
			None
		};

		(leftover_task, consumed_weight)
	}
}

decl_storage! {
trait Store for Module<T: Config> as Staking {
/// Number of eras to keep in history.
Expand Down Expand Up @@ -1066,6 +1129,9 @@ decl_storage! {
/// The earliest era for which we have a pending, unapplied slash.
EarliestUnappliedSlash: Option<EraIndex>;

/// Scheduler holding pending nominator chill tasks (`ChillTaskFull`), to be executed incrementally within a bounded weight budget.
NominatorChillTasks: scheduler::RuntimeTaskScheduler<ChillTaskFull<T>>;

/// Snapshot of validators at the beginning of the current election window. This should only
/// have a value when [`EraElectionStatus`] == `ElectionStatus::Open(_)`.
pub SnapshotValidators get(fn snapshot_validators): Option<Vec<T::AccountId>>;
Expand Down Expand Up @@ -1151,6 +1217,40 @@ pub mod migrations {
ErasValidatorPrefs::<T>::translate::<OldValidatorPrefs, _>(|_, _, p| Some(p.upgraded()));
T::BlockWeights::get().max_block
}

/// Pre-migration layout of a `Nominations` storage entry, in which targets
/// carried no active/inactive flag. Decode-only: consumed by
/// `migrate_to_reversable_slash` when translating old entries.
#[derive(Decode)]
struct OldNominations<AccountId> {
	// The nominated validator stashes (no activity flag in the old format).
	targets: Vec<AccountId>,
	// The era in which the nominations were submitted.
	submitted_in: EraIndex,
	// The old `suppressed` flag, carried over unchanged by the migration.
	suppressed: bool,
}

pub fn migrate_to_reversable_slash<T: Config>() -> frame_support::weights::Weight {
Nominators::<T>::translate::<OldNominations<T::AccountId>, _>(|_, nomination| {
let targets = nomination
.targets
.iter()
.map(|target| {
(
target.clone(),
<SlashingSpans<T>>::get(&target).map_or(true, |spans| {
nomination.submitted_in >= spans.last_nonzero_slash()
}),
/* active if no slashing span, or if submitted after the last nonzero
* slash. */
)
})
.collect::<Vec<_>>();
Some(Nominations {
targets,
suppressed: nomination.suppressed,
submitted_in: nomination.submitted_in,
})
// TODO: any reason to keep the submitted_in around? I don't think so.
});

T::BlockWeights::get().max_block
}
}

decl_event!(
Expand Down Expand Up @@ -1360,6 +1460,9 @@ decl_module! {
add_weight(3, 0, 0);
// Additional read from `on_finalize`
add_weight(1, 0, 0);

let _weight = <NominatorChillTasks<T>>::mutate(|task_scheduler| task_scheduler.execute(1000));
kianenigma marked this conversation as resolved.
Show resolved Hide resolved

consumed_weight
}

Expand Down Expand Up @@ -1721,16 +1824,16 @@ decl_module! {
ensure!(!targets.is_empty(), Error::<T>::EmptyTargets);
ensure!(targets.len() <= MAX_NOMINATIONS, Error::<T>::TooManyTargets);

let old = Nominators::<T>::get(stash).map_or_else(Vec::new, |x| x.targets);
let old = Nominators::<T>::get(stash).map_or_else(Vec::new, |x| x.targets.into_iter().map(|(x, _)| x).collect::<Vec<_>>());

let targets = targets.into_iter()
.map(|t| T::Lookup::lookup(t).map_err(DispatchError::from))
.map(|n| n.and_then(|n| if old.contains(&n) || !Validators::<T>::get(&n).blocked {
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

same note about allocations here

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Not sure how it can be avoided here?

Ok(n)
Ok((n, true)) // all newly submitted nominations are active again TODO: test for this.
} else {
Err(Error::<T>::BadTarget.into())
}))
.collect::<result::Result<Vec<T::AccountId>, _>>()?;
.collect::<result::Result<Vec<(T::AccountId, bool)>, _>>()?;

let nominations = Nominations {
targets,
Expand Down Expand Up @@ -2247,7 +2350,7 @@ decl_module! {
.into_iter()
{
Nominators::<T>::mutate(&nom_stash, |maybe_nom| if let Some(ref mut nom) = maybe_nom {
if let Some(pos) = nom.targets.iter().position(|v| v == stash) {
if let Some(pos) = nom.targets.iter().position(|(v, _)| v == stash) {
nom.targets.swap_remove(pos);
Self::deposit_event(RawEvent::Kicked(nom_stash.clone(), stash.clone()));
}
Expand Down Expand Up @@ -2659,16 +2762,9 @@ impl<T: Config> Module<T> {
for (t, _) in distribution {
// each target in the provided distribution must be actually nominated by the
// nominator after the last non-zero slash.
if nomination.targets.iter().find(|&tt| tt == t).is_none() {
if nomination.targets.iter().find(|&(tt, _)| tt == t).map_or(false, |(_, active)| *active) {
return Err(Error::<T>::OffchainElectionBogusNomination.into());
}

if <Self as Store>::SlashingSpans::get(&t).map_or(
false,
|spans| nomination.submitted_in < spans.last_nonzero_slash(),
) {
return Err(Error::<T>::OffchainElectionSlashedNomination.into());
}
}
} else {
// a self vote
Expand Down Expand Up @@ -3003,16 +3099,15 @@ impl<T: Config> Module<T> {
}

let nominator_votes = <Nominators<T>>::iter().map(|(nominator, nominations)| {
let Nominations { submitted_in, mut targets, suppressed: _ } = nominations;
let Nominations { targets: targets_unfiltered, suppressed: _, submitted_in: _ } =
nominations;

// Filter out nomination targets which were nominated before the most recent
// slashing span.
targets.retain(|stash| {
<Self as Store>::SlashingSpans::get(&stash).map_or(
true,
|spans| submitted_in >= spans.last_nonzero_slash(),
)
});
let targets = targets_unfiltered
.into_iter()
.filter_map(|(stash, active)| if active { Some(stash) } else { None })
.collect::<Vec<_>>();

(nominator, targets)
});
Expand Down Expand Up @@ -3373,6 +3468,7 @@ for Module<T> where
continue
}

// TODO: add a task to `NominatorChillTasks`.
let unapplied = slashing::compute_slash::<T>(slashing::SlashParams {
stash,
slash: *slash_fraction,
Expand Down
4 changes: 2 additions & 2 deletions frame/staking/src/mock.rs
Original file line number Diff line number Diff line change
Expand Up @@ -782,7 +782,7 @@ pub(crate) fn horrible_npos_solution(

// add nominator stuff
<Nominators<Test>>::iter().for_each(|(who, nomination)| {
nomination.targets.iter().for_each(|v| {
nomination.targets.iter().for_each(|(v, _)| {
*backing_stake_of.entry(*v).or_insert(Zero::zero()) +=
Staking::slashable_balance_of(&who)
})
Expand All @@ -801,7 +801,7 @@ pub(crate) fn horrible_npos_solution(
let mut staked_assignment: Vec<StakedAssignment<AccountId>> = Vec::new();
<Nominators<Test>>::iter().for_each(|(who, nomination)| {
let mut dist: Vec<(AccountId, ExtendedBalance)> = Vec::new();
nomination.targets.iter().for_each(|v| {
nomination.targets.iter().for_each(|(v, _)| {
if winners.iter().find(|w| *w == v).is_some() {
dist.push((*v, ExtendedBalance::zero()));
}
Expand Down
Loading