From 34a8194bfa76ceaa154c9f72b0dd06631fecabbd Mon Sep 17 00:00:00 2001 From: kianenigma Date: Wed, 17 Jun 2020 09:59:11 +0200 Subject: [PATCH 1/8] add normalize --- frame/staking/src/offchain_election.rs | 3 +- frame/staking/src/testing_utils.rs | 7 +- primitives/arithmetic/fuzzer/Cargo.toml | 4 + primitives/arithmetic/fuzzer/src/normalize.rs | 63 +++ .../fuzzer/src/per_thing_rational.rs | 2 +- primitives/arithmetic/src/lib.rs | 310 ++++++++++++++- primitives/arithmetic/src/per_things.rs | 16 +- primitives/npos-elections/benches/phragmen.rs | 4 +- primitives/npos-elections/src/helpers.rs | 57 ++- primitives/npos-elections/src/lib.rs | 120 +++--- primitives/npos-elections/src/tests.rs | 366 +++++++++++------- primitives/runtime/src/lib.rs | 2 +- test-utils/src/lib.rs | 2 +- 13 files changed, 739 insertions(+), 217 deletions(-) create mode 100644 primitives/arithmetic/fuzzer/src/normalize.rs diff --git a/frame/staking/src/offchain_election.rs b/frame/staking/src/offchain_election.rs index 23453e0524aa4..79f3a5c2d94fe 100644 --- a/frame/staking/src/offchain_election.rs +++ b/frame/staking/src/offchain_election.rs @@ -203,7 +203,8 @@ pub fn prepare_submission( } // Convert back to ratio assignment. This takes less space. - let low_accuracy_assignment = sp_npos_elections::assignment_staked_to_ratio(staked); + let low_accuracy_assignment = sp_npos_elections::assignment_staked_to_ratio_normalized(staked) + .map_err(|e| OffchainElectionError::from(e))?; // convert back to staked to compute the score in the receiver's accuracy. This can be done // nicer, for now we do it as such since this code is not time-critical. This ensure that the diff --git a/frame/staking/src/testing_utils.rs b/frame/staking/src/testing_utils.rs index 86d137ac30aba..a73073bb1fcb9 100644 --- a/frame/staking/src/testing_utils.rs +++ b/frame/staking/src/testing_utils.rs @@ -201,11 +201,8 @@ pub fn get_weak_solution( }; // convert back to ratio assignment. This takes less space. 
- let low_accuracy_assignment: Vec> = - staked_assignments - .into_iter() - .map(|sa| sa.into_assignment(true)) - .collect(); + let low_accuracy_assignment = assignment_staked_to_ratio_normalized(staked_assignments) + .expect("Failed to normalize"); // re-calculate score based on what the chain will decode. let score = { diff --git a/primitives/arithmetic/fuzzer/Cargo.toml b/primitives/arithmetic/fuzzer/Cargo.toml index a37ab876ef7f1..b6bbe3d8a6769 100644 --- a/primitives/arithmetic/fuzzer/Cargo.toml +++ b/primitives/arithmetic/fuzzer/Cargo.toml @@ -24,6 +24,10 @@ num-traits = "0.2" name = "biguint" path = "src/biguint.rs" +[[bin]] +name = "normalize" +path = "src/normalize.rs" + [[bin]] name = "per_thing_rational" path = "src/per_thing_rational.rs" diff --git a/primitives/arithmetic/fuzzer/src/normalize.rs b/primitives/arithmetic/fuzzer/src/normalize.rs new file mode 100644 index 0000000000000..079cc51f22f8e --- /dev/null +++ b/primitives/arithmetic/fuzzer/src/normalize.rs @@ -0,0 +1,63 @@ +// This file is part of Substrate. + +// Copyright (C) 2020 Parity Technologies (UK) Ltd. +// SPDX-License-Identifier: Apache-2.0 + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + + +//! # Running +//! Running this fuzzer can be done with `cargo hfuzz run normalize`. `honggfuzz` CLI options can +//! be used by setting `HFUZZ_RUN_ARGS`, such as `-n 4` to use 4 threads. +//! +//! # Debugging a panic +//! Once a panic is found, it can be debugged with +//! 
`cargo hfuzz run-debug normalize hfuzz_workspace/normalize/*.fuzz`. + +use honggfuzz::fuzz; +use sp_arithmetic::Normalizable; +use std::convert::TryInto; + +fn main() { + let sum_limit = u32::max_value() as u128; + let len_limit: usize = u32::max_value().try_into().unwrap(); + + loop { + fuzz!(|data: (Vec, u32)| { + let (data, norm) = data; + let pre_sum = data.iter().fold(0u128, |acc, x| acc + *x as u128); + + let normalized = data.normalize(norm); + // error cases. + if pre_sum > sum_limit || data.len() > len_limit { + assert!(normalized.is_err()) + } else { + if let Ok(normalized) = normalized { + // if this function returns Ok(), then it will ALWAYS be accurate. + if data.len() > 0 { + + // if sum goes beyond u128, panic. + let sum = normalized.iter().fold(0u128, |acc, x| acc + *x as u128); + assert_eq!( + sum, + norm as u128, + "sums don't match {:?}, {}", + normalized, + norm, + ); + } + } + } + }) + } +} diff --git a/primitives/arithmetic/fuzzer/src/per_thing_rational.rs b/primitives/arithmetic/fuzzer/src/per_thing_rational.rs index 0820a35100aee..fc22eacc9e499 100644 --- a/primitives/arithmetic/fuzzer/src/per_thing_rational.rs +++ b/primitives/arithmetic/fuzzer/src/per_thing_rational.rs @@ -114,7 +114,7 @@ fn main() { } } -fn assert_per_thing_equal_error(a: T, b: T, err: u128) { +fn assert_per_thing_equal_error(a: P, b: P, err: u128) { let a_abs = a.deconstruct().saturated_into::(); let b_abs = b.deconstruct().saturated_into::(); let diff = a_abs.max(b_abs) - a_abs.min(b_abs); diff --git a/primitives/arithmetic/src/lib.rs b/primitives/arithmetic/src/lib.rs index 9fdfe4b5e1557..a551bc4c31dd8 100644 --- a/primitives/arithmetic/src/lib.rs +++ b/primitives/arithmetic/src/lib.rs @@ -41,10 +41,11 @@ mod fixed_point; mod rational128; pub use fixed_point::{FixedPointNumber, FixedPointOperand, FixedI64, FixedI128, FixedU128}; -pub use per_things::{PerThing, InnerOf, Percent, PerU16, Permill, Perbill, Perquintill}; +pub use per_things::{PerThing, InnerOf, UpperOf, 
Percent, PerU16, Permill, Perbill, Perquintill}; pub use rational128::Rational128; -use sp_std::cmp::Ordering; +use sp_std::{prelude::*, cmp::Ordering, fmt::Debug, convert::TryInto}; +use traits::{BaseArithmetic, One, Zero}; /// Trait for comparing two numbers with an threshold. /// @@ -85,8 +86,311 @@ where } } +/// A collection-like object that is made of values of type `T` and can normalize its individual +/// values around a centric point. +pub trait Normalizable { + /// Normalize self around `t_max`. + /// + /// Only returns `Ok` if the new sum of results is guaranteed to be equal to `t_max`. Else, + /// returns an error explaining why it failed to do so. + fn normalize(&self, t_max: T) -> Result, &'static str>; +} + +impl Normalizable for Vec { + fn normalize(&self, t_max: T) -> Result, &'static str> { + normalize(self.as_ref(), t_max) + } +} + +/// Normalize `input` so that the sum of all elements reaches `t_max`. +/// +/// This implementation is currently in a balanced position between being performant and accurate. +/// +/// 1. We prefer storing original indices, and sorting the `input` only once. This will save the +/// cost of sorting per round at the cost of a little bit of memory. +/// 2. The granularity of increment/decrements is determined by the difference and the number of +/// elements in `input` and their sum difference with `t_max`. In short, the implementation is +/// very likely to over-increment/decrement in case the `diff / input.len()` is rather small. +/// +/// This function can return an error is if `T` cannot be built from the size of `input`, or if +/// `sum(input)` cannot fit inside `T`. Moreover, if any of the internal operations saturate, it +/// will also return en `Err`. +/// +/// Based on the above, the best use case of the function will be only to correct small rounding +/// errors. 
If the difference of the elements is vert large, then the subtraction/addition of one +/// element with `diff / input.len()` might easily saturate and results will be `Err`. +pub fn normalize(input: &[T], t_max: T) -> Result, &'static str> + where T: Clone + Copy + Ord + BaseArithmetic + Debug, +{ + let mut sum = T::zero(); + for t in input.iter() { + sum = sum.checked_add(t).ok_or("sum of input cannot fit in `T`")?; + } + let count = input.len(); + + // early exit if needed. + if count.is_zero() { + return Ok(Vec::::new()); + } + + let diff = t_max.max(sum) - t_max.min(sum); + if diff.is_zero() { + return Ok(input.to_vec()); + } + + let needs_bump = t_max > sum; + let per_round = diff / count.try_into().map_err(|_| "failed to build T from usize")?; + let mut leftover = diff % count.try_into().map_err(|_| "failed to build T from usize")?; + + let mut output_with_idx = input.iter().cloned().enumerate().collect::>(); + // sort output once based on diff. This will require more data transfer and saving original + // index, but we sort only twice instead: once now and once at the very end. + output_with_idx.sort_unstable_by_key(|x| x.1); + + if needs_bump { + // must increase the values a bit. Bump from the min element. Index of minimum is now zero + // because we did a sort. If at any point the min goes greater or equal the `max_threshold`, + // we move to the next minimum. + let mut min_index = 0; + // at this threshold we move to next index. 
+ let threshold = output_with_idx + .last() + .expect("length of input is greater than zero; it must have a last; qed").1; + + if !per_round.is_zero() { + for _ in 0..count { + output_with_idx[min_index].1 = output_with_idx[min_index].1 + .checked_add(&per_round) + .ok_or("Failed to add.")?; + if output_with_idx[min_index].1 >= threshold { + min_index += 1; + min_index = min_index % count; + } + } + } + + // continue with the previous min_index + while !leftover.is_zero() { + output_with_idx[min_index].1 = output_with_idx[min_index].1 + .checked_add(&One::one()) + .ok_or("Failed to add.")?; + if output_with_idx[min_index].1 >= threshold { + min_index += 1; + min_index = min_index % count; + } + leftover = leftover.saturating_sub(One::one()); + } + } else { + // must decrease the stakes a bit. decrement from the max element. index of maximum is now + // last. if at any point the max goes less or equal the `min_threshold`, we move to the next + // maximum. + let mut max_index = count - 1; + // at this threshold we move to next index. 
+ let threshold = output_with_idx + .first() + .expect("length of input is greater than zero; it must have a fist; qed").1; + + if !per_round.is_zero() { + for _ in 0..count { + output_with_idx[max_index].1 = output_with_idx[max_index].1 + .checked_sub(&per_round) + .ok_or("Failed to subtract.")?; + if output_with_idx[max_index].1 <= threshold { + max_index = max_index.checked_sub(1).unwrap_or(count - 1); + } + } + } + + // continue with the previous max_index + while !leftover.is_zero() { + output_with_idx[max_index].1 = output_with_idx[max_index].1 + .checked_sub(&One::one()) + .ok_or("Failed to subtract.")?; + if output_with_idx[max_index].1 <= threshold { + max_index = max_index.checked_sub(1).unwrap_or(count - 1); + } + leftover = leftover.saturating_sub(One::one()); + } + } + + debug_assert_eq!( + output_with_idx.iter().fold(T::zero(), |acc, (_, x)| acc + *x), + t_max, + "sum({:?}) != {:?}", + output_with_idx, + t_max, + ); + + // sort again based on the original index. + output_with_idx.sort_unstable_by_key(|x| x.0); + Ok(output_with_idx.into_iter().map(|(_, t)| t).collect()) +} + +#[cfg(test)] +mod normalize_tests { + use super::*; + + #[test] + fn work_for_all_types() { + macro_rules! test_for { + ($type:ty) => { + assert_eq!( + normalize(vec![8 as $type, 9, 7, 10].as_ref(), 40).unwrap(), + vec![10, 10, 10, 10], + ); + } + } + // it should work for all types as long as the length of vector can be converted to T. 
+ test_for!(u128); + test_for!(u64); + test_for!(u32); + test_for!(u16); + test_for!(u8); + } + + #[test] + fn fails_on_large_input() { + assert!(normalize(vec![1u8; 255].as_ref(), 10).is_ok()); + assert!(normalize(vec![10u8; 256].as_ref(), 10).is_err()); + } + + #[test] + fn works_for_vec() { + use super::Normalizable; + assert_eq!(vec![8u32, 9, 7, 10].normalize(40).unwrap(), vec![10u32, 10, 10, 10]); + } + + #[test] + fn works_for_per_thing() { + assert_eq!( + vec![ + Perbill::from_percent(33), + Perbill::from_percent(33), + Perbill::from_percent(33) + ].normalize(Perbill::one()).unwrap(), + vec![ + Perbill::from_parts(333333334), + Perbill::from_parts(333333333), + Perbill::from_parts(333333333), + ] + ); + + assert_eq!( + vec![ + Perbill::from_percent(20), + Perbill::from_percent(15), + Perbill::from_percent(30) + ].normalize(Perbill::one()).unwrap(), + vec![ + Perbill::from_parts(316666666), + Perbill::from_parts(383333333), + Perbill::from_parts(300000001), + ] + ); + } + + #[test] + fn can_work_for_peru16() { + // Peru16 is a rather special case; since inner type is exactly the same as capacity, we + // could have a situation where the sum cannot be calculated in the inner type. Calculating + // using the upper type of the per_thing should assure this to be okay. 
+ assert_eq!( + vec![ + PerU16::from_percent(40), + PerU16::from_percent(40), + PerU16::from_percent(40), + ].normalize(PerU16::one()).unwrap(), + vec![ + PerU16::from_parts(21845), // 33% + PerU16::from_parts(21845), // 33% + PerU16::from_parts(21845), // 33% + ] + ); + } + + #[test] + fn normalize_works_all_le() { + assert_eq!( + normalize(vec![8u32, 9, 7, 10].as_ref(), 40).unwrap(), + vec![10, 10, 10, 10], + ); + + assert_eq!( + normalize(vec![7u32, 7, 7, 7].as_ref(), 40).unwrap(), + vec![10, 10, 10, 10], + ); + + assert_eq!( + normalize(vec![7u32, 7, 7, 10].as_ref(), 40).unwrap(), + vec![11, 11, 8, 10], + ); + + assert_eq!( + normalize(vec![7u32, 8, 7, 10].as_ref(), 40).unwrap(), + vec![11, 8, 11, 10], + ); + + assert_eq!( + normalize(vec![7u32, 7, 8, 10].as_ref(), 40).unwrap(), + vec![11, 11, 8, 10], + ); + } + + #[test] + fn normalize_works_some_ge() { + assert_eq!( + normalize(vec![8u32, 11, 9, 10].as_ref(), 40).unwrap(), + vec![10, 11, 9, 10], + ); + } + + #[test] + fn always_inc_min() { + assert_eq!( + normalize(vec![10u32, 7, 10, 10].as_ref(), 40).unwrap(), + vec![10, 10, 10, 10], + ); + assert_eq!( + normalize(vec![10u32, 10, 7, 10].as_ref(), 40).unwrap(), + vec![10, 10, 10, 10], + ); + assert_eq!( + normalize(vec![10u32, 10, 10, 7].as_ref(), 40).unwrap(), + vec![10, 10, 10, 10], + ); + } + + #[test] + fn normalize_works_all_ge() { + assert_eq!( + normalize(vec![12u32, 11, 13, 10].as_ref(), 40).unwrap(), + vec![10, 10, 10, 10], + ); + + assert_eq!( + normalize(vec![13u32, 13, 13, 13].as_ref(), 40).unwrap(), + vec![10, 10, 10, 10], + ); + + assert_eq!( + normalize(vec![13u32, 13, 13, 10].as_ref(), 40).unwrap(), + vec![12, 9, 9, 10], + ); + + assert_eq!( + normalize(vec![13u32, 12, 13, 10].as_ref(), 40).unwrap(), + vec![9, 12, 9, 10], + ); + + assert_eq!( + normalize(vec![13u32, 13, 12, 10].as_ref(), 40).unwrap(), + vec![9, 9, 12, 10], + ); + } +} + #[cfg(test)] -mod tests { +mod threshold_compare_tests { use super::*; use crate::traits::Saturating; use 
sp_std::cmp::Ordering; diff --git a/primitives/arithmetic/src/per_things.rs b/primitives/arithmetic/src/per_things.rs index 50b87d5076e9e..b6eb9be3cfe7e 100644 --- a/primitives/arithmetic/src/per_things.rs +++ b/primitives/arithmetic/src/per_things.rs @@ -23,11 +23,15 @@ use codec::{Encode, CompactAs}; use crate::traits::{ SaturatedConversion, UniqueSaturatedInto, Saturating, BaseArithmetic, Bounded, Zero, }; +use crate::{Normalizable, normalize}; use sp_debug_derive::RuntimeDebug; /// Get the inner type of a `PerThing`. pub type InnerOf
<P> = <P as PerThing>
::Inner; +/// Get the upper type of a `PerThing`. +pub type UpperOf
<P> = <P as PerThing>
::Upper; + /// Something that implements a fixed point ration with an arbitrary granularity `X`, as _parts per /// `X`_. pub trait PerThing: @@ -38,7 +42,9 @@ pub trait PerThing: /// A data type larger than `Self::Inner`, used to avoid overflow in some computations. /// It must be able to compute `ACCURACY^2`. - type Upper: BaseArithmetic + Copy + From + TryInto + fmt::Debug; + type Upper: + BaseArithmetic + Copy + From + TryInto + + UniqueSaturatedInto + fmt::Debug; /// The accuracy of this type. const ACCURACY: Self::Inner; @@ -588,6 +594,14 @@ macro_rules! implement_per_thing { } } + impl Normalizable<$name> for Vec<$name> { + fn normalize(&self, t_max: $name) -> Result, &'static str> { + let inner = self.iter().map(|p| p.clone().deconstruct() as $upper_type).collect::>(); + let normalized = normalize(inner.as_ref(), t_max.deconstruct() as $upper_type)?; + Ok(normalized.into_iter().map(|i| <$name>::from_parts(i.saturated_into())).collect()) + } + } + #[cfg(test)] mod $test_mod { use codec::{Encode, Decode}; diff --git a/primitives/npos-elections/benches/phragmen.rs b/primitives/npos-elections/benches/phragmen.rs index 7e46b9dce1d0b..e2385665bf065 100644 --- a/primitives/npos-elections/benches/phragmen.rs +++ b/primitives/npos-elections/benches/phragmen.rs @@ -59,8 +59,8 @@ mod bench_closure_and_slice { } /// Converts a vector of ratio assignments into ones with absolute budget value. - pub fn assignment_ratio_to_staked_slice( - ratio: Vec>, + pub fn assignment_ratio_to_staked_slice( + ratio: Vec>, stakes: &[VoteWeight], ) -> Vec> where diff --git a/primitives/npos-elections/src/helpers.rs b/primitives/npos-elections/src/helpers.rs index 1c96300c66224..063eac70c57fd 100644 --- a/primitives/npos-elections/src/helpers.rs +++ b/primitives/npos-elections/src/helpers.rs @@ -17,37 +17,72 @@ //! Helper methods for npos-elections. 
-use crate::{Assignment, ExtendedBalance, VoteWeight, IdentifierT, StakedAssignment, WithApprovalOf}; -use sp_arithmetic::PerThing; +use crate::{Assignment, ExtendedBalance, VoteWeight, IdentifierT, StakedAssignment, WithApprovalOf, Error}; +use sp_arithmetic::{PerThing, InnerOf}; use sp_std::prelude::*; /// Converts a vector of ratio assignments into ones with absolute budget value. -pub fn assignment_ratio_to_staked( - ratio: Vec>, +/// +/// Note that this will NOT attempt at normalizing the result. +pub fn assignment_ratio_to_staked( + ratio: Vec>, stake_of: FS, ) -> Vec> where for<'r> FS: Fn(&'r A) -> VoteWeight, - T: sp_std::ops::Mul, - ExtendedBalance: From<::Inner>, + P: sp_std::ops::Mul, + ExtendedBalance: From>, { ratio .into_iter() .map(|a| { let stake = stake_of(&a.who); - a.into_staked(stake.into(), true) + a.into_staked(stake.into()) }) .collect() } +/// Same as [`assignment_ratio_to_staked`] and try and do normalization. +pub fn assignment_ratio_to_staked_normalized( + ratio: Vec>, + stake_of: FS, +) -> Result>, Error> +where + for<'r> FS: Fn(&'r A) -> VoteWeight, + P: sp_std::ops::Mul, + ExtendedBalance: From>, +{ + let mut staked = assignment_ratio_to_staked(ratio, &stake_of); + staked.iter_mut().map(|a| + a.try_normalize(stake_of(&a.who).into()).map_err(|err| Error::ArithmeticError(err)) + ).collect::>()?; + Ok(staked) +} + /// Converts a vector of staked assignments into ones with ratio values. -pub fn assignment_staked_to_ratio( +/// +/// Note that this will NOT attempt at normalizing the result. +pub fn assignment_staked_to_ratio( + staked: Vec>, +) -> Vec> +where + ExtendedBalance: From>, +{ + staked.into_iter().map(|a| a.into_assignment()).collect() +} + +/// Same as [`assignment_staked_to_ratio`] and try and do normalization. 
+pub fn assignment_staked_to_ratio_normalized( staked: Vec>, -) -> Vec> +) -> Result>, Error> where - ExtendedBalance: From<::Inner>, + ExtendedBalance: From>, { - staked.into_iter().map(|a| a.into_assignment(true)).collect() + let mut ratio = staked.into_iter().map(|a| a.into_assignment()).collect::>(); + ratio.iter_mut().map(|a| + a.try_normalize().map_err(|err| Error::ArithmeticError(err)) + ).collect::>()?; + Ok(ratio) } /// consumes a vector of winners with backing stake to just winners. diff --git a/primitives/npos-elections/src/lib.rs b/primitives/npos-elections/src/lib.rs index 72eddf9a1d20c..61487157a3fba 100644 --- a/primitives/npos-elections/src/lib.rs +++ b/primitives/npos-elections/src/lib.rs @@ -30,7 +30,7 @@ use sp_std::{prelude::*, collections::btree_map::BTreeMap, fmt::Debug, cmp::Ordering, convert::TryFrom}; use sp_arithmetic::{ - PerThing, Rational128, ThresholdOrd, + PerThing, Rational128, ThresholdOrd, InnerOf, UpperOf, Normalizable, helpers_128bit::multiply_by_rational, traits::{Zero, Saturating, Bounded, SaturatedConversion}, }; @@ -84,6 +84,8 @@ pub enum Error { CompactTargetOverflow, /// One of the index functions returned none. CompactInvalidIndex, + /// An error occurred in some arithmetic operation. + ArithmeticError(&'static str), } /// A type which is used in the API of this crate as a numeric weight of a vote, most often the @@ -155,16 +157,16 @@ pub struct ElectionResult { /// A voter's stake assignment among a set of targets, represented as ratios. #[derive(Debug, Clone, Default)] #[cfg_attr(feature = "std", derive(PartialEq, Eq, Encode, Decode))] -pub struct Assignment { +pub struct Assignment { /// Voter's identifier. pub who: AccountId, /// The distribution of the voter's stake. 
- pub distribution: Vec<(AccountId, T)>, + pub distribution: Vec<(AccountId, P)>, } -impl Assignment +impl Assignment where - ExtendedBalance: From<::Inner>, + ExtendedBalance: From>, { /// Convert from a ratio assignment into one with absolute values aka. [`StakedAssignment`]. /// @@ -173,50 +175,59 @@ where /// distribution's sum is exactly equal to the total budget, by adding or subtracting the /// remainder from the last distribution. /// - /// If an edge ratio is [`Bounded::max_value()`], it is dropped. This edge can never mean + /// If an edge ratio is [`Bounded::min_value()`], it is dropped. This edge can never mean /// anything useful. - pub fn into_staked(self, stake: ExtendedBalance, fill: bool) -> StakedAssignment + pub fn into_staked(self, stake: ExtendedBalance) -> StakedAssignment where - T: sp_std::ops::Mul, + P: sp_std::ops::Mul, { - let mut sum: ExtendedBalance = Bounded::min_value(); - let mut distribution = self - .distribution + let distribution = self.distribution .into_iter() .filter_map(|(target, p)| { // if this ratio is zero, then skip it. - if p == Bounded::min_value() { + if p.is_zero() { None } else { // NOTE: this mul impl will always round to the nearest number, so we might both // overflow and underflow. let distribution_stake = p * stake; - // defensive only. We assume that balance cannot exceed extended balance. - sum = sum.saturating_add(distribution_stake); Some((target, distribution_stake)) } }) .collect::>(); - if fill { - // NOTE: we can do this better. 
- // https://revs.runtime-revolution.com/getting-100-with-rounded-percentages-273ffa70252b - if let Some(leftover) = stake.checked_sub(sum) { - if let Some(last) = distribution.last_mut() { - last.1 = last.1.saturating_add(leftover); - } - } else if let Some(excess) = sum.checked_sub(stake) { - if let Some(last) = distribution.last_mut() { - last.1 = last.1.saturating_sub(excess); - } - } - } - StakedAssignment { who: self.who, distribution, } } + + /// Try and normalize this assignment. + /// + /// If `Ok(())` is returned, then the assignment MUST have been successfully normalized to 100%. + pub fn try_normalize(&mut self) -> Result<(), &'static str> { + // NOTE: sadly we cannot use the impl for Vec here. Nonetheless, we use the same + // technique to prevent errors, do the calculation in the upper type. + let inners = self.distribution + .iter() + .map(|(_, p)| p) + .cloned() + .map(|p| p.deconstruct().into()) + .collect::>>(); + + let center: UpperOf
<P>
= P::one().deconstruct().into(); + inners.normalize(center) + .map(|corrected| + corrected.into_iter().map(|i| + P::from_parts(i.saturated_into()) + ).collect::>() + ) + .map(|corrected| + for ((_, ratio), normalized) in self.distribution.iter_mut().zip(corrected.into_iter()) { + *ratio = normalized; + } + ) + } } /// A voter's stake assignment among a set of targets, represented as absolute values in the scale @@ -243,42 +254,23 @@ impl StakedAssignment { /// /// If an edge stake is so small that it cannot be represented in `T`, it is ignored. This edge /// can never be re-created and does not mean anything useful anymore. - pub fn into_assignment(self, fill: bool) -> Assignment + pub fn into_assignment(self) -> Assignment where - ExtendedBalance: From<::Inner>, + ExtendedBalance: From>, + AccountId: IdentifierT, { - let accuracy: u128 = T::ACCURACY.saturated_into(); - let mut sum: u128 = Zero::zero(); - let stake = self.distribution.iter().map(|x| x.1).sum(); - let mut distribution = self - .distribution + let stake = self.total(); + let distribution = self.distribution .into_iter() .filter_map(|(target, w)| { - let per_thing = T::from_rational_approximation(w, stake); + let per_thing = P::from_rational_approximation(w, stake); if per_thing == Bounded::min_value() { None } else { - sum += per_thing.clone().deconstruct().saturated_into(); Some((target, per_thing)) } }) - .collect::>(); - - if fill { - if let Some(leftover) = accuracy.checked_sub(sum) { - if let Some(last) = distribution.last_mut() { - last.1 = last.1.saturating_add( - T::from_parts(leftover.saturated_into()) - ); - } - } else if let Some(excess) = sum.checked_sub(accuracy) { - if let Some(last) = distribution.last_mut() { - last.1 = last.1.saturating_sub( - T::from_parts(excess.saturated_into()) - ); - } - } - } + .collect::>(); Assignment { who: self.who, @@ -286,6 +278,26 @@ impl StakedAssignment { } } + /// Try and normalize this assignment. 
+ /// + /// If `Ok(())` is returned, then the assignment MUST have been successfully normalized to + /// `stake`. + pub fn try_normalize(&mut self, stake: ExtendedBalance) -> Result<(), &'static str> { + self.distribution + .iter() + .map(|(_, ref weight)| *weight) + .collect::>() + .normalize(stake) + .map(|normalized_weights| + self.distribution + .iter_mut() + .zip(normalized_weights.into_iter()) + .for_each(|((_, weight), corrected)| { + *weight = corrected; + }) + ) + } + /// Get the total stake of this assignment (aka voter budget). pub fn total(&self) -> ExtendedBalance { self.distribution.iter().fold(Zero::zero(), |a, b| a.saturating_add(b.1)) diff --git a/primitives/npos-elections/src/tests.rs b/primitives/npos-elections/src/tests.rs index 47d619339be5d..91c3eb86b9336 100644 --- a/primitives/npos-elections/src/tests.rs +++ b/primitives/npos-elections/src/tests.rs @@ -590,184 +590,276 @@ fn self_votes_should_be_kept() { ); } -#[test] -fn assignment_convert_works() { - let staked = StakedAssignment { - who: 1 as AccountId, - distribution: vec![ - (20, 100 as ExtendedBalance), - (30, 25), - ], - }; - - let assignment = staked.clone().into_assignment(true); - assert_eq!( - assignment, - Assignment { - who: 1, +mod assignment_convert_normalize { + use super::*; + #[test] + fn assignment_convert_works() { + let staked = StakedAssignment { + who: 1 as AccountId, distribution: vec![ - (20, Perbill::from_percent(80)), - (30, Perbill::from_percent(20)), - ] - } - ); - - assert_eq!( - assignment.into_staked(125, true), - staked, - ); -} - -#[test] -fn score_comparison_is_lexicographical_no_epsilon() { - let epsilon = Perbill::zero(); - // only better in the fist parameter, worse in the other two ✅ - assert_eq!( - is_score_better([12, 10, 35], [10, 20, 30], epsilon), - true, - ); - - // worse in the first, better in the other two ❌ - assert_eq!( - is_score_better([9, 30, 10], [10, 20, 30], epsilon), - false, - ); - - // equal in the first, the second one dictates. 
- assert_eq!( - is_score_better([10, 25, 40], [10, 20, 30], epsilon), - true, - ); - - // equal in the first two, the last one dictates. - assert_eq!( - is_score_better([10, 20, 40], [10, 20, 30], epsilon), - false, - ); -} + (20, 100 as ExtendedBalance), + (30, 25), + ], + }; -#[test] -fn score_comparison_with_epsilon() { - let epsilon = Perbill::from_percent(1); + let assignment = staked.clone().into_assignment(); + assert_eq!( + assignment, + Assignment { + who: 1, + distribution: vec![ + (20, Perbill::from_percent(80)), + (30, Perbill::from_percent(20)), + ] + } + ); - { - // no more than 1 percent (10) better in the first param. assert_eq!( - is_score_better([1009, 5000, 100000], [1000, 5000, 100000], epsilon), - false, + assignment.into_staked(125), + staked, ); + } - // now equal, still not better. + #[test] + fn assignment_convert_will_not_normalize() { assert_eq!( - is_score_better([1010, 5000, 100000], [1000, 5000, 100000], epsilon), - false, + Assignment { + who: 1, + distribution: vec![ + (2, Perbill::from_percent(33)), + (3, Perbill::from_percent(66)), + ] + }.into_staked(100), + StakedAssignment { + who: 1, + distribution: vec![ + (2, 33), + (3, 66), + // sum is not 100! + ], + }, ); - // now it is. assert_eq!( - is_score_better([1011, 5000, 100000], [1000, 5000, 100000], epsilon), - true, + StakedAssignment { + who: 1, + distribution: vec![ + (2, 333_333_333_333_333), + (3, 333_333_333_333_333), + (4, 666_666_666_666_333), + ], + }.into_assignment(), + Assignment { + who: 1, + distribution: vec![ + (2, Perbill::from_parts(250000000)), + (3, Perbill::from_parts(250000000)), + (4, Perbill::from_parts(499999999)), + // sum is not 100%! + ] + }, + ) + } + + #[test] + fn assignment_can_normalize() { + let mut a = Assignment { + who: 1, + distribution: vec![ + (2, Perbill::from_parts(330000000)), + (3, Perbill::from_parts(660000000)), + // sum is not 100%! 
+ ] + }; + a.try_normalize().unwrap(); + assert_eq!( + a, + Assignment { + who: 1, + distribution: vec![ + (2, Perbill::from_parts(340000000)), + (3, Perbill::from_parts(660000000)), + ] + }, ); } - { - // First score score is epsilon better, but first score is no longer `ge`. Then this is - // still not a good solution. + #[test] + fn staked_assignment_can_normalize() { + let mut a = StakedAssignment { + who: 1, + distribution: vec![ + (2, 33), + (3, 66), + ] + }; + a.try_normalize(100).unwrap(); assert_eq!( - is_score_better([999, 6000, 100000], [1000, 5000, 100000], epsilon), - false, + a, + StakedAssignment { + who: 1, + distribution: vec![ + (2, 34), + (3, 66), + ] + }, ); } +} - { - // first score is equal or better, but not epsilon. Then second one is the determinant. +mod score { + use super::*; + #[test] + fn score_comparison_is_lexicographical_no_epsilon() { + let epsilon = Perbill::zero(); + // only better in the fist parameter, worse in the other two ✅ assert_eq!( - is_score_better([1005, 5000, 100000], [1000, 5000, 100000], epsilon), - false, + is_score_better([12, 10, 35], [10, 20, 30], epsilon), + true, ); + // worse in the first, better in the other two ❌ assert_eq!( - is_score_better([1005, 5050, 100000], [1000, 5000, 100000], epsilon), + is_score_better([9, 30, 10], [10, 20, 30], epsilon), false, ); + // equal in the first, the second one dictates. assert_eq!( - is_score_better([1005, 5051, 100000], [1000, 5000, 100000], epsilon), + is_score_better([10, 25, 40], [10, 20, 30], epsilon), true, ); - } - { - // first score and second are equal or less than epsilon more, third is determinant. + // equal in the first two, the last one dictates. assert_eq!( - is_score_better([1005, 5025, 100000], [1000, 5000, 100000], epsilon), + is_score_better([10, 20, 40], [10, 20, 30], epsilon), false, ); + } + + #[test] + fn score_comparison_with_epsilon() { + let epsilon = Perbill::from_percent(1); + + { + // no more than 1 percent (10) better in the first param. 
+ assert_eq!( + is_score_better([1009, 5000, 100000], [1000, 5000, 100000], epsilon), + false, + ); + + // now equal, still not better. + assert_eq!( + is_score_better([1010, 5000, 100000], [1000, 5000, 100000], epsilon), + false, + ); + + // now it is. + assert_eq!( + is_score_better([1011, 5000, 100000], [1000, 5000, 100000], epsilon), + true, + ); + } + + { + // First score score is epsilon better, but first score is no longer `ge`. Then this is + // still not a good solution. + assert_eq!( + is_score_better([999, 6000, 100000], [1000, 5000, 100000], epsilon), + false, + ); + } + + { + // first score is equal or better, but not epsilon. Then second one is the determinant. + assert_eq!( + is_score_better([1005, 5000, 100000], [1000, 5000, 100000], epsilon), + false, + ); + + assert_eq!( + is_score_better([1005, 5050, 100000], [1000, 5000, 100000], epsilon), + false, + ); + + assert_eq!( + is_score_better([1005, 5051, 100000], [1000, 5000, 100000], epsilon), + true, + ); + } + + { + // first score and second are equal or less than epsilon more, third is determinant. + assert_eq!( + is_score_better([1005, 5025, 100000], [1000, 5000, 100000], epsilon), + false, + ); + + assert_eq!( + is_score_better([1005, 5025, 99_000], [1000, 5000, 100000], epsilon), + false, + ); + + assert_eq!( + is_score_better([1005, 5025, 98_999], [1000, 5000, 100000], epsilon), + true, + ); + } + } + + #[test] + fn score_comparison_large_value() { + // some random value taken from eras in kusama. + let initial = [12488167277027543u128, 5559266368032409496, 118749283262079244270992278287436446]; + // this claim is 0.04090% better in the third component. 
It should be accepted as better if + // epsilon is smaller than 5/10_0000 + let claim = [12488167277027543u128, 5559266368032409496, 118700736389524721358337889258988054]; assert_eq!( - is_score_better([1005, 5025, 99_000], [1000, 5000, 100000], epsilon), - false, + is_score_better( + claim.clone(), + initial.clone(), + Perbill::from_rational_approximation(1u32, 10_000), + ), + true, ); assert_eq!( - is_score_better([1005, 5025, 98_999], [1000, 5000, 100000], epsilon), + is_score_better( + claim.clone(), + initial.clone(), + Perbill::from_rational_approximation(2u32, 10_000), + ), true, ); - } -} - -#[test] -fn score_comparison_large_value() { - // some random value taken from eras in kusama. - let initial = [12488167277027543u128, 5559266368032409496, 118749283262079244270992278287436446]; - // this claim is 0.04090% better in the third component. It should be accepted as better if - // epsilon is smaller than 5/10_0000 - let claim = [12488167277027543u128, 5559266368032409496, 118700736389524721358337889258988054]; - - assert_eq!( - is_score_better( - claim.clone(), - initial.clone(), - Perbill::from_rational_approximation(1u32, 10_000), - ), - true, - ); - - assert_eq!( - is_score_better( - claim.clone(), - initial.clone(), - Perbill::from_rational_approximation(2u32, 10_000), - ), - true, - ); - assert_eq!( - is_score_better( - claim.clone(), - initial.clone(), - Perbill::from_rational_approximation(3u32, 10_000), - ), - true, - ); + assert_eq!( + is_score_better( + claim.clone(), + initial.clone(), + Perbill::from_rational_approximation(3u32, 10_000), + ), + true, + ); - assert_eq!( - is_score_better( - claim.clone(), - initial.clone(), - Perbill::from_rational_approximation(4u32, 10_000), - ), - true, - ); + assert_eq!( + is_score_better( + claim.clone(), + initial.clone(), + Perbill::from_rational_approximation(4u32, 10_000), + ), + true, + ); - assert_eq!( - is_score_better( - claim.clone(), - initial.clone(), - Perbill::from_rational_approximation(5u32, 
10_000), - ), - false, - ); + assert_eq!( + is_score_better( + claim.clone(), + initial.clone(), + Perbill::from_rational_approximation(5u32, 10_000), + ), + false, + ); + } } mod compact { diff --git a/primitives/runtime/src/lib.rs b/primitives/runtime/src/lib.rs index a8a518fd7b692..881ba3d724d82 100644 --- a/primitives/runtime/src/lib.rs +++ b/primitives/runtime/src/lib.rs @@ -71,7 +71,7 @@ pub use sp_core::RuntimeDebug; /// Re-export top-level arithmetic stuff. pub use sp_arithmetic::{ - PerThing, traits::SaturatedConversion, Perquintill, Perbill, Permill, Percent, PerU16, + PerThing, traits::SaturatedConversion, Perquintill, Perbill, Permill, Percent, PerU16, InnerOf, Rational128, FixedI64, FixedI128, FixedU128, FixedPointNumber, FixedPointOperand, }; /// Re-export 128 bit helpers. diff --git a/test-utils/src/lib.rs b/test-utils/src/lib.rs index e600ab9fce926..8163460df7427 100644 --- a/test-utils/src/lib.rs +++ b/test-utils/src/lib.rs @@ -38,7 +38,7 @@ /// ``` #[macro_export] macro_rules! assert_eq_uvec { - ( $x:expr, $y:expr ) => { + ( $x:expr, $y:expr $(,)? 
) => { $crate::__assert_eq_uvec!($x, $y); $crate::__assert_eq_uvec!($y, $x); } From 12d5f52fb4a20caa1d990833107943c21794604d Mon Sep 17 00:00:00 2001 From: kianenigma Date: Mon, 22 Jun 2020 10:29:45 +0200 Subject: [PATCH 2/8] better api for normalize --- frame/staking/fuzzer/src/submit_solution.rs | 17 ++++++----- primitives/arithmetic/src/lib.rs | 26 +++++++++++++--- primitives/arithmetic/src/per_things.rs | 9 ------ primitives/npos-elections/src/lib.rs | 34 +++++++-------------- 4 files changed, 42 insertions(+), 44 deletions(-) diff --git a/frame/staking/fuzzer/src/submit_solution.rs b/frame/staking/fuzzer/src/submit_solution.rs index 7094c7ed888b2..7293cf23890a5 100644 --- a/frame/staking/fuzzer/src/submit_solution.rs +++ b/frame/staking/fuzzer/src/submit_solution.rs @@ -44,7 +44,9 @@ enum Mode { } pub fn new_test_ext(iterations: u32) -> sp_io::TestExternalities { - let mut ext: sp_io::TestExternalities = frame_system::GenesisConfig::default().build_storage::().map(Into::into) + let mut ext: sp_io::TestExternalities = frame_system::GenesisConfig::default() + .build_storage::() + .map(Into::into) .expect("Failed to create test externalities."); let (offchain, offchain_state) = TestOffchainExt::new(); @@ -70,26 +72,29 @@ fn main() { loop { fuzz!(|data: (u32, u32, u32, u32, u32)| { let (mut num_validators, mut num_nominators, mut edge_per_voter, mut to_elect, mode_u32) = data; + // always run with 5 iterations. 
let mut ext = new_test_ext(5); let mode: Mode = unsafe { std::mem::transmute(mode_u32) }; num_validators = to_range(num_validators, 50, 1000); num_nominators = to_range(num_nominators, 50, 2000); edge_per_voter = to_range(edge_per_voter, 1, 16); to_elect = to_range(to_elect, 20, num_validators); + let do_reduce = true; - println!("+++ instance with params {} / {} / {} / {:?}({}) / {}", + println!("+++ instance with params {} / {} / {} / {} / {:?}({})", num_nominators, num_validators, edge_per_voter, + to_elect, mode, mode_u32, - to_elect, ); ext.execute_with(|| { // initial setup init_active_era(); + assert_ok!(create_validators_with_nominators_for_era::( num_validators, num_nominators, @@ -97,11 +102,11 @@ fn main() { true, None, )); + >::put(ElectionStatus::Open(1)); assert!(>::create_stakers_snapshot().0); - let origin = RawOrigin::Signed(create_funded_user::("fuzzer", 0, 100)); - println!("++ Chain setup done."); + let origin = RawOrigin::Signed(create_funded_user::("fuzzer", 0, 100)); // stuff to submit let (winners, compact, score, size) = match mode { @@ -141,8 +146,6 @@ fn main() { } }; - println!("++ Submission ready. Score = {:?}", score); - // must have chosen correct number of winners. assert_eq!(winners.len() as u32, >::validator_count()); diff --git a/primitives/arithmetic/src/lib.rs b/primitives/arithmetic/src/lib.rs index a551bc4c31dd8..188a50de4d1c3 100644 --- a/primitives/arithmetic/src/lib.rs +++ b/primitives/arithmetic/src/lib.rs @@ -45,7 +45,7 @@ pub use per_things::{PerThing, InnerOf, UpperOf, Percent, PerU16, Permill, Perbi pub use rational128::Rational128; use sp_std::{prelude::*, cmp::Ordering, fmt::Debug, convert::TryInto}; -use traits::{BaseArithmetic, One, Zero}; +use traits::{BaseArithmetic, One, Zero, SaturatedConversion}; /// Trait for comparing two numbers with an threshold. 
/// @@ -96,12 +96,29 @@ pub trait Normalizable { fn normalize(&self, t_max: T) -> Result, &'static str>; } -impl Normalizable for Vec { - fn normalize(&self, t_max: T) -> Result, &'static str> { - normalize(self.as_ref(), t_max) +macro_rules! impl_normalize_for_numeric { + ($numeric:ty) => { + impl Normalizable<$numeric> for Vec<$numeric> { + fn normalize(&self, t_max: $numeric) -> Result, &'static str> { + normalize(self.as_ref(), t_max) + } + } + }; +} + +impl_normalize_for_numeric!(u32); +impl_normalize_for_numeric!(u64); +impl_normalize_for_numeric!(u128); + +impl Normalizable

for Vec

{ + fn normalize(&self, t_max: P) -> Result, &'static str> { + let inners = self.iter().map(|p| p.clone().deconstruct().into()).collect::>(); + let normalized = normalize(inners.as_ref(), t_max.deconstruct().into())?; + Ok(normalized.into_iter().map(|i: UpperOf

| P::from_parts(i.saturated_into())).collect()) } } + /// Normalize `input` so that the sum of all elements reaches `t_max`. /// /// This implementation is currently in a balanced position between being performant and accurate. @@ -256,7 +273,6 @@ mod normalize_tests { #[test] fn works_for_vec() { - use super::Normalizable; assert_eq!(vec![8u32, 9, 7, 10].normalize(40).unwrap(), vec![10u32, 10, 10, 10]); } diff --git a/primitives/arithmetic/src/per_things.rs b/primitives/arithmetic/src/per_things.rs index b6eb9be3cfe7e..355463e6fefb7 100644 --- a/primitives/arithmetic/src/per_things.rs +++ b/primitives/arithmetic/src/per_things.rs @@ -23,7 +23,6 @@ use codec::{Encode, CompactAs}; use crate::traits::{ SaturatedConversion, UniqueSaturatedInto, Saturating, BaseArithmetic, Bounded, Zero, }; -use crate::{Normalizable, normalize}; use sp_debug_derive::RuntimeDebug; /// Get the inner type of a `PerThing`. @@ -594,14 +593,6 @@ macro_rules! implement_per_thing { } } - impl Normalizable<$name> for Vec<$name> { - fn normalize(&self, t_max: $name) -> Result, &'static str> { - let inner = self.iter().map(|p| p.clone().deconstruct() as $upper_type).collect::>(); - let normalized = normalize(inner.as_ref(), t_max.deconstruct() as $upper_type)?; - Ok(normalized.into_iter().map(|i| <$name>::from_parts(i.saturated_into())).collect()) - } - } - #[cfg(test)] mod $test_mod { use codec::{Encode, Decode}; diff --git a/primitives/npos-elections/src/lib.rs b/primitives/npos-elections/src/lib.rs index 61487157a3fba..ce87aac61ea48 100644 --- a/primitives/npos-elections/src/lib.rs +++ b/primitives/npos-elections/src/lib.rs @@ -30,7 +30,7 @@ use sp_std::{prelude::*, collections::btree_map::BTreeMap, fmt::Debug, cmp::Ordering, convert::TryFrom}; use sp_arithmetic::{ - PerThing, Rational128, ThresholdOrd, InnerOf, UpperOf, Normalizable, + PerThing, Rational128, ThresholdOrd, InnerOf, Normalizable, helpers_128bit::multiply_by_rational, traits::{Zero, Saturating, Bounded, SaturatedConversion}, }; 
@@ -206,26 +206,16 @@ where /// /// If `Ok(())` is returned, then the assignment MUST have been successfully normalized to 100%. pub fn try_normalize(&mut self) -> Result<(), &'static str> { - // NOTE: sadly we cannot use the impl for Vec here. Nonetheless, we use the same - // technique to prevent errors, do the calculation in the upper type. - let inners = self.distribution + self.distribution .iter() - .map(|(_, p)| p) - .cloned() - .map(|p| p.deconstruct().into()) - .collect::>>(); - - let center: UpperOf

= P::one().deconstruct().into(); - inners.normalize(center) - .map(|corrected| - corrected.into_iter().map(|i| - P::from_parts(i.saturated_into()) - ).collect::>() - ) - .map(|corrected| - for ((_, ratio), normalized) in self.distribution.iter_mut().zip(corrected.into_iter()) { - *ratio = normalized; - } + .map(|(_, p)| *p) + .collect::>() + .normalize(P::one()) + .map(|normalized_ratios| + self.distribution + .iter_mut() + .zip(normalized_ratios) + .for_each(|((_, old), corrected)| { *old = corrected; }) ) } } @@ -292,9 +282,7 @@ impl StakedAssignment { self.distribution .iter_mut() .zip(normalized_weights.into_iter()) - .for_each(|((_, weight), corrected)| { - *weight = corrected; - }) + .for_each(|((_, weight), corrected)| { *weight = corrected; }) ) } From cb9c22f34d43da5f4f10431ba564d6480f648d49 Mon Sep 17 00:00:00 2001 From: kianenigma Date: Mon, 22 Jun 2020 10:37:25 +0200 Subject: [PATCH 3/8] Some grumbles --- primitives/arithmetic/fuzzer/src/normalize.rs | 4 +- primitives/arithmetic/src/lib.rs | 37 ++++++++++--------- 2 files changed, 21 insertions(+), 20 deletions(-) diff --git a/primitives/arithmetic/fuzzer/src/normalize.rs b/primitives/arithmetic/fuzzer/src/normalize.rs index 079cc51f22f8e..d65937ef2a694 100644 --- a/primitives/arithmetic/fuzzer/src/normalize.rs +++ b/primitives/arithmetic/fuzzer/src/normalize.rs @@ -35,7 +35,7 @@ fn main() { loop { fuzz!(|data: (Vec, u32)| { let (data, norm) = data; - let pre_sum = data.iter().fold(0u128, |acc, x| acc + *x as u128); + let pre_sum: u128 = data.iter().map(|x| *x as u128).sum(); let normalized = data.normalize(norm); // error cases. @@ -47,7 +47,7 @@ fn main() { if data.len() > 0 { // if sum goes beyond u128, panic. 
- let sum = normalized.iter().fold(0u128, |acc, x| acc + *x as u128); + let sum: u128 = normalized.iter().map(|x| *x as u128).sum(); assert_eq!( sum, norm as u128, diff --git a/primitives/arithmetic/src/lib.rs b/primitives/arithmetic/src/lib.rs index 188a50de4d1c3..e1a23934da2a6 100644 --- a/primitives/arithmetic/src/lib.rs +++ b/primitives/arithmetic/src/lib.rs @@ -89,18 +89,18 @@ where /// A collection-like object that is made of values of type `T` and can normalize its individual /// values around a centric point. pub trait Normalizable { - /// Normalize self around `t_max`. + /// Normalize self around `targeted_sum`. /// - /// Only returns `Ok` if the new sum of results is guaranteed to be equal to `t_max`. Else, - /// returns an error explaining why it failed to do so. - fn normalize(&self, t_max: T) -> Result, &'static str>; + /// Only returns `Ok` if the new sum of results is guaranteed to be equal to `targeted_sum`. + /// Else, returns an error explaining why it failed to do so. + fn normalize(&self, targeted_sum: T) -> Result, &'static str>; } macro_rules! impl_normalize_for_numeric { ($numeric:ty) => { impl Normalizable<$numeric> for Vec<$numeric> { - fn normalize(&self, t_max: $numeric) -> Result, &'static str> { - normalize(self.as_ref(), t_max) + fn normalize(&self, targeted_sum: $numeric) -> Result, &'static str> { + normalize(self.as_ref(), targeted_sum) } } }; @@ -111,23 +111,24 @@ impl_normalize_for_numeric!(u64); impl_normalize_for_numeric!(u128); impl Normalizable

for Vec

{ - fn normalize(&self, t_max: P) -> Result, &'static str> { + fn normalize(&self, targeted_sum: P) -> Result, &'static str> { let inners = self.iter().map(|p| p.clone().deconstruct().into()).collect::>(); - let normalized = normalize(inners.as_ref(), t_max.deconstruct().into())?; + let normalized = normalize(inners.as_ref(), targeted_sum.deconstruct().into())?; Ok(normalized.into_iter().map(|i: UpperOf

| P::from_parts(i.saturated_into())).collect()) } } -/// Normalize `input` so that the sum of all elements reaches `t_max`. +/// Normalize `input` so that the sum of all elements reaches `targeted_sum`. /// /// This implementation is currently in a balanced position between being performant and accurate. /// /// 1. We prefer storing original indices, and sorting the `input` only once. This will save the /// cost of sorting per round at the cost of a little bit of memory. /// 2. The granularity of increment/decrements is determined by the difference and the number of -/// elements in `input` and their sum difference with `t_max`. In short, the implementation is -/// very likely to over-increment/decrement in case the `diff / input.len()` is rather small. +/// elements in `input` and their sum difference with `targeted_sum`. In short, the +/// implementation is very likely to over-increment/decrement in case the `diff / input.len()` is +/// rather small. /// /// This function can return an error is if `T` cannot be built from the size of `input`, or if /// `sum(input)` cannot fit inside `T`. Moreover, if any of the internal operations saturate, it @@ -136,7 +137,7 @@ impl Normalizable

for Vec

{ /// Based on the above, the best use case of the function will be only to correct small rounding /// errors. If the difference of the elements is vert large, then the subtraction/addition of one /// element with `diff / input.len()` might easily saturate and results will be `Err`. -pub fn normalize(input: &[T], t_max: T) -> Result, &'static str> +pub fn normalize(input: &[T], targeted_sum: T) -> Result, &'static str> where T: Clone + Copy + Ord + BaseArithmetic + Debug, { let mut sum = T::zero(); @@ -150,12 +151,12 @@ pub fn normalize(input: &[T], t_max: T) -> Result, &'static str> return Ok(Vec::::new()); } - let diff = t_max.max(sum) - t_max.min(sum); + let diff = targeted_sum.max(sum) - targeted_sum.min(sum); if diff.is_zero() { return Ok(input.to_vec()); } - let needs_bump = t_max > sum; + let needs_bump = targeted_sum > sum; let per_round = diff / count.try_into().map_err(|_| "failed to build T from usize")?; let mut leftover = diff % count.try_into().map_err(|_| "failed to build T from usize")?; @@ -195,7 +196,7 @@ pub fn normalize(input: &[T], t_max: T) -> Result, &'static str> min_index += 1; min_index = min_index % count; } - leftover = leftover.saturating_sub(One::one()); + leftover -= One::one() } } else { // must decrease the stakes a bit. decrement from the max element. index of maximum is now @@ -226,16 +227,16 @@ pub fn normalize(input: &[T], t_max: T) -> Result, &'static str> if output_with_idx[max_index].1 <= threshold { max_index = max_index.checked_sub(1).unwrap_or(count - 1); } - leftover = leftover.saturating_sub(One::one()); + leftover -= One::one() } } debug_assert_eq!( output_with_idx.iter().fold(T::zero(), |acc, (_, x)| acc + *x), - t_max, + targeted_sum, "sum({:?}) != {:?}", output_with_idx, - t_max, + targeted_sum, ); // sort again based on the original index. 
From 0f1948c1a59075a7b74a7271f678702317b6130c Mon Sep 17 00:00:00 2001 From: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Date: Mon, 22 Jun 2020 12:55:29 +0200 Subject: [PATCH 4/8] Update primitives/arithmetic/src/lib.rs Co-authored-by: Guillaume Thiolliere --- primitives/arithmetic/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/primitives/arithmetic/src/lib.rs b/primitives/arithmetic/src/lib.rs index e1a23934da2a6..f681486812c16 100644 --- a/primitives/arithmetic/src/lib.rs +++ b/primitives/arithmetic/src/lib.rs @@ -130,7 +130,7 @@ impl Normalizable

for Vec

{ /// implementation is very likely to over-increment/decrement in case the `diff / input.len()` is /// rather small. /// -/// This function can return an error is if `T` cannot be built from the size of `input`, or if +/// This function can return an error is if size of `input` doesn't fit inside `T`, or if /// `sum(input)` cannot fit inside `T`. Moreover, if any of the internal operations saturate, it /// will also return en `Err`. /// From 52e65852eb8bdee603243becce67a6805bae54ed Mon Sep 17 00:00:00 2001 From: kianenigma Date: Mon, 22 Jun 2020 16:55:35 +0200 Subject: [PATCH 5/8] More great review grumbles --- primitives/arithmetic/fuzzer/src/normalize.rs | 23 +++-- primitives/arithmetic/src/lib.rs | 83 +++++++++++++------ primitives/arithmetic/src/per_things.rs | 6 +- primitives/arithmetic/src/traits.rs | 2 +- 4 files changed, 73 insertions(+), 41 deletions(-) diff --git a/primitives/arithmetic/fuzzer/src/normalize.rs b/primitives/arithmetic/fuzzer/src/normalize.rs index d65937ef2a694..34c4ef9cb0ab5 100644 --- a/primitives/arithmetic/fuzzer/src/normalize.rs +++ b/primitives/arithmetic/fuzzer/src/normalize.rs @@ -35,6 +35,7 @@ fn main() { loop { fuzz!(|data: (Vec, u32)| { let (data, norm) = data; + if data.len() == 0 { return; } let pre_sum: u128 = data.iter().map(|x| *x as u128).sum(); let normalized = data.normalize(norm); @@ -43,19 +44,17 @@ fn main() { assert!(normalized.is_err()) } else { if let Ok(normalized) = normalized { + // if sum goes beyond u128, panic. + let sum: u128 = normalized.iter().map(|x| *x as u128).sum(); + // if this function returns Ok(), then it will ALWAYS be accurate. - if data.len() > 0 { - - // if sum goes beyond u128, panic. 
- let sum: u128 = normalized.iter().map(|x| *x as u128).sum(); - assert_eq!( - sum, - norm as u128, - "sums don't match {:?}, {}", - normalized, - norm, - ); - } + assert_eq!( + sum, + norm as u128, + "sums don't match {:?}, {}", + normalized, + norm, + ); } } }) diff --git a/primitives/arithmetic/src/lib.rs b/primitives/arithmetic/src/lib.rs index e1a23934da2a6..f4e55e2f09fec 100644 --- a/primitives/arithmetic/src/lib.rs +++ b/primitives/arithmetic/src/lib.rs @@ -45,7 +45,7 @@ pub use per_things::{PerThing, InnerOf, UpperOf, Percent, PerU16, Permill, Perbi pub use rational128::Rational128; use sp_std::{prelude::*, cmp::Ordering, fmt::Debug, convert::TryInto}; -use traits::{BaseArithmetic, One, Zero, SaturatedConversion}; +use traits::{BaseArithmetic, One, Zero, SaturatedConversion, Unsigned}; /// Trait for comparing two numbers with an threshold. /// @@ -125,26 +125,38 @@ impl Normalizable

for Vec

{ /// /// 1. We prefer storing original indices, and sorting the `input` only once. This will save the /// cost of sorting per round at the cost of a little bit of memory. -/// 2. The granularity of increment/decrements is determined by the difference and the number of -/// elements in `input` and their sum difference with `targeted_sum`. In short, the -/// implementation is very likely to over-increment/decrement in case the `diff / input.len()` is -/// rather small. +/// 2. The granularity of increment/decrements is determined by the number of elements in `input` +/// and their sum difference with `targeted_sum`, namely `diff = diff(abs(sum(input), +/// target_sum))`. This value is then distributed into `per_round = diff / input.len()` and +/// `leftover = diff % round`. First, per_round is applied to all elements of input, and then we +/// move to leftover, in which we add/subtract 1 by 1 until `leftover` is depleted. /// -/// This function can return an error is if `T` cannot be built from the size of `input`, or if -/// `sum(input)` cannot fit inside `T`. Moreover, if any of the internal operations saturate, it -/// will also return en `Err`. +/// In case where the sum is less than the target, the above approach always holds. If sum is less +/// than target, then each individual element is also less than target. Thus, by adding `per_round` +/// to each item, neither of them can overflow the numeric bound of `T`. In fact, neither of the can +/// go beyond `target_sum`*. /// -/// Based on the above, the best use case of the function will be only to correct small rounding -/// errors. If the difference of the elements is vert large, then the subtraction/addition of one -/// element with `diff / input.len()` might easily saturate and results will be `Err`. +/// In the case of sum is more than target, there is small twist. The subtraction of `per_round` +/// form each element might go below zero. 
In this case, we saturate and add the error to the +/// `leftover` value. This ensures that the result will always stay accurate, yet it might cause the +/// execution to become increasingly slow, since leftovers are applied one by one. +/// +/// All in all, the complicated case above is rare to happen in all substrate use cases, hence we +/// opt for it due to its simplicity. +/// +/// This function will return an error is if length of `input` cannot fit in `T`, or if `sum(input)` +/// cannot fit inside `T`. +/// +/// * This proof is used in the implementation as well. pub fn normalize(input: &[T], targeted_sum: T) -> Result, &'static str> - where T: Clone + Copy + Ord + BaseArithmetic + Debug, + where T: Clone + Copy + Ord + BaseArithmetic + Unsigned + Debug, { let mut sum = T::zero(); for t in input.iter() { sum = sum.checked_add(t).ok_or("sum of input cannot fit in `T`")?; } let count = input.len(); + let count_t: T = count.try_into().map_err(|_| "length of `inputs` cannot fit in `T`")?; // early exit if needed. if count.is_zero() { @@ -157,8 +169,8 @@ pub fn normalize(input: &[T], targeted_sum: T) -> Result, &'static str } let needs_bump = targeted_sum > sum; - let per_round = diff / count.try_into().map_err(|_| "failed to build T from usize")?; - let mut leftover = diff % count.try_into().map_err(|_| "failed to build T from usize")?; + let per_round = diff / count_t; + let mut leftover = diff % count_t; let mut output_with_idx = input.iter().cloned().enumerate().collect::>(); // sort output once based on diff. 
This will require more data transfer and saving original @@ -179,7 +191,7 @@ pub fn normalize(input: &[T], targeted_sum: T) -> Result, &'static str for _ in 0..count { output_with_idx[min_index].1 = output_with_idx[min_index].1 .checked_add(&per_round) - .ok_or("Failed to add.")?; + .expect("Proof provided in the module doc; qed."); if output_with_idx[min_index].1 >= threshold { min_index += 1; min_index = min_index % count; @@ -190,8 +202,8 @@ pub fn normalize(input: &[T], targeted_sum: T) -> Result, &'static str // continue with the previous min_index while !leftover.is_zero() { output_with_idx[min_index].1 = output_with_idx[min_index].1 - .checked_add(&One::one()) - .ok_or("Failed to add.")?; + .checked_add(&T::one()) + .expect("Proof provided in the module doc; qed."); if output_with_idx[min_index].1 >= threshold { min_index += 1; min_index = min_index % count; @@ -212,7 +224,11 @@ pub fn normalize(input: &[T], targeted_sum: T) -> Result, &'static str for _ in 0..count { output_with_idx[max_index].1 = output_with_idx[max_index].1 .checked_sub(&per_round) - .ok_or("Failed to subtract.")?; + .unwrap_or_else(|| { + let remainder = per_round - output_with_idx[max_index].1; + leftover += remainder; + output_with_idx[max_index].1.saturating_sub(per_round) + }); if output_with_idx[max_index].1 <= threshold { max_index = max_index.checked_sub(1).unwrap_or(count - 1); } @@ -221,13 +237,15 @@ pub fn normalize(input: &[T], targeted_sum: T) -> Result, &'static str // continue with the previous max_index while !leftover.is_zero() { - output_with_idx[max_index].1 = output_with_idx[max_index].1 - .checked_sub(&One::one()) - .ok_or("Failed to subtract.")?; - if output_with_idx[max_index].1 <= threshold { + if let Some(next) = output_with_idx[max_index].1.checked_sub(&One::one()) { + output_with_idx[max_index].1 = next; + if output_with_idx[max_index].1 <= threshold { + max_index = max_index.checked_sub(1).unwrap_or(count - 1); + } + leftover -= One::one() + } else { max_index 
= max_index.checked_sub(1).unwrap_or(count - 1); } - leftover -= One::one() } } @@ -267,9 +285,24 @@ mod normalize_tests { } #[test] - fn fails_on_large_input() { + fn fails_on_if_input_sum_large() { assert!(normalize(vec![1u8; 255].as_ref(), 10).is_ok()); - assert!(normalize(vec![10u8; 256].as_ref(), 10).is_err()); + assert_eq!( + normalize(vec![1u8; 256].as_ref(), 10), + Err("sum of input cannot fit in `T`"), + ); + } + + #[test] + fn does_not_fail_on_subtraction_overflow() { + assert_eq!( + normalize(vec![1u8, 100, 100].as_ref(), 10).unwrap(), + vec![1, 9, 0], + ); + assert_eq!( + normalize(vec![1u8, 8, 9].as_ref(), 1).unwrap(), + vec![0, 1, 0], + ); } #[test] diff --git a/primitives/arithmetic/src/per_things.rs b/primitives/arithmetic/src/per_things.rs index 355463e6fefb7..521f4d107412c 100644 --- a/primitives/arithmetic/src/per_things.rs +++ b/primitives/arithmetic/src/per_things.rs @@ -21,7 +21,7 @@ use serde::{Serialize, Deserialize}; use sp_std::{ops, fmt, prelude::*, convert::TryInto}; use codec::{Encode, CompactAs}; use crate::traits::{ - SaturatedConversion, UniqueSaturatedInto, Saturating, BaseArithmetic, Bounded, Zero, + SaturatedConversion, UniqueSaturatedInto, Saturating, BaseArithmetic, Bounded, Zero, Unsigned, }; use sp_debug_derive::RuntimeDebug; @@ -37,13 +37,13 @@ pub trait PerThing: Sized + Saturating + Copy + Default + Eq + PartialEq + Ord + PartialOrd + Bounded + fmt::Debug { /// The data type used to build this per-thingy. - type Inner: BaseArithmetic + Copy + fmt::Debug; + type Inner: BaseArithmetic + Unsigned + Copy + fmt::Debug; /// A data type larger than `Self::Inner`, used to avoid overflow in some computations. /// It must be able to compute `ACCURACY^2`. type Upper: BaseArithmetic + Copy + From + TryInto + - UniqueSaturatedInto + fmt::Debug; + UniqueSaturatedInto + Unsigned + fmt::Debug; /// The accuracy of this type. 
const ACCURACY: Self::Inner; diff --git a/primitives/arithmetic/src/traits.rs b/primitives/arithmetic/src/traits.rs index 3921d253daf06..29b8e419ef85c 100644 --- a/primitives/arithmetic/src/traits.rs +++ b/primitives/arithmetic/src/traits.rs @@ -22,7 +22,7 @@ use codec::HasCompact; pub use integer_sqrt::IntegerSquareRoot; pub use num_traits::{ Zero, One, Bounded, CheckedAdd, CheckedSub, CheckedMul, CheckedDiv, CheckedNeg, - CheckedShl, CheckedShr, checked_pow, Signed + CheckedShl, CheckedShr, checked_pow, Signed, Unsigned, }; use sp_std::ops::{ Add, Sub, Mul, Div, Rem, AddAssign, SubAssign, MulAssign, DivAssign, From cdfe121f9675f1a781b1aca3cf8e21a62787f2dc Mon Sep 17 00:00:00 2001 From: kianenigma Date: Wed, 24 Jun 2020 09:56:10 +0200 Subject: [PATCH 6/8] Way better doc for everything. --- primitives/arithmetic/src/lib.rs | 43 ++++++++++++++++------------ primitives/npos-elections/src/lib.rs | 6 ++++ 2 files changed, 30 insertions(+), 19 deletions(-) diff --git a/primitives/arithmetic/src/lib.rs b/primitives/arithmetic/src/lib.rs index f4e55e2f09fec..cc72006294ce3 100644 --- a/primitives/arithmetic/src/lib.rs +++ b/primitives/arithmetic/src/lib.rs @@ -88,6 +88,8 @@ where /// A collection-like object that is made of values of type `T` and can normalize its individual /// values around a centric point. +/// +/// Note that the order of items in the collection may affect the result. pub trait Normalizable { /// Normalize self around `targeted_sum`. /// @@ -97,18 +99,18 @@ pub trait Normalizable { } macro_rules! 
impl_normalize_for_numeric { - ($numeric:ty) => { - impl Normalizable<$numeric> for Vec<$numeric> { - fn normalize(&self, targeted_sum: $numeric) -> Result, &'static str> { - normalize(self.as_ref(), targeted_sum) + ($($numeric:ty),*) => { + $( + impl Normalizable<$numeric> for Vec<$numeric> { + fn normalize(&self, targeted_sum: $numeric) -> Result, &'static str> { + normalize(self.as_ref(), targeted_sum) + } } - } + )* }; } -impl_normalize_for_numeric!(u32); -impl_normalize_for_numeric!(u64); -impl_normalize_for_numeric!(u128); +impl_normalize_for_numeric!(u8, u16, u32, u64, u128); impl Normalizable

for Vec

{ fn normalize(&self, targeted_sum: P) -> Result, &'static str> { @@ -126,17 +128,17 @@ impl Normalizable

for Vec

{ /// 1. We prefer storing original indices, and sorting the `input` only once. This will save the /// cost of sorting per round at the cost of a little bit of memory. /// 2. The granularity of increment/decrements is determined by the number of elements in `input` -/// and their sum difference with `targeted_sum`, namely `diff = diff(abs(sum(input), -/// target_sum))`. This value is then distributed into `per_round = diff / input.len()` and -/// `leftover = diff % round`. First, per_round is applied to all elements of input, and then we -/// move to leftover, in which we add/subtract 1 by 1 until `leftover` is depleted. +/// and their sum difference with `targeted_sum`, namely `diff = diff(sum(input), target_sum)`. +/// This value is then distributed into `per_round = diff / input.len()` and `leftover = diff % +/// round`. First, per_round is applied to all elements of input, and then we move to leftover, +/// in which case we add/subtract 1 by 1 until `leftover` is depleted. /// -/// In case where the sum is less than the target, the above approach always holds. If sum is less -/// than target, then each individual element is also less than target. Thus, by adding `per_round` -/// to each item, neither of them can overflow the numeric bound of `T`. In fact, neither of the can -/// go beyond `target_sum`*. +/// When the sum is less than the target, the above approach always holds. In this case, then each +/// individual element is also less than target. Thus, by adding `per_round` to each item, neither +/// of them can overflow the numeric bound of `T`. In fact, neither of the can go beyond +/// `target_sum`*. /// -/// In the case of sum is more than target, there is small twist. The subtraction of `per_round` +/// If sum is more than target, there is small twist. The subtraction of `per_round` /// form each element might go below zero. In this case, we saturate and add the error to the /// `leftover` value. 
This ensures that the result will always stay accurate, yet it might cause the /// execution to become increasingly slow, since leftovers are applied one by one. @@ -151,14 +153,17 @@ impl Normalizable

for Vec

{ pub fn normalize(input: &[T], targeted_sum: T) -> Result, &'static str> where T: Clone + Copy + Ord + BaseArithmetic + Unsigned + Debug, { + // compute sum and return error if failed. let mut sum = T::zero(); for t in input.iter() { sum = sum.checked_add(t).ok_or("sum of input cannot fit in `T`")?; } + + // convert count and return error if failed. let count = input.len(); let count_t: T = count.try_into().map_err(|_| "length of `inputs` cannot fit in `T`")?; - // early exit if needed. + // Nothing to do here. if count.is_zero() { return Ok(Vec::::new()); } @@ -172,9 +177,9 @@ pub fn normalize(input: &[T], targeted_sum: T) -> Result, &'static str let per_round = diff / count_t; let mut leftover = diff % count_t; - let mut output_with_idx = input.iter().cloned().enumerate().collect::>(); // sort output once based on diff. This will require more data transfer and saving original // index, but we sort only twice instead: once now and once at the very end. + let mut output_with_idx = input.iter().cloned().enumerate().collect::>(); output_with_idx.sort_unstable_by_key(|x| x.1); if needs_bump { diff --git a/primitives/npos-elections/src/lib.rs b/primitives/npos-elections/src/lib.rs index ce87aac61ea48..592ed3b717350 100644 --- a/primitives/npos-elections/src/lib.rs +++ b/primitives/npos-elections/src/lib.rs @@ -272,6 +272,12 @@ impl StakedAssignment { /// /// If `Ok(())` is returned, then the assignment MUST have been successfully normalized to /// `stake`. + /// + /// NOTE: current implementation of `.normalize` is almost safe to `expect()` upon. The only + /// error case is when the input cannot fit in `T`, or the sum of input cannot fit in `T`. + /// Sadly, both of these are dependent upon the implementation of `VoteLimit`, i.e. the limit + /// of edges per voter which is enforced from upstream. Hence, at this crate, we prefer + /// returning a result and a use the name prefix `try_`. 
pub fn try_normalize(&mut self, stake: ExtendedBalance) -> Result<(), &'static str> { self.distribution .iter() From 5554d9d4d7c9aa5784ed3f70c50fe5091e0a7ed7 Mon Sep 17 00:00:00 2001 From: kianenigma Date: Wed, 24 Jun 2020 14:48:55 +0200 Subject: [PATCH 7/8] Some improvement --- primitives/arithmetic/src/lib.rs | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/primitives/arithmetic/src/lib.rs b/primitives/arithmetic/src/lib.rs index cc72006294ce3..19fa84fc91e25 100644 --- a/primitives/arithmetic/src/lib.rs +++ b/primitives/arithmetic/src/lib.rs @@ -188,9 +188,7 @@ pub fn normalize(input: &[T], targeted_sum: T) -> Result, &'static str // we move to the next minimum. let mut min_index = 0; // at this threshold we move to next index. - let threshold = output_with_idx - .last() - .expect("length of input is greater than zero; it must have a last; qed").1; + let threshold = targeted_sum / count_t; if !per_round.is_zero() { for _ in 0..count { @@ -223,7 +221,8 @@ pub fn normalize(input: &[T], targeted_sum: T) -> Result, &'static str // at this threshold we move to next index. 
let threshold = output_with_idx .first() - .expect("length of input is greater than zero; it must have a fist; qed").1; + .expect("length of input is greater than zero; it must have a fist; qed") + .1; if !per_round.is_zero() { for _ in 0..count { @@ -337,9 +336,9 @@ mod normalize_tests { Perbill::from_percent(30) ].normalize(Perbill::one()).unwrap(), vec![ - Perbill::from_parts(316666666), - Perbill::from_parts(383333333), - Perbill::from_parts(300000001), + Perbill::from_parts(316666668), + Perbill::from_parts(383333332), + Perbill::from_parts(300000000), ] ); } From e0bea744df4ab033913713d0415ff6b6cfad2969 Mon Sep 17 00:00:00 2001 From: Kian Paimani <5588131+kianenigma@users.noreply.github.com> Date: Wed, 24 Jun 2020 15:00:35 +0200 Subject: [PATCH 8/8] Update primitives/arithmetic/src/lib.rs Co-authored-by: Bernhard Schuster --- primitives/arithmetic/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/primitives/arithmetic/src/lib.rs b/primitives/arithmetic/src/lib.rs index 19fa84fc91e25..5c0d2baa51db6 100644 --- a/primitives/arithmetic/src/lib.rs +++ b/primitives/arithmetic/src/lib.rs @@ -221,7 +221,7 @@ pub fn normalize(input: &[T], targeted_sum: T) -> Result, &'static str // at this threshold we move to next index. let threshold = output_with_idx .first() - .expect("length of input is greater than zero; it must have a fist; qed") + .expect("length of input is greater than zero; it must have a first; qed") .1; if !per_round.is_zero() {