diff --git a/vm/actor/src/builtin/mod.rs b/vm/actor/src/builtin/mod.rs
index 501c69e51a48..8bf26c8a5546 100644
--- a/vm/actor/src/builtin/mod.rs
+++ b/vm/actor/src/builtin/mod.rs
@@ -20,3 +20,4 @@ pub mod verifreg;
 pub use self::codes::*;
 pub(crate) use self::shared::*;
 pub use self::singletons::*;
+pub use network::*;
diff --git a/vm/actor/src/util/math.rs b/vm/actor/src/util/math.rs
new file mode 100644
index 000000000000..44ff0ce393ed
--- /dev/null
+++ b/vm/actor/src/util/math.rs
@@ -0,0 +1,22 @@
+// Copyright 2020 ChainSafe Systems
+// SPDX-License-Identifier: Apache-2.0, MIT
+
+use num_bigint::{BigInt, ParseBigIntError};
+
+pub const PRECISION: u64 = 128;
+
+/// poly_val evaluates a polynomial given by coefficients `poly` in Q.128 format
+/// at point `x` in Q.128 format. Output is in Q.128.
+/// Coefficients should be ordered from the highest order coefficient to the lowest.
+pub fn poly_val(poly: &[BigInt], x: &BigInt) -> BigInt {
+    let mut res = BigInt::default();
+
+    for coeff in poly {
+        res = ((res * x) >> PRECISION) + coeff;
+    }
+    res
+}
+
+pub fn poly_parse(coefs: &[&str]) -> Result<Vec<BigInt>, ParseBigIntError> {
+    coefs.iter().map(|c| c.parse()).collect()
+}
diff --git a/vm/actor/src/util/mod.rs b/vm/actor/src/util/mod.rs
index d4e74e6fedce..c7cfb37c4f15 100644
--- a/vm/actor/src/util/mod.rs
+++ b/vm/actor/src/util/mod.rs
@@ -2,9 +2,11 @@
 // SPDX-License-Identifier: Apache-2.0, MIT
 
 mod balance_table;
+pub mod math;
 mod multimap;
 mod set;
 mod set_multimap;
+pub mod smooth;
 
 pub use self::balance_table::BalanceTable;
 pub use self::multimap::*;
diff --git a/vm/actor/src/util/smooth/alpha_beta_filter.rs b/vm/actor/src/util/smooth/alpha_beta_filter.rs
new file mode 100644
index 000000000000..82bf97cfcabb
--- /dev/null
+++ b/vm/actor/src/util/smooth/alpha_beta_filter.rs
@@ -0,0 +1,72 @@
+// Copyright 2020 ChainSafe Systems
+// SPDX-License-Identifier: Apache-2.0, MIT
+
+use crate::util::math::PRECISION;
+use clock::ChainEpoch;
+use encoding::tuple::*;
+use encoding::Cbor;
+use num_bigint::{bigint_ser, BigInt};
+
+#[derive(Default, Serialize_tuple, Deserialize_tuple)]
+pub struct FilterEstimate {
+    #[serde(with = "bigint_ser")]
+    pub position: BigInt,
+    #[serde(with = "bigint_ser")]
+    pub velocity: BigInt,
+}
+
+impl FilterEstimate {
+    /// Create a new filter estimate given two Q.0 format ints.
+    pub fn new(position: BigInt, velocity: BigInt) -> Self {
+        FilterEstimate {
+            position: position << PRECISION,
+            velocity: velocity << PRECISION,
+        }
+    }
+
+    /// Returns the Q.0 position estimate of the filter
+    pub fn estimate(&self) -> BigInt {
+        &self.position >> PRECISION
+    }
+
+    /// Extrapolate filter "position" delta epochs in the future.
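+    /// Note: the result is in Q.256 format, since `position` (already Q.128) is shifted
+    /// left by another `PRECISION` bits; callers shift right by `2 * PRECISION` for Q.0.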
+    pub fn extrapolate(&self, delta: ChainEpoch) -> BigInt {
+        let delta_t = BigInt::from(delta) << PRECISION;
+        let position = &self.position << PRECISION;
+        (&self.velocity * delta_t) + position
+    }
+}
+
+impl Cbor for FilterEstimate {}
+
+#[derive(Default)]
+pub struct AlphaBetaFilter {
+    alpha: BigInt,
+    beta: BigInt,
+    prev_est: FilterEstimate,
+}
+
+impl AlphaBetaFilter {
+    pub fn load_filter(prev_est: FilterEstimate, alpha: BigInt, beta: BigInt) -> Self {
+        AlphaBetaFilter {
+            alpha,
+            beta,
+            prev_est,
+        }
+    }
+
+    pub fn next_estimate(&self, obs: BigInt, epoch_delta: ChainEpoch) -> FilterEstimate {
+        let delta_t = BigInt::from(epoch_delta) << PRECISION;
+        let delta_x = (&delta_t * &self.prev_est.velocity) >> PRECISION;
+        let mut position = delta_x + &self.prev_est.position;
+
+        let obs = obs << PRECISION;
+        let residual = obs - &position;
+        let revision_x = (&self.alpha * &residual) >> PRECISION;
+        position += &revision_x;
+
+        let revision_v = (residual * &self.beta) / delta_t;
+        let velocity = revision_v + &self.prev_est.velocity;
+        FilterEstimate { position, velocity }
+    }
+}
diff --git a/vm/actor/src/util/smooth/mod.rs b/vm/actor/src/util/smooth/mod.rs
new file mode 100644
index 000000000000..92f8182307e4
--- /dev/null
+++ b/vm/actor/src/util/smooth/mod.rs
@@ -0,0 +1,8 @@
+// Copyright 2020 ChainSafe Systems
+// SPDX-License-Identifier: Apache-2.0, MIT
+
+mod alpha_beta_filter;
+mod smooth_func;
+
+pub use alpha_beta_filter::*;
+pub use smooth_func::*;
diff --git a/vm/actor/src/util/smooth/smooth_func.rs b/vm/actor/src/util/smooth/smooth_func.rs
new file mode 100644
index 000000000000..aee2df354dac
--- /dev/null
+++ b/vm/actor/src/util/smooth/smooth_func.rs
@@ -0,0 +1,93 @@
+// Copyright 2020 ChainSafe Systems
+// SPDX-License-Identifier: Apache-2.0, MIT
+
+use super::alpha_beta_filter::*;
+use crate::math::{poly_parse, poly_val, PRECISION};
+use clock::ChainEpoch;
+use num_bigint::BigInt;
+
+lazy_static! {
+    pub static ref NUM: Vec<BigInt> = poly_parse(&[
+        "261417938209272870992496419296200268025",
+        "7266615505142943436908456158054846846897",
+        "32458783941900493142649393804518050491988",
+        "17078670566130897220338060387082146864806",
+        "-35150353308172866634071793531642638290419",
+        "-20351202052858059355702509232125230498980",
+        "-1563932590352680681114104005183375350999",
+    ])
+    .unwrap();
+    pub static ref DENOM: Vec<BigInt> = poly_parse(&[
+        "49928077726659937662124949977867279384",
+        "2508163877009111928787629628566491583994",
+        "21757751789594546643737445330202599887121",
+        "53400635271583923415775576342898617051826",
+        "41248834748603606604000911015235164348839",
+        "9015227820322455780436733526367238305537",
+        "340282366920938463463374607431768211456",
+    ])
+    .unwrap();
+    pub static ref LN_2: BigInt = "235865763225513294137944142764154484399".parse().unwrap();
+    pub static ref EPSILON: BigInt = "302231454903657293676544".parse().unwrap();
+}
+
+fn get_bit_len(z: &BigInt) -> u64 {
+    z.bits()
+}
+
+/// Extrapolate the CumSumRatio given two filters.
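+/// Conceptually this is the integral of `num(t) / denom(t)` over `delta` epochs starting at
+/// `relative_start`, where each filter is read as the line `position + velocity * t` (Q.128).
+/// When the denominator's velocity is negligible (its square is below `EPSILON`), a midpoint
+/// approximation is used in place of the closed form involving `ln`.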
+pub fn extrapolated_cum_sum_of_ratio(
+    delta: ChainEpoch,
+    relative_start: ChainEpoch,
+    est_num: &FilterEstimate,
+    est_denom: &FilterEstimate,
+) -> BigInt {
+    let delta_t = BigInt::from(delta) << PRECISION;
+    let t0 = BigInt::from(relative_start) << PRECISION;
+
+    let pos_1 = &est_num.position;
+    let pos_2 = &est_denom.position;
+    let velo_1 = &est_num.velocity;
+    let velo_2 = &est_denom.velocity;
+
+    let squared_velo_2 = (velo_2 * velo_2) >> PRECISION;
+
+    if squared_velo_2 > *EPSILON {
+        let mut x2a = ((velo_2 * t0) >> PRECISION) + pos_2;
+        let mut x2b = ((velo_2 * &delta_t) >> PRECISION) + &x2a;
+        x2a = ln(&x2a);
+        x2b = ln(&x2b);
+
+        let m1 = ((&x2b - &x2a) * pos_1 * velo_2) >> PRECISION;
+
+        let m2_l = (&x2a - &x2b) * pos_2;
+        let m2_r = velo_2 * &delta_t;
+        let m2 = ((m2_l + m2_r) * velo_1) >> PRECISION;
+
+        return (m2 + m1) / squared_velo_2;
+    }
+
+    let half_delta = &delta_t >> 1;
+    let mut x1m = velo_1 * (t0 + half_delta);
+    x1m = (x1m >> PRECISION) + pos_1;
+
+    (x1m * delta_t) / pos_2
+}
+
+/// The natural log of Q.128 x.
+pub fn ln(z: &BigInt) -> BigInt {
+    let k: i64 = get_bit_len(z) as i64 - 1 - PRECISION as i64;
+
+    let x: BigInt = if k > 0 { z >> k } else { z << k.abs() };
+
+    (BigInt::from(k) * &*LN_2) + ln_between_one_and_two(x)
+}
+
+/// The natural log of x, specified in Q.128 format
+/// Should only use with 1 <= x <= 2
+/// Output is in Q.128 format.
+fn ln_between_one_and_two(x: BigInt) -> BigInt {
+    let num = poly_val(&NUM, &x) << PRECISION;
+    let denom = poly_val(&DENOM, &x);
+    num / denom
+}
diff --git a/vm/actor/tests/alpha_beta_filter_test.rs b/vm/actor/tests/alpha_beta_filter_test.rs
new file mode 100644
index 000000000000..7b130fd7fdce
--- /dev/null
+++ b/vm/actor/tests/alpha_beta_filter_test.rs
@@ -0,0 +1,211 @@
+// Copyright 2020 ChainSafe Systems
+// SPDX-License-Identifier: Apache-2.0, MIT
+
+use actor::math::{poly_parse, PRECISION};
+use actor::smooth::extrapolated_cum_sum_of_ratio as ecsor;
+use actor::smooth::*;
+use actor::EPOCHS_IN_DAY;
+use clock::ChainEpoch;
+use fil_types::StoragePower;
+use num_bigint::BigInt;
+use num_traits::sign::Signed;
+
+const ERR_BOUND: u64 = 350;
+
+// Millionths of difference between val1 and val2
+// (val1 - val2) / val1 * 1e6
+// all inputs Q.128, output Q.0
+fn per_million_error(val_1: &BigInt, val_2: &BigInt) -> BigInt {
+    let diff = (val_1 - val_2) << PRECISION;
+
+    let ratio = diff / val_1;
+    let million = BigInt::from(1_000_000) << PRECISION;
+
+    let diff_per_million = (ratio * million).abs();
+
+    diff_per_million >> 2 * PRECISION
+}
+
+fn iterative_cum_sum_of_ratio(
+    num: &FilterEstimate,
+    denom: &FilterEstimate,
+    t0: ChainEpoch,
+    delta: ChainEpoch,
+) -> BigInt {
+    let mut ratio = BigInt::from(0u8);
+
+    for i in 0..delta {
+        let num_epsilon = num.extrapolate(t0 + i); // Q.256
+        let denom_epsilon = denom.extrapolate(t0 + i) >> PRECISION; // Q.256 => Q.128
+        let mut epsilon = num_epsilon / denom_epsilon; // Q.256 / Q.128 => Q.128
+
+        if i != 0 && i != delta - 1 {
+            epsilon *= 2; // Q.128 * Q.0 => Q.128
+        }
+        ratio += epsilon;
+    }
+
+    ratio / 2
+}
+
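+// Asserts that the closed-form `extrapolated_cum_sum_of_ratio` and the iterative
+// trapezoid-rule reference above agree to within `err_bound` parts per million.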
+fn assert_err_bound(
+    num: &FilterEstimate,
+    denom: &FilterEstimate,
+    delta: ChainEpoch,
+    t0: ChainEpoch,
+    err_bound: BigInt,
+) {
+    let analytic = ecsor(delta, t0, num, denom);
+    let iterative = iterative_cum_sum_of_ratio(num, denom, t0, delta);
+    let actual_err = per_million_error(&analytic, &iterative);
+    assert!(
+        actual_err < err_bound,
+        "Values are {} and {}",
+        actual_err,
+        err_bound
+    );
+}
+
+// Returns an estimate with position val and velocity 0
+pub fn testing_constant_estimate(val: BigInt) -> FilterEstimate {
+    FilterEstimate::new(val, BigInt::from(0u8))
+}
+
+// Returns an estimate with position x and velocity v
+pub fn testing_estimate(x: BigInt, v: BigInt) -> FilterEstimate {
+    FilterEstimate::new(x, v)
+}
+
+#[test]
+fn test_natural_log() {
+    let ln_inputs: Vec<BigInt> = poly_parse(&[
+        "340282366920938463463374607431768211456", // Q.128 format of 1
+        "924990000000000000000000000000000000000", // Q.128 format of e (rounded up in 5th decimal place to handle truncation)
+        "34028236692093846346337460743176821145600000000000000000000", // Q.128 format of 100e18
+        "6805647338418769269267492148635364229120000000000000000000000", // Q.128 format of 2e22
+        "204169000000000000000000000000000000", // Q.128 format of 0.0006
+        "34028236692093846346337460743", // Q.128 format of 1e-10
+    ])
+    .unwrap();
+
+    let expected_ln_outputs: Vec<BigInt> = poly_parse(&[
+        "0", // Q.128 format of 0 = ln(1)
+        "340282366920938463463374607431768211456", // Q.128 format of 1 = ln(e)
+        "15670582109617661336106769654068947397831", // Q.128 format of 46.051... = ln(100e18)
+        "17473506083804940763855390762239996622013", // Q.128 format of 51.35... = ln(2e22)
+        "-2524410000000000000000000000000000000000", // Q.128 format of -7.41.. = ln(0.0006)
+        "-7835291054808830668053384827034473698915", // Q.128 format of -23.02.. = ln(1e-10)
+    ])
+    .unwrap();
+
+    assert_eq!(ln_inputs.len(), expected_ln_outputs.len());
+    let num_inputs = ln_inputs.len();
+
+    for i in 0..num_inputs {
+        let z = &ln_inputs[i];
+        let ln_of_z = ln(z);
+        let expected_z = &expected_ln_outputs[i];
+        assert_eq!(expected_z >> PRECISION, ln_of_z >> PRECISION);
+    }
+}
+
+#[test]
+fn constant_estimate() {
+    let num_estimate = testing_constant_estimate(BigInt::from(4_000_000));
+    let denom_estimate = testing_constant_estimate(BigInt::from(1));
+
+    // 4e6/1 over 1000 epochs should give us 4e9
+    let csr_1 = ecsor(1000, 0, &num_estimate, &denom_estimate) >> PRECISION;
+    assert_eq!(BigInt::from(4 * 10_i64.pow(9)), csr_1);
+
+    // if we change t0 nothing should change because velocity is 0
+    let csr_2 = ecsor(1000, 10_i64.pow(15), &num_estimate, &denom_estimate) >> PRECISION;
+
+    assert_eq!(csr_1, csr_2);
+
+    // 1e12 / 200e12 for 100 epochs should give ratio of 1/2
+    let num_estimate = testing_constant_estimate(BigInt::from(10_i64.pow(12)));
+    let denom_estimate = testing_constant_estimate(BigInt::from(200 * 10_i64.pow(12)));
+    let csr_frac = ecsor(100, 0, &num_estimate, &denom_estimate);
+
+    // If we didn't return Q.128 we'd just get zero
+    assert_eq!(BigInt::from(0u8), &csr_frac >> PRECISION);
+
+    // multiply by 10k and we'll get 5k
+    // note: this is a bit sensitive to input, lots of numbers approach from below
+    // (...99999) and so truncating division takes us off by one
+    let product = csr_frac * (BigInt::from(10_000) << PRECISION); // Q.256
+    assert_eq!(BigInt::from(5000), product >> 2 * PRECISION);
+}
+
+#[test]
+fn both_positive_velocity() {
+    let num_estimate = testing_estimate(BigInt::from(111), BigInt::from(12));
+    let denom_estimate = testing_estimate(BigInt::from(3456), BigInt::from(8));
+    assert_err_bound(
+        &num_estimate,
+        &denom_estimate,
+        10_000,
+        0,
+        BigInt::from(ERR_BOUND),
+    );
+}
+
+#[test]
+fn flipped_signs() {
+    let num_estimate = testing_estimate(BigInt::from(1_000_000), BigInt::from(-100));
+    let denom_estimate = testing_estimate(BigInt::from(70_000), BigInt::from(1000));
+    assert_err_bound(
+        &num_estimate,
+        &denom_estimate,
+        100_000,
+        0,
+        BigInt::from(ERR_BOUND),
+    );
+}
+
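+// Checks the error bound with inputs at the magnitudes the protocol actually sees:
+// money positions around 50 FIL against storage power from roughly 10 EiB up to
+// thousands of EiB, growing between one byte per epoch and about one EiB per day.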
+#[test]
+fn values_in_range() {
+    let tens_of_fil = BigInt::from(50 * 10_i128.pow(18));
+    let one_fil_per_sec = BigInt::from(25);
+    let four_fil_per_second = BigInt::from(100);
+
+    let slow_money = testing_estimate(tens_of_fil.clone(), one_fil_per_sec);
+    let fast_money = testing_estimate(tens_of_fil.clone(), four_fil_per_second);
+
+    let tens_of_ei_bs = StoragePower::from(10_i128.pow(19));
+    let thousands_of_ei_bs = StoragePower::from(2 * 10_i128.pow(22));
+
+    let one_byte_per_epoch_velocity = BigInt::from(1);
+    let ten_pi_bs_per_day_velocity =
+        BigInt::from(10 * 2_i128.pow(50)) / BigInt::from(EPOCHS_IN_DAY);
+    let one_ei_bs_per_day_velocity = BigInt::from(2_i128.pow(60)) / BigInt::from(EPOCHS_IN_DAY);
+
+    let delta = EPOCHS_IN_DAY;
+    let t0 = 0;
+    let err_bound = BigInt::from(ERR_BOUND);
+
+    let test_cases: Vec<(StoragePower, BigInt)> = vec![
+        (tens_of_ei_bs.clone(), one_byte_per_epoch_velocity.clone()),
+        (tens_of_ei_bs.clone(), ten_pi_bs_per_day_velocity.clone()),
+        (tens_of_ei_bs.clone(), one_ei_bs_per_day_velocity.clone()),
+        (
+            thousands_of_ei_bs.clone(),
+            one_byte_per_epoch_velocity.clone(),
+        ),
+        (
+            thousands_of_ei_bs.clone(),
+            ten_pi_bs_per_day_velocity.clone(),
+        ),
+        (
+            thousands_of_ei_bs.clone(),
+            one_ei_bs_per_day_velocity.clone(),
+        ),
+    ];
+
+    for test_case in test_cases {
+        let power = testing_estimate(test_case.0, test_case.1);
+        assert_err_bound(&slow_money, &power, delta, t0, err_bound.clone());
+        assert_err_bound(&fast_money, &power, delta, t0, err_bound.clone());
+    }
+}