diff --git a/Cargo.lock b/Cargo.lock index 672bf3e6831..2640eb1f027 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -23,7 +23,7 @@ version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47" dependencies = [ - "getrandom 0.2.4", + "getrandom 0.2.5", "once_cell", "version_check", ] @@ -48,9 +48,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.53" +version = "1.0.55" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94a45b455c14666b85fc40a019e8ab9eb75e3a124e05494f5397122bc9eb06e0" +checksum = "159bb86af3a200e19a068f4224eae4c8bb2d0fa054c7e5d1cacd5cef95e684cd" [[package]] name = "arrayvec" @@ -262,9 +262,9 @@ checksum = "c4872d67bab6358e59559027aa3b9157c53d9358c51423c17554809a8858e0f8" [[package]] name = "cc" -version = "1.0.72" +version = "1.0.73" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22a9137b95ea06864e018375b72adfb7db6e6f68cfc8df5a04d00288050485ee" +checksum = "2fff2a6927b3bb87f9595d67196a70493f627687a71d87a0d692242c33f58c11" [[package]] name = "cfg-if" @@ -371,15 +371,17 @@ dependencies = [ [[package]] name = "console-subscriber" -version = "0.1.2" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7a4aa62cefeef6d5a2bfcf638818b7950329a6c3ad919f89f04d60174f217f4" +checksum = "565a7dfea2d10dd0e5c57cc394d5d441b1910960d8c9211ed14135e0e6ec3a20" dependencies = [ "console-api", "crossbeam-channel", + "crossbeam-utils", "futures 0.3.21", "hdrhistogram", "humantime", + "prost-types", "serde 1.0.136", "serde_json", "thread_local", @@ -987,9 +989,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.4" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "418d37c8b1d42553c93648be529cb70f920d3baf8ef469b74b9638df426e0b4c" +checksum = "d39cd93900197114fa1fcb7ae84ca742095eed9442088988ae74fa744e930e77" dependencies = [ "cfg-if 1.0.0", "libc", @@ -1041,7 +1043,7 @@ dependencies = [ "indexmap", "slab", "tokio", - "tokio-util", + "tokio-util 0.6.9", "tracing", ] @@ -1054,6 +1056,15 @@ dependencies = [ "ahash", ] +[[package]] +name = "hashbrown" +version = "0.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8c21d40587b92fa6a6c6e3c1bdbf87d75511db5672f9c93175574b3a00df1758" +dependencies = [ + "ahash", +] + [[package]] name = "hdrhistogram" version = "7.5.0" @@ -1209,7 +1220,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "282a6247722caba404c065016bbfa522806e51714c34f5dfc3e4a3a46fcb4223" dependencies = [ "autocfg 1.1.0", - "hashbrown", + "hashbrown 0.11.2", "serde 1.0.136", ] @@ -1224,9 +1235,9 @@ dependencies = [ [[package]] name = "integer-encoding" -version = "3.0.2" +version = "3.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90c11140ffea82edce8dcd74137ce9324ec24b3cf0175fc9d7e29164da9915b8" +checksum = "0e85a1509a128c855368e135cffcde7eac17d8e1083f41e2b98c58bc1a5074be" [[package]] name = "ipnet" @@ -1326,7 +1337,7 @@ dependencies = [ "jsonrpc-server-utils", "log", "net2", - "parking_lot", + "parking_lot 0.11.2", "unicase", ] @@ -1340,7 +1351,7 @@ dependencies = [ "jsonrpc-core", "lazy_static", "log", - "parking_lot", + "parking_lot 0.11.2", "rand 0.7.3", "serde 1.0.136", ] @@ -1359,7 +1370,7 @@ dependencies = [ "log", "tokio", "tokio-stream", - "tokio-util", + "tokio-util 0.6.9", "unicase", ] @@ -1399,9 +1410,9 @@ 
dependencies = [ [[package]] name = "libc" -version = "0.2.118" +version = "0.2.119" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06e509672465a0504304aa87f9f176f2b2b716ed8fb105ebe5c02dc6dce96a94" +checksum = "1bf2e165bb3457c8e098ea76f3e3bc9db55f87aa90d52d0e6be741470916aaa4" [[package]] name = "libloading" @@ -1511,8 +1522,10 @@ dependencies = [ "massa_bootstrap", "massa_consensus_exports", "massa_consensus_worker", - "massa_execution", + "massa_execution_exports", + "massa_execution_worker", "massa_graph", + "massa_ledger", "massa_logging", "massa_models", "massa_network", @@ -1520,12 +1533,13 @@ dependencies = [ "massa_protocol_exports", "massa_protocol_worker", "massa_time", + "parking_lot 0.12.0", "pretty_assertions", "serde 1.0.136", "serde_json", "serial_test", "tokio", - "tokio-util", + "tokio-util 0.6.9", "tracing", "tracing-subscriber", ] @@ -1560,7 +1574,7 @@ dependencies = [ "jsonrpc-derive", "jsonrpc-http-server", "massa_consensus_exports", - "massa_execution", + "massa_execution_exports", "massa_graph", "massa_hash", "massa_models", @@ -1583,9 +1597,9 @@ dependencies = [ "futures 0.3.21", "lazy_static", "massa_consensus_exports", - "massa_execution", "massa_graph", "massa_hash", + "massa_ledger", "massa_logging", "massa_models", "massa_network", @@ -1593,6 +1607,7 @@ dependencies = [ "massa_signature", "massa_time", "num_enum", + "parking_lot 0.12.0", "pretty_assertions", "rand 0.8.5", "serde 1.0.136", @@ -1611,7 +1626,7 @@ dependencies = [ "displaydoc", "futures 0.3.21", "lazy_static", - "massa_execution", + "massa_execution_exports", "massa_graph", "massa_hash", "massa_logging", @@ -1644,7 +1659,7 @@ dependencies = [ "futures 0.3.21", "lazy_static", "massa_consensus_exports", - "massa_execution", + "massa_execution_exports", "massa_graph", "massa_hash", "massa_logging", @@ -1669,18 +1684,37 @@ dependencies = [ ] [[package]] -name = "massa_execution" +name = "massa_execution_exports" +version = "0.1.0" +dependencies = [ + "anyhow", + "displaydoc", + "massa_ledger", + "massa_models", + "massa_time", + "pretty_assertions", + "serde 1.0.136", + "serde_json", + "serial_test", + "thiserror", + "tracing", +] + +[[package]] +name = "massa_execution_worker" version = "0.1.0" dependencies = [ "anyhow", "displaydoc", "lazy_static", "massa-sc-runtime", + "massa_execution_exports", "massa_hash", + "massa_ledger", "massa_models", "massa_signature", "massa_time", - "parking_lot", + "parking_lot 0.12.0", "pretty_assertions", "rand 0.8.5", "rand_xoshiro", @@ -1700,7 +1734,7 @@ dependencies = [ "bitvec", "displaydoc", "lazy_static", - "massa_execution", + "massa_execution_exports", "massa_hash", "massa_logging", "massa_models", @@ -1737,6 +1771,28 @@ dependencies = [ "thiserror", ] +[[package]] +name = "massa_ledger" +version = "0.1.0" +dependencies = [ + "displaydoc", + "futures 0.3.21", + "lazy_static", + "massa_hash", + "massa_logging", + "massa_models", + "massa_signature", + "massa_time", + "num", + "pretty_assertions", + "serde 1.0.136", + "serde_json", + "serial_test", + "thiserror", + "tokio", + "tracing", +] + [[package]] name = "massa_logging" version = "0.1.0" @@ -1994,9 +2050,9 @@ dependencies = [ [[package]] name = "mio" -version = "0.7.14" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8067b404fe97c70829f082dec8bcf4f71225d7eaea1d8645349cb76fa06205cc" +checksum = "ba272f85fa0b41fc91872be579b3bbe0f56b792aa361a380eb669469f68dafb2" dependencies = [ "libc", "log", @@ -2199,7 +2255,7 @@ version = "0.5.6" 
source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0d992b768490d7fe0d8586d9b5745f6c49f557da6d81dc982b1d167ad4edbb21" dependencies = [ - "proc-macro-crate 1.1.0", + "proc-macro-crate 1.1.2", "proc-macro2", "quote", "syn", @@ -2221,7 +2277,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "40bec70ba014595f99f7aa110b84331ffe1ee9aece7fe6f387cc7e3ecda4d456" dependencies = [ "crc32fast", - "hashbrown", + "hashbrown 0.11.2", "indexmap", "memchr", ] @@ -2288,7 +2344,17 @@ checksum = "7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99" dependencies = [ "instant", "lock_api", - "parking_lot_core", + "parking_lot_core 0.8.5", +] + +[[package]] +name = "parking_lot" +version = "0.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87f5ec2493a61ac0506c0f4199f99070cbe83857b0337006a30f3e6719b8ef58" +dependencies = [ + "lock_api", + "parking_lot_core 0.9.1", ] [[package]] @@ -2305,6 +2371,19 @@ dependencies = [ "winapi", ] +[[package]] +name = "parking_lot_core" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28141e0cc4143da2443301914478dc976a61ffdb3f043058310c70df2fed8954" +dependencies = [ + "cfg-if 1.0.0", + "libc", + "redox_syscall", + "smallvec", + "windows-sys", +] + [[package]] name = "paw" version = "1.0.0" @@ -2448,9 +2527,9 @@ dependencies = [ [[package]] name = "proc-macro-crate" -version = "1.1.0" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ebace6889caf889b4d3f76becee12e90353f2b8c7d875534a71e5742f8f6f83" +checksum = "9dada8c9981fcf32929c3c0f0cd796a9284aca335565227ed88c83babb1d43dc" dependencies = [ "thiserror", "toml", @@ -2680,7 +2759,7 @@ version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7" dependencies = [ - "getrandom 0.2.4", + "getrandom 0.2.5", ] [[package]] @@ -2812,7 +2891,7 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "528532f3d801c87aec9def2add9ca802fe569e44a544afe633765267840abe64" dependencies = [ - "getrandom 0.2.4", + "getrandom 0.2.5", "redox_syscall", ] @@ -2924,12 +3003,12 @@ checksum = "18eb52b6664d331053136fcac7e4883bdc6f5fc04a6aab3b0f75eafb80ab88b3" [[package]] name = "rkyv" -version = "0.7.31" +version = "0.7.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "439655b8d657bcb28264da8e5380d55549e34ffc4149bea9e3521890a122a7bd" +checksum = "bf98e3e6c7ed44e474b454b1ebded3193ee5aba3428e29c55d59b1d65e49945e" dependencies = [ "bytecheck", - "hashbrown", + "hashbrown 0.12.0", "ptr_meta", "rend", "rkyv_derive", @@ -2938,9 +3017,9 @@ dependencies = [ [[package]] name = "rkyv_derive" -version = "0.7.31" +version = "0.7.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cded413ad606a80291ca84bedba137093807cf4f5b36be8c60f57a7e790d48f6" +checksum = "cc9940ec6a7c62b1d1f476f607c6caf0d7fbf74e43f77bc022143b878fcd3266" dependencies = [ "proc-macro2", "quote", @@ -2955,9 +3034,9 @@ checksum = "3e52c148ef37f8c375d49d5a73aa70713125b7f19095948a923f80afdeb22ec2" [[package]] name = "rust_decimal" -version = "1.21.0" +version = "1.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4214023b1223d02a4aad9f0bb9828317634a56530870a2eaf7200a99c0c10f68" +checksum = "d37baa70cf8662d2ba1c1868c5983dda16ef32b105cce41fb5c47e72936a90b3" dependencies = [ 
"arrayvec 0.7.2", "num-traits 0.2.14", @@ -3063,9 +3142,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.5" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0486718e92ec9a68fbed73bb5ef687d71103b142595b406835649bebd33f72c7" +checksum = "a4a3381e03edd24287172047536f20cabde766e2cd3e65e6b00fb3af51c4f38d" [[package]] name = "serde" @@ -3155,7 +3234,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e0bccbcf40c8938196944a3da0e133e031a33f4d6b72db3bda3cc556e361905d" dependencies = [ "lazy_static", - "parking_lot", + "parking_lot 0.11.2", "serial_test_derive", ] @@ -3220,7 +3299,7 @@ dependencies = [ "fxhash", "libc", "log", - "parking_lot", + "parking_lot 0.11.2", ] [[package]] @@ -3424,9 +3503,9 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "1.16.1" +version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c27a64b625de6d309e8c57716ba93021dccf1b3b5c97edd6d3dd2d2135afc0a" +checksum = "2af73ac49756f3f7c01172e34a23e5d0216f6c32333757c2c61feb2bbff5a5ee" dependencies = [ "bytes", "libc", @@ -3434,9 +3513,10 @@ dependencies = [ "mio", "num_cpus", "once_cell", - "parking_lot", + "parking_lot 0.12.0", "pin-project-lite", "signal-hook-registry", + "socket2", "tokio-macros", "tracing", "winapi", @@ -3498,6 +3578,20 @@ dependencies = [ "tokio", ] +[[package]] +name = "tokio-util" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "64910e1b9c1901aaf5375561e35b9c057d95ff41a44ede043a03e09279eabaf1" +dependencies = [ + "bytes", + "futures-core", + "futures-sink", + "log", + "pin-project-lite", + "tokio", +] + [[package]] name = "toml" version = "0.5.8" @@ -3542,7 +3636,7 @@ dependencies = [ "prost-derive", "tokio", "tokio-stream", - "tokio-util", + "tokio-util 0.6.9", "tower", "tower-layer", "tower-service", @@ -3564,9 +3658,9 @@ dependencies = [ [[package]] name = "tower" -version = "0.4.11" +version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5651b5f6860a99bd1adb59dbfe1db8beb433e73709d9032b413a77e2fb7c066a" +checksum = "9a89fd63ad6adf737582df5db40d286574513c69a11dac5214dc3b5603d6713e" dependencies = [ "futures-core", "futures-util", @@ -3576,8 +3670,7 @@ dependencies = [ "rand 0.8.5", "slab", "tokio", - "tokio-stream", - "tokio-util", + "tokio-util 0.7.0", "tower-layer", "tower-service", "tracing", @@ -3597,9 +3690,9 @@ checksum = "360dfd1d6d30e05fda32ace2c8c70e9c0a9da713275777f5a4dbb8a1893930c6" [[package]] name = "tracing" -version = "0.1.30" +version = "0.1.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d8d93354fe2a8e50d5953f5ae2e47a3fc2ef03292e7ea46e3cc38f549525fb9" +checksum = "f6c650a8ef0cd2dd93736f033d21cbd1224c5a967aa0c258d00fcf7dafef9b9f" dependencies = [ "cfg-if 1.0.0", "log", @@ -3652,9 +3745,9 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.3.8" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74786ce43333fcf51efe947aed9718fbe46d5c7328ec3f1029e818083966d9aa" +checksum = "9e0ab7bdc962035a87fba73f3acca9b8a8d0034c2e6f60b84aeaaddddc155dce" dependencies = [ "ansi_term", "sharded-slab", @@ -3864,9 +3957,9 @@ checksum = "3d958d035c4438e28c70e4321a2911302f10135ce78a9c7834c0cab4123d06a2" [[package]] name = "wasmer" -version = "2.2.0-rc1" +version = "2.2.0-rc2" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c823abda8b80153c938b3c7c55fcc74e2adfe40592732f7be01309dbe4aa53b" +checksum = "d23f1bf05a2f86ad775dfe73b1da1842a58ffa549cd711c083cf56520c8025c4" dependencies = [ "cfg-if 1.0.0", "indexmap", @@ -3890,9 +3983,9 @@ dependencies = [ [[package]] name = "wasmer-compiler" -version = "2.2.0-rc1" +version = "2.2.0-rc2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ef59c6661f8f739c7c56d85400711a6e8791a101e931902af87423280e3dd36" +checksum = "32158a07455808eccd655c3333d22a6a9eeb01c5891b750e6c7a66c43317b1fe" dependencies = [ "enumset", "loupe", @@ -3909,9 +4002,9 @@ dependencies = [ [[package]] name = "wasmer-compiler-cranelift" -version = "2.2.0-rc1" +version = "2.2.0-rc2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd21b6306ec69096ac39e93418b2338e5def5445522f098c5b0a38863059492d" +checksum = "b2afe1d581016703af0a6ac37511661b5ee5ceceee7bc387afcd4035ff36c2fc" dependencies = [ "cranelift-codegen", "cranelift-entity", @@ -3930,9 +4023,9 @@ dependencies = [ [[package]] name = "wasmer-compiler-singlepass" -version = "2.2.0-rc1" +version = "2.2.0-rc2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95c704e8c917e047e301862c76b93e2374631d3e6dc0b547c690f7ac38227344" +checksum = "e06040411d28aa7ba03fcf03971fbcd719b1020c5aa3eae2891587e892ef51ee" dependencies = [ "byteorder", "dynasm", @@ -3949,9 +4042,9 @@ dependencies = [ [[package]] name = "wasmer-derive" -version = "2.2.0-rc1" +version = "2.2.0-rc2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5932932ef9e4f185dee5c99848ffafe98c5f49e0e92d2da14686761e38345c67" +checksum = "4ecae9427e1f0ae664256f6620924f3eabb0983bdb18e1c36d15c15ba6ee35c9" dependencies = [ "proc-macro-error", "proc-macro2", @@ -3961,9 +4054,9 @@ dependencies = [ [[package]] name = "wasmer-engine" -version = "2.2.0-rc1" +version = "2.2.0-rc2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ed91396b15addd562d53fd92a016ba88c9b2ce4e0e0169a0e47cc67b739b37f" +checksum = "0823953c2775707cb0e151c2da97c4621193cf58308ee4f48b2df17d114c3d82" dependencies = [ "backtrace", "enumset", @@ -3983,9 +4076,9 @@ dependencies = [ [[package]] name = "wasmer-engine-dylib" -version = "2.2.0-rc1" +version = "2.2.0-rc2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d9ecf7885c571a26141f37a362caa97796b8904f423dc5645031e149c9ee802" +checksum = "75db66afeacaf2d12a3d6af916410bc41459e01477f6628d637b77f5785f7c8c" dependencies = [ "cfg-if 1.0.0", "enum-iterator", @@ -4008,9 +4101,9 @@ dependencies = [ [[package]] name = "wasmer-engine-universal" -version = "2.2.0-rc1" +version = "2.2.0-rc2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3bc4c936ebc050946dced5a3c4ef2bfe0b937c6e91323642c8e04552a82dfd9f" +checksum = "12a18da6797a46b5b35043796fd5d3451d384226e4531888ff1966dcb029a8ea" dependencies = [ "cfg-if 1.0.0", "enum-iterator", @@ -4028,9 +4121,9 @@ dependencies = [ [[package]] name = "wasmer-middlewares" -version = "2.2.0-rc1" +version = "2.2.0-rc2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc770d4c278498bbbabec4cb9db33362ce6349cc397f746f2c75b20b5448fe2f" +checksum = "09a98d5a2058c14e56f5f48f51cd14cf4c572223bb79fe5589f2a1ee820a4522" dependencies = [ "loupe", "wasmer", @@ -4040,9 +4133,9 @@ dependencies = [ [[package]] name = "wasmer-object" -version = "2.2.0-rc1" +version = "2.2.0-rc2" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5626938e5e062747b999493d3a36591d9ba2fb0b6fb8e6f474e3355440ff9a0" +checksum = "ffb88abea6a488ef64674c72c813564de7877a6e046d4b3f3e20c41302b82a10" dependencies = [ "object 0.28.3", "thiserror", @@ -4052,9 +4145,9 @@ dependencies = [ [[package]] name = "wasmer-types" -version = "2.2.0-rc1" +version = "2.2.0-rc2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78e5c8a51b65b946c122cecacdd496b24cc070f5465bc07ce83c62f830835814" +checksum = "30bffc68539057bc92c10b260175d36acb90926840b175939e663eafdd4fbdb6" dependencies = [ "indexmap", "loupe", @@ -4065,9 +4158,9 @@ dependencies = [ [[package]] name = "wasmer-vm" -version = "2.2.0-rc1" +version = "2.2.0-rc2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91ad82e6b8e33a63c5215e11ea2f37e9a0b8a81606a41c922f26f61cb862e7f9" +checksum = "215cc368b140d2eb3d3ab826dd01c602dc337c2374ac5d48f5652bd7eeac0d05" dependencies = [ "backtrace", "cc", @@ -4155,6 +4248,49 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" +[[package]] +name = "windows-sys" +version = "0.32.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3df6e476185f92a12c072be4a189a0210dcdcf512a1891d6dff9edb874deadc6" +dependencies = [ + "windows_aarch64_msvc", + "windows_i686_gnu", + "windows_i686_msvc", + "windows_x86_64_gnu", + "windows_x86_64_msvc", +] + +[[package]] +name = "windows_aarch64_msvc" +version = "0.32.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d8e92753b1c443191654ec532f14c199742964a061be25d77d7a96f09db20bf5" + +[[package]] +name = "windows_i686_gnu" +version = "0.32.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a711c68811799e017b6038e0922cb27a5e2f43a2ddb609fe0b6f3eeda9de615" + +[[package]] +name = "windows_i686_msvc" +version = "0.32.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "146c11bb1a02615db74680b32a68e2d61f553cc24c4eb5b4ca10311740e44172" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.32.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c912b12f7454c6620635bbff3450962753834be2a594819bd5e945af18ec64bc" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.32.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "504a2476202769977a040c6364301a3f65d0cc9e3fb08600b2bda150a0488316" + [[package]] name = "winreg" version = "0.7.0" diff --git a/Cargo.toml b/Cargo.toml index c3186451ec2..ffb6a3b9cf4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -5,7 +5,8 @@ members = [ "massa-client", "massa-consensus-exports", "massa-consensus-worker", - "massa-execution", + "massa-execution-exports", + "massa-execution-worker", "massa-graph", "massa-hash", "massa-logging", @@ -19,6 +20,7 @@ members = [ "massa-signature", "massa-time", "massa-wallet", + "massa-ledger" ] resolver = "2" diff --git a/massa-api/Cargo.toml b/massa-api/Cargo.toml index a4913d947a7..ee3ebf327b7 100644 --- a/massa-api/Cargo.toml +++ b/massa-api/Cargo.toml @@ -16,7 +16,7 @@ tokio = { version = "1.15", features = ["full"] } tracing = "0.1" # custom modules massa_consensus_exports = { path = "../massa-consensus-exports" } -massa_execution = { path = "../massa-execution" } +massa_execution_exports = { path = "../massa-execution-exports" } massa_graph = { path = "../massa-graph" } 
massa_hash = { path = "../massa-hash" } massa_models = { path = "../massa-models" } @@ -27,4 +27,4 @@ massa_signature = { path = "../massa-signature" } massa_time = { path = "../massa-time" } [features] -instrument = ["tokio/tracing", "massa_consensus_exports/instrument", "massa_execution/instrument", "massa_graph/instrument", "massa_models/instrument", "massa_network/instrument", "massa_pool/instrument", "massa_protocol_exports/instrument", "massa_time/instrument"] +instrument = ["tokio/tracing", "massa_consensus_exports/instrument", "massa_graph/instrument", "massa_models/instrument", "massa_network/instrument", "massa_pool/instrument", "massa_protocol_exports/instrument", "massa_time/instrument"] diff --git a/massa-api/src/error.rs b/massa-api/src/error.rs index 9c982d87b3d..f999f1cab57 100644 --- a/massa-api/src/error.rs +++ b/massa-api/src/error.rs @@ -2,7 +2,7 @@ use displaydoc::Display; use massa_consensus_exports::error::ConsensusError; -use massa_execution::ExecutionError; +use massa_execution_exports::ExecutionError; use massa_hash::MassaHashError; use massa_models::ModelsError; use massa_network::NetworkError; diff --git a/massa-api/src/lib.rs b/massa-api/src/lib.rs index 0ffcfaa3be5..b2083937432 100644 --- a/massa-api/src/lib.rs +++ b/massa-api/src/lib.rs @@ -8,7 +8,7 @@ use jsonrpc_core::{BoxFuture, IoHandler, Value}; use jsonrpc_derive::rpc; use jsonrpc_http_server::{CloseHandle, ServerBuilder}; use massa_consensus_exports::{ConsensusCommandSender, ConsensusConfig}; -use massa_execution::ExecutionCommandSender; +use massa_execution_exports::ExecutionController; use massa_models::api::{ APISettings, AddressInfo, BlockInfo, BlockSummary, EndorsementInfo, EventFilter, NodeStatus, OperationInfo, ReadOnlyExecution, TimeInterval, @@ -36,7 +36,7 @@ mod public; pub struct Public { pub consensus_command_sender: ConsensusCommandSender, - pub execution_command_sender: ExecutionCommandSender, + pub execution_controller: Box, pub pool_command_sender: PoolCommandSender, pub consensus_config: ConsensusConfig, pub api_settings: &'static APISettings, @@ -50,7 +50,7 @@ pub struct Public { pub struct Private { pub consensus_command_sender: ConsensusCommandSender, pub network_command_sender: NetworkCommandSender, - execution_command_sender: ExecutionCommandSender, + pub execution_controller: Box, pub consensus_config: ConsensusConfig, pub api_settings: &'static APISettings, pub stop_node_channel: mpsc::Sender<()>, @@ -116,8 +116,8 @@ pub trait Endpoints { #[rpc(name = "execute_read_only_request")] fn execute_read_only_request( &self, - _: ReadOnlyExecution, - ) -> BoxFuture>; + _: Vec, + ) -> BoxFuture, ApiError>>; /// Remove a vec of addresses used to stake. /// No confirmation to expect. 
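Aside (not part of the patch): the hunks above replace the async `ExecutionCommandSender` held by the `Public` and `Private` API states with a boxed `ExecutionController` trait object, and change `execute_read_only_request` to accept a batch (`Vec<ReadOnlyExecution>`) and return a `Vec<ExecuteReadOnlyResponse>`. The snippet below is a minimal, self-contained sketch of that pattern only; every type, field, and method name in it is a simplified stand-in, not the real massa-execution-exports API.

```rust
// Sketch of the pattern this diff adopts: API code holds a Box<dyn Trait> and
// calls execution synchronously, instead of sending commands over an async
// channel. All names here are simplified stand-ins for illustration.

#[derive(Debug)]
struct ReadOnlyRequest {
    max_gas: u64,
    bytecode: Vec<u8>,
}

#[derive(Debug)]
struct ReadOnlyResponse {
    consumed_gas: u64,
}

trait ExecutionController: Send + Sync {
    /// Execute a read-only request immediately and return its result.
    fn execute_readonly_request(&self, req: ReadOnlyRequest) -> ReadOnlyResponse;
}

/// Trivial implementation, just to show how the trait object is consumed.
struct DummyController;

impl ExecutionController for DummyController {
    fn execute_readonly_request(&self, req: ReadOnlyRequest) -> ReadOnlyResponse {
        // A real controller would run the bytecode in the VM; here we only
        // pretend the call costs a bounded amount of gas.
        ReadOnlyResponse {
            consumed_gas: req.max_gas.min(1_000) + req.bytecode.len() as u64,
        }
    }
}

/// Mirrors how `Public`/`Private` now store `Box<dyn ExecutionController>`.
struct ApiState {
    execution_controller: Box<dyn ExecutionController>,
}

fn main() {
    let api = ApiState {
        execution_controller: Box::new(DummyController),
    };
    // Batched calls mirror the new Vec<ReadOnlyExecution> endpoint signature.
    let reqs = vec![
        ReadOnlyRequest { max_gas: 500, bytecode: vec![0x00] },
        ReadOnlyRequest { max_gas: 2_000, bytecode: vec![0x01, 0x02] },
    ];
    let responses: Vec<ReadOnlyResponse> = reqs
        .into_iter()
        .map(|r| api.execution_controller.execute_readonly_request(r))
        .collect();
    println!("{responses:?}");
}
```

The patch itself continues with the private API implementation below.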
diff --git a/massa-api/src/private.rs b/massa-api/src/private.rs index 13ec0987ff9..f316a568d51 100644 --- a/massa-api/src/private.rs +++ b/massa-api/src/private.rs @@ -5,7 +5,7 @@ use crate::{Endpoints, Private, RpcServer, StopHandle, API}; use jsonrpc_core::BoxFuture; use jsonrpc_http_server::tokio::sync::mpsc; use massa_consensus_exports::{ConsensusCommandSender, ConsensusConfig}; -use massa_execution::ExecutionCommandSender; +use massa_execution_exports::ExecutionController; use massa_models::api::{ APISettings, AddressInfo, BlockInfo, BlockSummary, EndorsementInfo, EventFilter, NodeStatus, OperationInfo, ReadOnlyExecution, TimeInterval, @@ -24,7 +24,7 @@ impl API<Private> { pub fn new( consensus_command_sender: ConsensusCommandSender, network_command_sender: NetworkCommandSender, - execution_command_sender: ExecutionCommandSender, + execution_controller: Box<dyn ExecutionController>, api_settings: &'static APISettings, consensus_settings: ConsensusConfig, ) -> (Self, mpsc::Receiver<()>) { @@ -33,7 +33,7 @@ impl API<Private> { API(Private { consensus_command_sender, network_command_sender, - execution_command_sender, + execution_controller, consensus_config: consensus_settings, api_settings, stop_node_channel, @@ -76,20 +76,9 @@ impl Endpoints for API<Private> { fn execute_read_only_request( &self, - ReadOnlyExecution { - max_gas, - simulated_gas_price, - bytecode, - address, - }: ReadOnlyExecution, - ) -> BoxFuture<Result<ExecuteReadOnlyResponse, ApiError>> { - let cmd_sender = self.0.execution_command_sender.clone(); - let closure = async move || { - Ok(cmd_sender - .execute_read_only_request(max_gas, simulated_gas_price, bytecode, address) - .await?) - }; - Box::pin(closure()) + _: Vec<ReadOnlyExecution>, + ) -> BoxFuture<Result<Vec<ExecuteReadOnlyResponse>, ApiError>> { + crate::wrong_api::<Vec<ExecuteReadOnlyResponse>>() } fn remove_staking_addresses(&self, keys: Vec
) -> BoxFuture> { diff --git a/massa-api/src/public.rs b/massa-api/src/public.rs index 62560fa85de..5aebdbba705 100644 --- a/massa-api/src/public.rs +++ b/massa-api/src/public.rs @@ -5,8 +5,12 @@ use crate::{Endpoints, Public, RpcServer, StopHandle, API}; use futures::{stream::FuturesUnordered, StreamExt}; use jsonrpc_core::BoxFuture; use massa_consensus_exports::{ConsensusCommandSender, ConsensusConfig}; -use massa_execution::ExecutionCommandSender; +use massa_execution_exports::{ + ExecutionController, ExecutionStackElement, ReadOnlyExecutionRequest, +}; use massa_graph::{DiscardReason, ExportBlockStatus}; +use massa_models::api::SCELedgerInfo; +use massa_models::execution::ReadOnlyResult; use massa_models::{ api::{ APISettings, AddressInfo, BlockInfo, BlockInfoContent, BlockSummary, EndorsementInfo, @@ -23,14 +27,14 @@ use massa_models::{ }; use massa_network::{NetworkCommandSender, NetworkSettings}; use massa_pool::PoolCommandSender; -use massa_signature::PrivateKey; +use massa_signature::{derive_public_key, generate_random_private_key, PrivateKey}; use massa_time::MassaTime; use std::net::{IpAddr, SocketAddr}; impl API { pub fn new( consensus_command_sender: ConsensusCommandSender, - execution_command_sender: ExecutionCommandSender, + execution_controller: Box, api_settings: &'static APISettings, consensus_settings: ConsensusConfig, pool_command_sender: PoolCommandSender, @@ -50,7 +54,7 @@ impl API { network_command_sender, compensation_millis, node_id, - execution_command_sender, + execution_controller, }) } } @@ -77,9 +81,63 @@ impl Endpoints for API { fn execute_read_only_request( &self, - _: ReadOnlyExecution, - ) -> BoxFuture> { - crate::wrong_api::() + reqs: Vec, + ) -> BoxFuture, ApiError>> { + if reqs.len() > self.0.api_settings.max_arguments as usize { + let closure = + async move || Err(ApiError::TooManyArguments("too many arguments".into())); + return Box::pin(closure()); + } + + let mut res: Vec = Vec::with_capacity(reqs.len()); + for ReadOnlyExecution { + max_gas, + address, + simulated_gas_price, + bytecode, + } in reqs + { + let address = address.unwrap_or_else(|| { + // if no addr provided, use a random one + Address::from_public_key(&derive_public_key(&generate_random_private_key())) + }); + + // TODO: + // * set a maximum gas value for read-only executions to prevent attacks + // * stop mapping request and result, reuse execution's structures + // * remove async stuff + + // translate request + let req = ReadOnlyExecutionRequest { + max_gas, + simulated_gas_price, + bytecode, + call_stack: vec![ExecutionStackElement { + address, + coins: Default::default(), + owned_addresses: vec![address], + }], + }; + + // run + let result = self.0.execution_controller.execute_readonly_request(req); + + // map result + let result = ExecuteReadOnlyResponse { + executed_at: result.as_ref().map_or_else(|_| Slot::new(0, 0), |v| v.slot), + result: result.as_ref().map_or_else( + |err| ReadOnlyResult::Error(format!("readonly call failed: {}", err)), + |_| ReadOnlyResult::Ok, + ), + output_events: result.map_or_else(|_| Default::default(), |v| v.events.export()), + }; + + res.push(result); + } + + // return result + let closure = async move || Ok(res); + Box::pin(closure()) } fn remove_staking_addresses(&self, _: Vec
) -> BoxFuture> { @@ -356,11 +414,37 @@ impl Endpoints for API { let api_cfg = self.0.api_settings; let pool_command_sender = self.0.pool_command_sender.clone(); let compensation_millis = self.0.compensation_millis; - let sce_command_sender = self.0.execution_command_sender.clone(); - let closure = async move || { - if addresses.len() as u64 > api_cfg.max_arguments { - return Err(ApiError::TooManyArguments("too many arguments".into())); + + // todo make better use of SCE ledger info + + // map SCE ledger info and check for address length + let sce_ledger_info = if addresses.len() as u64 > api_cfg.max_arguments { + Err(ApiError::TooManyArguments("too many arguments".into())) + } else { + // get SCE ledger info + let mut sce_ledger_info: Map = + Map::with_capacity_and_hasher(addresses.len(), BuildMap::default()); + for addr in &addresses { + let active_entry = match self + .0 + .execution_controller + .get_final_and_active_ledger_entry(addr) + .1 + { + None => continue, + Some(v) => SCELedgerInfo { + balance: v.parallel_balance, + module: Some(v.bytecode), + datastore: v.datastore.into_iter().collect(), + }, + }; + sce_ledger_info.insert(*addr, active_entry); } + Ok(sce_ledger_info) + }; + + let closure = async move || { + let sce_ledger_info = sce_ledger_info?; let mut res = Vec::with_capacity(addresses.len()); @@ -383,11 +467,10 @@ impl Endpoints for API { // roll and balance info let states = cmd_sender.get_addresses_info(addresses.iter().copied().collect()); - let sce_info = sce_command_sender.get_sce_ledger_for_addresses(addresses.clone()); // wait for both simultaneously - let (next_draws, states, sce_info) = tokio::join!(next_draws, states, sce_info); - let (next_draws, mut states, sce_info) = (next_draws?, states?, sce_info?); + let (next_draws, states) = tokio::join!(next_draws, states); + let (next_draws, mut states) = (next_draws?, states?); // operations block and endorsement info let mut operations: Map> = @@ -469,7 +552,7 @@ impl Endpoints for API { .remove(&address) .ok_or(ApiError::NotFound)?, production_stats: state.production_stats, - sce_ledger_info: sce_info.get(&address).cloned().unwrap_or_default(), + sce_ledger_info: sce_ledger_info.get(&address).cloned().unwrap_or_default(), }) } Ok(res) @@ -514,18 +597,17 @@ impl Endpoints for API { original_operation_id, }: EventFilter, ) -> BoxFuture, ApiError>> { - let execution_command_sender = self.0.execution_command_sender.clone(); - let closure = async move || { - Ok(execution_command_sender - .get_filtered_sc_output_event( - start, - end, - emitter_address, - original_caller_address, - original_operation_id, - ) - .await?) 
- }; + // get events + let events = self.0.execution_controller.get_filtered_sc_output_event( + start, + end, + emitter_address, + original_caller_address, + original_operation_id, + ); + + // TODO get rid of the async part + let closure = async move || Ok(events); Box::pin(closure()) } } diff --git a/massa-bootstrap/Cargo.toml b/massa-bootstrap/Cargo.toml index 2b3f14e85b0..7b5a818911f 100644 --- a/massa-bootstrap/Cargo.toml +++ b/massa-bootstrap/Cargo.toml @@ -15,11 +15,12 @@ rand = "0.8" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" thiserror = "1.0" -tokio = { version = "1.15", features = ["full"] } -tracing = "0.1" -# custom modules +parking_lot = "0.12" +tokio = { version = "1.11", features = ["full"] } +tracing = "0.1"# custom modules massa_consensus_exports = { path = "../massa-consensus-exports" } -massa_execution = { path = "../massa-execution" } +massa_ledger = { path = "../massa-ledger" } +# custom modules massa_graph = { path = "../massa-graph" } massa_hash = { path = "../massa-hash" } massa_logging = { path = "../massa-logging" } @@ -33,6 +34,8 @@ massa_time = { path = "../massa-time" } bitvec = { version = "0.22", features = ["serde"] } pretty_assertions = "1.0" serial_test = "0.5" +massa_ledger = { path = "../massa-ledger", features=["testing"] } + [features] -instrument = ["tokio/tracing", "massa_consensus_exports/instrument", "massa_execution/instrument", "massa_graph/instrument", "massa_models/instrument", "massa_network/instrument", "massa_proof_of_stake_exports/instrument", "massa_time/instrument"] +instrument = ["tokio/tracing", "massa_consensus_exports/instrument", "massa_graph/instrument", "massa_models/instrument", "massa_network/instrument", "massa_proof_of_stake_exports/instrument", "massa_time/instrument"] diff --git a/massa-bootstrap/src/error.rs b/massa-bootstrap/src/error.rs index 3c4407a69c1..d65d984899a 100644 --- a/massa-bootstrap/src/error.rs +++ b/massa-bootstrap/src/error.rs @@ -3,8 +3,8 @@ use crate::messages::BootstrapMessage; use displaydoc::Display; use massa_consensus_exports::error::ConsensusError; -use massa_execution::ExecutionError; use massa_hash::MassaHashError; +use massa_ledger::LedgerError; use massa_network::NetworkError; use massa_time::TimeError; use thiserror::Error; @@ -30,8 +30,8 @@ pub enum BootstrapError { ConsensusError(#[from] ConsensusError), /// network error: {0} NetworkError(#[from] NetworkError), - /// execution error: {0} - ExecutionError(#[from] ExecutionError), + /// ledger error: {0} + LedgerError(#[from] LedgerError), /// join error: {0} JoinError(#[from] tokio::task::JoinError), /// missing private key file diff --git a/massa-bootstrap/src/lib.rs b/massa-bootstrap/src/lib.rs index d1d95c5f1ee..9e60ccd7e14 100644 --- a/massa-bootstrap/src/lib.rs +++ b/massa-bootstrap/src/lib.rs @@ -9,8 +9,8 @@ use error::BootstrapError; pub use establisher::Establisher; use futures::{stream::FuturesUnordered, StreamExt}; use massa_consensus_exports::ConsensusCommandSender; -use massa_execution::{BootstrapExecutionState, ExecutionCommandSender}; use massa_graph::BootstrapableGraph; +use massa_ledger::{FinalLedger, FinalLedgerBootstrapState}; use massa_logging::massa_trace; use massa_models::Version; use massa_network::{BootstrapPeers, NetworkCommandSender}; @@ -18,10 +18,12 @@ use massa_proof_of_stake_exports::ExportProofOfStake; use massa_signature::{PrivateKey, PublicKey}; use massa_time::MassaTime; use messages::BootstrapMessage; +use parking_lot::RwLock; use rand::{prelude::SliceRandom, rngs::StdRng, 
SeedableRng}; use settings::BootstrapSettings; use std::collections::{hash_map, HashMap}; use std::net::SocketAddr; +use std::sync::Arc; use std::{convert::TryInto, net::IpAddr}; use tokio::time::Instant; use tokio::{sync::mpsc, task::JoinHandle, time::sleep}; @@ -49,8 +51,8 @@ pub struct GlobalBootstrapState { /// list of network peers pub peers: Option, - /// state of the execution state - pub execution: Option, + /// state of the final ledger + pub final_ledger: Option, } /// Gets the state from a bootstrap server (internal private function) @@ -181,18 +183,18 @@ async fn get_state_internal( Ok(Ok(msg)) => return Err(BootstrapError::UnexpectedMessage(msg)), }; - // Fourth, get execution state + // Fourth, get final ledger // client.next() is not cancel-safe but we drop the whole client object if cancelled => it's OK - let execution = match tokio::time::timeout(cfg.read_timeout.into(), client.next()).await { + let final_ledger = match tokio::time::timeout(cfg.read_timeout.into(), client.next()).await { Err(_) => { return Err(std::io::Error::new( std::io::ErrorKind::TimedOut, - "bootstrap state read timed out", + "final ledger bootstrap state read timed out", ) .into()) } Ok(Err(e)) => return Err(e), - Ok(Ok(BootstrapMessage::ExecutionState { execution_state })) => execution_state, + Ok(Ok(BootstrapMessage::FinalLedgerState { ledger_state })) => ledger_state, Ok(Ok(msg)) => return Err(BootstrapError::UnexpectedMessage(msg)), }; @@ -203,7 +205,7 @@ async fn get_state_internal( graph: Some(graph), compensation_millis, peers: Some(peers), - execution: Some(execution), + final_ledger: Some(final_ledger), }) } @@ -274,7 +276,7 @@ impl BootstrapManager { pub async fn start_bootstrap_server( consensus_command_sender: ConsensusCommandSender, network_command_sender: NetworkCommandSender, - execution_command_sender: ExecutionCommandSender, + final_ledger: Arc>, bootstrap_settings: &'static BootstrapSettings, establisher: Establisher, private_key: PrivateKey, @@ -288,7 +290,7 @@ pub async fn start_bootstrap_server( BootstrapServer { consensus_command_sender, network_command_sender, - execution_command_sender, + final_ledger, establisher, manager_rx, bind, @@ -313,7 +315,7 @@ pub async fn start_bootstrap_server( struct BootstrapServer { consensus_command_sender: ConsensusCommandSender, network_command_sender: NetworkCommandSender, - execution_command_sender: ExecutionCommandSender, + final_ledger: Arc>, establisher: Establisher, manager_rx: mpsc::Receiver<()>, bind: SocketAddr, @@ -335,7 +337,7 @@ impl BootstrapServer { ExportProofOfStake, BootstrapableGraph, BootstrapPeers, - BootstrapExecutionState, + FinalLedgerBootstrapState, )> = None; let cache_timer = sleep(cache_timeout); let per_ip_min_interval = self.bootstrap_settings.per_ip_min_interval.to_duration(); @@ -409,14 +411,10 @@ impl BootstrapServer { // This is done to ensure that the execution bootstrap state is older than the consensus state. // If the consensus state snapshot is older than the execution state snapshot, // the execution final ledger will be in the future after bootstrap, which causes an inconsistency. 
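Aside (not part of the patch): the comment above explains why the ledger (execution) bootstrap snapshot must never be newer than the consensus snapshot. Below is an illustrative, self-contained sketch of that ordering; `LedgerSnapshot` and `ConsensusSnapshot` are hypothetical stand-ins, while the real code reads the `FinalLedger` under a `parking_lot::RwLock` and then asks consensus for its bootstrap state, as the hunk that follows shows.

```rust
// Sketch of the snapshot-ordering rule used by the bootstrap server: capture
// the ledger state first, then the consensus state, so that after bootstrap
// the final ledger can only lag behind consensus, never run ahead of it.
// Any gap between the two snapshots is resynchronized by the client, which
// replays its blockclique/final blocks into execution at startup.
use std::sync::{Arc, RwLock};

#[derive(Clone, Debug)]
struct LedgerSnapshot {
    final_period: u64,
}

#[derive(Clone, Debug)]
struct ConsensusSnapshot {
    latest_final_period: u64,
}

fn capture_bootstrap_data(
    ledger: &Arc<RwLock<LedgerSnapshot>>,
    consensus: &Arc<RwLock<ConsensusSnapshot>>,
) -> (LedgerSnapshot, ConsensusSnapshot) {
    // 1) snapshot the ledger first (hold the read lock only briefly)
    let ledger_state = ledger.read().unwrap().clone();
    // 2) only then snapshot consensus; it may have advanced in the meantime,
    //    which is fine, the reverse would not be
    let consensus_state = consensus.read().unwrap().clone();
    assert!(ledger_state.final_period <= consensus_state.latest_final_period);
    (ledger_state, consensus_state)
}

fn main() {
    let ledger = Arc::new(RwLock::new(LedgerSnapshot { final_period: 41 }));
    let consensus = Arc::new(RwLock::new(ConsensusSnapshot { latest_final_period: 42 }));
    let (l, c) = capture_bootstrap_data(&ledger, &consensus);
    println!("ledger at period {}, consensus at period {}", l.final_period, c.latest_final_period);
}
```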
- let get_peers = self.network_command_sender.get_bootstrap_peers(); - let get_pos_graph = self.consensus_command_sender.get_bootstrap_state(); - let execution_state = self.execution_command_sender.get_bootstrap_state(); - let (res_peers, res_execution) = tokio::join!(get_peers, execution_state); - let peer_boot = res_peers?; - let execution_state = res_execution?; - let (pos_boot, graph_boot) = get_pos_graph.await?; - bootstrap_data = Some((pos_boot, graph_boot, peer_boot, execution_state)); + let peer_boot = self.network_command_sender.get_bootstrap_peers().await?; + let res_ledger = self.final_ledger.read().get_bootstrap_state(); + let (pos_boot, graph_boot) = self.consensus_command_sender.get_bootstrap_state().await?; + bootstrap_data = Some((pos_boot, graph_boot, peer_boot, res_ledger)); cache_timer.set(sleep(cache_timeout)); } massa_trace!("bootstrap.lib.run.select.accept.cache_available", {}); @@ -451,7 +449,7 @@ async fn manage_bootstrap( data_pos: ExportProofOfStake, data_graph: BootstrapableGraph, data_peers: BootstrapPeers, - data_execution: BootstrapExecutionState, + ledger_state: FinalLedgerBootstrapState, private_key: PrivateKey, compensation_millis: i64, version: Version, @@ -501,13 +499,11 @@ async fn manage_bootstrap( ) .await?; - // Fourth, send execution state + // Fourth, send ledger state send_state_timeout( write_timeout, - server.send(messages::BootstrapMessage::ExecutionState { - execution_state: data_execution, - }), - "bootstrap execution state send timed out", + server.send(messages::BootstrapMessage::FinalLedgerState { ledger_state }), + "bootstrap ledger state send timed out", ) .await } diff --git a/massa-bootstrap/src/messages.rs b/massa-bootstrap/src/messages.rs index cbfe7585d30..07a62a4b8e1 100644 --- a/massa-bootstrap/src/messages.rs +++ b/massa-bootstrap/src/messages.rs @@ -1,7 +1,7 @@ // Copyright (c) 2022 MASSA LABS -use massa_execution::BootstrapExecutionState; use massa_graph::BootstrapableGraph; +use massa_ledger::FinalLedgerBootstrapState; use massa_models::{ DeserializeCompact, DeserializeVarInt, ModelsError, SerializeCompact, SerializeVarInt, Version, }; @@ -34,9 +34,9 @@ pub enum BootstrapMessage { graph: BootstrapableGraph, }, /// Execution state - ExecutionState { - /// execution state - execution_state: BootstrapExecutionState, + FinalLedgerState { + /// ledger state + ledger_state: FinalLedgerBootstrapState, }, } @@ -46,7 +46,7 @@ enum MessageTypeId { BootstrapTime = 0u32, Peers = 1u32, ConsensusState = 2u32, - ExecutionState = 3u32, + FinalLedgerState = 3u32, } impl SerializeCompact for BootstrapMessage { @@ -70,9 +70,9 @@ impl SerializeCompact for BootstrapMessage { res.extend(&pos.to_bytes_compact()?); res.extend(&graph.to_bytes_compact()?); } - BootstrapMessage::ExecutionState { execution_state } => { - res.extend(u32::from(MessageTypeId::ExecutionState).to_varint_bytes()); - res.extend(&execution_state.to_bytes_compact()?); + BootstrapMessage::FinalLedgerState { ledger_state } => { + res.extend(u32::from(MessageTypeId::FinalLedgerState).to_varint_bytes()); + res.extend(&ledger_state.to_bytes_compact()?); } } Ok(res) @@ -116,12 +116,12 @@ impl DeserializeCompact for BootstrapMessage { BootstrapMessage::ConsensusState { pos, graph } } - MessageTypeId::ExecutionState => { - let (execution_state, delta) = - BootstrapExecutionState::from_bytes_compact(&buffer[cursor..])?; + MessageTypeId::FinalLedgerState => { + let (ledger_state, delta) = + FinalLedgerBootstrapState::from_bytes_compact(&buffer[cursor..])?; cursor += delta; - 
BootstrapMessage::ExecutionState { execution_state } + BootstrapMessage::FinalLedgerState { ledger_state } } }; Ok((res, cursor)) diff --git a/massa-bootstrap/src/tests/scenarios.rs b/massa-bootstrap/src/tests/scenarios.rs index 6502139468f..c153daba294 100644 --- a/massa-bootstrap/src/tests/scenarios.rs +++ b/massa-bootstrap/src/tests/scenarios.rs @@ -3,28 +3,26 @@ use super::{ mock_establisher, tools::{ - bridge_mock_streams, get_boot_state, get_keys, get_peers, wait_consensus_command, - wait_network_command, + bridge_mock_streams, get_boot_state, get_keys, get_peers, + get_random_ledger_bootstrap_state, wait_consensus_command, wait_network_command, }, }; +use crate::BootstrapSettings; use crate::{ get_state, start_bootstrap_server, tests::tools::{ assert_eq_bootstrap_graph, assert_eq_thread_cycle_states, get_bootstrap_config, }, }; -use crate::{ - tests::tools::{assert_eq_exec, get_execution_state, wait_execution_command}, - BootstrapSettings, -}; use massa_consensus_exports::{commands::ConsensusCommand, ConsensusCommandSender}; -use massa_execution::{ExecutionCommand, ExecutionCommandSender}; +use massa_ledger::{test_exports::assert_eq_ledger_bootstrap_state, FinalLedger}; use massa_models::Version; use massa_network::{NetworkCommand, NetworkCommandSender}; use massa_signature::PrivateKey; use massa_time::MassaTime; +use parking_lot::RwLock; use serial_test::serial; -use std::str::FromStr; +use std::{str::FromStr, sync::Arc}; use tokio::sync::mpsc; lazy_static::lazy_static! { @@ -42,13 +40,17 @@ async fn test_bootstrap_server() { let (consensus_cmd_tx, mut consensus_cmd_rx) = mpsc::channel::(5); let (network_cmd_tx, mut network_cmd_rx) = mpsc::channel::(5); - let (execution_cmd_tx, mut execution_cmd_rx) = mpsc::channel::(5); + let ledger_bootstrap_state = get_random_ledger_bootstrap_state(2); + let final_ledger = Arc::new(RwLock::new(FinalLedger::from_bootstrap_state( + Default::default(), + ledger_bootstrap_state.clone(), + ))); let (bootstrap_establisher, bootstrap_interface) = mock_establisher::new(); let bootstrap_manager = start_bootstrap_server( ConsensusCommandSender(consensus_cmd_tx), NetworkCommandSender(network_cmd_tx), - ExecutionCommandSender(execution_cmd_tx), + final_ledger, bootstrap_settings, bootstrap_establisher, *private_key, @@ -104,7 +106,7 @@ async fn test_bootstrap_server() { bridge_mock_streams(remote_rw, bootstrap_rw).await; }); - // peers and execution are asked simultaneously + // intercept peers being asked let wait_peers = async move || { // wait for bootstrap to ask network for peers, send them let response = match wait_network_command(&mut network_cmd_rx, 1000.into(), |cmd| match cmd @@ -122,25 +124,10 @@ async fn test_bootstrap_server() { sent_peers }; - let wait_execution = async move || { - // wait for bootstrap to ask execution for bootstrap state, send it - let response = - match wait_execution_command(&mut execution_cmd_rx, 1000.into(), |cmd| match cmd { - ExecutionCommand::GetBootstrapState(resp) => Some(resp), - _ => None, - }) - .await - { - Some(resp) => resp, - None => panic!("timeout waiting for get boot execution command"), - }; - let sent_execution_state = get_execution_state(); - response.send(sent_execution_state.clone()).unwrap(); - sent_execution_state - }; + // wait for peers + let sent_peers = wait_peers().await; - // wait for peers and execution at the same time - let (sent_peers, sent_execution_state) = tokio::join!(wait_peers(), wait_execution()); + // here the ledger is queried directly. 
We don't intercept this // wait for bootstrap to ask consensus for bootstrap graph, send it let response = match wait_consensus_command(&mut consensus_cmd_rx, 1000.into(), |cmd| match cmd @@ -166,10 +153,6 @@ async fn test_bootstrap_server() { // wait for bridge bridge.await.expect("bridge join failed"); - // check states - assert_eq_thread_cycle_states(&sent_pos, &bootstrap_res.pos.unwrap()); - assert_eq_bootstrap_graph(&sent_graph, &bootstrap_res.graph.unwrap()); - // check peers assert_eq!( sent_peers.0, @@ -177,8 +160,15 @@ async fn test_bootstrap_server() { "mismatch between sent and received peers" ); - // check execution - assert_eq_exec(&sent_execution_state, &bootstrap_res.execution.unwrap()); + // check ledger + assert_eq_ledger_bootstrap_state( + &ledger_bootstrap_state, + &bootstrap_res.final_ledger.unwrap(), + ); + + // check states + assert_eq_thread_cycle_states(&sent_pos, &bootstrap_res.pos.unwrap()); + assert_eq_bootstrap_graph(&sent_graph, &bootstrap_res.graph.unwrap()); // stop bootstrap server bootstrap_manager diff --git a/massa-bootstrap/src/tests/tools.rs b/massa-bootstrap/src/tests/tools.rs index d45c2b09ccb..afa62a101d4 100644 --- a/massa-bootstrap/src/tests/tools.rs +++ b/massa-bootstrap/src/tests/tools.rs @@ -1,15 +1,14 @@ // Copyright (c) 2022 MASSA LABS -use crate::settings::BootstrapSettings; - use super::mock_establisher::Duplex; +use crate::settings::BootstrapSettings; use bitvec::prelude::*; use massa_consensus_exports::commands::ConsensusCommand; -use massa_execution::{BootstrapExecutionState, ExecutionCommand, SCELedger, SCELedgerEntry}; use massa_graph::{ export_active_block::ExportActiveBlock, ledger::LedgerSubset, BootstrapableGraph, }; use massa_hash::hash::Hash; +use massa_ledger::{test_exports::make_bootstrap_state, FinalLedgerBootstrapState, LedgerEntry}; use massa_models::{ clique::Clique, ledger_models::{LedgerChange, LedgerChanges, LedgerData}, @@ -23,14 +22,59 @@ use massa_signature::{ derive_public_key, generate_random_private_key, sign, PrivateKey, PublicKey, Signature, }; use massa_time::MassaTime; -use std::net::{IpAddr, Ipv4Addr, SocketAddr}; +use rand::Rng; use std::str::FromStr; +use std::{ + collections::BTreeMap, + net::{IpAddr, Ipv4Addr, SocketAddr}, +}; use tokio::io::AsyncReadExt; use tokio::io::AsyncWriteExt; use tokio::{sync::mpsc::Receiver, time::sleep}; pub const BASE_BOOTSTRAP_IP: IpAddr = IpAddr::V4(Ipv4Addr::new(169, 202, 0, 10)); +/// generates a small random number of bytes +fn get_some_random_bytes() -> Vec { + let mut rng = rand::thread_rng(); + (0usize..rng.gen_range(0..10)) + .map(|_| rand::random::()) + .collect() +} + +/// generates a random ledger entry +fn get_random_ledger_entry() -> LedgerEntry { + let mut rng = rand::thread_rng(); + let parallel_balance = Amount::from_raw(rng.gen::()); + let bytecode: Vec = get_some_random_bytes(); + let mut datastore = BTreeMap::new(); + for _ in 0usize..rng.gen_range(0..10) { + let key = Hash::compute_from(&get_some_random_bytes()); + let value = get_some_random_bytes(); + datastore.insert(key, value); + } + LedgerEntry { + parallel_balance, + bytecode, + datastore, + } +} + +/// generates a rendom bootstrap state for a final ledger +pub fn get_random_ledger_bootstrap_state(thread_count: u8) -> FinalLedgerBootstrapState { + let mut rng = rand::thread_rng(); + + let mut sorted_ledger = BTreeMap::new(); + for _ in 0usize..rng.gen_range(0..10) { + sorted_ledger.insert(get_random_address(), get_random_ledger_entry()); + } + + make_bootstrap_state( + Slot::new(rng.gen::(), 
rng.gen_range(0..thread_count)), + sorted_ledger, + ) +} + pub fn get_dummy_block_id(s: &str) -> BlockId { BlockId(Hash::compute_from(s.as_bytes())) } @@ -116,27 +160,6 @@ where } } -pub async fn wait_execution_command( - execution_command_receiver: &mut Receiver, - timeout: MassaTime, - filter_map: F, -) -> Option -where - F: Fn(ExecutionCommand) -> Option, -{ - let timer = sleep(timeout.into()); - tokio::pin!(timer); - loop { - tokio::select! { - cmd = execution_command_receiver.recv() => match cmd { - Some(orig_evt) => if let Some(res_evt) = filter_map(orig_evt) { return Some(res_evt); }, - _ => panic!("execution event channel died") - }, - _ = &mut timer => return None - } - } -} - pub async fn wait_network_command( network_command_receiver: &mut Receiver, timeout: MassaTime, @@ -292,96 +315,6 @@ pub fn assert_eq_bootstrap_graph(v1: &BootstrapableGraph, v2: &BootstrapableGrap } } -/// generates a sample BootstrapExecutionState with a few ledger entries: -/// -/// * final_slot: (period 14, thread 1) -/// * final_ledger: -/// * (random address 1): -/// * balance: 129 -/// * opt_module: None -/// * data: -/// * hash(bytes("key_testA")): bytes("test1_data") -/// * hash(bytes("key_testB")): bytes("test2_data") -/// * hash(bytes("key_testC")): bytes("test3_data") -/// * (random address 2): -/// * balance: 878 -/// * opt_module: Some(bytes("bytecodebytecode")) -/// * data: -/// * hash(bytes("key_testD")): bytes("test4_data") -/// * hash(bytes("key_testE")): bytes("test5_data") -pub fn get_execution_state() -> BootstrapExecutionState { - BootstrapExecutionState { - final_slot: Slot::new(14, 1), - final_ledger: SCELedger( - vec![ - ( - get_random_address(), - SCELedgerEntry { - balance: Amount::from_str("129").unwrap(), - opt_module: None, - data: vec![ - ( - massa_hash::hash::Hash::compute_from("key_testA".as_bytes()), - "test1_data".into(), - ), - ( - massa_hash::hash::Hash::compute_from("key_testB".as_bytes()), - "test2_data".into(), - ), - ( - massa_hash::hash::Hash::compute_from("key_testC".as_bytes()), - "test3_data".into(), - ), - ] - .into_iter() - .collect(), - }, - ), - ( - get_random_address(), - SCELedgerEntry { - balance: Amount::from_str("878").unwrap(), - opt_module: Some("bytecodebytecode".into()), - data: vec![ - ( - massa_hash::hash::Hash::compute_from("key_testD".as_bytes()), - "test4_data".into(), - ), - ( - massa_hash::hash::Hash::compute_from("key_testE".as_bytes()), - "test5_data".into(), - ), - ] - .into_iter() - .collect(), - }, - ), - ] - .into_iter() - .collect(), - ), - } -} - -pub fn assert_eq_exec(v1: &BootstrapExecutionState, v2: &BootstrapExecutionState) { - assert_eq!(v1.final_slot, v2.final_slot, "final slot mismatch"); - assert_eq!( - v1.final_ledger.0.len(), - v2.final_ledger.0.len(), - "ledger len mismatch" - ); - for k in v1.final_ledger.0.keys() { - let itm1 = v1.final_ledger.0.get(k).unwrap(); - let itm2 = v2.final_ledger.0.get(k).expect("ledger key mismatch"); - assert_eq!(itm1.balance, itm2.balance, "ledger balance mismatch"); - assert_eq!( - itm1.opt_module, itm2.opt_module, - "ledger opt_module mismatch" - ); - assert_eq!(itm1.data, itm2.data, "ledger data mismatch"); - } -} - pub fn get_boot_state() -> (ExportProofOfStake, BootstrapableGraph) { let private_key = generate_random_private_key(); let public_key = derive_public_key(&private_key); diff --git a/massa-client/src/cmds.rs b/massa-client/src/cmds.rs index 2505290e465..39be42bc8d6 100644 --- a/massa-client/src/cmds.rs +++ b/massa-client/src/cmds.rs @@ -758,7 +758,7 @@ impl Command { }; let 
bytecode = get_file_as_byte_vec(&path).await?; match client - .private + .public .execute_read_only_request(ReadOnlyExecution { max_gas, simulated_gas_price, diff --git a/massa-client/src/rpc.rs b/massa-client/src/rpc.rs index 1bb3eade793..6071a30bb79 100644 --- a/massa-client/src/rpc.rs +++ b/massa-client/src/rpc.rs @@ -117,12 +117,14 @@ impl RpcClient { &self, read_only_execution: ReadOnlyExecution, ) -> RpcResult { - self.call_method( + self.call_method::>, Vec>( "execute_read_only_request", - "ExecuteReadOnlyResponse", - read_only_execution, + "Vec", + vec![vec![read_only_execution]], ) - .await + .await? + .pop() + .ok_or_else(|| RpcError::Client("missing return value on execute_read_only_request".into())) } //////////////// diff --git a/massa-consensus-exports/Cargo.toml b/massa-consensus-exports/Cargo.toml index 20406caeb0c..a13d3dda362 100644 --- a/massa-consensus-exports/Cargo.toml +++ b/massa-consensus-exports/Cargo.toml @@ -22,7 +22,7 @@ tokio = { version = "1.15", features = ["full"] } tracing = "0.1" tempfile = "3.2" # custom modules -massa_execution = { path = "../massa-execution" } +massa_execution_exports = { path = "../massa-execution-exports" } massa_graph = { path = "../massa-graph" } massa_hash = { path = "../massa-hash" } massa_logging = { path = "../massa-logging" } @@ -39,6 +39,6 @@ serial_test = "0.5" massa_models = { path = "../massa-models", features = ["testing"] } [features] -instrument = ["tokio/tracing", "massa_execution/instrument", "massa_graph/instrument", "massa_models/instrument", "massa_pool/instrument", "massa_proof_of_stake_exports/instrument", "massa_protocol_exports/instrument", "massa_time/instrument"] +instrument = ["tokio/tracing", "massa_graph/instrument", "massa_models/instrument", "massa_pool/instrument", "massa_proof_of_stake_exports/instrument", "massa_protocol_exports/instrument", "massa_time/instrument"] sandbox = [] testing = [] diff --git a/massa-consensus-exports/src/consensus_controller.rs b/massa-consensus-exports/src/consensus_controller.rs index 8e261a5ad1d..2e155ed6ce1 100644 --- a/massa-consensus-exports/src/consensus_controller.rs +++ b/massa-consensus-exports/src/consensus_controller.rs @@ -1,6 +1,4 @@ // Copyright (c) 2022 MASSA LABS -use massa_execution::ExecutionEventReceiver; - use massa_graph::{BlockGraphExport, BootstrapableGraph, ExportBlockStatus, Status}; use massa_models::{ address::AddressState, api::EndorsementInfo, Endorsement, EndorsementId, OperationId, @@ -483,8 +481,7 @@ impl ConsensusEventReceiver { } pub struct ConsensusManager { - pub join_handle: - JoinHandle>, + pub join_handle: JoinHandle>, pub manager_tx: mpsc::Sender, } @@ -493,12 +490,12 @@ impl ConsensusManager { pub async fn stop( self, consensus_event_receiver: ConsensusEventReceiver, - ) -> Result<(ProtocolEventReceiver, ExecutionEventReceiver), ConsensusError> { + ) -> Result { massa_trace!("consensus.consensus_controller.stop", {}); drop(self.manager_tx); let _remaining_events = consensus_event_receiver.drain().await; - let (protocol_event_receiver, execution_event_receiver) = self.join_handle.await??; + let protocol_event_receiver = self.join_handle.await??; - Ok((protocol_event_receiver, execution_event_receiver)) + Ok(protocol_event_receiver) } } diff --git a/massa-consensus-exports/src/error.rs b/massa-consensus-exports/src/error.rs index 9fc31aec631..8fce805721e 100644 --- a/massa-consensus-exports/src/error.rs +++ b/massa-consensus-exports/src/error.rs @@ -1,6 +1,6 @@ // Copyright (c) 2022 MASSA LABS use displaydoc::Display; -use 
massa_execution::ExecutionError; +use massa_execution_exports::ExecutionError; use massa_graph::error::GraphError; use massa_models::ModelsError; use massa_proof_of_stake_exports::error::ProofOfStakeError; diff --git a/massa-consensus-exports/src/settings.rs b/massa-consensus-exports/src/settings.rs index 7d2c30d903e..a8c24266ed1 100644 --- a/massa-consensus-exports/src/settings.rs +++ b/massa-consensus-exports/src/settings.rs @@ -44,7 +44,7 @@ //! In unit test your allowed to use the `testing` feature flag that will //! use the default values from `/node_configuration/default_testing.rs` in the //! `massa-models` crate sources. -use massa_execution::{ExecutionCommandSender, ExecutionEventReceiver}; +use massa_execution_exports::ExecutionController; use massa_graph::{settings::GraphConfig, LedgerConfig}; use massa_models::Amount; use massa_pool::PoolCommandSender; @@ -303,10 +303,8 @@ pub struct ConsensusWorkerChannels { pub protocol_command_sender: ProtocolCommandSender, /// Associated protocol event listener. pub protocol_event_receiver: ProtocolEventReceiver, - /// Associated execution event listener. - pub execution_event_receiver: ExecutionEventReceiver, /// Execution command sender. - pub execution_command_sender: ExecutionCommandSender, + pub execution_controller: Box, /// Associated Pool command sender. pub pool_command_sender: PoolCommandSender, /// Channel receiving consensus commands. @@ -320,8 +318,7 @@ pub struct ConsensusWorkerChannels { /// Public channels associated to the consensus module. /// Execution & Protocol Sender/Receiver pub struct ConsensusChannels { - pub execution_command_sender: ExecutionCommandSender, - pub execution_event_receiver: ExecutionEventReceiver, + pub execution_controller: Box, pub protocol_command_sender: ProtocolCommandSender, pub protocol_event_receiver: ProtocolEventReceiver, pub pool_command_sender: PoolCommandSender, diff --git a/massa-consensus-worker/Cargo.toml b/massa-consensus-worker/Cargo.toml index 9d1556d55b3..dcf01a0b303 100644 --- a/massa-consensus-worker/Cargo.toml +++ b/massa-consensus-worker/Cargo.toml @@ -21,8 +21,8 @@ thiserror = "1.0" tokio = { version = "1.15", features = ["full"] } tracing = "0.1" # custom modules +massa_execution_exports = { path = "../massa-execution-exports" } massa_consensus_exports = { path = "../massa-consensus-exports" } -massa_execution = { path = "../massa-execution" } massa_graph = { path = "../massa-graph" } massa_hash = { path = "../massa-hash" } massa_logging = { path = "../massa-logging" } @@ -38,8 +38,9 @@ pretty_assertions = "1.0" serial_test = "0.5" tempfile = "3.2" massa_models = { path = "../massa-models", features = ["testing"] } +massa_execution_exports = { path = "../massa-execution-exports", features = ["testing"] } massa_consensus_exports = { path = "../massa-consensus-exports", features = ["testing"] } [features] -instrument = ["tokio/tracing", "massa_consensus_exports/instrument", "massa_execution/instrument", "massa_graph/instrument", "massa_models/instrument", "massa_pool/instrument", "massa_proof_of_stake_exports/instrument", "massa_protocol_exports/instrument", "massa_time/instrument"] +instrument = ["tokio/tracing", "massa_consensus_exports/instrument", "massa_graph/instrument", "massa_models/instrument", "massa_pool/instrument", "massa_proof_of_stake_exports/instrument", "massa_protocol_exports/instrument", "massa_time/instrument"] sandbox = ["massa_consensus_exports/sandbox"] diff --git a/massa-consensus-worker/src/consensus_worker.rs 
b/massa-consensus-worker/src/consensus_worker.rs index 3740544b9e9..1ecbf75ee46 100644 --- a/massa-consensus-worker/src/consensus_worker.rs +++ b/massa-consensus-worker/src/consensus_worker.rs @@ -6,7 +6,6 @@ use massa_consensus_exports::{ settings::ConsensusWorkerChannels, ConsensusConfig, }; -use massa_execution::ExecutionEventReceiver; use massa_graph::{BlockGraph, BlockGraphExport}; use massa_hash::hash::Hash; use massa_models::address::AddressState; @@ -145,24 +144,18 @@ impl ConsensusWorker { // notify execution module of current blockclique and final blocks // we need to do this because the bootstrap snapshots of the executor vs the consensus may not have been taken in sync // because the two modules run concurrently and out of sync - channels - .execution_command_sender - .update_blockclique( - block_db.clone_all_final_blocks(), - /* TODO DISABLED TEMPORARILY https://github.com/massalabs/massa/issues/2101 - block_db - .get_blockclique() - .into_iter() - .filter_map(|block_id| { - block_db - .get_active_block(&block_id) - .map(|a_block| (block_id, a_block.block.clone())) - }) - .collect(), - */ - Map::default(), - ) - .await?; + channels.execution_controller.update_blockclique_status( + block_db.clone_all_final_blocks(), + block_db + .get_blockclique() + .into_iter() + .filter_map(|block_id| { + block_db + .get_active_block(&block_id) + .map(|a_block| (block_id, a_block.block.clone())) + }) + .collect(), + ); Ok(ConsensusWorker { genesis_public_key, @@ -187,7 +180,7 @@ impl ConsensusWorker { /// Consensus work is managed here. /// It's mostly a tokio::select within a loop. - pub async fn run_loop(mut self) -> Result<(ProtocolEventReceiver, ExecutionEventReceiver)> { + pub async fn run_loop(mut self) -> Result { // signal initial state to pool if let Some(previous_slot) = self.previous_slot { self.channels @@ -277,10 +270,7 @@ impl ConsensusWorker { } } // end loop - Ok(( - self.channels.protocol_event_receiver, - self.channels.execution_event_receiver, - )) + Ok(self.channels.protocol_event_receiver) } async fn slot_tick(&mut self, next_slot_timer: &mut std::pin::Pin<&mut Sleep>) -> Result<()> { @@ -1218,9 +1208,8 @@ impl ConsensusWorker { }) .collect(); self.channels - .execution_command_sender - .update_blockclique(finalized_blocks, blockclique) - .await?; + .execution_controller + .update_blockclique_status(finalized_blocks, blockclique); } // Process new final blocks diff --git a/massa-consensus-worker/src/tests/mock_execution_controller.rs b/massa-consensus-worker/src/tests/mock_execution_controller.rs deleted file mode 100644 index 5ed7c782966..00000000000 --- a/massa-consensus-worker/src/tests/mock_execution_controller.rs +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright (c) 2022 MASSA LABS - -use massa_execution::{ - ExecutionCommand, ExecutionCommandSender, ExecutionEvent, ExecutionEventReceiver, -}; -use massa_models::{constants::CHANNEL_SIZE, prehash::Map, Block, BlockId}; -use massa_time::MassaTime; -use tokio::{ - sync::mpsc::{channel, unbounded_channel, Receiver, Sender, UnboundedSender}, - time::sleep, -}; - -#[allow(dead_code)] -pub struct MockExecutionController { - execution_command_sender: Sender, - execution_command_receiver: Receiver, - event_sender: UnboundedSender, -} - -impl MockExecutionController { - pub fn new() -> (Self, ExecutionCommandSender, ExecutionEventReceiver) { - let (event_sender, event_rx) = unbounded_channel::(); - let (execution_command_sender, execution_command_receiver) = - channel::(CHANNEL_SIZE); - ( - MockExecutionController { - 
execution_command_sender: execution_command_sender.clone(), - execution_command_receiver, - event_sender, - }, - ExecutionCommandSender(execution_command_sender), - ExecutionEventReceiver(event_rx), - ) - } - - #[allow(dead_code)] - pub async fn wait_command(&mut self, timeout: MassaTime, filter_map: F) -> Option - where - F: Fn(ExecutionCommand) -> Option, - { - let timer = sleep(timeout.into()); - tokio::pin!(timer); - loop { - tokio::select! { - cmd_opt = self.execution_command_receiver.recv() => match cmd_opt { - Some(orig_cmd) => if let Some(res_cmd) = filter_map(orig_cmd) { return Some(res_cmd); }, - None => panic!("Unexpected closure of execution command command channel."), - }, - _ = &mut timer => return None - } - } - } - - #[allow(dead_code)] - pub async fn blockclique_changed( - &mut self, - blockclique: Map, - finalized_blocks: Map, - ) { - self.execution_command_sender - .send(ExecutionCommand::BlockCliqueChanged { - blockclique, - finalized_blocks, - }) - .await - .expect("could not send execution event"); - } - - #[allow(dead_code)] - pub async fn ignore_commands_while( - &mut self, - mut future: FutureT, - ) -> FutureT::Output { - loop { - tokio::select!( - res = &mut future => return res, - cmd = self.execution_command_receiver.recv() => match cmd { - Some(_) => {}, - None => return future.await - } - ); - } - } -} diff --git a/massa-consensus-worker/src/tests/mod.rs b/massa-consensus-worker/src/tests/mod.rs index 7bc39d87030..ad016f8f2ef 100644 --- a/massa-consensus-worker/src/tests/mod.rs +++ b/massa-consensus-worker/src/tests/mod.rs @@ -1,7 +1,6 @@ // Copyright (c) 2022 MASSA LABS mod block_factory; -mod mock_execution_controller; mod mock_pool_controller; mod mock_protocol_controller; mod scenario_block_creation; diff --git a/massa-consensus-worker/src/tests/scenario_roll.rs b/massa-consensus-worker/src/tests/scenario_roll.rs index 4300230ae45..1e83721448f 100644 --- a/massa-consensus-worker/src/tests/scenario_roll.rs +++ b/massa-consensus-worker/src/tests/scenario_roll.rs @@ -2,6 +2,7 @@ use massa_consensus_exports::tools; use massa_consensus_exports::{settings::ConsensusChannels, ConsensusConfig}; +use massa_execution_exports::test_exports::MockExecutionController; use massa_models::{Address, Amount, BlockId, Slot}; use massa_pool::PoolCommand; use massa_protocol_exports::ProtocolCommand; @@ -27,8 +28,6 @@ use crate::{ use massa_models::ledger_models::LedgerData; use massa_models::prehash::Set; -use super::mock_execution_controller::MockExecutionController; - #[tokio::test] #[serial] async fn test_roll() { @@ -503,8 +502,7 @@ async fn test_roll_block_creation() { let (mut protocol_controller, protocol_command_sender, protocol_event_receiver) = MockProtocolController::new(); let (mut pool_controller, pool_command_sender) = MockPoolController::new(); - let (mut _execution_controller, execution_command_sender, execution_event_receiver) = - MockExecutionController::new(); + let (execution_controller, _execution_rx) = MockExecutionController::new_with_receiver(); let init_time: MassaTime = 1000.into(); cfg.genesis_timestamp = MassaTime::now().unwrap().saturating_add(init_time); @@ -514,8 +512,7 @@ async fn test_roll_block_creation() { start_consensus_controller( cfg.clone(), ConsensusChannels { - execution_command_sender, - execution_event_receiver, + execution_controller, protocol_command_sender: protocol_command_sender.clone(), protocol_event_receiver, pool_command_sender, @@ -786,16 +783,15 @@ async fn test_roll_deactivation() { let (mut protocol_controller, 
protocol_command_sender, protocol_event_receiver) = MockProtocolController::new(); let (mut pool_controller, pool_command_sender) = MockPoolController::new(); - let (mut _execution_controller, execution_command_sender, execution_event_receiver) = - MockExecutionController::new(); + let (execution_controller, _execution_rx) = MockExecutionController::new_with_receiver(); + cfg.genesis_timestamp = MassaTime::now().unwrap().saturating_add(300.into()); // launch consensus controller let (consensus_command_sender, _consensus_event_receiver, _consensus_manager) = start_consensus_controller( cfg.clone(), ConsensusChannels { - execution_command_sender, - execution_event_receiver, + execution_controller, protocol_command_sender: protocol_command_sender.clone(), protocol_event_receiver, pool_command_sender, diff --git a/massa-consensus-worker/src/tests/scenarios_ledger.rs b/massa-consensus-worker/src/tests/scenarios_ledger.rs index 0559c044278..5ffab528aa4 100644 --- a/massa-consensus-worker/src/tests/scenarios_ledger.rs +++ b/massa-consensus-worker/src/tests/scenarios_ledger.rs @@ -1,11 +1,11 @@ // Copyright (c) 2022 MASSA LABS use super::{ - mock_execution_controller::MockExecutionController, mock_pool_controller::{MockPoolController, PoolCommandSink}, mock_protocol_controller::MockProtocolController, }; use crate::start_consensus_controller; +use massa_execution_exports::test_exports::MockExecutionController; use super::tools::*; use massa_consensus_exports::ConsensusConfig; @@ -498,16 +498,14 @@ async fn test_ledger_update_when_a_batch_of_blocks_becomes_final() { MockProtocolController::new(); let (pool_controller, pool_command_sender) = MockPoolController::new(); let pool_sink = PoolCommandSink::new(pool_controller).await; - let (mut _execution_controller, execution_command_sender, execution_event_receiver) = - MockExecutionController::new(); + let (execution_controller, _execution_rx) = MockExecutionController::new_with_receiver(); // launch consensus controller let (consensus_command_sender, consensus_event_receiver, consensus_manager) = start_consensus_controller( cfg.clone(), ConsensusChannels { - execution_command_sender, - execution_event_receiver, + execution_controller, protocol_command_sender: protocol_command_sender.clone(), protocol_event_receiver, pool_command_sender, diff --git a/massa-consensus-worker/src/tests/scenarios_note_attack_attempt.rs b/massa-consensus-worker/src/tests/scenarios_note_attack_attempt.rs index 681546d5690..78789c917cb 100644 --- a/massa-consensus-worker/src/tests/scenarios_note_attack_attempt.rs +++ b/massa-consensus-worker/src/tests/scenarios_note_attack_attempt.rs @@ -2,12 +2,12 @@ use super::tools::*; use super::{ - mock_execution_controller::MockExecutionController, mock_pool_controller::{MockPoolController, PoolCommandSink}, mock_protocol_controller::MockProtocolController, }; use crate::start_consensus_controller; use massa_consensus_exports::ConsensusConfig; +use massa_execution_exports::test_exports::MockExecutionController; use massa_consensus_exports::settings::ConsensusChannels; use massa_hash::hash::Hash; @@ -30,16 +30,14 @@ async fn test_invalid_block_notified_as_attack_attempt() { MockProtocolController::new(); let (pool_controller, pool_command_sender) = MockPoolController::new(); let pool_sink = PoolCommandSink::new(pool_controller).await; - let (mut _execution_controller, execution_command_sender, execution_event_receiver) = - MockExecutionController::new(); + let (execution_controller, _execution_rx) = 
MockExecutionController::new_with_receiver(); // launch consensus controller let (consensus_command_sender, consensus_event_receiver, consensus_manager) = start_consensus_controller( cfg.clone(), ConsensusChannels { - execution_command_sender, - execution_event_receiver, + execution_controller, protocol_command_sender: protocol_command_sender.clone(), protocol_event_receiver, pool_command_sender, @@ -96,8 +94,7 @@ async fn test_invalid_header_notified_as_attack_attempt() { let (mut protocol_controller, protocol_command_sender, protocol_event_receiver) = MockProtocolController::new(); let (pool_controller, pool_command_sender) = MockPoolController::new(); - let (mut _execution_controller, execution_command_sender, execution_event_receiver) = - MockExecutionController::new(); + let (execution_controller, _execution_rx) = MockExecutionController::new_with_receiver(); let pool_sink = PoolCommandSink::new(pool_controller).await; // launch consensus controller @@ -105,8 +102,7 @@ async fn test_invalid_header_notified_as_attack_attempt() { start_consensus_controller( cfg.clone(), ConsensusChannels { - execution_command_sender, - execution_event_receiver, + execution_controller, protocol_command_sender: protocol_command_sender.clone(), protocol_event_receiver, pool_command_sender, diff --git a/massa-consensus-worker/src/tests/tools.rs b/massa-consensus-worker/src/tests/tools.rs index bb70ad191f4..f64d41f9431 100644 --- a/massa-consensus-worker/src/tests/tools.rs +++ b/massa-consensus-worker/src/tests/tools.rs @@ -2,7 +2,6 @@ #![allow(clippy::ptr_arg)] // this allow &Vec<..> as function argument type use super::{ - mock_execution_controller::MockExecutionController, mock_pool_controller::{MockPoolController, PoolCommandSink}, mock_protocol_controller::MockProtocolController, }; @@ -10,6 +9,7 @@ use crate::start_consensus_controller; use massa_consensus_exports::{ settings::ConsensusChannels, ConsensusCommandSender, ConsensusConfig, ConsensusEventReceiver, }; +use massa_execution_exports::test_exports::MockExecutionController; use massa_graph::{export_active_block::ExportActiveBlock, BlockGraphExport, BootstrapableGraph}; use massa_hash::hash::Hash; use massa_models::{ @@ -23,8 +23,12 @@ use massa_signature::{ derive_public_key, generate_random_private_key, sign, PrivateKey, PublicKey, Signature, }; use massa_time::MassaTime; -use std::str::FromStr; use std::{collections::HashSet, future::Future}; +use std::{ + str::FromStr, + sync::{Arc, Mutex}, + time::Duration, +}; use tracing::info; @@ -638,16 +642,22 @@ pub async fn consensus_pool_test( let (protocol_controller, protocol_command_sender, protocol_event_receiver) = MockProtocolController::new(); let (pool_controller, pool_command_sender) = MockPoolController::new(); - let (mut _execution_controller, execution_command_sender, execution_event_receiver) = - MockExecutionController::new(); + // for now, execution_rx is ignored: cique updates to Execution pile up and are discarded + let (execution_controller, execution_rx) = MockExecutionController::new_with_receiver(); + let stop_sinks = Arc::new(Mutex::new(false)); + let stop_sinks_clone = stop_sinks.clone(); + let execution_sink = std::thread::spawn(move || { + while !*stop_sinks_clone.lock().unwrap() { + let _ = execution_rx.recv_timeout(Duration::from_millis(500)); + } + }); // launch consensus controller let (consensus_command_sender, consensus_event_receiver, consensus_manager) = start_consensus_controller( cfg.clone(), ConsensusChannels { - execution_command_sender, - execution_event_receiver, 
+ execution_controller, protocol_command_sender: protocol_command_sender.clone(), protocol_event_receiver, pool_command_sender, @@ -682,6 +692,10 @@ pub async fn consensus_pool_test( .await .unwrap(); pool_sink.stop().await; + + // stop sinks + *stop_sinks.lock().unwrap() = true; + execution_sink.join().unwrap(); } /// Runs a consensus test, without passing a mock pool controller to it. @@ -700,8 +714,15 @@ where let (protocol_controller, protocol_command_sender, protocol_event_receiver) = MockProtocolController::new(); let (pool_controller, pool_command_sender) = MockPoolController::new(); - let (mut _execution_controller, execution_command_sender, execution_event_receiver) = - MockExecutionController::new(); + // for now, execution_rx is ignored: cique updates to Execution pile up and are discarded + let (execution_controller, execution_rx) = MockExecutionController::new_with_receiver(); + let stop_sinks = Arc::new(Mutex::new(false)); + let stop_sinks_clone = stop_sinks.clone(); + let execution_sink = std::thread::spawn(move || { + while !*stop_sinks_clone.lock().unwrap() { + let _ = execution_rx.recv_timeout(Duration::from_millis(500)); + } + }); let pool_sink = PoolCommandSink::new(pool_controller).await; // launch consensus controller @@ -709,8 +730,7 @@ where start_consensus_controller( cfg.clone(), ConsensusChannels { - execution_command_sender, - execution_event_receiver, + execution_controller, protocol_command_sender: protocol_command_sender.clone(), protocol_event_receiver, pool_command_sender, @@ -738,6 +758,10 @@ where .await .unwrap(); pool_sink.stop().await; + + // stop sinks + *stop_sinks.lock().unwrap() = true; + execution_sink.join().unwrap(); } pub fn get_cliques(graph: &BlockGraphExport, hash: BlockId) -> HashSet { diff --git a/massa-consensus-worker/src/tools.rs b/massa-consensus-worker/src/tools.rs index 86e32e8296e..4b8f6952e00 100644 --- a/massa-consensus-worker/src/tools.rs +++ b/massa-consensus-worker/src/tools.rs @@ -93,8 +93,7 @@ pub async fn start_consensus_controller( ConsensusWorkerChannels { protocol_command_sender: channels.protocol_command_sender, protocol_event_receiver: channels.protocol_event_receiver, - execution_event_receiver: channels.execution_event_receiver, - execution_command_sender: channels.execution_command_sender, + execution_controller: channels.execution_controller, pool_command_sender: channels.pool_command_sender, controller_command_rx: command_rx, controller_event_tx: event_tx, diff --git a/massa-execution-exports/Cargo.toml b/massa-execution-exports/Cargo.toml new file mode 100644 index 00000000000..2795458716b --- /dev/null +++ b/massa-execution-exports/Cargo.toml @@ -0,0 +1,29 @@ +[package] +name = "massa_execution_exports" +version = "0.1.0" +authors = ["Massa Labs "] +edition = "2021" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +anyhow = "1" +displaydoc = "0.2" +serde = { version = "1.0", features = ["derive"] } +serde_json = "1.0" +thiserror = "1.0" +tracing = { version = "0.1", features = [ + "max_level_debug", + "release_max_level_debug", +] } +# custom modules +massa_models = { path = "../massa-models" } +massa_time = { path = "../massa-time" } +massa_ledger = { path = "../massa-ledger" } + +[dev-dependencies] +pretty_assertions = "1.0" +serial_test = "0.5" + +[features] +testing = [] diff --git a/massa-execution-exports/src/config.rs b/massa-execution-exports/src/config.rs new file mode 100644 index 00000000000..0822734a8c9 --- /dev/null +++ 
b/massa-execution-exports/src/config.rs @@ -0,0 +1,24 @@ +// Copyright (c) 2022 MASSA LABS + +//! This module provides the structures used to provide configuration parameters to the Execution system + +use massa_time::MassaTime; + +/// Executio module configuration +#[derive(Debug, Clone)] +pub struct ExecutionConfig { + /// read-only execution request queue length + pub readonly_queue_length: usize, + /// maximum number of SC output events kept in cache + pub max_final_events: usize, + /// number of threads + pub thread_count: u8, + /// extra lag to add on the execution cursor to improve performance + pub cursor_delay: MassaTime, + /// time compensation in milliseconds + pub clock_compensation: i64, + /// genesis timestamp + pub genesis_timestamp: MassaTime, + /// period duration + pub t0: MassaTime, +} diff --git a/massa-execution-exports/src/controller_traits.rs b/massa-execution-exports/src/controller_traits.rs new file mode 100644 index 00000000000..a355fd2f3f2 --- /dev/null +++ b/massa-execution-exports/src/controller_traits.rs @@ -0,0 +1,87 @@ +// Copyright (c) 2022 MASSA LABS + +//! This module exports generic traits representing interfaces for interacting with the Execution worker + +use crate::types::ExecutionOutput; +use crate::types::ReadOnlyExecutionRequest; +use crate::ExecutionError; +use massa_ledger::LedgerEntry; +use massa_models::output_event::SCOutputEvent; +use massa_models::prehash::Map; +use massa_models::Address; +use massa_models::Block; +use massa_models::BlockId; +use massa_models::OperationId; +use massa_models::Slot; + +/// interface that communicates with the execution worker thread +pub trait ExecutionController: Send + Sync { + /// Updates blockclique status by signalling newly finalized blocks and the latest blockclique. + /// + /// # arguments + /// * finalized_blocks: newly finalized blocks + /// * blockclique: new blockclique + fn update_blockclique_status( + &self, + finalized_blocks: Map, + blockclique: Map, + ); + + /// Get execution events optionnally filtered by: + /// * start slot + /// * end slot + /// * emitter address + /// * original caller address + /// * operation id + fn get_filtered_sc_output_event( + &self, + start: Option, + end: Option, + emitter_address: Option
<Address>, + original_caller_address: Option<Address>
, + original_operation_id: Option, + ) -> Vec; + + /// Get a copy of a full ledger entry with its final and active values + /// + /// # return value + /// * (final_entry, active_entry) + fn get_final_and_active_ledger_entry( + &self, + addr: &Address, + ) -> (Option, Option); + + /// Execute read-only bytecode without causing modifications to the consensus state + /// + /// # arguments + /// * req: an instance of ReadOnlyExecutionRequest describing the parameters of the execution + /// + /// # returns + /// An instance of ExecutionOutput containing a summary of the effects of the execution, + /// or an error if the execution failed. + fn execute_readonly_request( + &self, + req: ReadOnlyExecutionRequest, + ) -> Result; + + /// Returns a boxed clone of self. + /// Useful to alow cloning Box. + fn clone_box(&self) -> Box; +} + +/// Allow cloning Box +/// Uses ExecutionController::clone_box internally +impl Clone for Box { + fn clone(&self) -> Box { + self.clone_box() + } +} + +/// Execution manager used to stop the execution thread +pub trait ExecutionManager { + /// Stop the execution thread + /// Note that we do not take self by value to consume it + /// because it is not allowed to move out of Box + /// This will improve if the `unsized_fn_params` feature stabilizes enough to be safely usable. + fn stop(&mut self); +} diff --git a/massa-execution-exports/src/error.rs b/massa-execution-exports/src/error.rs new file mode 100644 index 00000000000..750b8b81109 --- /dev/null +++ b/massa-execution-exports/src/error.rs @@ -0,0 +1,17 @@ +// Copyright (c) 2022 MASSA LABS + +//! this file defines all possible execution error categories + +use displaydoc::Display; +use thiserror::Error; + +/// Errors of the execution component. +#[non_exhaustive] +#[derive(Clone, Display, Error, Debug)] +pub enum ExecutionError { + /// Channel error + ChannelError(String), + + /// Runtime error: {0} + RuntimeError(String), +} diff --git a/massa-execution/src/types.rs b/massa-execution-exports/src/event_store.rs similarity index 54% rename from massa-execution/src/types.rs rename to massa-execution-exports/src/event_store.rs index a8a2272ba27..bd48ecafa83 100644 --- a/massa-execution/src/types.rs +++ b/massa-execution-exports/src/event_store.rs @@ -1,44 +1,70 @@ -use crate::sce_ledger::{FinalLedger, SCELedger, SCELedgerChanges, SCELedgerStep}; -use crate::BootstrapExecutionState; -use massa_models::api::SCELedgerInfo; -use massa_models::execution::ExecuteReadOnlyResponse; +// Copyright (c) 2022 MASSA LABS + +//! This module represents an event store allowing to store, search and retrieve +//! 
a config-limited number of execution-generated events + use massa_models::output_event::{SCOutputEvent, SCOutputEventId}; use massa_models::prehash::{Map, PreHashed, Set}; /// Define types used while executing block bytecodes -use massa_models::{Address, Amount, Block, BlockId, OperationId, Slot}; -use rand::SeedableRng; -use rand_xoshiro::Xoshiro256PlusPlus; +use massa_models::{Address, OperationId, Slot}; use std::cmp; use std::collections::hash_map::Entry; use std::collections::HashMap; -use std::hash::Hash; -use std::sync::{Condvar, Mutex}; -use std::{collections::VecDeque, sync::Arc}; -use tokio::sync::oneshot; use tracing::warn; -/// history of active executed steps -pub(crate) type StepHistory = VecDeque; - -/// A StepHistory item representing the consequences of a given execution step -#[derive(Debug, Clone)] -pub(crate) struct StepHistoryItem { - // step slot - pub slot: Slot, - - // optional block ID (or miss if None) at that slot - pub opt_block_id: Option, - - // list of SCE ledger changes caused by this execution step - pub ledger_changes: SCELedgerChanges, +#[inline] +/// Remove a given event_id from a `Set` +/// The Set is stored into a map `ctnr` at a `key` address. If +/// the Set resulted from the operation is empty, remove the entry +/// from the `ctnr` +/// +/// Used in `prune()` +fn remove_from_map( + ctnr: &mut Map>, + key: &T, + evt_id: &SCOutputEventId, +) { + match ctnr.get_mut(key) { + Some(ele) => { + ele.remove(evt_id); + if ele.is_empty() { + ctnr.remove(key); + } + } + _ => { + ctnr.remove(key); + } + } +} - /// events produced during this step - pub events: EventStore, +#[inline] +/// Remove a given event_id from a `Set` +/// The Set is stored into a Hashmap `ctnr` at a `key` address. If +/// the Set resulted from the operation is empty, remove the entry +/// from the `ctnr` +/// +/// Used in `prune()` +fn remove_from_hashmap( + ctnr: &mut HashMap>, + key: &T, + evt_id: &SCOutputEventId, +) { + match ctnr.get_mut(key) { + Some(ele) => { + ele.remove(evt_id); + if ele.is_empty() { + ctnr.remove(key); + } + } + _ => { + ctnr.remove(key); + } + } } /// Keep all events you need with some useful indexes #[derive(Default, Debug, Clone)] -pub(crate) struct EventStore { +pub struct EventStore { /// maps ids to events id_to_event: Map, @@ -247,248 +273,3 @@ impl EventStore { .collect() } } - -#[derive(Clone)] -pub struct StackElement { - /// called address - pub address: Address, - /// coins transferred to the target address during a call, - pub coins: Amount, - /// list of addresses created so far during excution, - pub owned_addresses: Vec
, -} - -#[derive(Clone)] -/// Stateful context, providing a context during the execution of a module -pub(crate) struct ExecutionContext { - /// final and active ledger at the current step - pub ledger_step: SCELedgerStep, - - /// max gas for this execution - pub max_gas: u64, - - /// gas price of the execution - pub gas_price: Amount, - - /// slot at which the execution happens - pub slot: Slot, - - /// counter of newly created addresses so far during this execution - pub created_addr_index: u64, - - /// counter of newly created events so far during this execution - pub created_event_index: u64, - - /// block ID, if one is present at this slot - pub opt_block_id: Option, - - /// block creator addr, if there is a block at this slot - pub opt_block_creator_addr: Option
, - - /// address call stack, most recent is at the back - pub stack: Vec, - - /// True if it's a read-only context - pub read_only: bool, - - /// geerated events during this execution, with multiple indexes - pub events: EventStore, - - /// Unsafe RNG state - pub unsafe_rng: Xoshiro256PlusPlus, - - /// origin operation id - pub origin_operation_id: Option, -} - -/// an active execution step target slot and block -#[derive(Clone)] -pub(crate) struct ExecutionStep { - /// slot at which the execution step will happen - pub slot: Slot, - - /// Some(BlockID, block), if a block is present at this slot, otherwise None - pub block: Option<(BlockId, Block)>, -} - -impl ExecutionContext { - pub fn new(ledger: SCELedger, ledger_at_slot: Slot) -> Self { - let final_ledger_slot = FinalLedger { - ledger, - slot: ledger_at_slot, - }; - ExecutionContext { - ledger_step: SCELedgerStep { - final_ledger_slot, - cumulative_history_changes: Default::default(), - caused_changes: Default::default(), - }, - max_gas: Default::default(), - stack: Default::default(), - gas_price: Default::default(), - slot: Slot::new(0, 0), - opt_block_id: Default::default(), - opt_block_creator_addr: Default::default(), - created_addr_index: Default::default(), - read_only: Default::default(), - created_event_index: Default::default(), - events: Default::default(), - unsafe_rng: Xoshiro256PlusPlus::from_seed([0u8; 32]), - origin_operation_id: Default::default(), - } - } -} - -impl From for SCELedgerChanges { - fn from(step: StepHistory) -> Self { - let mut ret = SCELedgerChanges::default(); - step.iter() - .for_each(|StepHistoryItem { ledger_changes, .. }| { - ret.apply_changes(ledger_changes); - }); - ret - } -} - -// Thread vm types: - -/// execution request -pub(crate) enum ExecutionRequest { - /// Runs a final step - RunFinalStep(ExecutionStep), - /// Runs an active step - #[allow(dead_code)] // TODO DISABLED TEMPORARILY #2101 - RunActiveStep(ExecutionStep), - /// Resets the VM to its final state - /// Run code in read-only mode - RunReadOnly { - /// The slot at which the execution will occur. - slot: Slot, - /// Maximum gas spend in execution. - max_gas: u64, - /// The simulated price of gas for the read-only execution. - simulated_gas_price: Amount, - /// The code to execute. - bytecode: Vec, - /// The channel used to send the result of execution. - result_sender: oneshot::Sender, - /// The address, or a default random one if none is provided, - /// which will simulate the sender of the operation. - address: Option
, - }, - /// Reset to latest final state - ResetToFinalState, - /// Get bootstrap state - GetBootstrapState { - response_tx: oneshot::Sender, - }, - /// Shutdown state, set by the worker to signal shutdown to the VM thread. - Shutdown, - /// Get events optionnally filtered by: - /// * start slot - /// * end slot - /// * emitter address - /// * original caller address - /// * operation id - GetSCOutputEvents { - start: Option, - end: Option, - emitter_address: Option
<Address>, - original_caller_address: Option<Address>
, - original_operation_id: Option, - response_tx: oneshot::Sender>, - }, - /// Get ledger entry for address - GetSCELedgerForAddresses { - response_tx: oneshot::Sender>, - addresses: Vec
, - }, -} - -pub(crate) type ExecutionQueue = Arc<(Mutex>, Condvar)>; - -/// Wrapping structure for an ExecutionSC and a sender -pub struct ExecutionData { - /// Sender address - pub sender_address: Address, - /// Smart contract bytecode. - pub bytecode: Vec, - /// The maximum amount of gas that the execution of the contract is allowed to cost. - pub max_gas: u64, - /// Extra coins that are spent by consensus and are available in the execution context of the contract. - pub coins: Amount, - /// The price per unit of gas that the caller is willing to pay for the execution. - pub gas_price: Amount, -} - -impl TryFrom<&massa_models::Operation> for ExecutionData { - type Error = anyhow::Error; - - fn try_from(operation: &massa_models::Operation) -> anyhow::Result { - match &operation.content.op { - massa_models::OperationType::ExecuteSC { - data, - max_gas, - gas_price, - coins, - } => Ok(ExecutionData { - bytecode: data.to_owned(), - sender_address: Address::from_public_key(&operation.content.sender_public_key), - max_gas: *max_gas, - gas_price: *gas_price, - coins: *coins, - }), - _ => anyhow::bail!("Conversion require an `OperationType::ExecuteSC`"), - } - } -} - -#[inline] -/// Remove a given event_id from a `Set` -/// The Set is stored into a map `ctnr` at a `key` address. If -/// the Set resulted from the operation is empty, remove the entry -/// from the `ctnr` -/// -/// Used in `prune()` -fn remove_from_map( - ctnr: &mut Map>, - key: &T, - evt_id: &SCOutputEventId, -) { - match ctnr.get_mut(key) { - Some(ele) => { - ele.remove(evt_id); - if ele.is_empty() { - ctnr.remove(key); - } - } - _ => { - ctnr.remove(key); - } - } -} - -#[inline] -/// Remove a given event_id from a `Set` -/// The Set is stored into a Hashmap `ctnr` at a `key` address. If -/// the Set resulted from the operation is empty, remove the entry -/// from the `ctnr` -/// -/// Used in `prune()` -fn remove_from_hashmap( - ctnr: &mut HashMap>, - key: &T, - evt_id: &SCOutputEventId, -) { - match ctnr.get_mut(key) { - Some(ele) => { - ele.remove(evt_id); - if ele.is_empty() { - ctnr.remove(key); - } - } - _ => { - ctnr.remove(key); - } - } -} diff --git a/massa-execution-exports/src/lib.rs b/massa-execution-exports/src/lib.rs new file mode 100644 index 00000000000..4906d59835d --- /dev/null +++ b/massa-execution-exports/src/lib.rs @@ -0,0 +1,58 @@ +// Copyright (c) 2022 MASSA LABS + +//! # Overview +//! +//! This crate provides all the facilities to interact with a running execution worker (massa-execution-worker crate) +//! that is in charge of executing operations in a virtual machine, +//! and applying the effects of the execution to a ledger. +//! +//! # Usage +//! +//! When an execution worker is launched to run in a separate thread for the whole duration of the process, +//! an instance of ExecutionManager is returned (see the documentation of start_execution_worker in massa-execution-worker), +//! as well as an instance of ExecutionController. +//! +//! The non-clonable ExecutionManager allows stopping the execution worker thread. +//! +//! The clonable ExecutionController allows sending updates on the latest blockclique changes to the execution worker +//! for it to keep track of them and execute the operations present in blocks. +//! It also allows various read-only queries such as executing bytecode +//! while ignoring all the changes it would cause to the consensus state (read-only execution), +//! or reading the state at the output of the executed blockclique blocks. +//! +//! # Architecture +//! +//! 
## config.rs +//! Contains configuration parameters for the execution system. +//! +//! ## controller_traits.rs +//! Defines the ExecutionManager and ExecutionController traits for interacting with the execution worker. +//! +//! ## erorrs.rs +//! Defines error types for the crate. +//! +//! ## event_store.rs +//! Defines an indexed, finite-size storage system for execution events. +//! +//! ## types.rs +//! Defines useful shared structures. +//! +//! ## Test exports +//! +//! When the crate feature `testing` is enabled, tooling useful for testing purposes is exported. +//! See test_exports/mod.rs for details. + +mod config; +mod controller_traits; +mod error; +mod event_store; +mod types; + +pub use config::ExecutionConfig; +pub use controller_traits::{ExecutionController, ExecutionManager}; +pub use error::ExecutionError; +pub use event_store::EventStore; +pub use types::{ExecutionOutput, ExecutionStackElement, ReadOnlyExecutionRequest}; + +#[cfg(feature = "testing")] +pub mod test_exports; diff --git a/massa-execution-exports/src/test_exports/config.rs b/massa-execution-exports/src/test_exports/config.rs new file mode 100644 index 00000000000..589ad497aee --- /dev/null +++ b/massa-execution-exports/src/test_exports/config.rs @@ -0,0 +1,22 @@ +// Copyright (c) 2022 MASSA LABS + +//! This file defines testing tools related to the config + +use massa_time::MassaTime; + +use crate::ExecutionConfig; + +/// Default value of ExecutionConfig used for tests +impl Default for ExecutionConfig { + fn default() -> Self { + ExecutionConfig { + readonly_queue_length: 10, + max_final_events: 10, + thread_count: 2, + cursor_delay: 0.into(), + clock_compensation: 0, + genesis_timestamp: MassaTime::now().unwrap(), + t0: 1000.into(), + } + } +} diff --git a/massa-execution-exports/src/test_exports/mock.rs b/massa-execution-exports/src/test_exports/mock.rs new file mode 100644 index 00000000000..d96fbd249d7 --- /dev/null +++ b/massa-execution-exports/src/test_exports/mock.rs @@ -0,0 +1,143 @@ +// Copyright (c) 2022 MASSA LABS + +//! This file defines utilities to mock the crate for testing purposes + +use crate::{ExecutionController, ExecutionError, ExecutionOutput, ReadOnlyExecutionRequest}; +use massa_ledger::LedgerEntry; +use massa_models::{ + output_event::SCOutputEvent, prehash::Map, Address, Block, BlockId, OperationId, Slot, +}; +use std::sync::{ + mpsc::{self, Receiver}, + Arc, Mutex, +}; + +/// List of possible messages coming from the mock. +/// Each variant corresponds to a unique method in ExecutionController, +/// and is emitted in a thread-safe way by the mock whenever that method is called. +/// Some variants wait for a response on their response_tx field, if present. +/// See the documentation of ExecutionController for details on parameters and return values. +#[derive(Clone)] +pub enum MockExecutionControllerMessage { + UpdateBlockcliqueStatus { + finalized_blocks: Map, + blockclique: Map, + }, + GetFilteredScOutputEvent { + start: Option, + end: Option, + emitter_address: Option
<Address>, + original_caller_address: Option<Address>
, + original_operation_id: Option, + response_tx: mpsc::Sender>, + }, + GetFullLedgerEntry { + addr: Address, + response_tx: mpsc::Sender<(Option, Option)>, + }, + ExecuteReadonlyRequest { + req: ReadOnlyExecutionRequest, + response_tx: mpsc::Sender>, + }, +} + +/// A mocked execution controller that will intercept calls on its methods +/// and emit corresponding MockExecutionControllerMessage messages through a MPSC in a thread-safe way. +/// For messages with a response_tx field, the mock will await a response through their response_tx channel +/// in order to simulate returning this value at the end of the call. +#[derive(Clone)] +pub struct MockExecutionController(Arc>>); + +impl MockExecutionController { + /// Create a new pair (mock execution controller, mpsc receiver for emitted messages) + /// Note that unbounded mpsc channels are used + pub fn new_with_receiver() -> ( + Box, + Receiver, + ) { + let (tx, rx) = mpsc::channel(); + ( + Box::new(MockExecutionController(Arc::new(Mutex::new(tx)))), + rx, + ) + } +} + +/// Implements all the methods of the ExecutionController trait, +/// but simply make them emit a MockExecutionControllerMessage. +/// If the message contains a response_tx, +/// a response from that channel is read and returned as return value. +/// See the documentation of ExecutionController for details on each function. +impl ExecutionController for MockExecutionController { + fn update_blockclique_status( + &self, + finalized_blocks: Map, + blockclique: Map, + ) { + self.0 + .lock() + .unwrap() + .send(MockExecutionControllerMessage::UpdateBlockcliqueStatus { + finalized_blocks, + blockclique, + }) + .unwrap(); + } + + fn get_filtered_sc_output_event( + &self, + start: Option, + end: Option, + emitter_address: Option
<Address>, + original_caller_address: Option<Address>
, + original_operation_id: Option, + ) -> Vec { + let (response_tx, response_rx) = mpsc::channel(); + self.0 + .lock() + .unwrap() + .send(MockExecutionControllerMessage::GetFilteredScOutputEvent { + start, + end, + emitter_address, + original_caller_address, + original_operation_id, + response_tx, + }) + .unwrap(); + response_rx.recv().unwrap() + } + + fn get_final_and_active_ledger_entry( + &self, + addr: &Address, + ) -> (Option, Option) { + let (response_tx, response_rx) = mpsc::channel(); + self.0 + .lock() + .unwrap() + .send(MockExecutionControllerMessage::GetFullLedgerEntry { + addr: *addr, + response_tx, + }) + .unwrap(); + response_rx.recv().unwrap() + } + + fn execute_readonly_request( + &self, + req: ReadOnlyExecutionRequest, + ) -> Result { + let (response_tx, response_rx) = mpsc::channel(); + self.0 + .lock() + .unwrap() + .send(MockExecutionControllerMessage::ExecuteReadonlyRequest { req, response_tx }) + .unwrap(); + response_rx.recv().unwrap() + } + + fn clone_box(&self) -> Box { + Box::new(self.clone()) + } +} diff --git a/massa-execution-exports/src/test_exports/mod.rs b/massa-execution-exports/src/test_exports/mod.rs new file mode 100644 index 00000000000..1dd974d43cb --- /dev/null +++ b/massa-execution-exports/src/test_exports/mod.rs @@ -0,0 +1,20 @@ +// Copyright (c) 2022 MASSA LABS + +//! This module exposes useful tooling for testing. +//! It is only compiled and exported by the crate if the "testing" feature is enabled. +//! +//! +//! # Architecture +//! +//! ## config.rs +//! Provides a default execution configuration for testing. +//! +//! ## mock.rs +//! Provides a mock of ExecutionController to simulate interactions +//! with an execution worker within tests. + +mod config; +mod mock; + +pub use config::*; +pub use mock::*; diff --git a/massa-execution-exports/src/types.rs b/massa-execution-exports/src/types.rs new file mode 100644 index 00000000000..6220bb0d398 --- /dev/null +++ b/massa-execution-exports/src/types.rs @@ -0,0 +1,56 @@ +// Copyright (c) 2022 MASSA LABS + +//! This file exports useful types used to interact with the execution worker + +use crate::event_store::EventStore; +use massa_ledger::LedgerChanges; +use massa_models::{Address, Amount, BlockId, Slot}; + +/// structure describing the output of a single execution +#[derive(Debug, Clone)] +pub struct ExecutionOutput { + // slot + pub slot: Slot, + // optional block ID at that slot (None if miss) + pub block_id: Option, + // ledger_changes caused by the execution step + pub ledger_changes: LedgerChanges, + // events emitted by the execution step + pub events: EventStore, +} + +/// structure describing a read-only execution request +#[derive(Debug, Clone)] +pub struct ReadOnlyExecutionRequest { + /// Maximum gas to spend in the execution. + pub max_gas: u64, + /// The simulated price of gas for the read-only execution. + pub simulated_gas_price: Amount, + /// The code to execute. + pub bytecode: Vec, + /// Call stack to simulate, older caller first + pub call_stack: Vec, +} + +/// Structure describing an element of the execution stack. +/// Every time a function is called from bytecode, +/// a new ExecutionStackElement is pushed at the top of the execution stack +/// to represent the local execution context of the called function, +/// instead of the caller's which should lie just below in the stack. 
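// Sketch: building a ReadOnlyExecutionRequest with a single simulated caller, using the
// types introduced above. Assumptions made only for illustration: `caller` is any Address,
// Amount::default() is the zero amount, and 1_000_000 is an arbitrary gas limit; none of
// these values come from the patch itself.
use massa_execution_exports::{ExecutionStackElement, ReadOnlyExecutionRequest};
use massa_models::{Address, Amount};

fn build_readonly_request(caller: Address, bytecode: Vec<u8>) -> ReadOnlyExecutionRequest {
    ReadOnlyExecutionRequest {
        max_gas: 1_000_000,
        simulated_gas_price: Amount::default(),
        bytecode,
        // oldest caller first; here a single stack element that owns only its own address,
        // matching the owned_addresses semantics described above
        call_stack: vec![ExecutionStackElement {
            address: caller,
            coins: Amount::default(),
            owned_addresses: vec![caller],
        }],
    }
}
// The resulting request would then be passed to ExecutionController::execute_readonly_request.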
+#[derive(Debug, Clone)] +pub struct ExecutionStackElement { + /// Called address + pub address: Address, + /// Coins transferred to the target address during the call + pub coins: Amount, + /// List of addresses owned by the current call, and on which the current call has write access. + /// This list should contain ExecutionStackElement::address in the sense that an address should have write access to itself. + /// This list should also contain all addresses created previously during the call + /// to allow write access on newly created addresses in order to set them up, + /// but only within the scope of the current stack element. + /// That way, only the current scope and neither its caller not the functions it calls gain this write access, + /// which is important for security. + /// Note that we use a Vec instead of a prehashed set to ensure order determinism, + /// the performance hit of linear search remains minimal because owned_addreses will always contain very few elements. + pub owned_addresses: Vec
, +} diff --git a/massa-execution/Cargo.toml b/massa-execution-worker/Cargo.toml similarity index 73% rename from massa-execution/Cargo.toml rename to massa-execution-worker/Cargo.toml index 3b466a3803a..482f7833127 100644 --- a/massa-execution/Cargo.toml +++ b/massa-execution-worker/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "massa_execution" +name = "massa_execution_worker" version = "0.1.0" authors = ["Massa Labs "] edition = "2021" @@ -10,25 +10,27 @@ edition = "2021" anyhow = "1.0" displaydoc = "0.2" lazy_static = "1.4" -parking_lot = { version = "0.11" } rand = "0.8" rand_xoshiro = "0.6" +parking_lot = "0.12" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" thiserror = "1.0" tokio = { version = "1.15", features = ["full"] } tracing = "0.1" # custom modules -massa_hash = { path = "../massa-hash" } +massa_execution_exports = { path = "../massa-execution-exports" } massa_models = { path = "../massa-models" } -massa_time = { path = "../massa-time" } +massa_hash = { path = "../massa-hash" } massa-sc-runtime = { git = "https://github.com/massalabs/massa-sc-runtime", tag = "v0.4.4" } massa_signature = { path = "../massa-signature" } +massa_time = { path = "../massa-time" } +massa_ledger = { path = "../massa-ledger" } [dev-dependencies] pretty_assertions = "1.0" serial_test = "0.5" tempfile = "3.2" -[features] -instrument = ["tokio/tracing", "massa_models/instrument", "massa_time/instrument"] +# custom modules with testing enabled +massa_execution_exports = { path = "../massa-execution-exports", features = ["testing"] } diff --git a/massa-execution-worker/src/context.rs b/massa-execution-worker/src/context.rs new file mode 100644 index 00000000000..7411352f272 --- /dev/null +++ b/massa-execution-worker/src/context.rs @@ -0,0 +1,409 @@ +// Copyright (c) 2022 MASSA LABS + +//! This module represents the context in which the VM executes bytecode. +//! It provides information such as the current call stack. +//! It also maintians a "speculative" ledger state which is a virtual ledger +//! as seen after applying everything that happened so far in the context. +//! More generally, the context acts only on its own state +//! and does not write anything persistent to the conensus state. + +use crate::speculative_ledger::SpeculativeLedger; +use massa_execution_exports::{ + EventStore, ExecutionError, ExecutionOutput, ExecutionStackElement, ReadOnlyExecutionRequest, +}; +use massa_hash::hash::Hash; +use massa_ledger::{FinalLedger, LedgerChanges}; +use massa_models::{Address, Amount, BlockId, OperationId, Slot}; +use parking_lot::RwLock; +use rand::SeedableRng; +use rand_xoshiro::Xoshiro256PlusPlus; +use std::sync::Arc; + +/// A snapshot taken from an ExecutionContext and that represents its current state. +/// The ExecutionContext state can then be restored later from this snapshot. 
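// Sketch: one way a test could drive the MockExecutionController defined above in
// massa-execution-exports/src/test_exports/mock.rs. The responder thread and the
// (None, None) ledger reply are arbitrary choices for illustration, not code from the patch.
use massa_execution_exports::test_exports::{
    MockExecutionController, MockExecutionControllerMessage,
};
use massa_execution_exports::ExecutionController;
use std::thread;

fn spawn_mock_execution() -> (Box<dyn ExecutionController>, thread::JoinHandle<()>) {
    let (controller, rx) = MockExecutionController::new_with_receiver();
    let handle = thread::spawn(move || {
        // recv() fails once every clone of the controller (and thus the sender) is dropped
        while let Ok(msg) = rx.recv() {
            match msg {
                MockExecutionControllerMessage::GetFullLedgerEntry { response_tx, .. } => {
                    // answer ledger queries as if the address did not exist
                    let _ = response_tx.send((None, None));
                }
                // discard blockclique updates and other queries
                _ => {}
            }
        }
    });
    (controller, handle)
}
// After the test drops `controller`, handle.join() lets the responder thread terminate.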
+pub(crate) struct ExecutionContextSnapshot { + // speculative ledger changes caused so far in the context + pub ledger_changes: LedgerChanges, + + /// counter of newly created addresses so far at this slot during this execution + pub created_addr_index: u64, + + /// counter of newly created events so far during this execution + pub created_event_index: u64, + + /// address call stack, most recent is at the back + pub stack: Vec, + + /// generated events during this execution, with multiple indexes + pub events: EventStore, + + /// Unsafe RNG state + pub unsafe_rng: Xoshiro256PlusPlus, +} + +/// An execution context that needs to be initialized before executing bytecode, +/// passed to the VM to interact with during bytecode execution (through ABIs), +/// and read after execution to gather results. +pub(crate) struct ExecutionContext { + /// speculative ledger state, + /// as seen after everything that happened so far in the context + speculative_ledger: SpeculativeLedger, + + /// max gas for this execution + pub max_gas: u64, + + /// gas price of the execution + pub gas_price: Amount, + + /// slot at which the execution happens + pub slot: Slot, + + /// counter of newly created addresses so far during this execution + pub created_addr_index: u64, + + /// counter of newly created events so far during this execution + pub created_event_index: u64, + + /// block ID, if one is present at the execution slot + pub opt_block_id: Option, + + /// address call stack, most recent is at the back + pub stack: Vec, + + /// True if it's a read-only context + pub read_only: bool, + + /// generated events during this execution, with multiple indexes + pub events: EventStore, + + /// Unsafe RNG state (can be predicted and manipulated) + pub unsafe_rng: Xoshiro256PlusPlus, + + /// operation id that originally caused this execution (if any) + pub origin_operation_id: Option, +} + +impl ExecutionContext { + /// Create a new empty ExecutionContext + /// This should only be used as a placeholder. + /// Further initialization is required before running bytecode + /// (see readonly and active_slot methods). + /// + /// # arguments + /// * final_ledger: thread-safe access to the final ledger. Note that this will be used only for reading, never for writing + /// * previous_changes: list of ledger changes that happened since the final ledger state and before the current execution + /// + /// # returns + /// A new (empty) ExecutionContext instance + pub(crate) fn new( + final_ledger: Arc>, + previous_changes: LedgerChanges, + ) -> Self { + ExecutionContext { + speculative_ledger: SpeculativeLedger::new(final_ledger, previous_changes), + max_gas: Default::default(), + gas_price: Default::default(), + slot: Slot::new(0, 0), + created_addr_index: Default::default(), + created_event_index: Default::default(), + opt_block_id: Default::default(), + stack: Default::default(), + read_only: Default::default(), + events: Default::default(), + unsafe_rng: Xoshiro256PlusPlus::from_seed([0u8; 32]), + origin_operation_id: Default::default(), + } + } + + /// Returns a snapshot containing the clone of the current execution state. + /// Note that the snapshot does not include slot-level information such as the slot number or block ID. 
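// Sketch of the rollback pattern these snapshot methods enable (it would live inside
// massa-execution-worker, since ExecutionContext and get_snapshot are pub(crate)).
// `attempt` is a hypothetical fallible sub-execution, not a function from the patch.
use crate::context::ExecutionContext;
use massa_execution_exports::ExecutionError;

fn with_rollback<F>(context: &mut ExecutionContext, attempt: F) -> Result<(), ExecutionError>
where
    F: FnOnce(&mut ExecutionContext) -> Result<(), ExecutionError>,
{
    // capture the speculative ledger changes, call stack, events and RNG state
    let snapshot = context.get_snapshot();
    if let Err(err) = attempt(context) {
        // undo everything the failed attempt changed in the context
        context.reset_to_snapshot(snapshot);
        return Err(err);
    }
    Ok(())
}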
+ pub(crate) fn get_snapshot(&self) -> ExecutionContextSnapshot { + ExecutionContextSnapshot { + ledger_changes: self.speculative_ledger.get_snapshot(), + created_addr_index: self.created_addr_index, + created_event_index: self.created_event_index, + stack: self.stack.clone(), + events: self.events.clone(), + unsafe_rng: self.unsafe_rng.clone(), + } + } + + /// Resets context to an existing snapshot + /// Note that the snapshot does not include slot-level information such as the slot number or block ID. + pub fn reset_to_snapshot(&mut self, snapshot: ExecutionContextSnapshot) { + self.speculative_ledger + .reset_to_snapshot(snapshot.ledger_changes); + self.created_addr_index = snapshot.created_addr_index; + self.created_event_index = snapshot.created_event_index; + self.stack = snapshot.stack; + self.events = snapshot.events; + self.unsafe_rng = snapshot.unsafe_rng; + } + + /// Create a new ExecutionContext for readonly execution + /// This should be used before performing a readonly execution. + /// + /// # arguments + /// * slot: slot at which the execution will happen + /// * req: parameters of the read only execution + /// * previous_changes: list of ledger changes that happened since the final ledger state and before this execution + /// * final_ledger: thread-safe access to the final ledger. Note that this will be used only for reading, never for writing + /// + /// # returns + /// A ExecutionContext instance ready for a read-only execution + pub(crate) fn readonly( + slot: Slot, + req: ReadOnlyExecutionRequest, + previous_changes: LedgerChanges, + final_ledger: Arc>, + ) -> Self { + // Deterministically seed the unsafe RNG to allow the bytecode to use it. + // Note that consecutive read-only calls for the same slot will get the same random seed. + + // Add the current slot to the seed to ensure different draws at every slot + let mut seed: Vec = slot.to_bytes_key().to_vec(); + // Add a marker to the seed indicating that we are in read-only mode + // to prevent random draw collisions with active executions + seed.push(0u8); // 0u8 = read-only + let seed = massa_hash::hash::Hash::compute_from(&seed).to_bytes(); + // We use Xoshiro256PlusPlus because it is very fast, + // has a period long enough to ensure no repetitions will ever happen, + // of decent quality (given the unsafe constraints) + // but not cryptographically secure (and that's ok because the internal state is exposed anyways) + let unsafe_rng = Xoshiro256PlusPlus::from_seed(seed); + + // return readonly context + ExecutionContext { + max_gas: req.max_gas, + gas_price: req.simulated_gas_price, + slot, + stack: req.call_stack, + read_only: true, + unsafe_rng, + ..ExecutionContext::new(final_ledger, previous_changes) + } + } + + /// Create a new ExecutionContext for executing an active slot. + /// This should be used before performing any executions at that slot. + /// + /// # arguments + /// * slot: slot at which the execution will happen + /// * opt_block_id: optional ID of the block at that slot + /// * previous_changes: list of ledger changes that happened since the final ledger state and before this execution + /// * final_ledger: thread-safe access to the final ledger. 
Note that this will be used only for reading, never for writing + /// + /// # returns + /// A ExecutionContext instance ready for a read-only execution + pub(crate) fn active_slot( + slot: Slot, + opt_block_id: Option, + previous_changes: LedgerChanges, + final_ledger: Arc>, + ) -> Self { + // Deterministically seed the unsafe RNG to allow the bytecode to use it. + + // Add the current slot to the seed to ensure different draws at every slot + let mut seed: Vec = slot.to_bytes_key().to_vec(); + // Add a marker to the seed indicating that we are in active mode + // to prevent random draw collisions with read-only executions + seed.push(1u8); // 1u8 = active + + // For more deterministic entropy, seed with the block ID if any + if let Some(block_id) = &opt_block_id { + seed.extend(block_id.to_bytes()); // append block ID + } + let seed = massa_hash::hash::Hash::compute_from(&seed).to_bytes(); + let unsafe_rng = Xoshiro256PlusPlus::from_seed(seed); + + // return active slot execution context + ExecutionContext { + slot, + opt_block_id, + unsafe_rng, + ..ExecutionContext::new(final_ledger, previous_changes) + } + } + + /// Moves the output of the execution out of the context, + /// resetting some context fields in the process. + /// + /// This is used to get the output of an execution before discarding the context. + /// Note that we are not taking self by value to consume it because the context is shared. + pub fn take_execution_output(&mut self) -> ExecutionOutput { + ExecutionOutput { + slot: self.slot, + block_id: std::mem::take(&mut self.opt_block_id), + ledger_changes: self.speculative_ledger.take(), + events: std::mem::take(&mut self.events), + } + } + + /// Gets the address at the top of the call stack, if any + pub fn get_current_address(&self) -> Result { + match self.stack.last() { + Some(addr) => Ok(addr.address), + _ => Err(ExecutionError::RuntimeError( + "failed to read current address: call stack empty".into(), + )), + } + } + + /// Gets the current list of owned addresses (top of the stack) + /// Ordering is conserved for determinism + pub fn get_current_owned_addresses(&self) -> Result, ExecutionError> { + match self.stack.last() { + Some(v) => Ok(v.owned_addresses.clone()), + None => Err(ExecutionError::RuntimeError( + "failed to read current owned addresses list: call stack empty".into(), + )), + } + } + + /// Gets the current call coins + pub fn get_current_call_coins(&self) -> Result { + match self.stack.last() { + Some(v) => Ok(v.coins), + None => Err(ExecutionError::RuntimeError( + "failed to read current call coins: call stack empty".into(), + )), + } + } + + /// Gets the addresses from the call stack (last = top of the stack) + pub fn get_call_stack(&self) -> Vec
{ + self.stack.iter().map(|v| v.address).collect() + } + + /// Checks whether the context currently grants write access to a given address + pub fn has_write_rights_on(&self, addr: &Address) -> bool { + self.stack + .last() + .map_or(false, |v| v.owned_addresses.contains(addr)) + } + + /// Creates a new smart contract address with initial bytecode, and returns this address + pub fn create_new_sc_address(&mut self, bytecode: Vec) -> Result { + // TODO: collision problem: + // prefix addresses to know if they are SCs or normal, + // otherwise people can already create new accounts by sending coins to the right hash + // they won't have ownership over it but this can still be unexpected + // to have initial extra coins when an address is created + // It may also induce that for read-only calls. + // https://github.com/massalabs/massa/issues/2331 + + // deterministically generate a new unique smart contract address + + // create a seed from the current slot + let mut data: Vec = self.slot.to_bytes_key().to_vec(); + // add the index of the created address within this context to the seed + data.append(&mut self.created_addr_index.to_be_bytes().to_vec()); + // add a flag on whether we are in read-only mode or not to the seed + // this prevents read-only contexts from shadowing existing addresses + if self.read_only { + data.push(0u8); + } else { + data.push(1u8); + } + // hash the seed to get a unique address + let address = Address(massa_hash::hash::Hash::compute_from(&data)); + + // add this address with its bytecode to the speculative ledger + self.speculative_ledger + .create_new_sc_address(address, bytecode)?; + + // add the address to owned addresses + // so that the current call has write access to it + // from now and for its whole duration, + // in order to allow initializing newly created ledger entries. + match self.stack.last_mut() { + Some(v) => { + v.owned_addresses.push(address); + } + None => { + return Err(ExecutionError::RuntimeError( + "owned addresses not found in context stack".into(), + )) + } + }; + + // increment the address creation counter at this slot + self.created_addr_index += 1; + + Ok(address) + } + + /// gets the bytecode of an address if it exists in the speculative ledger, or returns None + pub fn get_bytecode(&self, address: &Address) -> Option> { + self.speculative_ledger.get_bytecode(address) + } + + /// gets the data from a datastore entry of an address if it exists in the speculative ledger, or returns None + pub fn get_data_entry(&self, address: &Address, key: &Hash) -> Option> { + self.speculative_ledger.get_data_entry(address, key) + } + + /// checks if a datastore entry exists in the speculative ledger + pub fn has_data_entry(&self, address: &Address, key: &Hash) -> bool { + self.speculative_ledger.has_data_entry(address, key) + } + + /// gets the bytecode of an address if it exists in the speculative ledger, or returns None + pub fn get_parallel_balance(&self, address: &Address) -> Option { + self.speculative_ledger.get_parallel_balance(address) + } + + /// Sets a datastore entry for an address in the speculative ledger. + /// Fail if the address is absent from the ledger. + /// The datastore entry is created if it is absent for that address. 
+ /// + /// # Arguments + /// * address: the address of the ledger entry + /// * key: the datastore key + /// * data: the data to insert + pub fn set_data_entry( + &mut self, + address: &Address, + key: Hash, + data: Vec, + ) -> Result<(), ExecutionError> { + // check access right + if !self.has_write_rights_on(address) { + return Err(ExecutionError::RuntimeError(format!( + "writing in the datastore of address {} is not allowed in this context", + address + ))); + } + + // set data entry + self.speculative_ledger.set_data_entry(address, key, data) + } + + /// Transfers parallel coins from one address to another. + /// No changes are retained in case of failure. + /// Spending is only allowed from existing addresses we have write acess on + /// + /// # parameters + /// * from_addr: optional spending address (use None for pure coin creation) + /// * to_addr: optional crediting address (use None for pure coin destruction) + /// * amount: amount of coins to transfer + pub fn transfer_parallel_coins( + &mut self, + from_addr: Option
<Address>, + to_addr: Option<Address>
, + amount: Amount, + ) -> Result<(), ExecutionError> { + // check access rights + if let Some(from_addr) = &from_addr { + if !self.has_write_rights_on(from_addr) { + return Err(ExecutionError::RuntimeError(format!( + "spending from address {} is not allowed in this context", + from_addr + ))); + } + } + // do the transfer + self.speculative_ledger + .transfer_parallel_coins(from_addr, to_addr, amount) + } +} diff --git a/massa-execution-worker/src/controller.rs b/massa-execution-worker/src/controller.rs new file mode 100644 index 00000000000..0cbf10d0946 --- /dev/null +++ b/massa-execution-worker/src/controller.rs @@ -0,0 +1,209 @@ +// Copyright (c) 2022 MASSA LABS + +//! This module implements an execution controller. +//! See massa-execution-exports/controller_traits.rs for functional details. + +use crate::execution::ExecutionState; +use crate::request_queue::{RequestQueue, RequestWithResponseSender}; +use massa_execution_exports::{ + ExecutionConfig, ExecutionController, ExecutionError, ExecutionManager, ExecutionOutput, + ReadOnlyExecutionRequest, +}; +use massa_ledger::LedgerEntry; +use massa_models::output_event::SCOutputEvent; +use massa_models::prehash::Map; +use massa_models::Address; +use massa_models::OperationId; +use massa_models::{Block, BlockId, Slot}; +use parking_lot::{Condvar, Mutex, RwLock}; +use std::collections::HashMap; +use std::sync::Arc; +use tracing::info; + +/// structure used to communicate with execution thread +pub(crate) struct ExecutionInputData { + /// set stop to true to stop the thread + pub stop: bool, + /// list of newly finalized blocks, indexed by slot + pub finalized_blocks: HashMap, + /// new blockclique (if there is a new one), blocks indexed by slot + pub new_blockclique: Option>, + /// queue for readonly execution requests and response mpscs to send back their outputs + pub readonly_requests: RequestQueue, +} + +impl ExecutionInputData { + /// Creates a new empty ExecutionInputData + pub fn new(config: ExecutionConfig) -> Self { + ExecutionInputData { + stop: Default::default(), + finalized_blocks: Default::default(), + new_blockclique: Default::default(), + readonly_requests: RequestQueue::new(config.max_final_events), + } + } + + /// Takes the current input data into a clone that is returned, + /// and resets self. 
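The controller shares an `Arc<(Condvar, Mutex<ExecutionInputData>)>` with the worker thread and calls `notify_one` whenever the input changes, while the worker sleeps on the condition variable. A reduced sketch of that wakeup pattern, assuming the `parking_lot` crate from the lockfile and a simplified payload type:

```rust
// Simplified producer/consumer wakeup, mirroring the Condvar + Mutex pairing
// used by the execution controller and worker (payload type simplified).
use parking_lot::{Condvar, Mutex};
use std::sync::Arc;

type Shared = Arc<(Condvar, Mutex<Vec<String>>)>;

fn push_input(shared: &Shared, item: String) {
    let mut guard = shared.1.lock();
    guard.push(item);      // update the protected input data
    shared.0.notify_one(); // wake the worker loop
}

fn wait_for_input(shared: &Shared) -> Vec<String> {
    let mut guard = shared.1.lock();
    while guard.is_empty() {
        shared.0.wait(&mut guard); // sleep until notified
    }
    std::mem::take(&mut *guard) // drain the pending inputs, like take() above
}
```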
+ pub fn take(&mut self) -> Self { + ExecutionInputData { + stop: std::mem::take(&mut self.stop), + finalized_blocks: std::mem::take(&mut self.finalized_blocks), + new_blockclique: std::mem::take(&mut self.new_blockclique), + readonly_requests: self.readonly_requests.take(), + } + } +} + +#[derive(Clone)] +/// implementation of the execution controller +pub struct ExecutionControllerImpl { + /// input data to process in the VM loop + /// with a wakeup condition variable that needs to be triggered when the data changes + pub(crate) input_data: Arc<(Condvar, Mutex)>, + /// current execution state (see execution.rs for details) + pub(crate) execution_state: Arc>, +} + +impl ExecutionController for ExecutionControllerImpl { + /// called to signal changes on the current blockclique, also listing newly finalized blocks + /// + /// # arguments + /// * finalized_blocks: list of newly finalized blocks to be appended to the input finalized blocks + /// * blockclique: new blockclique, replaces the current one in the input + fn update_blockclique_status( + &self, + finalized_blocks: Map, + blockclique: Map, + ) { + // index newly finalized blocks by slot + let mapped_finalized_blocks: HashMap<_, _> = finalized_blocks + .into_iter() + .map(|(b_id, b)| (b.header.content.slot, (b_id, b))) + .collect(); + // index blockclique by slot + let mapped_blockclique = blockclique + .into_iter() + .map(|(b_id, b)| (b.header.content.slot, (b_id, b))) + .collect(); + // update input data + let mut input_data = self.input_data.1.lock(); + input_data.new_blockclique = Some(mapped_blockclique); // replace blockclique + input_data.finalized_blocks.extend(mapped_finalized_blocks); // append finalized blocks + self.input_data.0.notify_one(); // wake up VM loop + } + + /// Get the generated execution events, optionnally filtered by: + /// * start slot + /// * end slot + /// * emitter address + /// * original caller address + /// * operation id + fn get_filtered_sc_output_event( + &self, + start: Option, + end: Option, + emitter_address: Option
<Address>, + original_caller_address: Option<Address>
, + original_operation_id: Option, + ) -> Vec { + self.execution_state.read().get_filtered_sc_output_event( + start, + end, + emitter_address, + original_caller_address, + original_operation_id, + ) + } + + /// gets a copy of a full ledger entry + /// + /// # return value + /// * (final_entry, active_entry) + fn get_final_and_active_ledger_entry( + &self, + addr: &Address, + ) -> (Option, Option) { + self.execution_state + .read() + .get_final_and_active_ledger_entry(addr) + } + + /// Executes a readonly request + /// Read-only requests do not modify consesnsus state + fn execute_readonly_request( + &self, + req: ReadOnlyExecutionRequest, + ) -> Result { + let resp_rx = { + let mut input_data = self.input_data.1.lock(); + + // if the read-onlyi queue is already full, return an error + if input_data.readonly_requests.is_full() { + return Err(ExecutionError::ChannelError( + "too many queued readonly requests".into(), + )); + } + + // prepare the channel to send back the result of the read-only execution + let (resp_tx, resp_rx) = + std::sync::mpsc::channel::>(); + + // append the request to the queue of input read-only requests + input_data + .readonly_requests + .push(RequestWithResponseSender::new(req, resp_tx)); + + // wake up the execution main loop + self.input_data.0.notify_one(); + + resp_rx + }; + + // Wait for the result of the execution + match resp_rx.recv() { + Ok(result) => result, + Err(err) => { + return Err(ExecutionError::ChannelError(format!( + "readonly execution response channel readout failed: {}", + err + ))) + } + } + } + + /// Returns a boxed clone of self. + /// Allows cloning Box, + /// see massa-execution-exports/controller_traits.rs + fn clone_box(&self) -> Box { + Box::new(self.clone()) + } +} + +/// Execution manager +/// Allows stopping the execution worker +pub struct ExecutionManagerImpl { + /// input data to process in the VM loop + /// with a wakeup condition variable that needs to be triggered when the data changes + pub(crate) input_data: Arc<(Condvar, Mutex)>, + /// handle used to join the worker thread + pub(crate) thread_handle: Option>, +} + +impl ExecutionManager for ExecutionManagerImpl { + /// stops the worker + fn stop(&mut self) { + info!("stopping Execution controller..."); + // notify the worker thread to stop + { + let mut input_wlock = self.input_data.1.lock(); + input_wlock.stop = true; + self.input_data.0.notify_one(); + } + // join the execution thread + if let Some(join_handle) = self.thread_handle.take() { + join_handle.join().expect("VM controller thread panicked"); + } + info!("execution controller stopped"); + } +} diff --git a/massa-execution-worker/src/execution.rs b/massa-execution-worker/src/execution.rs new file mode 100644 index 00000000000..398364895d7 --- /dev/null +++ b/massa-execution-worker/src/execution.rs @@ -0,0 +1,504 @@ +// Copyright (c) 2022 MASSA LABS + +//! This module deals with executing final and active slots, as well as read-only requests. +//! It also keeps a history of executed slots, thus holding the speculative state of the ledger. +//! +//! Execution usually happens in the following way: +//! * an execution context is set up +//! * the VM is called for execution within this context +//! 
* the output of the execution is extracted from the context + +use crate::context::ExecutionContext; +use crate::interface_impl::InterfaceImpl; +use massa_execution_exports::{ + EventStore, ExecutionConfig, ExecutionError, ExecutionOutput, ExecutionStackElement, + ReadOnlyExecutionRequest, +}; +use massa_ledger::{Applicable, FinalLedger, LedgerChanges, LedgerEntry, SetUpdateOrDelete}; +use massa_models::output_event::SCOutputEvent; +use massa_models::{Address, BlockId, Operation, OperationId, OperationType}; +use massa_models::{Block, Slot}; +use massa_sc_runtime::Interface; +use parking_lot::{Mutex, RwLock}; +use std::{ + collections::{HashMap, VecDeque}, + sync::Arc, +}; +use tracing::debug; + +/// Used to acquire a lock on the execution context +macro_rules! context_guard { + ($self:ident) => { + $self.execution_context.lock() + }; +} + +/// Structure holding consistent speculative and final execution states, +/// and allowing access to them. +pub(crate) struct ExecutionState { + // execution config + pub config: ExecutionConfig, + // History of the outputs of recently executed slots. Slots should be consecutive, newest at the back. + // Whenever an active slot is executed, it is appended at the back of active_history. + // Whenever an executed active slot becomes final, + // its output is popped from the front of active_history and applied to the final state. + pub active_history: VecDeque, + // a cursor pointing to the highest executed slot + pub active_cursor: Slot, + // a cursor pointing to the highest executed final slot + pub final_cursor: Slot, + // store containing execution events that became final + pub final_events: EventStore, + // final ledger with atomic R/W access + pub final_ledger: Arc>, + // execution context (see documentation in context.rs) + pub execution_context: Arc>, + // execution interface allowing the VM runtime to access the Massa context + pub execution_interface: Box, +} + +impl ExecutionState { + /// Create a new execution state. This should be called only once at the start of the executon worker. + /// + /// # arguments + /// * config: execution config + /// * final_lefger: atomic access to the final ledger + /// + /// # returns + /// A new ExecutionState + pub fn new(config: ExecutionConfig, final_ledger: Arc>) -> ExecutionState { + // Get the slot at the output of which the final ledger is attached. + // This should be among the latest final slots. + let last_final_slot = final_ledger.read().slot; + + // Create an empty placeholder execution context, with shared atomic access + let execution_context = Arc::new(Mutex::new(ExecutionContext::new( + final_ledger.clone(), + Default::default(), + ))); + + // Instantiate the interface providing ABI access to the VM, share the execution contex with it + let execution_interface = Box::new(InterfaceImpl::new( + config.clone(), + execution_context.clone(), + )); + + // build the execution state + ExecutionState { + config, + final_ledger, + execution_context, + execution_interface, + // empty execution output history: it is not recovered through bootstrap + active_history: Default::default(), + // empty final event store: it is not recovered through bootstrap + final_events: Default::default(), + // no active slots executed yet: set active_cursor to the last final block + active_cursor: last_final_slot, + final_cursor: last_final_slot, + } + } + + /// Applies the output of an execution to the final execution state. 
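The `active_history` bookkeeping above keeps consecutive slots with the newest output at the back, pushes a new output after each active execution, and pops the oldest output off the front when it becomes final. A simplified sketch of that deque plus the two cursors, with slots and outputs reduced to plain integers and strings:

```rust
// Simplified view of the execution-output history: active outputs are pushed
// at the back, and the oldest one is popped from the front when it finalizes.
use std::collections::VecDeque;

struct History {
    active_history: VecDeque<(u64, String)>, // (slot, output), oldest at the front
    final_cursor: u64,                       // highest finalized slot
    active_cursor: u64,                      // highest executed slot
}

impl History {
    fn apply_active(&mut self, slot: u64, output: String) {
        self.active_cursor = slot;
        self.active_history.push_back((slot, output));
    }

    fn finalize_oldest(&mut self) -> Option<(u64, String)> {
        let (slot, output) = self.active_history.pop_front()?;
        self.final_cursor = slot;
        self.active_cursor = self.active_cursor.max(slot);
        Some((slot, output)) // caller would apply this output to the final state
    }
}
```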
+ /// The newly applied final output should be from the slot just after the last executed final slot + /// + /// # Arguments + /// * exec_out: execution output to apply + pub fn apply_final_execution_output(&mut self, exec_out: ExecutionOutput) { + // apply ledger changes to the final ledger + self.final_ledger + .write() + .settle_slot(exec_out.slot, exec_out.ledger_changes); + // update the final ledger's slot + self.final_cursor = exec_out.slot; + + // update active cursor: + // if it was at the previous latest final block, set it to point to the new one + if self.active_cursor < self.final_cursor { + self.active_cursor = self.final_cursor; + } + + // append generated events to the final event store + self.final_events.extend(exec_out.events); + } + + /// Applies an execution output to the active (non-final) state + /// The newly active final output should be from the slot just after the last executed active slot + /// + /// # Arguments + /// * exec_out: execution output to apply + pub fn apply_active_execution_output(&mut self, exec_out: ExecutionOutput) { + // update active cursor to reflect the new latest active slot + self.active_cursor = exec_out.slot; + + // add the execution output at the end of the output history + self.active_history.push_back(exec_out); + } + + /// Clear the whole execution history, + /// deleting caches on executed non-final slots. + pub fn clear_history(&mut self) { + // clear history + self.active_history.clear(); + + // reset active cursor to point to the latest final slot + self.active_cursor = self.final_cursor; + } + + /// This function receives a new sequence of blocks to execute as argument. + /// It then scans the output history to see until which slot this sequence was already executed (and is outputs cached). + /// If a mismatch is found, it means that the sequence of blocks to execute has changed + /// and the existing output cache is truncated to keep output history only until the mismatch slot (excluded). + /// Slots after that point will need to be (re-executed) to account for the new sequence. + /// + /// # Arguments + /// * active_slots: A HashMap mapping each slot to a block or None if the slot is a miss + pub fn truncate_history(&mut self, active_slots: &HashMap>) { + // find mismatch point (included) + let mut truncate_at = None; + // iterate over the output history, in chronological order + for (hist_index, exec_output) in self.active_history.iter().enumerate() { + // try to find the corresponding slot in active_slots + let found_block_id = active_slots + .get(&exec_output.slot) + .map(|opt_b| opt_b.as_ref().map(|(b_id, _b)| *b_id)); + if found_block_id == Some(exec_output.block_id) { + // the slot number and block ID still match. Continue scanning + continue; + } + // mismatch found: stop scannig and return the cutoff index + truncate_at = Some(hist_index); + break; + } + + // If a mismatch was found + if let Some(truncate_at) = truncate_at { + // Truncate the execution output history at the cutoff index (excluded) + self.active_history.truncate(truncate_at); + // Now that part of the speculative executions were cancelled, + // update the active cursor to match the latest executed slot. + // The cursor is set to the latest executed final slot if the history is empty. + self.active_cursor = self + .active_history + .back() + .map_or(self.final_cursor, |out| out.slot); + } + } + + /// Returns he ledger changes accumulated from the beginning of the output history, + /// up until a provided slot (excluded). 
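`truncate_history` above keeps cached outputs only up to the first slot whose block no longer matches the new slot-to-block mapping, then drops everything from that point on. The same first-mismatch rule in isolation, with slots and block IDs reduced to integers:

```rust
// Sketch of the first-mismatch truncation performed by truncate_history,
// with slots and block IDs reduced to integers for illustration.
use std::collections::{HashMap, VecDeque};

fn truncate_on_mismatch(
    history: &mut VecDeque<(u64, Option<u64>)>, // (slot, executed block id or miss)
    new_slots: &HashMap<u64, Option<u64>>,      // slot -> block id or miss
) {
    let cut = history.iter().position(|(slot, block_id)| {
        // mismatch if the slot disappeared or now maps to a different block
        new_slots.get(slot).copied() != Some(*block_id)
    });
    if let Some(idx) = cut {
        history.truncate(idx); // drop the mismatching slot and everything after it
    }
}
```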
+ /// Only used in the VM main loop because the lock on the final ledger + /// carried by the returned SpeculativeLedger is not held. + /// TODO optimization: do not do this anymore but allow the speculative ledger to lazily query any subentry + /// by scanning through history from end to beginning + /// https://github.com/massalabs/massa/issues/2343 + pub fn get_accumulated_active_changes_at_slot(&self, slot: Slot) -> LedgerChanges { + // check that the slot is within the reach of history + if slot <= self.final_cursor { + panic!("cannot execute at a slot before finality"); + } + let max_slot = self + .active_cursor + .get_next_slot(self.config.thread_count) + .expect("slot overflow when getting speculative ledger"); + if slot > max_slot { + panic!("cannot execute at a slot beyond active cursor + 1"); + } + + // gather the history of changes in the relevant history range + let mut accumulated_changes = LedgerChanges::default(); + for previous_output in &self.active_history { + if previous_output.slot >= slot { + break; + } + accumulated_changes.apply(previous_output.ledger_changes.clone()); + } + + accumulated_changes + } + + /// Execute an operation in the context of a block. + /// Assumes the execution context was initialized at the beginning of the slot. + /// + /// # arguments + /// * operation: operation to execute + /// * block_creator_addr: address of the block creator + pub fn execute_operation( + &self, + operation: &Operation, + block_creator_addr: Address, + ) -> Result<(), ExecutionError> { + // process ExecuteSC operations only, ignore other types of operations + let (bytecode, max_gas, coins, gas_price) = match &operation.content.op { + OperationType::ExecuteSC { + data, + max_gas, + coins, + gas_price, + } => (data, max_gas, coins, gas_price), + _ => return Ok(()), + }; + + // get the operation's sender address + let sender_addr = Address::from_public_key(&operation.content.sender_public_key); + + // get operation ID + // TODO have operation_id contained in the Operation object in the future to avoid recomputation + // https://github.com/massalabs/massa/issues/1121 + // https://github.com/massalabs/massa/issues/2264 + let operation_id = operation + .get_operation_id() + .expect("could not compute operation ID"); + + // prepare the current slot context for executing the operation + let context_snapshot; + { + // acquire write access to the context + let mut context = context_guard!(self); + + // Use the context to credit the producer of the block with max_gas * gas_price parallel coins. + // Note that errors are deterministic and do not cancel the operation execution. + // That way, even if the sender sent an invalid operation, the block producer will still get credited. + let gas_fees = gas_price.saturating_mul_u64(*max_gas); + if let Err(err) = + context.transfer_parallel_coins(None, Some(block_creator_addr), gas_fees) + { + debug!( + "failed to credit block producer {} with {} gas fee coins: {}", + block_creator_addr, gas_fees, err + ); + } + + // Credit the operation sender with `coins` parallel coins. + // Note that errors are deterministic and do not cancel op execution. 
+ if let Err(err) = context.transfer_parallel_coins(None, Some(sender_addr), *coins) { + debug!( + "failed to credit operation sender {} with {} operation coins: {}", + sender_addr, *coins, err + ); + } + + // save a snapshot of the context state to restore it if the op fails to execute, + // this reverting any changes except the coin transfers above + context_snapshot = context.get_snapshot(); + + // set the context gas price to match the one defined in the operation + context.gas_price = *gas_price; + + // set the context max gas to match the one defined in the operation + context.max_gas = *max_gas; + + // Set the call stack to a single element: + // * the execution will happen in the context of the address of the operation's sender + // * the context will signal that `coins` were creditedto the parallel balance of the sender during that call + // * the context will give the operation's sender write access to its own ledger entry + context.stack = vec![ExecutionStackElement { + address: sender_addr, + coins: *coins, + owned_addresses: vec![sender_addr], + }]; + + // set the context origin operation ID + context.origin_operation_id = Some(operation_id); + }; + + // run the VM on the bytecode contained in the operation + let run_result = massa_sc_runtime::run(bytecode, *max_gas, &*self.execution_interface); + if let Err(err) = run_result { + // there was an error during bytecode execution: + // cancel the effects of the execution by resetting the context to the previously saved snapshot + let mut context = context_guard!(self); + context.origin_operation_id = None; + context.reset_to_snapshot(context_snapshot); + return Err(ExecutionError::RuntimeError(format!( + "bytecode execution error: {}", + err + ))); + } + + Ok(()) + } + + /// Executes a full slot (with or without a block inside) without causing any changes to the state, + /// just yielding the execution output. + /// + /// # Arguments + /// * slot: slot to execute + /// * opt_block: block ID if there is a block a that slot, otherwise None + /// + /// # Returns + /// An `ExecutionOutput` structure summarizing the output of the executed slot + pub fn execute_slot(&self, slot: Slot, opt_block: Option<(BlockId, Block)>) -> ExecutionOutput { + // get optional block ID and creator address + let (opt_block_id, opt_block_creator_addr) = opt_block + .as_ref() + .map(|(b_id, b)| (*b_id, Address::from_public_key(&b.header.content.creator))) + .unzip(); + + // accumulate previous active changes from output history + let previous_ledger_changes = self.get_accumulated_active_changes_at_slot(slot); + + // create a new execution context for the whole active slot + let execution_context = ExecutionContext::active_slot( + slot, + opt_block_id, + previous_ledger_changes, + self.final_ledger.clone(), + ); + + // note that here, some pre-operations (like crediting block producers) can be performed before the lock + + // apply the created execution context for slot execution + *context_guard!(self) = execution_context; + + // note that here, async operations should be executed + + // check if there is a block at this slot + if let (Some((block_id, block)), Some(block_creator_addr)) = + (opt_block, opt_block_creator_addr) + { + // Try executing the operations of this block in the order in which they appear in the block. + // Errors are logged but do not interrupt the execution of the slot. 
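Each operation above is isolated through a snapshot taken from the context before running the bytecode and restored if execution fails, so a failed operation leaves no trace beyond the up-front fee transfers. A reduced sketch of that snapshot/rollback pattern, with the context state simplified to a map and the names illustrative:

```rust
// Minimal snapshot/rollback sketch: changes are cloned before a fallible step
// and restored if the step errors, mirroring get_snapshot/reset_to_snapshot.
use std::collections::HashMap;

struct Context {
    changes: HashMap<String, u64>, // stand-in for the accumulated LedgerChanges
}

impl Context {
    fn get_snapshot(&self) -> HashMap<String, u64> {
        self.changes.clone()
    }

    fn reset_to_snapshot(&mut self, snapshot: HashMap<String, u64>) {
        self.changes = snapshot;
    }

    fn run_operation<F>(&mut self, op: F) -> Result<(), String>
    where
        F: FnOnce(&mut Self) -> Result<(), String>,
    {
        let snapshot = self.get_snapshot();
        if let Err(err) = op(self) {
            self.reset_to_snapshot(snapshot); // cancel the failed operation's effects
            return Err(err);
        }
        Ok(())
    }
}
```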
+ for (op_idx, operation) in block.operations.iter().enumerate() { + if let Err(err) = self.execute_operation(operation, block_creator_addr) { + debug!( + "failed executing operation index {} in block {}: {}", + op_idx, block_id, err + ); + } + } + } + + // return the execution output + context_guard!(self).take_execution_output() + } + + /// Executes a read-only execution request. + /// The executed bytecode appears to be able to read and write the consensus state, + /// but all accumulated changes are simply returned as an ExecutionOutput object, + /// and not actually applied to the consensus state. + /// + /// # Arguments + /// * req: a read-only execution request + /// + /// # Returns + /// ExecutionOutput describing the output of the execution, or an error + pub(crate) fn execute_readonly_request( + &self, + req: ReadOnlyExecutionRequest, + ) -> Result { + // set the execution slot to be the one after the latest executed active slot + let slot = self + .active_cursor + .get_next_slot(self.config.thread_count) + .expect("slot overflow in readonly execution"); + + // accumulate ledger changes that happened in the output history before this slot + let previous_ledger_changes = self.get_accumulated_active_changes_at_slot(slot); + + // create a readonly execution context + let max_gas = req.max_gas; + let bytecode = req.bytecode.clone(); + let execution_context = ExecutionContext::readonly( + slot, + req, + previous_ledger_changes, + self.final_ledger.clone(), + ); + + // set the execution context for execution + *context_guard!(self) = execution_context; + + // run the intepreter + massa_sc_runtime::run(&bytecode, max_gas, &*self.execution_interface) + .map_err(|err| ExecutionError::RuntimeError(err.to_string()))?; + + // return the execution output + Ok(context_guard!(self).take_execution_output()) + } + + /// Gets a full ledger entry both at the latest final and active executed slots + /// TODO: this can be heavily optimized, see comments and https://github.com/massalabs/massa/issues/2343 + /// + /// # returns + /// (final_entry, active_entry) + pub fn get_final_and_active_ledger_entry( + &self, + addr: &Address, + ) -> (Option, Option) { + // get the full entry from the final ledger + let final_entry = self.final_ledger.read().get_full_entry(addr); + + // get cumulative active changes and apply them + // TODO there is a lot of overhead here: we only need to compute the changes for one entry and no need to clone it + // also we should proceed backwards through history for performance + // https://github.com/massalabs/massa/issues/2343 + let active_change = self + .get_accumulated_active_changes_at_slot(self.active_cursor) + .get(addr) + .cloned(); + let active_entry = match (&final_entry, active_change) { + (final_v, None) => final_v.clone(), + (_, Some(SetUpdateOrDelete::Set(v))) => Some(v), + (_, Some(SetUpdateOrDelete::Delete)) => None, + (None, Some(SetUpdateOrDelete::Update(u))) => { + let mut v = LedgerEntry::default(); + v.apply(u); + Some(v) + } + (Some(final_v), Some(SetUpdateOrDelete::Update(u))) => { + let mut v = final_v.clone(); + v.apply(u); + Some(v) + } + }; + + (final_entry, active_entry) + } + + /// Gets execution events optionnally filtered by: + /// * start slot + /// * end slot + /// * emitter address + /// * original caller address + /// * operation id + pub fn get_filtered_sc_output_event( + &self, + start: Option, + end: Option, + emitter_address: Option
<Address>, + original_caller_address: Option<Address>
, + original_operation_id: Option, + ) -> Vec { + // iter on step history chained with final events + let start = start.unwrap_or_else(Slot::min); + let end = end.unwrap_or_else(Slot::max); + self.final_events + .get_filtered_sc_output_event( + start, + end, + emitter_address, + original_caller_address, + original_operation_id, + ) + .into_iter() + .chain( + // TODO note that active history is made of consecutive slots, + // so this algo does not need to scan all history items as iteration bounds can be derived a priori + // https://github.com/massalabs/massa/issues/2335 + self.active_history + .iter() + .filter(|item| item.slot >= start && item.slot < end) + .flat_map(|item| { + item.events.get_filtered_sc_output_event( + start, + end, + emitter_address, + original_caller_address, + original_operation_id, + ) + }), + ) + .collect() + } +} diff --git a/massa-execution-worker/src/interface_impl.rs b/massa-execution-worker/src/interface_impl.rs new file mode 100644 index 00000000000..c49a3db2b68 --- /dev/null +++ b/massa-execution-worker/src/interface_impl.rs @@ -0,0 +1,452 @@ +// Copyright (c) 2022 MASSA LABS + +//! Implementation of the interface between massa-execution-worker and massa-sc-runtime. +//! This allows the VM runtime to acceess the Massa execution context, +//! for example to interact with the ledger. +//! See the definition of Interface in the massa-sc-runtime crate for functional details. + +use crate::context::ExecutionContext; +use anyhow::{bail, Result}; +use massa_execution_exports::ExecutionConfig; +use massa_execution_exports::ExecutionStackElement; +use massa_hash::hash::Hash; +use massa_models::{ + output_event::{EventExecutionContext, SCOutputEvent, SCOutputEventId}, + timeslots::get_block_slot_timestamp, +}; +use massa_sc_runtime::{Interface, InterfaceClone}; +use parking_lot::Mutex; +use rand::Rng; +use std::str::FromStr; +use std::sync::Arc; +use tracing::debug; + +/// helper for locking the context mutex +macro_rules! context_guard { + ($self:ident) => { + $self.context.lock() + }; +} + +/// an implementation of the Interface trait (see massa-sc-runtime crate) +#[derive(Clone)] +pub(crate) struct InterfaceImpl { + /// execution config + config: ExecutionConfig, + /// thread-safe shared access to the execution context (see context.rs) + context: Arc>, +} + +impl InterfaceImpl { + /// creates a new InterfaceImpl + /// + /// # Arguments + /// * config: execution config + /// * context: thread-safe shared access to the current execution context (see context.rs) + pub fn new(config: ExecutionConfig, context: Arc>) -> InterfaceImpl { + InterfaceImpl { config, context } + } +} + +impl InterfaceClone for InterfaceImpl { + /// allows cloning a boxed InterfaceImpl + fn clone_box(&self) -> Box { + Box::new(self.clone()) + } +} + +/// Implementation of the Interface trait providing functions for massa-sc-runtime to call +/// in order to interact with the execution context during bytecode execution. +/// See the massa-sc-runtime crate for a functional description of the trait and its methods. +/// Note that massa-sc-runtime uses basic types (str for addresses, u64 for amounts...) for genericity. 
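`init_call` and `finish_call` below manage the execution call stack with a strict push/pop discipline: entering a nested call pushes an element carrying the callee address, the transferred coins, and the owned-address list, and finishing the call pops it. A simplified sketch with reduced stand-in types:

```rust
// Simplified call-stack discipline mirroring init_call / finish_call;
// types are reduced stand-ins, not the crate's own.
struct StackElement {
    address: String,
    coins: u64,
    owned_addresses: Vec<String>,
}

struct CallStack {
    stack: Vec<StackElement>,
}

impl CallStack {
    fn init_call(&mut self, callee: String, coins: u64) {
        self.stack.push(StackElement {
            owned_addresses: vec![callee.clone()], // callee gets write access to itself
            address: callee,
            coins,
        });
    }

    fn finish_call(&mut self) -> Result<(), String> {
        self.stack
            .pop()
            .map(|_| ())
            .ok_or_else(|| "call stack out of bounds".to_string())
    }
}
```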
+impl Interface for InterfaceImpl { + /// prints a message in the node logs at log level 3 (debug) + fn print(&self, message: &str) -> Result<()> { + debug!("SC print: {}", message); + Ok(()) + } + + /// Initialize the call when bytecode calls a function from another bytecode + /// This function transfers the coins passed as parameter, + /// prepares the current execution context by pushing a new element on the top of the call stack, + /// and returns the target bytecode from the ledger. + /// + /// # Arguments + /// * address: string representation of the target address on which the bytecode will be called + /// * raw_coins: raw representation (without decimal factor) of the amount of parallel coins to transfer from the caller address to the target address at the beginning of the call + /// + /// # Returns + /// The target bytecode or an error + fn init_call(&self, address: &str, raw_coins: u64) -> Result> { + // get target address + let to_address = massa_models::Address::from_str(address)?; + + // write-lock context + let mut context = context_guard!(self); + + // get target bytecode + let bytecode = match context.get_bytecode(&to_address) { + Some(bytecode) => bytecode, + None => bail!("bytecode not found for address {}", to_address), + }; + + // get caller address + let from_address = match context.stack.last() { + Some(addr) => addr.address, + _ => bail!("failed to read call stack current address"), + }; + + // transfer coins from caller to target address + let coins = massa_models::Amount::from_raw(raw_coins); + if let Err(err) = + context.transfer_parallel_coins(Some(from_address), Some(to_address), coins) + { + bail!( + "error transferring {} parallel coins from {} to {}: {}", + coins, + from_address, + to_address, + err + ); + } + + // push a new call stack element on top of the current call stack + context.stack.push(ExecutionStackElement { + address: to_address, + coins, + owned_addresses: vec![to_address], + }); + + // return the target bytecode + Ok(bytecode) + } + + /// Called to finish the call process after a bytecode calls a function from another one. + /// This function just pops away the top element of the call stack. + fn finish_call(&self) -> Result<()> { + let mut context = context_guard!(self); + + if context.stack.pop().is_none() { + bail!("call stack out of bounds") + } + + Ok(()) + } + + /// Gets the parallel balance of the current address address (top of the stack). + /// + /// # Returns + /// The raw representation (no decimal factor) of the parallel balance of the address, + /// or zero if the address is not found in the ledger. + fn get_balance(&self) -> Result { + let context = context_guard!(self); + let address = context.get_current_address()?; + Ok(context + .get_parallel_balance(&address) + .unwrap_or_default() + .to_raw()) + } + + /// Gets the parallel balance of arbitrary address passed as argument. + /// + /// # Arguments + /// * address: string representation of the address for which to get the balance + /// + /// # Returns + /// The raw representation (no decimal factor) of the parallel balance of the address, + /// or zero if the address is not found in the ledger. + fn get_balance_for(&self, address: &str) -> Result { + let address = massa_models::Address::from_str(address)?; + Ok(context_guard!(self) + .get_parallel_balance(&address) + .unwrap_or_default() + .to_raw()) + } + + /// Creates a new ledger entry with the initial bytecode given as argument. + /// A new unique address is generated for that entry and returned. 
+ /// + /// # Arguments + /// * bytecode: the bytecode to set for the newly created address + /// + /// # Returns + /// The string representation of the newly created address + fn create_module(&self, bytecode: &[u8]) -> Result { + match context_guard!(self).create_new_sc_address(bytecode.to_vec()) { + Ok(addr) => Ok(addr.to_bs58_check()), + Err(err) => bail!("couldn't create new SC address: {}", err), + } + } + + /// Gets a datastore value by key for a given address. + /// + /// # Arguments + /// * address: string representation of the address + /// * key: string key of the datastore entry to retrieve + /// + /// # Returns + /// The datastore value matching the provided key, if found, otherwise an error. + fn raw_get_data_for(&self, address: &str, key: &str) -> Result> { + let addr = &massa_models::Address::from_bs58_check(address)?; + let key = massa_hash::hash::Hash::compute_from(key.as_bytes()); + let context = context_guard!(self); + match context.get_data_entry(addr, &key) { + Some(value) => Ok(value), + _ => bail!("data entry not found"), + } + } + + /// Sets a datastore entry for a given address + /// + /// # Arguments + /// * address: string representation of the address + /// * key: string key of the datastore entry to set + /// * value: new value to set + fn raw_set_data_for(&self, address: &str, key: &str, value: &[u8]) -> Result<()> { + let addr = massa_models::Address::from_str(address)?; + let key = massa_hash::hash::Hash::compute_from(key.as_bytes()); + let mut context = context_guard!(self); + context.set_data_entry(&addr, key, value.to_vec())?; + Ok(()) + } + + /// Checks if a datastore entry exists for a given address. + /// + /// # Arguments + /// * address: string representation of the address + /// * key: string key of the datastore entry to retrieve + /// + /// # Returns + /// true if the address exists and has the entry matching the provided key in its datastore, otherwise false + fn has_data_for(&self, address: &str, key: &str) -> Result { + let addr = massa_models::Address::from_str(address)?; + let key = massa_hash::hash::Hash::compute_from(key.as_bytes()); + let context = context_guard!(self); + Ok(context.has_data_entry(&addr, &key)) + } + + /// Gets a datastore value by key for a the current address (top of the call stack). + /// + /// # Arguments + /// * key: string key of the datastore entry to retrieve + /// + /// # Returns + /// The datastore value matching the provided key, if found, otherwise an error. + fn raw_get_data(&self, key: &str) -> Result> { + let key = massa_hash::hash::Hash::compute_from(key.as_bytes()); + let context = context_guard!(self); + let addr = context.get_current_address()?; + match context.get_data_entry(&addr, &key) { + Some(data) => Ok(data), + _ => bail!("data entry not found"), + } + } + + /// Sets a datastore entry for the current address (top of the call stack). + /// + /// # Arguments + /// * address: string representation of the address + /// * key: string key of the datastore entry to set + /// * value: new value to set + fn raw_set_data(&self, key: &str, value: &[u8]) -> Result<()> { + let key = massa_hash::hash::Hash::compute_from(key.as_bytes()); + let mut context = context_guard!(self); + let addr = context.get_current_address()?; + context.set_data_entry(&addr, key, value.to_vec())?; + Ok(()) + } + + /// Checks if a datastore entry exists for the current address (top of the call stack). 
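The datastore accessors above and below share one shape: the string key is hashed, then the per-address key/value store is read or written. A reduced sketch of that layout; `DefaultHasher` stands in for `massa_hash`, and unlike the real `set_data_entry` this version silently creates missing addresses:

```rust
// Reduced datastore sketch: string keys are hashed before use, and each
// address owns its own key/value map. DefaultHasher stands in for massa_hash.
use std::collections::hash_map::DefaultHasher;
use std::collections::HashMap;
use std::hash::Hasher;

type KeyHash = u64;

fn hash_key(key: &str) -> KeyHash {
    let mut h = DefaultHasher::new();
    h.write(key.as_bytes());
    h.finish()
}

struct Datastores {
    entries: HashMap<String, HashMap<KeyHash, Vec<u8>>>, // address -> datastore
}

impl Datastores {
    fn raw_set_data_for(&mut self, address: &str, key: &str, value: &[u8]) {
        // simplification: creates the address entry if missing
        self.entries
            .entry(address.to_string())
            .or_default()
            .insert(hash_key(key), value.to_vec());
    }

    fn raw_get_data_for(&self, address: &str, key: &str) -> Option<Vec<u8>> {
        self.entries.get(address)?.get(&hash_key(key)).cloned()
    }
}
```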
+ /// + /// # Arguments + /// * key: string key of the datastore entry to retrieve + /// + /// # Returns + /// true if the address exists and has the entry matching the provided key in its datastore, otherwise false + fn has_data(&self, key: &str) -> Result { + let key = massa_hash::hash::Hash::compute_from(key.as_bytes()); + let context = context_guard!(self); + let addr = context.get_current_address()?; + Ok(context.has_data_entry(&addr, &key)) + } + + /// Hashses arbitrary data + /// + /// # Arguments + /// * data: data bytes to hash + /// + /// # Returns + /// The string representation of the resulting hash + fn hash(&self, data: &[u8]) -> Result { + Ok(massa_hash::hash::Hash::compute_from(data).to_bs58_check()) + } + + /// Converts a pubkey to an address + /// + /// # Arguments + /// * public_key: string representation of the public key + /// + /// # Returns + /// The string representation of the resulting address + fn address_from_public_key(&self, public_key: &str) -> Result { + let public_key = massa_signature::PublicKey::from_bs58_check(public_key)?; + let addr = massa_models::Address::from_public_key(&public_key); + Ok(addr.to_bs58_check()) + } + + /// Verifies a signature + /// + /// # Arguments + /// * data: the data bytes that were signed + /// * signature: string representation of the signature + /// * public key: string representation of the public key to check against + /// + /// # Returns + /// true if the signature verification succeeded, false otherwise + fn signature_verify(&self, data: &[u8], signature: &str, public_key: &str) -> Result { + let signature = match massa_signature::Signature::from_bs58_check(signature) { + Ok(sig) => sig, + Err(_) => return Ok(false), + }; + let public_key = match massa_signature::PublicKey::from_bs58_check(public_key) { + Ok(pubk) => pubk, + Err(_) => return Ok(false), + }; + let h = massa_hash::hash::Hash::compute_from(data); + Ok(massa_signature::verify_signature(&h, &signature, &public_key).is_ok()) + } + + /// Transfer parallel coins from the current address (top of the call stack) towards a target address. + /// + /// # Arguments + /// * to_address: string representation of the address to which the coins are sent + /// * raw_amount: raw representation (no decimal factor) of the amount of coins to transfer + fn transfer_coins(&self, to_address: &str, raw_amount: u64) -> Result<()> { + let to_address = massa_models::Address::from_str(to_address)?; + let amount = massa_models::Amount::from_raw(raw_amount); + let mut context = context_guard!(self); + let from_address = context.get_current_address()?; + context.transfer_parallel_coins(Some(from_address), Some(to_address), amount)?; + Ok(()) + } + + /// Transfer parallel coins from a given address towards a target address. 
+ /// + /// # Arguments + /// * from_address: string representation of the address that is sending the coins + /// * to_address: string representation of the address to which the coins are sent + /// * raw_amount: raw representation (no decimal factor) of the amount of coins to transfer + fn transfer_coins_for( + &self, + from_address: &str, + to_address: &str, + raw_amount: u64, + ) -> Result<()> { + let from_address = massa_models::Address::from_str(from_address)?; + let to_address = massa_models::Address::from_str(to_address)?; + let amount = massa_models::Amount::from_raw(raw_amount); + let mut context = context_guard!(self); + context.transfer_parallel_coins(Some(from_address), Some(to_address), amount)?; + Ok(()) + } + + /// Returns the list of owned adresses (top of the call stack). + /// Those addresses are the ones the current execution context has write access to, + /// typically it includes the current address itself, + /// but also the ones that were created previously by the current call to allow initializing them. + /// + /// # Returns + /// A vector with the string representation of each owned address. + /// Note that the ordering of this vector is deterministic and conserved. + fn get_owned_addresses(&self) -> Result> { + Ok(context_guard!(self) + .get_current_owned_addresses()? + .into_iter() + .map(|addr| addr.to_bs58_check()) + .collect()) + } + + /// Returns the addresses in the call stack, from the bottom to the top. + /// + /// # Returns + /// A vector with the string representation of each call stack address. + fn get_call_stack(&self) -> Result> { + Ok(context_guard!(self) + .get_call_stack() + .into_iter() + .map(|addr| addr.to_bs58_check()) + .collect()) + } + + /// Gets the amount of coins that have been ransferred at the beginning of the call. + /// See the init_call method. + /// + /// # Returns + /// The raw representation (no decimal factor) of the amount of coins + fn get_call_coins(&self) -> Result { + Ok(context_guard!(self).get_current_call_coins()?.to_raw()) + } + + /// Emits an execution event to be stored. 
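`unsafe_random` further below samples uniform i64 values from the Xoshiro256++ generator that the execution context seeds deterministically from slot data (see the active-slot seeding earlier in this diff). A standalone sketch of that seed-and-sample flow, assuming the `rand` and `rand_xoshiro` crates from the lockfile; the XOR fold is only a stand-in for the real 32-byte hash:

```rust
// Deterministic (and therefore predictable, hence "unsafe") RNG sketch:
// a 32-byte seed derived from slot data drives Xoshiro256++, from which
// uniform i64 values are sampled, as unsafe_random does.
use rand::{distributions::Uniform, Rng, SeedableRng};
use rand_xoshiro::Xoshiro256PlusPlus;

fn unsafe_random_for_slot(slot_key_bytes: &[u8], active_marker: u8) -> i64 {
    // fold slot bytes plus a mode marker into a 32-byte seed
    // (stand-in for hashing with massa_hash)
    let mut seed = [0u8; 32];
    for (i, b) in slot_key_bytes.iter().chain(std::iter::once(&active_marker)).enumerate() {
        seed[i % 32] ^= *b;
    }
    let mut rng = Xoshiro256PlusPlus::from_seed(seed);
    let distr = Uniform::new_inclusive(i64::MIN, i64::MAX);
    rng.sample(distr)
}
```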
+ /// + /// # Arguments: + /// data: the string data that is the payload of the event + fn generate_event(&self, data: String) -> Result<()> { + let mut execution_context = context_guard!(self); + + // Generate a unique event ID + // Initialize a seed from the current slot + let mut to_hash: Vec = execution_context.slot.to_bytes_key().to_vec(); + // Append the index of the emitted event during the current slot + to_hash.append(&mut execution_context.created_event_index.to_be_bytes().to_vec()); + // Append 0u8 if the context is readonly, 1u8 otherwise + // This is used to allow event ID collisions between readonly and active executions + to_hash.push(!execution_context.read_only as u8); + // Hash the seed to generate the ID + let id = SCOutputEventId(Hash::compute_from(&to_hash)); + + // Gather contextual information from the execution context + let context = EventExecutionContext { + slot: execution_context.slot, + block: execution_context.opt_block_id, + call_stack: execution_context.stack.iter().map(|e| e.address).collect(), + read_only: execution_context.read_only, + index_in_slot: execution_context.created_event_index, + origin_operation_id: execution_context.origin_operation_id, + }; + + // Generate the event + let event = SCOutputEvent { id, context, data }; + + // Increment the event counter fot this slot + execution_context.created_event_index += 1; + + // Add the event to the context store + execution_context.events.insert(id, event); + + Ok(()) + } + + /// Returns the current time (millisecond unix timestamp) + /// Note that in order to ensure determinism, this is actually the time of the context slot. + fn get_time(&self) -> Result { + let slot = context_guard!(self).slot; + let ts = get_block_slot_timestamp( + self.config.thread_count, + self.config.t0, + self.config.genesis_timestamp, + slot, + )?; + Ok(ts.to_millis()) + } + + /// Returns a pseudo-random deterministic i64 number + /// + /// # Warning + /// This random number generator is unsafe: + /// it can be both predicted and manipulated before the execution + fn unsafe_random(&self) -> Result { + let distr = rand::distributions::Uniform::new_inclusive(i64::MIN, i64::MAX); + Ok(context_guard!(self).unsafe_rng.sample(distr)) + } +} diff --git a/massa-execution-worker/src/lib.rs b/massa-execution-worker/src/lib.rs new file mode 100644 index 00000000000..52de649218e --- /dev/null +++ b/massa-execution-worker/src/lib.rs @@ -0,0 +1,83 @@ +// Copyright (c) 2022 MASSA LABS + +//! # General description +//! +//! The execution worker launches a persistent thread allowing the execution +//! of operations that can contain executable bytecode and managing interactions with the ledger. +//! When the worker is launched, a ExecutionManager and a ExecutionController are returned. +//! ExecutionManager allows stopping the worker, +//! and ExecutionController is the clonable structure through which users interact with the worker. +//! +//! The worker is fed through the ExecutionController with information about blockclique changes and newly finalized blocks +//! and will execute the operations in those blocks, as well as pending asynchronous operations on empty slots. +//! The worker can also query the current state of the ledger, and simulate operations in a read-only context. +//! +//! The execution worker has shared read access to the final ledger, +//! and must be the only module with runtime write access to the final ledger. +//! +//! # A note on finality +//! +//! +//! +//! 
The operations contained in a final slot are ready to be executed as final +//! only once all the previous slots are final and their operations are executed as final or ready to be so. +//! This ensures the sequentiality of the final executions of operations, +//! thus ensuring that writes to the final ledger are irreversible. +//! +//! Slots are called "active" if they have not been executed as final, and are not ready to be executed as final. +//! Active slots can therefore be final slots, or slots containing blocks from the blockclique, or empty (miss) slots. +//! Active slots can be executed in a speculative way: their execution might need to be reverted +//! as new blocks finalize or arrive, causing changes to them or to active slots before them. +//! +//! Miss slots are executed as well because they can contain implicit and async operations. +//! +//! # Architecture +//! +//! This crate is meant to be included only at the binary level to launch the worker, +//! not by the lib crates that will interact with it. +//! It depends on the massa-execution-exports crate that contains all the publicly exposed elements +//! and throuh which users will actually interact with the worker. +//! +//! ## worker.rs +//! This module runs the main loop of the worker thread. +//! It contains the logic to process incoming blockclique change notifications and read-only execution requests. +//! It sequences the blocks according to their slot number into queues, +//! and requests the execution of active and final slots to execution.rs. +//! +//! ## controller.rs +//! Implements ExecutionManager and ExecutionController +//! that serve as interfaces for users to interact with the worker in worker.rs. +//! +//! ## execution.rs +//! Contains the machinery to execute final and non-final slots, +//! and track the state and results of those executions. +//! This module initializes and holds a reference to the interface from interface_impl.rs +//! that allows the crate to provide execution state access +//! to the virtual machine runtime (massa-sc-runtime crate). +//! It also serves as an access point to the current execution state and speculative ledger +//! as defined in speculative_ledger.rs. +//! +//! ## speculative_ledger.rs +//! A speculative (non-final) ledger that supports cancelling already-executed operations +//! in the case of some blockclique changes. +//! +//! ## request_queue.rs +//! This module contains the implementation of a generic finite-size execution request queue. +//! It handles requests that come with an MPSC to send back the result of their execution once it's done. +//! + +#![feature(map_first_last)] +#![feature(unzip_option)] + +mod context; +mod controller; +mod execution; +mod interface_impl; +mod request_queue; +mod speculative_ledger; +mod worker; + +pub use worker::start_execution_worker; + +#[cfg(test)] +mod tests; diff --git a/massa-execution-worker/src/request_queue.rs b/massa-execution-worker/src/request_queue.rs new file mode 100644 index 00000000000..0da52ec15d5 --- /dev/null +++ b/massa-execution-worker/src/request_queue.rs @@ -0,0 +1,157 @@ +// Copyright (c) 2022 MASSA LABS + +//! This file defines a generic finite-size execution request queue with an MPSC-based result sender. 
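Each queued request below carries an `std::sync::mpsc` sender so the worker can hand the result back to whoever pushed the request. A minimal end-to-end sketch of that round trip with the request and result types simplified:

```rust
// Minimal request/response round trip: the caller enqueues a request together
// with a Sender, the worker executes it and replies through that Sender.
use std::sync::mpsc::{channel, Receiver, Sender};
use std::thread;

type Request = String;
type Response = Result<usize, String>;

fn spawn_worker(rx: Receiver<(Request, Sender<Response>)>) -> thread::JoinHandle<()> {
    thread::spawn(move || {
        for (req, resp_tx) in rx {
            // "execute" the request; ignore send errors (caller may have given up)
            let _ = resp_tx.send(Ok(req.len()));
        }
    })
}

fn main() {
    let (req_tx, req_rx) = channel();
    let worker = spawn_worker(req_rx);

    let (resp_tx, resp_rx) = channel();
    req_tx.send(("run my bytecode".to_string(), resp_tx)).unwrap();
    println!("result: {:?}", resp_rx.recv().unwrap());

    drop(req_tx); // closing the request channel lets the worker loop end
    worker.join().unwrap();
}
```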
+ +use massa_execution_exports::ExecutionError; +use std::collections::VecDeque; +use std::sync::mpsc::Sender; + +/// Represents an execution request T coupled with an MPSC sender for a result of type R +pub(crate) struct RequestWithResponseSender { + /// The underlying execution request + request: T, + /// An std::mpsc::Sender to later send the execution output R (or an error) + response_tx: Sender>, +} + +impl RequestWithResponseSender { + /// Create a new request with response sender + /// + /// # Arguments + /// * request: the underlying request of type T + /// * response_tx an std::mpsc::Sender to later send the execution output R (or an error) + pub fn new(request: T, response_tx: Sender>) -> Self { + RequestWithResponseSender { + request, + response_tx, + } + } + + /// Cancel the request by consuming the object and sending an error through the response channel. + /// + /// # Arguments + /// * err: the error to send through the response channel + pub fn cancel(self, err: ExecutionError) { + // Send a message to the request's sender to signal the cancellation. + // Ignore errors because they just mean that the emitter of the request + // has dropped the receiver and does not need the response anymore. + let _ = self.response_tx.send(Err(err)); + } + + /// Destructure self into a (request, response sender) pair + pub fn into_request_sender_pair(self) -> (T, Sender>) { + (self.request, self.response_tx) + } +} + +/// Structure representing an execution request queue with maximal length. +/// Each request is a RequestWithResponseSender that comes with an MPSC sender +/// to return the exection result when the execution is over (or an error). +pub(crate) struct RequestQueue { + /// Max number of item in the queue. + /// When the queue is full, extra new items are cancelled and dropped. + max_items: usize, + + /// The actual underlying queue + queue: VecDeque>, +} + +impl RequestQueue { + /// Create a new request queue + /// + /// # Arguments + /// * max_items: the maximal number of items in the queue. When full, extra new elements are cancelled and dropped. + pub fn new(max_items: usize) -> Self { + RequestQueue { + max_items, + queue: VecDeque::with_capacity(max_items), + } + } + + /// Extends Self with the contents of another RequestQueue. + /// The contents of the incoming queue are appended last. + /// Excess items with respect to self.max_items are cancelled and dropped. + pub fn extend(&mut self, mut other: RequestQueue) { + // compute the number of available item slots + let free_slots = self.max_items.saturating_sub(self.queue.len()); + + // if there are no available slots remaining, do nothing + if free_slots == 0 { + return; + } + + // if there are not enough available slots to fit the entire incoming queue + if free_slots < other.queue.len() { + // truncate the incoming queue to the size that fits, cancelling excess items + other.queue.drain(free_slots..).for_each(|req| { + req.cancel(ExecutionError::ChannelError( + "maximal request queue capacity reached".into(), + )) + }); + } + + // append the kept part of the incoming queue + self.queue.extend(other.queue); + } + + /// Cancel all queued items. + /// + /// # Arguments + /// * err: the error to send through the response channel of cancelled items + pub fn cancel(&mut self, err: ExecutionError) { + for req in self.queue.drain(..) 
{ + req.cancel(err.clone()); + } + } + + /// Pop out the oldest element of the queue + /// + /// # Returns + /// The oldest element of the queue, or None if the queue is empty + pub fn pop(&mut self) -> Option> { + self.queue.pop_front() + } + + /// Push a new element at the end of the queue. + /// May fail if maximum capacity is reached, + /// in which case the request is cancelled and dropped. + /// + /// # Returns + /// The oldest element of the queue, or None if the queue is empty + pub fn push(&mut self, req: RequestWithResponseSender) { + // If the queue is already full, cancel the incoming request and return. + if self.queue.len() >= self.max_items { + req.cancel(ExecutionError::ChannelError( + "maximal request queue capacity reached".into(), + )); + return; + } + + // Append the incoming request to the end of the queue. + self.queue.push_back(req); + } + + /// Take all the elements into a new queue and reset the current queue + pub fn take(&mut self) -> Self { + RequestQueue { + max_items: self.max_items, + queue: std::mem::take(&mut self.queue), + } + } + + /// Checks whether the queue is full + /// + /// # Returns + /// true if the queue is full, false otherwise + pub fn is_full(&self) -> bool { + self.queue.len() >= self.max_items + } + + /// Checks whether the queue is empty + /// + /// # Returns + /// true if the queue is empty, false otherwise + pub fn is_empty(&self) -> bool { + self.queue.is_empty() + } +} diff --git a/massa-execution-worker/src/speculative_ledger.rs b/massa-execution-worker/src/speculative_ledger.rs new file mode 100644 index 00000000000..6a0041b36ba --- /dev/null +++ b/massa-execution-worker/src/speculative_ledger.rs @@ -0,0 +1,260 @@ +// Copyright (c) 2022 MASSA LABS + +//! The speculative ledger represents, in a compressed way, +//! the state of the ledger at an arbitrary execution slot. +//! It never actually writes to the consensus state +//! but keeps track of the changes that were applied to it since its creation. + +use massa_execution_exports::ExecutionError; +use massa_hash::hash::Hash; +use massa_ledger::{Applicable, FinalLedger, LedgerChanges}; +use massa_models::{Address, Amount}; +use parking_lot::RwLock; +use std::sync::Arc; + +/// The SpeculativeLedger contains an thread-safe shared reference to the final ledger (read-only), +/// a list of existing changes that happened o the ledger since its finality, +/// as well as an extra list of "added" changes. +/// The SpeculativeLedger makes it possible to transparently manipulate a virtual ledger +/// that takes into account all those ledger changes and allows adding more +/// while keeping track of all the newly added changes, and never writing in the final ledger. +pub struct SpeculativeLedger { + /// Thread-safe shared access to the final ledger. For reading only. + final_ledger: Arc>, + + /// Accumulation of changes that previously happened to the ledger since finality. + /// This value is not modified by changes applied to the SpeculativeLedger. 
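`transfer_parallel_coins` below stages both balance updates with checked arithmetic before touching the shared state, so a failed debit or credit leaves no partial change. A reduced all-or-nothing sketch with balances simplified to a map of `u64`:

```rust
// All-or-nothing transfer sketch: both new balances are computed with checked
// arithmetic first, and the shared state is only touched if both succeed.
use std::collections::HashMap;

fn transfer(
    balances: &mut HashMap<String, u64>,
    from: Option<&str>,
    to: Option<&str>,
    amount: u64,
) -> Result<(), String> {
    // stage the changes without touching `balances` yet
    let mut staged: HashMap<String, u64> = HashMap::new();

    if let Some(from) = from {
        let old = *balances.get(from).ok_or("source address not found")?;
        let new = old.checked_sub(amount).ok_or("insufficient balance")?;
        staged.insert(from.to_string(), new);
    }
    if let Some(to) = to {
        // read the staged value first so from == to is handled consistently
        let old = staged
            .get(to)
            .copied()
            .or_else(|| balances.get(to).copied())
            .unwrap_or(0);
        let new = old.checked_add(amount).ok_or("balance overflow")?;
        staged.insert(to.to_string(), new);
    }

    balances.extend(staged); // apply only after every check passed
    Ok(())
}
```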
+ /// + /// TODO maybe have the history directly here, + /// so that we can avoid accumulating all the changes at every slot + /// but only lazily query addresses backwards in history (to avoid useless computations) with caching + previous_changes: LedgerChanges, + + /// list of ledger changes that were applied to this SpeculativeLedger since its creation + added_changes: LedgerChanges, +} + +impl SpeculativeLedger { + /// creates a new SpeculativeLedger + /// + /// # Arguments + /// * final_ledger: thread-safe shared access to the final ledger (for reading only) + /// * previous_changes: accumulation of changes that previously happened to the ledger since finality + pub fn new(final_ledger: Arc>, previous_changes: LedgerChanges) -> Self { + SpeculativeLedger { + final_ledger, + previous_changes, + added_changes: Default::default(), + } + } + + /// Returns the changes caused to the SpeculativeLedger since its creation, + /// and resets their local value to nothing. + pub fn take(&mut self) -> LedgerChanges { + std::mem::take(&mut self.added_changes) + } + + /// Takes a snapshot (clone) of the changes caused to the SpeculativeLedger since its creation + pub fn get_snapshot(&self) -> LedgerChanges { + self.added_changes.clone() + } + + /// Resets the SpeculativeLedger to a snapshot (see get_snapshot method) + pub fn reset_to_snapshot(&mut self, snapshot: LedgerChanges) { + self.added_changes = snapshot; + } + + /// Gets the effective parallel balance of an address + /// + /// # Arguments: + /// addr: the address to query + /// + /// # Returns + /// Some(Amount) if the address was found, otherwise None + pub fn get_parallel_balance(&self, addr: &Address) -> Option { + // try to read from added_changes, then previous_changes, then final_ledger + self.added_changes.get_parallel_balance_or_else(addr, || { + self.previous_changes + .get_parallel_balance_or_else(addr, || { + self.final_ledger.read().get_parallel_balance(addr) + }) + }) + } + + /// Gets the effective bytecode of an address + /// + /// # Arguments: + /// addr: the address to query + /// + /// # Returns + /// Some(Vec) if the address was found, otherwise None + pub fn get_bytecode(&self, addr: &Address) -> Option> { + // try to read from added_changes, then previous_changes, then final_ledger + self.added_changes.get_bytecode_or_else(addr, || { + self.previous_changes + .get_bytecode_or_else(addr, || self.final_ledger.read().get_bytecode(addr)) + }) + } + + /// Transfers parallel coins from one address to another. + /// No changes are retained in case of failure. + /// The spending address, if defined, must exist. + /// + /// # parameters + /// * from_addr: optional spending address (use None for pure coin creation) + /// * to_addr: optional crediting address (use None for pure coin destruction) + /// * amount: amount of coins to transfer + pub fn transfer_parallel_coins( + &mut self, + from_addr: Option
<Address>, + to_addr: Option<Address>
, + amount: Amount, + ) -> Result<(), ExecutionError> { + // init empty ledger changes + let mut changes = LedgerChanges::default(); + + // simulate spending coins from sender address (if any) + if let Some(from_addr) = from_addr { + let new_balance = self + .get_parallel_balance(&from_addr) + .ok_or_else(|| ExecutionError::RuntimeError("source address not found".into()))? + .checked_sub(amount) + .ok_or_else(|| { + ExecutionError::RuntimeError("unsufficient from_addr balance".into()) + })?; + changes.set_parallel_balance(from_addr, new_balance); + } + + // simulate crediting coins to destination address (if any) + // note that to_addr can be the same as from_addr + if let Some(to_addr) = to_addr { + let new_balance = changes + .get_parallel_balance_or_else(&to_addr, || self.get_parallel_balance(&to_addr)) + .unwrap_or_default() + .checked_add(amount) + .ok_or_else(|| { + ExecutionError::RuntimeError("overflow in to_addr balance".into()) + })?; + changes.set_parallel_balance(to_addr, new_balance); + } + + // apply the simulated changes to the speculative ledger + self.added_changes.apply(changes); + + Ok(()) + } + + /// Checks if an address exists in the speculative ledger + /// + /// # Arguments: + /// addr: the address to query + /// + /// # Returns + /// true if the address was found, otherwise false + pub fn entry_exists(&self, addr: &Address) -> bool { + // try to read from added_changes, then previous_changes, then final_ledger + self.added_changes.entry_exists_or_else(addr, || { + self.previous_changes + .entry_exists_or_else(addr, || self.final_ledger.read().entry_exists(addr)) + }) + } + + /// Creates a new smart contract address with initial bytecode. + /// + /// # Arguments + /// * addr: address to create + /// * bytecode: bytecode to set in the new ledger entry + pub fn create_new_sc_address( + &mut self, + addr: Address, + bytecode: Vec, + ) -> Result<(), ExecutionError> { + // set bytecode (create if non-existant) + self.added_changes.set_bytecode(addr, bytecode); + Ok(()) + } + + /// Sets the bytecode associated to an address in the ledger. + /// Fails if the address doesn't exist. + /// + /// # Arguments + /// * addr: target address + /// * bytecode: bytecode to set for that address + #[allow(dead_code)] // TODO remove when it is used + pub fn set_bytecode(&mut self, addr: Address, bytecode: Vec) -> Result<(), ExecutionError> { + // check for address existence + if !self.entry_exists(&addr) { + return Err(ExecutionError::RuntimeError(format!( + "could not set bytecode for address {}: entry does not exist", + addr + ))); + } + + // set the bytecode of that address + self.added_changes.set_bytecode(addr, bytecode); + + Ok(()) + } + + /// Gets a copy of a datastore value for a given address and datastore key + /// + /// # Arguments + /// * addr: address to query + /// * key: key to query in the address' datastore + /// + /// # Returns + /// Some(Vec) if the value was found, None if the address does not exist or if the key is not in its datastore. 
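Reads on the speculative ledger follow a three-layer fallback: the changes added by this instance, then the changes accumulated since finality, then the final ledger. A simplified sketch of that chained lookup; note that the real `*_or_else` helpers also honour explicit deletions recorded in the changes, which plain maps cannot express:

```rust
// Three-layer read fallback, newest layer first; plain maps stand in for
// LedgerChanges and the final ledger, and explicit deletions are not modelled.
use std::collections::HashMap;

struct LayeredView<'a> {
    added_changes: &'a HashMap<String, u64>,
    previous_changes: &'a HashMap<String, u64>,
    final_ledger: &'a HashMap<String, u64>,
}

impl<'a> LayeredView<'a> {
    fn get_parallel_balance(&self, addr: &str) -> Option<u64> {
        self.added_changes
            .get(addr)
            .or_else(|| self.previous_changes.get(addr))
            .or_else(|| self.final_ledger.get(addr))
            .copied()
    }
}
```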
+ pub fn get_data_entry(&self, addr: &Address, key: &Hash) -> Option> { + // try to read from added_changes, then previous_changes, then final_ledger + self.added_changes.get_data_entry_or_else(addr, key, || { + self.previous_changes.get_data_entry_or_else(addr, key, || { + self.final_ledger.read().get_data_entry(addr, key) + }) + }) + } + + /// Checks if a data entry exists for a given address + /// + /// # Arguments + /// * addr: address to query + /// * key: datastore key to look for + /// + /// # Returns + /// true if the key exists in the address' datastore, false otherwise + pub fn has_data_entry(&self, addr: &Address, key: &Hash) -> bool { + // try to read from added_changes, then previous_changes, then final_ledger + self.added_changes.has_data_entry_or_else(addr, key, || { + self.previous_changes.has_data_entry_or_else(addr, key, || { + self.final_ledger.read().has_data_entry(addr, key) + }) + }) + } + + /// Sets a dataset entry for a given address in the ledger. + /// Fails if the address doesn't exist. + /// If the datastore entry does not exist, it is created. + /// + /// # Arguments + /// * addr: target address + /// * key: datastore key + /// * data: value to associate to the datasotre key + pub fn set_data_entry( + &mut self, + addr: &Address, + key: Hash, + data: Vec, + ) -> Result<(), ExecutionError> { + // check for address existence + if !self.entry_exists(addr) { + return Err(ExecutionError::RuntimeError(format!( + "could not set data for address {}: entry does not exist", + addr + ))); + } + + // set data + self.added_changes.set_data_entry(*addr, key, data); + + Ok(()) + } +} diff --git a/massa-execution-worker/src/tests/mod.rs b/massa-execution-worker/src/tests/mod.rs new file mode 100644 index 00000000000..0ffb9de2a68 --- /dev/null +++ b/massa-execution-worker/src/tests/mod.rs @@ -0,0 +1,3 @@ +// Copyright (c) 2022 MASSA LABS + +//TODO mod scenarios_mandatories; https://github.com/massalabs/massa/pull/2296 diff --git a/massa-execution/src/tests/scenarios_mandatories.rs b/massa-execution-worker/src/tests/scenarios_mandatories.rs similarity index 100% rename from massa-execution/src/tests/scenarios_mandatories.rs rename to massa-execution-worker/src/tests/scenarios_mandatories.rs diff --git a/massa-execution-worker/src/worker.rs b/massa-execution-worker/src/worker.rs new file mode 100644 index 00000000000..15e12b44c06 --- /dev/null +++ b/massa-execution-worker/src/worker.rs @@ -0,0 +1,566 @@ +// Copyright (c) 2022 MASSA LABS + +//! This module allows launching the execution worker thread, returning objects to communicate with it. +//! The worker thread processes incoming notifications of blockclique changes, +//! orders active and final blocks in queues sorted by increasing slot number, +//! and requests the execution of active and final slots from execution.rs. 
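The queueing described in this module doc can be pictured with a much simpler model before reading the implementation. The sketch below uses plain `u64` slot numbers and a single slot thread, so any miss before a later final block is itself final; this glosses over Massa's multi-thread `Slot` layout, and the function and names are illustrative only:

```rust
use std::collections::HashMap;

/// Given pending final blocks keyed by slot, return the consecutive slots
/// (blocks or final misses) that are ready for in-order final execution.
/// Simplification: one slot thread, so every miss before the highest final
/// block is itself final.
fn ready_final_slots(
    last_executed: u64,
    pending: &HashMap<u64, &'static str>,
) -> Vec<(u64, Option<&'static str>)> {
    let max_final = match pending.keys().max() {
        Some(m) => *m,
        None => return Vec::new(),
    };
    (last_executed + 1..=max_final)
        .map(|slot| (slot, pending.get(&slot).copied()))
        .collect()
}

fn main() {
    let mut pending = HashMap::new();
    pending.insert(3, "block_c");
    pending.insert(5, "block_e");
    // Slots 2 and 4 are misses, but they are final because slot 5 holds a final block.
    let ready = ready_final_slots(1, &pending);
    assert_eq!(
        ready,
        vec![(2, None), (3, Some("block_c")), (4, None), (5, Some("block_e"))]
    );
}
```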
+ +use crate::controller::{ExecutionControllerImpl, ExecutionInputData, ExecutionManagerImpl}; +use crate::execution::ExecutionState; +use crate::request_queue::RequestQueue; +use massa_execution_exports::{ + ExecutionConfig, ExecutionController, ExecutionError, ExecutionManager, ExecutionOutput, + ReadOnlyExecutionRequest, +}; +use massa_ledger::FinalLedger; +use massa_models::BlockId; +use massa_models::{ + timeslots::{get_block_slot_timestamp, get_latest_block_slot_at_timestamp}, + Block, Slot, +}; +use massa_time::MassaTime; +use parking_lot::{Condvar, Mutex, RwLock}; +use std::{collections::HashMap, sync::Arc}; + +/// Structure gathering all elements needed by the execution thread +pub(crate) struct ExecutionThread { + // Execution config + config: ExecutionConfig, + // A copy of the input data allowing access to incoming requests + input_data: Arc<(Condvar, Mutex)>, + // Map of final slots not executed yet but ready for execution + // See lib.rs for an explanation on final execution ordering. + ready_final_slots: HashMap>, + // Highest final slot that is ready to be executed + last_ready_final_slot: Slot, + // Map of final blocks that are not yet ready to be executed + // See lib.rs for an explanation on final execution ordering. + pending_final_blocks: HashMap, + // Current blockclique, indexed by slot number + blockclique: HashMap, + // Map of all active slots + active_slots: HashMap>, + // Highest active slot + last_active_slot: Slot, + // Execution state (see execution.rs) to which execution requests are sent + execution_state: Arc>, + /// queue for readonly execution requests and response mpscs to send back their outputs + readonly_requests: RequestQueue, +} + +impl ExecutionThread { + /// Creates the ExecutionThread structure to gather all data and references + /// needed by the execution worker thread. + /// + /// # Arguments + /// * config: execution config + /// * input_data: a copy of the input data interface to get incoming requests from + /// * execution_state: an thread-safe shared access to the execution state, which can be bootstrapped or newly created + pub fn new( + config: ExecutionConfig, + input_data: Arc<(Condvar, Mutex)>, + execution_state: Arc>, + ) -> Self { + // get the latest executed final slot, at the output of which the final ledger is attached + let final_cursor = execution_state.read().final_cursor; + + // create and return the ExecutionThread + ExecutionThread { + last_active_slot: final_cursor, + input_data, + last_ready_final_slot: final_cursor, + ready_final_slots: Default::default(), + pending_final_blocks: Default::default(), + blockclique: Default::default(), + active_slots: Default::default(), + readonly_requests: RequestQueue::new(config.readonly_queue_length), + config, + execution_state, + } + } + + /// Update the sequence of final slots given newly finalized blocks. + /// This method is called from the execution worker's main loop. 
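The input_data field declared above is the worker's only inbox: producers lock the mutex, update the shared input, then wake the worker through the condition variable, and the worker waits with a timeout so it can also wake up when the next slot is due. A stripped-down sketch of that handshake with parking_lot, using a placeholder payload instead of ExecutionInputData:

```rust
use parking_lot::{Condvar, Mutex};
use std::sync::Arc;
use std::time::Duration;

fn main() {
    // Shared inbox: a producer stores data, then notifies the condition variable.
    let input: Arc<(Condvar, Mutex<Option<String>>)> =
        Arc::new((Condvar::new(), Mutex::new(None)));

    let producer = {
        let input = input.clone();
        std::thread::spawn(move || {
            *input.1.lock() = Some("new blockclique".to_string());
            input.0.notify_one();
        })
    };

    // Consumer side: wait for input or a timeout
    // (the real worker waits for at most the time until the next slot).
    let mut guard = input.1.lock();
    if guard.is_none() {
        let _timed_out = input.0.wait_for(&mut guard, Duration::from_secs(1));
    }
    println!("received: {:?}", guard.take());
    drop(guard);
    producer.join().unwrap();
}
```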
+ /// + /// # Arguments + /// * new_final_blocks: a map of newly finalized blocks + fn update_final_slots(&mut self, new_final_blocks: HashMap) { + // if there are no new final blocks, exit and do nothing + if new_final_blocks.is_empty() { + return; + } + + // add new_final_blocks to the pending final blocks not ready for execution yet + self.pending_final_blocks.extend(new_final_blocks); + + // get maximal final slot + let max_final_slot = self + .pending_final_blocks + .iter() + .max_by_key(|(s, _)| *s) + .map(|(s, _)| *s) + .expect("expected pending_final_blocks to be non-empty"); + + // Given pending_final_blocks, detect he final slots that are ready to be executed. + // Those are the ones or which all the previous slots are also executed or ready to be so. + // Iterate over consecutive slots starting from the one just after the previous last final one. + let mut slot = self.last_ready_final_slot; + while slot < max_final_slot { + slot = slot + .get_next_slot(self.config.thread_count) + .expect("final slot overflow in VM"); + + // try to remove that slot out of pending_final_blocks + if let Some((block_id, block)) = self.pending_final_blocks.remove(&slot) { + // pending final block found at slot: + // add block to the ready_final_slots list of final slots ready for execution + self.ready_final_slots.insert(slot, Some((block_id, block))); + self.last_ready_final_slot = slot; + // continue the loop + continue; + } + + // no final block found at this slot: it's a miss + + // check if the miss is final by searching for final blocks later in the same thread + let mut miss_final = false; + let mut search_slot = slot; + while search_slot < max_final_slot { + search_slot = search_slot + .get_next_slot(self.config.thread_count) + .expect("final slot overflow in VM"); + if self.pending_final_blocks.contains_key(&search_slot) { + // A final block was found later in the same thread. + // The missed slot is therefore final. + miss_final = true; + break; + } + } + + if miss_final { + // If this slot is a final miss + // Add it to the list of final slots ready for execution + self.ready_final_slots.insert(slot, None); + self.last_ready_final_slot = slot; + } else { + // This slot is not final: + // we have reached the end of the list of consecutive final slots + // that are ready to be executed + break; + } + } + } + + /// Returns the latest slot that is at or just before the current timestamp. + /// If a non-zero cursor_delay config is defined, this extra lag is taken into account. + /// Such an extra lag can be useful for weaker nodes to perform less speculative executions + /// because more recent slots change more often and might require multiple re-executions. + /// + /// # Returns + /// The latest slot at or before now() - self.config.cursor_delay) if there is any, + /// or None if it falls behind the genesis timestamp. + fn get_end_active_slot(&self) -> Option { + let target_time = MassaTime::compensated_now(self.config.clock_compensation) + .expect("could not read current time") + .saturating_sub(self.config.cursor_delay); + get_latest_block_slot_at_timestamp( + self.config.thread_count, + self.config.t0, + self.config.genesis_timestamp, + target_time, + ) + .expect("could not get current slot") + } + + /// Updates the sequence of active slots. + /// If a new blockclique is provided, it is taken into account. + /// If no blockclique is provided, this function is still useful to fill + /// ready-to-be-executed active slots with misses until the current time. 
+ /// + /// Arguments: + /// * new_blockclique: optionally provide a new blockclique + fn update_active_slots(&mut self, new_blockclique: Option>) { + // Update the current blockclique if it has changed + if let Some(blockclique) = new_blockclique { + self.blockclique = blockclique; + } + + // Get the latest slot at the current timestamp, if any + let end_active_slot = self.get_end_active_slot(); + + // Empty the list of active slots + self.active_slots = HashMap::new(); + self.last_active_slot = self.last_ready_final_slot; + + // If the current timestamp is before genesis time, keep the active_slots empty and return + let end_active_slot = match end_active_slot { + Some(s) => s, + None => return, + }; + + // Recompute the sequence of active slots + // by iterating over consecutive slots from the one just after last_ready_final_slot until the current timestamp, + // and looking for blocks into pending_final_blocks and the current blockclique + let mut slot = self.last_ready_final_slot; + while slot < end_active_slot { + slot = slot + .get_next_slot(self.config.thread_count) + .expect("active slot overflow in VM"); + // look for a block at that slot among the ones that are final but not ready for final execution yet + if let Some((block_id, block)) = self.pending_final_blocks.get(&slot) { + // A block at that slot was found in pending_final_blocks. + // Add it to the sequence of active slots. + self.active_slots + .insert(slot, Some((*block_id, block.clone()))); + self.last_active_slot = slot; + } else if let Some((block_id, block)) = self.blockclique.get(&slot) { + // A block at that slot was found in the current blockclique. + // Add it to the sequence of active slots. + self.active_slots + .insert(slot, Some((*block_id, block.clone()))); + self.last_active_slot = slot; + } else { + // No block was found at that slot: it's a miss + // Add the miss to the sequence of active slots + self.active_slots.insert(slot, None); + self.last_active_slot = slot; + } + } + } + + /// executes one final slot, if any + /// returns true if something was executed + fn execute_one_final_slot(&mut self) -> bool { + // check if there are final slots to execute + if self.ready_final_slots.is_empty() { + return false; + } + + // w-lock execution state + let mut exec_state = self.execution_state.write(); + + // get the slot just after the last executed final slot + let slot = exec_state + .final_cursor + .get_next_slot(self.config.thread_count) + .expect("final slot overflow in VM"); + + // take the corresponding element from sce finals + let exec_target = self + .ready_final_slots + .remove(&slot) + .expect("the SCE final slot list skipped a slot"); + + // check if the final slot is cached at the front of the speculative execution history + if let Some(exec_out) = exec_state.active_history.pop_front() { + if exec_out.slot == slot + && exec_out.block_id == exec_target.as_ref().map(|(b_id, _)| *b_id) + { + // speculative execution front result matches what we want to compute + + // apply the cached output and return + exec_state.apply_final_execution_output(exec_out); + return true; + } + } + + // speculative cache mismatch + + // clear the speculative execution output cache completely + exec_state.clear_history(); + + // execute slot + let exec_out = exec_state.execute_slot(slot, exec_target); + + // apply execution output to final state + exec_state.apply_final_execution_output(exec_out); + + true + } + + /// Check if there are any active slots ready for execution + /// This is used to check if the main loop 
should run an iteration + fn are_there_active_slots_ready_for_execution(&self) -> bool { + let execution_state = self.execution_state.read(); + + // get the next active slot + let slot = execution_state + .active_cursor + .get_next_slot(self.config.thread_count) + .expect("active slot overflow in VM"); + + // check if it is in the active slot queue + self.active_slots.contains_key(&slot) + } + + /// executes one active slot, if any + /// returns true if something was executed + fn execute_one_active_slot(&mut self) -> bool { + // write-lock the execution state + let mut exec_state = self.execution_state.write(); + + // get the next active slot + let slot = exec_state + .active_cursor + .get_next_slot(self.config.thread_count) + .expect("active slot overflow in VM"); + + // choose the execution target + let exec_target = match self.active_slots.get(&slot) { + Some(b) => b.clone(), //TODO get rid of that clone on storage refactorig https://github.com/massalabs/massa/issues/2178 + None => return false, + }; + + // execute the slot + let exec_out = exec_state.execute_slot(slot, exec_target); + + // apply execution output to active state + exec_state.apply_active_execution_output(exec_out); + + true + } + + /// Gets the time from now() to the slot just after next last_active_slot. + /// Saturates down to 0 on negative durations. + /// Note that config.cursor_delay is taken into account. + fn get_time_until_next_active_slot(&self) -> MassaTime { + // get the timestamp of the slot after the current last active one + let next_slot = self + .last_active_slot + .get_next_slot(self.config.thread_count) + .expect("active slot overflow in VM"); + let next_timestmap = get_block_slot_timestamp( + self.config.thread_count, + self.config.t0, + self.config.genesis_timestamp, + next_slot, + ) + .expect("could not compute block timestmap in VM"); + + // get the current timestamp minus the cursor delay + let end_time = MassaTime::compensated_now(self.config.clock_compensation) + .expect("could not get current time in VM") + .saturating_sub(self.config.cursor_delay); + + // compute the time difference, saturating down to zero + next_timestmap.saturating_sub(end_time) + } + + /// Tells the execution state about the new sequence of active slots. + /// If some slots already executed in a speculative way changed, + /// or if one of their have predecessor slots changed, + /// the execution state will truncate the execution output history + /// to remove all out-of-date execution outputs. + /// Speculative execution will then resume from the point of truncation. + pub fn truncate_execution_history(&mut self) { + // acquire write access to execution state + let mut exec_state = self.execution_state.write(); + + // tells the execution state to truncate its execution output history + // given the new list of active slots + exec_state.truncate_history(&self.active_slots); + } + + /// Append incoming read-only requests to the relevant queue, + /// Cancel those that are in excess if there are too many. + fn update_readonly_requests( + &mut self, + new_requests: RequestQueue, + ) { + // Append incoming readonly requests to our readonly request queue + // Excess requests are cancelld + self.readonly_requests.extend(new_requests); + } + + /// Executes a read-only request from the queue, if any. + /// The result of the execution is sent asynchronously through the response channel provided with the request. 
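get_time_until_next_active_slot above clamps negative delays to zero so the result can always be used directly as a wait timeout. The same saturating behaviour, shown here with std::time instead of MassaTime (illustrative, not the worker's code):

```rust
use std::time::{Duration, Instant};

/// Time from `now` until `target`, clamped to zero when `target` is already in the past.
fn time_until(now: Instant, target: Instant) -> Duration {
    target.saturating_duration_since(now)
}

fn main() {
    let now = Instant::now();
    let future = now + Duration::from_millis(500);
    assert_eq!(time_until(now, future), Duration::from_millis(500));
    assert_eq!(time_until(future, now), Duration::ZERO); // a past target saturates to zero
}
```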
+ /// + /// # Returns + /// true if a request was executed, false otherwise + fn execute_one_readonly_request(&mut self) -> bool { + if let Some(req_resp) = self.readonly_requests.pop() { + let (req, resp_tx) = req_resp.into_request_sender_pair(); + + // Acquire read access to the execution state and execute the read-only request + let outcome = self.execution_state.read().execute_readonly_request(req); + + // Send the execution output through resp_tx. + // Ignore errors because they just mean that the request emitter dropped the received + // because it doesn't need the response anymore. + let _ = resp_tx.send(outcome); + + return true; + } + false + } + + /// Waits for an event to trigger a new iteration in the excution main loop. + /// + /// # Returns + /// Some(ExecutionInputData) representing the input requests, + /// or None if the main loop needs to stop. + fn wait_loop_event(&mut self) -> Option { + let mut cancel_input = loop { + let mut input_data_lock = self.input_data.1.lock(); + + // take current input data, resetting it + let input_data: ExecutionInputData = input_data_lock.take(); + + // check for stop signal + if input_data.stop { + break input_data; + } + + // Check for readonly requests, new blockclique or final slot changes + // The most frequent triggers are checked first. + if !input_data.readonly_requests.is_empty() + || input_data.new_blockclique.is_some() + || !input_data.finalized_blocks.is_empty() + { + return Some(input_data); + } + + // Check for slots to execute. + // The most frequent triggers are checked first, + // except for the active slot check which is last because it is more expensive. + if !self.readonly_requests.is_empty() + || !self.ready_final_slots.is_empty() + || self.are_there_active_slots_ready_for_execution() + { + return Some(input_data); + } + + // No input data, and no slots to execute. + + // Compute when the next slot will be + // This is useful to wait for the next speculative miss to append to active slots. + let time_until_next_slot = self.get_time_until_next_active_slot(); + if time_until_next_slot == 0.into() { + // next slot is right now: the loop needs to iterate + return Some(input_data); + } + + // Wait to be notified of new input, for at most time_until_next_slot + // The return value is ignored because we don't care what woke up the condition variable. + let _res = self + .input_data + .0 + .wait_for(&mut input_data_lock, time_until_next_slot.to_duration()); + }; + + // The loop needs to quit + + // Cancel pending readonly requests + let cancel_err = ExecutionError::ChannelError( + "readonly execution cancelled because the execution worker is closing".into(), + ); + cancel_input.readonly_requests.cancel(cancel_err.clone()); + self.input_data + .1 + .lock() + .take() + .readonly_requests + .cancel(cancel_err); + + None + } + + /// Main loop of the executin worker + pub fn main_loop(&mut self) { + // This loop restarts everytime an execution happens for easier tracking. + // It also prioritizes executions in the following order: + // 1 - final executions + // 2 - speculative executions + // 3 - read-only executions + while let Some(input_data) = self.wait_loop_event() { + // update the sequence of final slots given the newly finalized blocks + self.update_final_slots(input_data.finalized_blocks); + + // update the sequence of active slots + self.update_active_slots(input_data.new_blockclique); + + // The list of active slots might have seen + // new insertions/deletions of blocks at different slot depths. 
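execute_one_readonly_request above answers each queued request through the sender stored with it and deliberately ignores send errors, since a failure only means the requester dropped its receiver and no longer wants the response. A reduced sketch of that request/response pattern, with a std mpsc channel standing in for the real response sender and placeholder request/output types:

```rust
use std::sync::mpsc;

// Placeholder types; the worker uses ReadOnlyExecutionRequest and its own output type.
struct ReadOnlyRequest {
    bytecode: Vec<u8>,
}
struct Output {
    result: String,
}

fn execute(req: ReadOnlyRequest) -> Output {
    Output {
        result: format!("executed {} bytes", req.bytecode.len()),
    }
}

fn main() {
    let (resp_tx, resp_rx) = mpsc::channel::<Output>();

    // Worker side: run the request and send the outcome back.
    // The send error is ignored: it only means the requester dropped the receiver.
    let outcome = execute(ReadOnlyRequest { bytecode: vec![0; 42] });
    let _ = resp_tx.send(outcome);

    // Requester side: wait for the outcome.
    if let Ok(out) = resp_rx.recv() {
        println!("{}", out.result);
    }
}
```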
+ // It is therefore important to signal this to the execution state, + // so that it can remove out-of-date speculative execution results from its history. + self.truncate_execution_history(); + + // update the sequence of read-only requests + self.update_readonly_requests(input_data.readonly_requests); + + // execute one slot as final, if there is one ready for final execution + if self.execute_one_final_slot() { + // A slot was executed as final: restart the loop + // This loop continue is useful for monitoring: + // it allows tracking the state of all execution queues + continue; + } + + // now all the slots that were ready for final execution have been executed as final + + // Execute one active slot in a speculative way, if there is one ready for that + if self.execute_one_active_slot() { + // An active slot was executed: restart the loop + continue; + } + + // now all the slots that were ready for final and active execution have been executed + + // Execute a read-only request (note that the queue is of finite length), if there is one ready. + // This must be done in this loop because even though read-only executions do not alter consensus state, + // they still act temporarily on the static shared execution context. + if self.execute_one_readonly_request() { + // a read-only request was executed: restart the loop + continue; + } + } + } +} + +/// Launches an execution worker thread and returns an ExecutionManager to interact with it +/// +/// # parameters +/// * config: execution config +/// * final_ledger: a thread-safe shared access to the final ledger for reading and writing +/// +/// # Returns +/// A pair (execution_manager, execution_controller) where: +/// * execution_manager allows to stop the worker +/// * execution_controller allows sending requests and notifications to the worker +pub fn start_execution_worker( + config: ExecutionConfig, + final_ledger: Arc>, +) -> (Box, Box) { + // create an execution state + let execution_state = Arc::new(RwLock::new(ExecutionState::new( + config.clone(), + final_ledger, + ))); + + // define the input data interface + let input_data = Arc::new(( + Condvar::new(), + Mutex::new(ExecutionInputData::new(config.clone())), + )); + + // create a controller + let controller = ExecutionControllerImpl { + input_data: input_data.clone(), + execution_state: execution_state.clone(), + }; + + // launch the execution thread + let input_data_clone = input_data.clone(); + let thread_handle = std::thread::spawn(move || { + ExecutionThread::new(config, input_data_clone, execution_state).main_loop(); + }); + + // create a manager + let manager = ExecutionManagerImpl { + input_data, + thread_handle: Some(thread_handle), + }; + + // return the execution manager and controller pair + (Box::new(manager), Box::new(controller)) +} diff --git a/massa-execution/read-only.md b/massa-execution/read-only.md deleted file mode 100644 index 9b368f749bf..00000000000 --- a/massa-execution/read-only.md +++ /dev/null @@ -1,38 +0,0 @@ -# Read-only execution - -# Rationale - -When using Massa in a Web3 context for example, one should be able to perform read-only Smart Contract calls. - -See: https://ethereum.stackexchange.com/questions/765/what-is-the-difference-between-a-transaction-and-a-call/770 - -# Massa implementation - -## API - -Add a "sc_readonly_call" API endpoint - -Parameters: -* max_gas: u64 // max gas allowed for the readonly run -* simulated_gas_price: Amount // simulated gas price to expose to the smart contract context -* simulated_caller: Option
// pretend this address is executing the SC, if none provided a random one will be used. -* bytecode: `Vec` // bytecode to execute - -Return value: -* executed_at: Slot // slot at which the execution occurred -* result: - * (optional) error: Error - * (optional) output_events: `Vec` // output events generated during execution - - ## Operation - -* when the sc_readonly_call is called, the bytecode's main() function will be called with the following execution context: - * the execution will be done from the point of view of the latest slot at the current timestamp (see VM slot filler) - * Clear and update the context. - * set the call stack to simulated_caller_address - * set max_gas to its chosen value - * set gas_price to simulated_gas_price - * TODO: block ? maybe just assume a miss - * Note: do not apply changes to the ledger. - -* during the call, everything happens as with a normal ExecuteSC call, but when the call finishes, its effects are rollbacked (like when a SC execution fails) \ No newline at end of file diff --git a/massa-execution/src/controller.rs b/massa-execution/src/controller.rs deleted file mode 100644 index 46f96136ff5..00000000000 --- a/massa-execution/src/controller.rs +++ /dev/null @@ -1,209 +0,0 @@ -use crate::error::ExecutionError; -use crate::settings::ExecutionConfigs; -use crate::worker::{ - ExecutionCommand, ExecutionEvent, ExecutionManagementCommand, ExecutionWorker, -}; -use crate::BootstrapExecutionState; -use massa_models::{ - api::SCELedgerInfo, constants::CHANNEL_SIZE, execution::ExecuteReadOnlyResponse, - output_event::SCOutputEvent, prehash::Map, Address, Amount, Block, BlockId, OperationId, Slot, -}; -use std::collections::VecDeque; -use tokio::sync::{mpsc, oneshot}; -use tokio::task::JoinHandle; -use tracing::{error, info}; - -/// A sender of execution commands. -#[derive(Clone)] -pub struct ExecutionCommandSender(pub mpsc::Sender); - -/// A receiver of execution events. -pub struct ExecutionEventReceiver(pub mpsc::UnboundedReceiver); - -impl ExecutionEventReceiver { - /// drains remaining events and returns them in a VecDeque - /// note: events are sorted from oldest to newest - pub async fn drain(mut self) -> VecDeque { - let mut remaining_events: VecDeque = VecDeque::new(); - - while let Some(evt) = self.0.recv().await { - remaining_events.push_back(evt); - } - remaining_events - } -} - -/// A sender of execution management commands. -pub struct ExecutionManager { - join_handle: JoinHandle>, - manager_tx: mpsc::Sender, -} - -impl ExecutionManager { - pub async fn stop(self) -> Result<(), ExecutionError> { - drop(self.manager_tx); - if let Err(err) = self.join_handle.await { - error!("execution worker crashed: {}", err); - return Err(ExecutionError::JoinError); - }; - - info!("execution worker finished cleanly"); - Ok(()) - } -} - -/// Creates a new execution controller. -/// -/// # Arguments -/// * cfg: execution configuration -/// * bootstrap_state: optional bootstrap state -/// -/// TODO: add a consensus command sender, -/// to be able to send the `TransferToConsensus` message. -pub async fn start_controller( - cfg: ExecutionConfigs, - bootstrap_state: Option, -) -> Result< - ( - ExecutionCommandSender, - ExecutionEventReceiver, - ExecutionManager, - ), - ExecutionError, -> { - let (command_tx, command_rx) = mpsc::channel::(CHANNEL_SIZE); - let (manager_tx, manager_rx) = mpsc::channel::(1); - - // Unbounded, as execution is limited per metering already. 
- let (event_tx, event_rx) = mpsc::unbounded_channel::(); - let worker = ExecutionWorker::new(cfg, event_tx, command_rx, manager_rx, bootstrap_state)?; - let join_handle = tokio::spawn(async move { - match worker.run_loop().await { - Err(err) => Err(err), - Ok(v) => Ok(v), - } - }); - Ok(( - ExecutionCommandSender(command_tx), - ExecutionEventReceiver(event_rx), - ExecutionManager { - join_handle, - manager_tx, - }, - )) -} - -impl ExecutionCommandSender { - /// notify of a blockclique change - pub async fn update_blockclique( - &self, - finalized_blocks: Map, - blockclique: Map, - ) -> Result<(), ExecutionError> { - self.0 - .send(ExecutionCommand::BlockCliqueChanged { - blockclique, - finalized_blocks, - }) - .await - .map_err(|_err| { - ExecutionError::ChannelError( - "could not send BlockCliqueChanged command to execution".into(), - ) - })?; - Ok(()) - } - - pub async fn get_bootstrap_state(&self) -> Result { - let (response_tx, response_rx) = oneshot::channel(); - self.0 - .send(ExecutionCommand::GetBootstrapState(response_tx)) - .await - .map_err(|_| { - ExecutionError::ChannelError("could not send GetBootstrapState command".into()) - })?; - response_rx.await.map_err(|_| { - ExecutionError::ChannelError("could not send GetBootstrapState upstream".into()) - }) - } - - /// Get events optionnally filtered by: - /// * start slot - /// * end slot - /// * emitter address - /// * original caller address - /// * operation id - pub async fn get_filtered_sc_output_event( - &self, - start: Option, - end: Option, - emitter_address: Option
<Address>, - original_caller_address: Option<Address>
, - original_operation_id: Option, - ) -> Result, ExecutionError> { - let (response_tx, response_rx) = oneshot::channel(); - self.0 - .send(ExecutionCommand::GetSCOutputEvents { - start, - end, - emitter_address, - original_caller_address, - original_operation_id, - response_tx, - }) - .await - .map_err(|_| { - ExecutionError::ChannelError("could not send GetSCOutputEvents command".into()) - })?; - response_rx.await.map_err(|_| { - ExecutionError::ChannelError("could not send GetSCOutputEvents upstream".into()) - }) - } - - /// Execute code in read-only mode. - pub async fn execute_read_only_request( - &self, - max_gas: u64, - simulated_gas_price: Amount, - bytecode: Vec, - address: Option
, - ) -> Result { - let (response_tx, response_rx) = oneshot::channel(); - self.0 - .send(ExecutionCommand::ExecuteReadOnlyRequest { - max_gas, - simulated_gas_price, - bytecode, - result_sender: response_tx, - address, - }) - .await - .map_err(|_| { - ExecutionError::ChannelError("could not send ExecuteReadOnlyRequest command".into()) - })?; - response_rx.await.map_err(|_| { - ExecutionError::ChannelError("could not send ExecuteReadOnlyResponse upstream".into()) - }) - } - - pub async fn get_sce_ledger_for_addresses( - self, - addresses: Vec
, - ) -> Result, ExecutionError> { - let (response_tx, response_rx) = oneshot::channel(); - self.0 - .send(ExecutionCommand::GetSCELedgerForAddresses { - response_tx, - addresses, - }) - .await - .map_err(|_| { - ExecutionError::ChannelError( - "could not send GetSCELedgerForAddresses command".into(), - ) - })?; - response_rx.await.map_err(|_| { - ExecutionError::ChannelError("could not send GetSCELedgerForAddresses upstream".into()) - }) - } -} diff --git a/massa-execution/src/error.rs b/massa-execution/src/error.rs deleted file mode 100644 index 1cb6c769511..00000000000 --- a/massa-execution/src/error.rs +++ /dev/null @@ -1,38 +0,0 @@ -use displaydoc::Display; -use thiserror::Error; - -/// Errors of the execution component. -#[non_exhaustive] -#[derive(Display, Error, Debug)] -pub enum ExecutionError { - /// Channel error - ChannelError(String), - - /// Join error - JoinError, - - /// crypto error: {0} - ModelsError(#[from] massa_models::ModelsError), - - /// time error: {0} - TimeError(#[from] massa_time::TimeError), - - /// File error - FileError(String), -} - -macro_rules! bootstrap_file_error { - ($st:expr, $cfg:ident) => { - |err| { - ExecutionError::FileError(format!( - "error $st initial SCE ledger file {}: {}", - $cfg.settings - .initial_sce_ledger_path - .to_str() - .unwrap_or("(non-utf8 path)"), - err - )) - } - }; -} -pub(crate) use bootstrap_file_error; diff --git a/massa-execution/src/exports.rs b/massa-execution/src/exports.rs deleted file mode 100644 index 83203d0de72..00000000000 --- a/massa-execution/src/exports.rs +++ /dev/null @@ -1,46 +0,0 @@ -use massa_models::{DeserializeCompact, SerializeCompact, Slot}; -use serde::{Deserialize, Serialize}; - -use crate::sce_ledger::SCELedger; - -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct BootstrapExecutionState { - pub final_ledger: SCELedger, - pub final_slot: Slot, -} - -impl SerializeCompact for BootstrapExecutionState { - fn to_bytes_compact(&self) -> Result, massa_models::ModelsError> { - let mut res: Vec = Vec::new(); - - // final ledger - res.extend(self.final_ledger.to_bytes_compact()?); - - // final slot - res.extend(self.final_slot.to_bytes_compact()?); - - Ok(res) - } -} - -impl DeserializeCompact for BootstrapExecutionState { - fn from_bytes_compact(buffer: &[u8]) -> Result<(Self, usize), massa_models::ModelsError> { - let mut cursor = 0usize; - - // final ledger - let (final_ledger, delta) = SCELedger::from_bytes_compact(&buffer[cursor..])?; - cursor += delta; - - // final slot - let (final_slot, delta) = Slot::from_bytes_compact(&buffer[cursor..])?; - cursor += delta; - - Ok(( - BootstrapExecutionState { - final_ledger, - final_slot, - }, - cursor, - )) - } -} diff --git a/massa-execution/src/interface_impl.rs b/massa-execution/src/interface_impl.rs deleted file mode 100644 index d2208f1d6e8..00000000000 --- a/massa-execution/src/interface_impl.rs +++ /dev/null @@ -1,408 +0,0 @@ -//! Implementation of the interface used in the execution external library -use crate::types::{ExecutionContext, StackElement}; -use anyhow::{bail, Result}; -use massa_hash::hash::Hash; -use massa_models::{ - output_event::{EventExecutionContext, SCOutputEvent, SCOutputEventId}, - timeslots::get_block_slot_timestamp, - Amount, -}; -use massa_sc_runtime::{Interface, InterfaceClone}; -use massa_time::MassaTime; -use rand::Rng; -use std::str::FromStr; -use std::sync::{Arc, Mutex}; -use tracing::debug; - -macro_rules! 
context_guard { - ($self:ident) => { - $self - .context - .lock() - .expect("Failed to acquire lock on context.") - }; -} - -#[derive(Clone)] -pub(crate) struct InterfaceImpl { - context: Arc>, - thread_count: u8, - t0: MassaTime, - genesis_timestamp: MassaTime, -} - -impl InterfaceImpl { - pub fn new( - context: Arc>, - thread_count: u8, - t0: MassaTime, - genesis_timestamp: MassaTime, - ) -> InterfaceImpl { - InterfaceImpl { - context, - thread_count, - t0, - genesis_timestamp, - } - } -} - -impl InterfaceClone for InterfaceImpl { - fn clone_box(&self) -> Box { - Box::new(self.clone()) - } -} - -impl Interface for InterfaceImpl { - fn print(&self, message: &str) -> Result<()> { - debug!("SC print: {}", message); - Ok(()) - } - - fn init_call(&self, address: &str, raw_coins: u64) -> Result> { - // get target - let to_address = massa_models::Address::from_str(address)?; - - // write-lock context - let mut context = context_guard!(self); - - // get bytecode - let bytecode = match context.ledger_step.get_module(&to_address) { - Some(bytecode) => bytecode, - None => bail!("Error bytecode not found"), - }; - - // get caller - let from_address = match context.stack.last() { - Some(addr) => addr.address, - _ => bail!("Failed to read call stack current address"), - }; - - // transfer coins - let coins = massa_models::Amount::from_raw(raw_coins); - // debit - context - .ledger_step - .set_balance_delta(from_address, coins, false)?; - // credit - if let Err(err) = context - .ledger_step - .set_balance_delta(to_address, coins, true) - { - // cancel debit - context - .ledger_step - .set_balance_delta(from_address, coins, true) - .expect("credit failed after same-amount debit succeeded"); - bail!("Error crediting destination balance: {}", err); - } - - // prepare context - context.stack.push(StackElement { - address: to_address, - coins, - owned_addresses: vec![to_address], - }); - - Ok(bytecode) - } - - fn finish_call(&self) -> Result<()> { - let mut context = context_guard!(self); - if context.stack.pop().is_none() { - bail!("call stack out of bounds") - } - - Ok(()) - } - - /// Returns zero as a default if address not found. - fn get_balance(&self) -> Result { - let context = context_guard!(self); - let address = match context.stack.last() { - Some(addr) => addr.address, - _ => bail!("Failed to read call stack current address"), - }; - Ok(context.ledger_step.get_balance(&address).to_raw()) - } - - /// Returns zero as a default if address not found. - fn get_balance_for(&self, address: &str) -> Result { - let address = massa_models::Address::from_str(address)?; - Ok(context_guard!(self) - .ledger_step - .get_balance(&address) - .to_raw()) - } - - /// Requires a new address that contains the sent bytecode. - /// - /// Generate a new address with a concatenation of the block_id hash, the - /// operation index in the block and the index of address owned in context. 
- /// - /// Insert in the ledger the given bytecode in the generated address - fn create_module(&self, module: &[u8]) -> Result { - let mut context = context_guard!(self); - let (slot, created_addr_index) = (context.slot, context.created_addr_index); - let mut data: Vec = slot.to_bytes_key().to_vec(); - data.append(&mut created_addr_index.to_be_bytes().to_vec()); - if context.read_only { - data.push(0u8); - } else { - data.push(1u8); - } - let address = massa_models::Address(massa_hash::hash::Hash::compute_from(&data)); - let res = address.to_bs58_check(); - context - .ledger_step - .set_module(address, Some(module.to_vec())); - match context.stack.last_mut() { - Some(v) => { - v.owned_addresses.push(address); - } - None => bail!("owned addresses not found in stack"), - }; - context.created_addr_index += 1; - Ok(res) - } - - /// Requires the data at the address - fn raw_get_data_for(&self, address: &str, key: &str) -> Result> { - let addr = &massa_models::Address::from_bs58_check(address)?; - let key = massa_hash::hash::Hash::compute_from(key.as_bytes()); - let context = context_guard!(self); - match context.ledger_step.get_data_entry(addr, &key) { - Some(value) => Ok(value), - _ => bail!("Data entry not found"), - } - } - - /// Requires to replace the data in the current address - /// - /// Note: - /// The execution lib will allways use the current context address for the update - fn raw_set_data_for(&self, address: &str, key: &str, value: &[u8]) -> Result<()> { - let addr = massa_models::Address::from_str(address)?; - let key = massa_hash::hash::Hash::compute_from(key.as_bytes()); - let mut context = context_guard!(self); - let is_allowed = context - .stack - .last() - .map_or(false, |v| v.owned_addresses.contains(&addr)); - if !is_allowed { - bail!("You don't have the write access to this entry") - } - context - .ledger_step - .set_data_entry(addr, key, value.to_vec()); - Ok(()) - } - - fn has_data_for(&self, address: &str, key: &str) -> Result { - let context = context_guard!(self); - let addr = massa_models::Address::from_str(address)?; - let key = massa_hash::hash::Hash::compute_from(key.as_bytes()); - Ok(context.ledger_step.has_data_entry(&addr, &key)) - } - - fn raw_get_data(&self, key: &str) -> Result> { - let context = context_guard!(self); - let addr = match context.stack.last() { - Some(addr) => addr.address, - _ => bail!("Failed to read call stack current address"), - }; - let key = massa_hash::hash::Hash::compute_from(key.as_bytes()); - match context.ledger_step.get_data_entry(&addr, &key) { - Some(bytecode) => Ok(bytecode), - _ => bail!("Data entry not found"), - } - } - - fn raw_set_data(&self, key: &str, value: &[u8]) -> Result<()> { - let mut context = context_guard!(self); - let addr = match context.stack.last() { - Some(addr) => addr.address, - _ => bail!("Failed to read call stack current address"), - }; - let key = massa_hash::hash::Hash::compute_from(key.as_bytes()); - context - .ledger_step - .set_data_entry(addr, key, value.to_vec()); - Ok(()) - } - - fn has_data(&self, key: &str) -> Result { - let context = context_guard!(self); - let addr = match context.stack.last() { - Some(addr) => addr.address, - _ => bail!("Failed to read call stack current address"), - }; - let key = massa_hash::hash::Hash::compute_from(key.as_bytes()); - Ok(context.ledger_step.has_data_entry(&addr, &key)) - } - - /// hash data - fn hash(&self, data: &[u8]) -> Result { - Ok(massa_hash::hash::Hash::compute_from(data).to_bs58_check()) - } - - /// convert a pubkey to an address - fn 
address_from_public_key(&self, public_key: &str) -> Result { - let public_key = massa_signature::PublicKey::from_bs58_check(public_key)?; - let addr = massa_models::Address::from_public_key(&public_key); - Ok(addr.to_bs58_check()) - } - - /// Verify signature - fn signature_verify(&self, data: &[u8], signature: &str, public_key: &str) -> Result { - let signature = match massa_signature::Signature::from_bs58_check(signature) { - Ok(sig) => sig, - Err(_) => return Ok(false), - }; - let public_key = match massa_signature::PublicKey::from_bs58_check(public_key) { - Ok(pubk) => pubk, - Err(_) => return Ok(false), - }; - let h = massa_hash::hash::Hash::compute_from(data); - Ok(massa_signature::verify_signature(&h, &signature, &public_key).is_ok()) - } - - /// Transfer coins from the current address to a target address - /// to_address: target address - /// raw_amount: amount to transfer (in raw u64) - fn transfer_coins(&self, to_address: &str, raw_amount: u64) -> Result<()> { - let to_address = massa_models::Address::from_str(to_address)?; - let mut context = context_guard!(self); - let from_address = match context.stack.last() { - Some(addr) => addr.address, - _ => bail!("Failed to read call stack current address"), - }; - let amount = massa_models::Amount::from_raw(raw_amount); - // debit - context - .ledger_step - .set_balance_delta(from_address, amount, false)?; - // credit - if let Err(err) = context - .ledger_step - .set_balance_delta(to_address, amount, true) - { - // cancel debit - context - .ledger_step - .set_balance_delta(from_address, amount, true) - .expect("credit failed after same-amount debit succeeded"); - bail!("Error crediting destination balance: {}", err); - } - Ok(()) - } - - /// Transfer coins from the current address to a target address - /// from_address: source address - /// to_address: target address - /// raw_amount: amount to transfer (in raw u64) - fn transfer_coins_for( - &self, - from_address: &str, - to_address: &str, - raw_amount: u64, - ) -> Result<()> { - let from_address = massa_models::Address::from_str(from_address)?; - let to_address = massa_models::Address::from_str(to_address)?; - let mut context = context_guard!(self); - let is_allowed = context - .stack - .last() - .map_or(false, |v| v.owned_addresses.contains(&from_address)); - if !is_allowed { - bail!("You don't have the spending access to this entry") - } - let amount = massa_models::Amount::from_raw(raw_amount); - // debit - context - .ledger_step - .set_balance_delta(from_address, amount, false)?; - // credit - if let Err(err) = context - .ledger_step - .set_balance_delta(to_address, amount, true) - { - // cancel debit - context - .ledger_step - .set_balance_delta(from_address, amount, true) - .expect("credit failed after same-amount debit succeeded"); - bail!("Error crediting destination balance: {}", err); - } - Ok(()) - } - - /// Return the list of owned adresses of a given SC user - fn get_owned_addresses(&self) -> Result> { - match context_guard!(self).stack.last() { - Some(v) => Ok(v - .owned_addresses - .iter() - .map(|addr| addr.to_bs58_check()) - .collect()), - None => bail!("owned address stack out of bounds"), - } - } - - fn get_call_stack(&self) -> Result> { - Ok(context_guard!(self) - .stack - .iter() - .map(|addr| addr.address.to_bs58_check()) - .collect()) - } - - /// Get the amount of coins that have been made available for use by the caller of the currently executing code. 
- fn get_call_coins(&self) -> Result { - Ok(context_guard!(self) - .stack - .last() - .map(|e| e.coins) - .unwrap_or(Amount::zero()) - .to_raw()) - } - - /// generate an execution event and stores it - fn generate_event(&self, data: String) -> Result<()> { - let mut execution_context = context_guard!(self); - - // prepare id computation - // it is the hash of (slot, index_at_slot, readonly) - let mut to_hash: Vec = execution_context.slot.to_bytes_key().to_vec(); - to_hash.append(&mut execution_context.created_event_index.to_be_bytes().to_vec()); - to_hash.push(!execution_context.read_only as u8); - - let context = EventExecutionContext { - slot: execution_context.slot, - block: execution_context.opt_block_id, - call_stack: execution_context.stack.iter().map(|e| e.address).collect(), - read_only: execution_context.read_only, - index_in_slot: execution_context.created_event_index, - origin_operation_id: execution_context.origin_operation_id, - }; - let id = SCOutputEventId(Hash::compute_from(&to_hash)); - let event = SCOutputEvent { id, context, data }; - execution_context.created_event_index += 1; - execution_context.events.insert(id, event); - Ok(()) - } - - /// Returns the current time (millisecond unix timestamp) - fn get_time(&self) -> Result { - let slot = context_guard!(self).slot; - let ts = - get_block_slot_timestamp(self.thread_count, self.t0, self.genesis_timestamp, slot)?; - Ok(ts.to_millis()) - } - - /// Returns a random number (unsafe: can be predicted and manipulated) - fn unsafe_random(&self) -> Result { - let distr = rand::distributions::Uniform::new_inclusive(i64::MIN, i64::MAX); - Ok(context_guard!(self).unsafe_rng.sample(distr)) - } -} diff --git a/massa-execution/src/lib.rs b/massa-execution/src/lib.rs deleted file mode 100644 index 5794212407d..00000000000 --- a/massa-execution/src/lib.rs +++ /dev/null @@ -1,26 +0,0 @@ -#![feature(map_first_last)] - -mod controller; -mod error; -mod exports; -mod interface_impl; -mod sce_ledger; -mod settings; -mod types; -mod vm; -mod worker; - -pub use controller::{ - start_controller, ExecutionCommandSender, ExecutionEventReceiver, ExecutionManager, -}; -pub use error::ExecutionError; -pub use exports::BootstrapExecutionState; -pub use sce_ledger::{SCELedger, SCELedgerEntry}; -pub use settings::{ExecutionConfigs, ExecutionSettings}; -pub use worker::ExecutionCommand; -pub use worker::ExecutionEvent; -pub use worker::ExecutionManagementCommand; -pub use worker::ExecutionWorker; - -#[cfg(test)] -mod tests; diff --git a/massa-execution/src/sce_ledger.rs b/massa-execution/src/sce_ledger.rs deleted file mode 100644 index 3ce40315183..00000000000 --- a/massa-execution/src/sce_ledger.rs +++ /dev/null @@ -1,589 +0,0 @@ -use crate::ExecutionError; -use massa_hash::{hash::Hash, HASH_SIZE_BYTES}; -use massa_models::{ - array_from_slice, - constants::ADDRESS_SIZE_BYTES, - prehash::{BuildMap, Map}, - DeserializeCompact, DeserializeVarInt, ModelsError, SerializeCompact, SerializeVarInt, Slot, - {Address, Amount}, -}; - -use serde::{Deserialize, Serialize}; - -/// an entry in the SCE ledger -#[derive(Debug, Clone, Default, Serialize, Deserialize)] -pub struct SCELedgerEntry { - // SCE balance - pub balance: Amount, - - // optional executable module - pub opt_module: Option>, - - // datastore - pub data: Map>, -} - -impl SCELedgerEntry { - /// applies an entry update to self - pub fn apply_entry_update(&mut self, update: &SCELedgerEntryUpdate) { - // balance - if let Some(new_balance) = update.update_balance { - self.balance = new_balance; - } - - 
// module - if let Some(opt_module) = &update.update_opt_module { - self.opt_module = opt_module.clone(); - } - - // data - for (data_key, data_update) in update.update_data.iter() { - match data_update { - Some(new_data) => { - self.data.insert(*data_key, new_data.clone()); - } - None => { - self.data.remove(data_key); - } - } - } - } -} - -impl SerializeCompact for SCELedgerEntry { - fn to_bytes_compact(&self) -> Result, massa_models::ModelsError> { - let mut res: Vec = Vec::new(); - - // write balance - res.extend(self.balance.to_bytes_compact()?); - - // write opt module data - if let Some(module_data) = &self.opt_module { - // write that it is present - res.push(1); - - // write length - let length: u32 = module_data.len().try_into().map_err(|_| { - ModelsError::SerializeError( - "SCE ledger entry module data too long for serialization".into(), - ) - })?; - // TODO check against max length - res.extend(length.to_varint_bytes()); - - // write bytecode - res.extend(module_data); - } else { - // write that it is absent - res.push(0); - } - - // write data store - - // write length - let length: u32 = self.data.len().try_into().map_err(|_| { - ModelsError::SerializeError( - "SCE ledger entry data store too long for serialization".into(), - ) - })?; - // TODO limit length - res.extend(length.to_varint_bytes()); - - // write entry pairs - for (h, data_entry) in self.data.iter() { - // write hash - res.extend(h.to_bytes()); - - // write length - let length: u32 = data_entry.len().try_into().map_err(|_| { - ModelsError::SerializeError( - "SCE ledger entry data store entry too long for serialization".into(), - ) - })?; - // TODO check against max length - res.extend(length.to_varint_bytes()); - - // write data entry - res.extend(data_entry); - } - - Ok(res) - } -} - -impl DeserializeCompact for SCELedgerEntry { - fn from_bytes_compact(buffer: &[u8]) -> Result<(Self, usize), massa_models::ModelsError> { - let mut cursor = 0usize; - - // read balance - let (balance, delta) = Amount::from_bytes_compact(&buffer[cursor..])?; - cursor += delta; - - // read opt module data - let has_module = match buffer.get(cursor) { - Some(1) => true, - Some(0) => false, - _ => { - return Err(ModelsError::DeserializeError( - "could not deserialize ledger entry opt module data byte".into(), - )) - } - }; - cursor += 1; - let opt_module: Option> = if has_module { - // read length - let (length, delta) = u32::from_varint_bytes(&buffer[cursor..])?; - // TOOD limit length with from_varint_bytes_bounded - cursor += delta; - - // read items - if let Some(slice) = buffer.get(cursor..(cursor + (length as usize))) { - cursor += length as usize; - Some(slice.to_vec()) - } else { - return Err(ModelsError::DeserializeError( - "could not deserialize ledger entry module bytes: buffer too small".into(), - )); - } - } else { - None - }; - - // read data store - - // read length - let (length, delta) = u32::from_varint_bytes(&buffer[cursor..])?; - // TOOD limit length with from_varint_bytes_bounded - cursor += delta; - - // read entry pairs - let mut data: Map> = - Map::with_capacity_and_hasher(length as usize, BuildMap::default()); - for _ in 0..length { - // read hash - let h = Hash::from_bytes(&array_from_slice(&buffer[cursor..])?)?; - cursor += HASH_SIZE_BYTES; - - // read data length - let (d_length, delta) = u32::from_varint_bytes(&buffer[cursor..])?; - // TOOD limit d_length with from_varint_bytes_bounded - cursor += delta; - - // read data - let entry_data = if let Some(slice) = buffer.get(cursor..(cursor + (d_length as 
usize))) - { - cursor += d_length as usize; - slice.to_vec() - } else { - return Err(ModelsError::DeserializeError( - "could not deserialize ledger entry data store entry bytes: buffer too small" - .into(), - )); - }; - - // insert - data.insert(h, entry_data); - } - - Ok(( - SCELedgerEntry { - balance, - opt_module, - data, - }, - cursor, - )) - } -} - -// optional updates to be applied to a ledger entry -#[derive(Debug, Clone, Default)] -pub struct SCELedgerEntryUpdate { - pub update_balance: Option, - pub update_opt_module: Option>>, - pub update_data: Map>>, // None for row deletion -} - -impl SCELedgerEntryUpdate { - /// apply another SCELedgerEntryUpdate to self - pub fn apply_entry_update(&mut self, other: &SCELedgerEntryUpdate) { - // balance - if let Some(new_balance) = other.update_balance { - self.update_balance = Some(new_balance); - } - - // module - if let Some(new_opt_module) = &other.update_opt_module { - self.update_opt_module = Some(new_opt_module.clone()); - } - - // data - self.update_data.extend(other.update_data.clone()); - } -} - -#[derive(Debug, Clone)] -pub enum SCELedgerChange { - // delete an entry - Delete, - // sets an entry to an absolute value - Set(SCELedgerEntry), - // updates an entry - Update(SCELedgerEntryUpdate), -} - -impl Default for SCELedgerChange { - fn default() -> Self { - Self::Delete - } -} - -impl SCELedgerChange { - /// applies another SCELedgerChange to the current one - pub fn apply_change(&mut self, other: &SCELedgerChange) { - let new_val = match (&self, other) { - // other deletes the entry - (_, SCELedgerChange::Delete) => { - // make self delete as well - SCELedgerChange::Delete - } - - // other sets an absolute entry - (_, new_set @ SCELedgerChange::Set(_)) => { - // make self set the same absolute entry - new_set.clone() - } - - // self deletes, other updates - (SCELedgerChange::Delete, SCELedgerChange::Update(other_entry_update)) => { - // prepare a default entry - let mut res_entry = SCELedgerEntry::default(); - // apply other's updates to res_entry - res_entry.apply_entry_update(other_entry_update); - // make self set to res_entry - SCELedgerChange::Set(res_entry) - } - - // self sets, other updates - (SCELedgerChange::Set(cur_entry), SCELedgerChange::Update(other_entry_update)) => { - // apply other's updates to cur_entry - // TODO avoid clone, act directly on mutable cur_entry - let mut res_entry = cur_entry.clone(); - res_entry.apply_entry_update(other_entry_update); - SCELedgerChange::Set(res_entry) - } - - // self updates, other updates - ( - SCELedgerChange::Update(cur_entry_update), - SCELedgerChange::Update(other_entry_update), - ) => { - // try to apply other's updates to self's updates - // TODO avoid clone, act directly on mutable cur_entry_update - let mut res_update = cur_entry_update.clone(); - res_update.apply_entry_update(other_entry_update); - SCELedgerChange::Update(res_update) - } - }; - *self = new_val; - } -} - -/// SCE ledger -#[derive(Debug, Clone, Default, Serialize, Deserialize)] -pub struct SCELedger(pub Map); - -impl SerializeCompact for SCELedger { - fn to_bytes_compact(&self) -> Result, massa_models::ModelsError> { - let mut res: Vec = Vec::new(); - - // write length - let length: u32 = self.0.len().try_into().map_err(|_| { - ModelsError::SerializeError("SCE ledger too long for serialization".into()) - })?; - // TODO limit length - res.extend(length.to_varint_bytes()); - - // write entry pairs - for (addr, ledger_entry) in self.0.iter() { - // write address - res.extend(addr.to_bytes()); - - // write 
ledger entry - res.extend(ledger_entry.to_bytes_compact()?); - } - - Ok(res) - } -} - -impl DeserializeCompact for SCELedger { - fn from_bytes_compact(buffer: &[u8]) -> Result<(Self, usize), massa_models::ModelsError> { - let mut cursor = 0usize; - - // read length - let (length, delta) = u32::from_varint_bytes(&buffer[cursor..])?; - // TOOD limit length with from_varint_bytes_bounded - cursor += delta; - - // read entry pairs - let mut res_ledger: Map = - Map::with_capacity_and_hasher(length as usize, BuildMap::default()); - for _ in 0..length { - // read address - let address = Address::from_bytes(&array_from_slice(&buffer[cursor..])?)?; - cursor += ADDRESS_SIZE_BYTES; - - // read ledger entry - let (ledger_entry, delta) = SCELedgerEntry::from_bytes_compact(&buffer[cursor..])?; - cursor += delta; - - // add to output ledger - res_ledger.insert(address, ledger_entry); - } - - Ok((SCELedger(res_ledger), cursor)) - } -} - -/// list of ledger changes (deletions, resets, updates) -#[derive(Debug, Clone, Default)] -pub struct SCELedgerChanges(pub Map); - -impl SCELedgerChanges { - /// extends the current SCELedgerChanges with another - pub fn apply_changes(&mut self, changes: &SCELedgerChanges) { - for (addr, change) in changes.0.iter() { - self.apply_change(*addr, change); - } - } - - /// appliees a single change to self - pub fn apply_change(&mut self, addr: Address, change: &SCELedgerChange) { - self.0 - .entry(addr) - .and_modify(|cur_c| cur_c.apply_change(change)) - .or_insert_with(|| change.clone()); - } - - pub fn clear(&mut self) { - self.0.clear(); - } -} - -impl SCELedger { - /// creates an SCELedger from a hashmap of balances - pub fn from_balances_map(balances_map: Map) -> Self { - SCELedger( - balances_map - .into_iter() - .map(|(k, v)| { - ( - k, - SCELedgerEntry { - balance: v, - ..Default::default() - }, - ) - }) - .collect(), - ) - } - - /// applies ledger changes to ledger - pub fn apply_changes(&mut self, changes: &SCELedgerChanges) { - for (addr, change) in changes.0.iter() { - match change { - // delete entry - SCELedgerChange::Delete => { - self.0.remove(addr); - } - - // set entry to absolute value - SCELedgerChange::Set(new_entry) => { - self.0.insert(*addr, new_entry.clone()); - } - - // update entry - SCELedgerChange::Update(update) => { - // insert default if absent - self.0 - .entry(*addr) - .or_insert_with(SCELedgerEntry::default) - .apply_entry_update(update); - } - } - } - } -} - -/// The final ledger. -#[derive(Debug, Clone)] -pub struct FinalLedger { - /// The slot of the ledger. - pub slot: Slot, - /// The ledger. 
- pub ledger: SCELedger, -} - -/// represents an execution step from the point of view of the SCE ledger -/// applying cumulative_history_changes then caused_changes to final_ledger yields the current ledger during the ledger step -#[derive(Debug, Clone)] -pub struct SCELedgerStep { - // The final ledger and its slot - pub final_ledger_slot: FinalLedger, - - // accumulator of existing ledger changes - pub cumulative_history_changes: SCELedgerChanges, - - // additional changes caused by the step - pub caused_changes: SCELedgerChanges, -} - -impl SCELedgerStep { - /// gets the balance of an SCE ledger entry - pub fn get_balance(&self, addr: &Address) -> Amount { - // check if caused_changes or cumulative_history_changes have an update on this - for changes in [&self.caused_changes, &self.cumulative_history_changes] { - match changes.0.get(addr) { - Some(SCELedgerChange::Delete) => return Amount::zero(), - Some(SCELedgerChange::Set(new_entry)) => return new_entry.balance, - Some(SCELedgerChange::Update(update)) => { - if let Some(updated_balance) = update.update_balance { - return updated_balance; - } - } - None => {} - } - } - // check if the final ledger has the info - if let Some(entry) = self.final_ledger_slot.ledger.0.get(addr) { - return entry.balance; - } - // otherwise, just return zero - Amount::zero() - } - - /// sets the balance of an address - pub fn set_balance(&mut self, addr: Address, balance: Amount) { - let update = SCELedgerEntryUpdate { - update_balance: Some(balance), - update_opt_module: Default::default(), - update_data: Default::default(), - }; - self.caused_changes - .apply_change(addr, &SCELedgerChange::Update(update)); - } - - /// tries to increase/decrease the balance of an address - /// does not change anything on failure - pub fn set_balance_delta( - &mut self, - addr: Address, - amount: Amount, - positive: bool, - ) -> Result<(), ExecutionError> { - let mut balance = self.get_balance(&addr); - if positive { - balance = balance - .checked_add(amount) - .ok_or_else(|| ModelsError::CheckedOperationError("balance overflow".into()))?; - } else { - balance = balance - .checked_sub(amount) - .ok_or_else(|| ModelsError::CheckedOperationError("balance underflow".into()))?; - } - self.set_balance(addr, balance); - Ok(()) - } - - /// gets the module of an SCE ledger entry - /// returns None if the entry was not found or has no module - pub fn get_module(&self, addr: &Address) -> Option> { - // check if caused_changes or cumulative_history_changes have an update on this - for changes in [&self.caused_changes, &self.cumulative_history_changes] { - match changes.0.get(addr) { - Some(SCELedgerChange::Delete) => return None, - Some(SCELedgerChange::Set(new_entry)) => return new_entry.opt_module.clone(), - Some(SCELedgerChange::Update(update)) => { - if let Some(updates_opt_module) = &update.update_opt_module { - return updates_opt_module.clone(); - } - } - None => {} - } - } - // check if the final ledger has the info - match self.final_ledger_slot.ledger.0.get(addr) { - Some(entry) => entry.opt_module.clone(), - _ => None, - } - } - - /// returns a data entry - /// None if address not found or entry nto found in addr's data - pub fn get_data_entry(&self, addr: &Address, key: &Hash) -> Option> { - // check if caused_changes or cumulative_history_changes have an update on this - for changes in [&self.caused_changes, &self.cumulative_history_changes] { - match changes.0.get(addr) { - Some(SCELedgerChange::Delete) => return None, - Some(SCELedgerChange::Set(new_entry)) => return 
new_entry.data.get(key).cloned(), - Some(SCELedgerChange::Update(update)) => { - match update.update_data.get(key) { - None => {} // no updates - Some(None) => return None, // data entry deleted, - Some(Some(updated_data)) => return Some(updated_data.clone()), - } - } - None => {} - } - } - - // check if the final ledger has the info - match self.final_ledger_slot.ledger.0.get(addr) { - Some(entry) => entry.data.get(key).cloned(), - _ => None, - } - } - - /// checks if a data entry exists - pub fn has_data_entry(&self, addr: &Address, key: &Hash) -> bool { - // check if caused_changes or cumulative_history_changes have an update on this - for changes in [&self.caused_changes, &self.cumulative_history_changes] { - match changes.0.get(addr) { - Some(SCELedgerChange::Delete) => return false, - Some(SCELedgerChange::Set(_)) => return true, - Some(SCELedgerChange::Update(update)) => { - match update.update_data.get(key) { - None => {} // no updates - Some(None) => return false, // data entry deleted, - Some(Some(_)) => return true, - } - } - None => {} - } - } - - // check if the final ledger has the info - self.final_ledger_slot.ledger.0.contains_key(addr) - } - - /// sets data entry - pub fn set_data_entry(&mut self, addr: Address, key: Hash, value: Vec) { - let update = SCELedgerEntryUpdate { - update_data: [(key, Some(value))].into_iter().collect(), - ..Default::default() - }; - self.caused_changes - .apply_change(addr, &SCELedgerChange::Update(update)); - } - - pub fn set_module(&mut self, addr: Address, opt_module: Option>) { - let update = SCELedgerEntryUpdate { - update_opt_module: Some(opt_module), - ..Default::default() - }; - self.caused_changes - .apply_change(addr, &SCELedgerChange::Update(update)); - } -} diff --git a/massa-execution/src/settings.rs b/massa-execution/src/settings.rs deleted file mode 100644 index 19fd04de306..00000000000 --- a/massa-execution/src/settings.rs +++ /dev/null @@ -1,40 +0,0 @@ -use massa_models::constants::{GENESIS_TIMESTAMP, T0, THREAD_COUNT}; -use massa_time::MassaTime; -use std::path::PathBuf; - -use serde::{Deserialize, Serialize}; - -/// Execution setting parsed with .toml in `massa-node/src/settings.rs` -#[derive(Debug, Deserialize, Serialize, Clone, Default)] -pub struct ExecutionSettings { - /// Initial SCE ledger file - pub initial_sce_ledger_path: PathBuf, - /// maximum number of SC output events kept in cache - pub max_final_events: usize, -} - -#[derive(Debug, Deserialize, Serialize, Clone)] -pub struct ExecutionConfigs { - /// Execution settings - pub settings: ExecutionSettings, - /// Thread count - pub thread_count: u8, - /// Genesis timestmap - pub genesis_timestamp: MassaTime, - /// period duration - pub t0: MassaTime, - /// clock compensation in milliseconds - pub clock_compensation: i64, -} - -impl Default for ExecutionConfigs { - fn default() -> Self { - Self { - settings: Default::default(), - thread_count: THREAD_COUNT, - genesis_timestamp: *GENESIS_TIMESTAMP, - t0: T0, - clock_compensation: Default::default(), - } - } -} diff --git a/massa-execution/src/spec.md b/massa-execution/src/spec.md deleted file mode 100644 index c209a217431..00000000000 --- a/massa-execution/src/spec.md +++ /dev/null @@ -1,71 +0,0 @@ -We described here what should be done whan a bytecode call another -(spoted by get_module in the interface) - -```rust - -/// ABI allowing a contract to call another. 
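// The datastore lookups in the deleted sce_ledger.rs above (get_data_entry / has_data_entry)
// resolve a three-state overlay: None = no pending change, Some(None) = pending deletion,
// Some(Some(v)) = pending overwrite. Below is a minimal, self-contained sketch of that
// pattern using plain std maps; the names are illustrative and not part of the deleted API.

use std::collections::HashMap;

/// Pending datastore updates layered on top of a base store.
/// A `None` value in `pending` marks the key as deleted.
struct Overlay {
    base: HashMap<String, Vec<u8>>,
    pending: HashMap<String, Option<Vec<u8>>>,
}

impl Overlay {
    /// Resolution order: pending overwrite, then pending deletion, then the base store.
    fn get(&self, key: &str) -> Option<Vec<u8>> {
        match self.pending.get(key) {
            Some(Some(v)) => Some(v.clone()),    // pending overwrite
            Some(None) => None,                  // pending deletion
            None => self.base.get(key).cloned(), // no pending change: fall back to the base
        }
    }
}

fn overlay_demo() {
    let base = HashMap::from([("a".to_string(), vec![1u8]), ("b".to_string(), vec![2u8])]);
    let pending = HashMap::from([
        ("a".to_string(), None),            // delete "a"
        ("c".to_string(), Some(vec![3u8])), // create "c"
    ]);
    let overlay = Overlay { base, pending };
    assert_eq!(overlay.get("a"), None);
    assert_eq!(overlay.get("b"), Some(vec![2u8]));
    assert_eq!(overlay.get("c"), Some(vec![3u8]));
}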
-fn _call(shared_env: &SharedExecutionContext, addr: Address, func_name: String, max_gas: u64) { - //TODO add arbitrary input parameters and return value - - //TODO metering / mem limit - - // prepare execution - let old_max_gas; - let old_coins; - let target_module; - let ledger_push; - { - let mut exec_context_guard = shared_env.0.lock().unwrap(); - - // TODO make sure max_gas >= context.remaining_gas - - // get target module - if let Some(module) = (*exec_context_guard).ledger_step._get_module(&addr) { - target_module = module; - } else { - // no module to call - // TODO error - return; - } - - // save old context values - ledger_push = (*exec_context_guard).ledger_step.caused_changes.clone(); - old_max_gas = (*exec_context_guard).max_gas; // save old max gas - old_coins = (*exec_context_guard).coins; - - // update context - (*exec_context_guard).max_gas = max_gas; - (*exec_context_guard).coins = Amount::zero(); // TODO maybe allow sending coins in the call - (*exec_context_guard).call_stack.push_back(addr); - } - - // run - let mut run_failed = false; - match Instance::new(&target_module, &ImportObject::new()) // TODO bring imports into the execution context (?) - .map(|inst| inst.exports.get_function(&func_name).unwrap().clone()) - .map(|f| f.native::<(), ()>().unwrap()) // TODO figure out the "native" explicit parameters - .map(|f| f.call()) - { - Ok(_rets) => { - // TODO check what to do with the return values. - } - Err(_err) => { - // failed to find target func, or invalid parameters, or execution error - run_failed = true; - } - } - - // unstack execution context - { - let mut exec_context_guard = shared_env.0.lock().unwrap(); - (*exec_context_guard).max_gas = old_max_gas; - (*exec_context_guard).coins = old_coins; - (*exec_context_guard).call_stack.pop_back(); - if run_failed { - // if the run failed, cancel its consequences on the ledger - (*exec_context_guard).ledger_step.caused_changes = ledger_push; - } - } -} - -``` \ No newline at end of file diff --git a/massa-execution/src/vm.rs b/massa-execution/src/vm.rs deleted file mode 100644 index 15414d13d3b..00000000000 --- a/massa-execution/src/vm.rs +++ /dev/null @@ -1,510 +0,0 @@ -use crate::error::bootstrap_file_error; -use crate::types::{ - EventStore, ExecutionContext, ExecutionData, ExecutionStep, StackElement, StepHistory, - StepHistoryItem, -}; -use crate::{ - interface_impl::InterfaceImpl, - sce_ledger::{FinalLedger, SCELedger, SCELedgerChanges}, -}; -use crate::{settings::ExecutionConfigs, ExecutionError}; - -use massa_models::{ - api::SCELedgerInfo, - execution::{ExecuteReadOnlyResponse, ReadOnlyResult}, - output_event::SCOutputEvent, - prehash::Map, - timeslots::{get_latest_block_slot_at_timestamp, slot_count_in_range}, - Address, Amount, BlockId, OperationId, Slot, -}; -use massa_sc_runtime::Interface; -use massa_signature::{derive_public_key, generate_random_private_key}; -use massa_time::MassaTime; -use rand::SeedableRng; -use rand_xoshiro::Xoshiro256PlusPlus; -use std::mem; -use std::sync::{Arc, Mutex}; -use tokio::sync::oneshot; -use tracing::debug; - -/// Virtual Machine and step history system -pub(crate) struct VM { - /// thread count - thread_count: u8, - - genesis_timestamp: MassaTime, - t0: MassaTime, - - /// history of SCE-active executed steps - step_history: StepHistory, - - /// execution interface used by the runtime - execution_interface: Box, - - /// execution context - execution_context: Arc>, - - /// final events - final_events: EventStore, -} - -impl VM { - pub fn new( - cfg: ExecutionConfigs, - 
ledger_bootstrap: Option<(SCELedger, Slot)>, - ) -> Result { - let (ledger_bootstrap, ledger_slot) = - if let Some((ledger_bootstrap, ledger_slot)) = ledger_bootstrap { - // bootstrap from snapshot - (ledger_bootstrap, ledger_slot) - } else { - // not bootstrapping: load initial SCE ledger from file - let ledger_slot = Slot::new(0, cfg.thread_count.saturating_sub(1)); // last genesis block - let ledgger_balances = serde_json::from_str::>( - &std::fs::read_to_string(&cfg.settings.initial_sce_ledger_path) - .map_err(bootstrap_file_error!("loading", cfg))?, - ) - .map_err(bootstrap_file_error!("parsing", cfg))?; - let ledger_bootstrap = SCELedger::from_balances_map(ledgger_balances); - (ledger_bootstrap, ledger_slot) - }; - - // Context shared between VM and the interface provided to the assembly simulator. - let execution_context = Arc::new(Mutex::new(ExecutionContext::new( - ledger_bootstrap, - ledger_slot, - ))); - - // Instantiate the interface used by the assembly simulator. - let execution_interface = Box::new(InterfaceImpl::new( - Arc::clone(&execution_context), - cfg.thread_count, - cfg.t0, - cfg.genesis_timestamp, - )); - - Ok(VM { - thread_count: cfg.thread_count, - step_history: Default::default(), - execution_interface, - execution_context, - final_events: Default::default(), - genesis_timestamp: cfg.genesis_timestamp, - t0: cfg.t0, - }) - } - - // clone bootstrap state (final ledger and slot) - pub fn get_bootstrap_state(&self) -> FinalLedger { - self.execution_context - .lock() - .unwrap() - .ledger_step - .final_ledger_slot - .clone() - } - - /// Get events optionnally filtered by: - /// * start slot - /// * end slot - /// * emitter address - /// * original caller address - /// * operation id - pub fn get_filtered_sc_output_event( - &self, - start: Option, - end: Option, - emitter_address: Option
, - original_caller_address: Option<Address>
, - original_operation_id: Option, - ) -> Vec { - // iter on step history chained with final events - let start = start.unwrap_or_else(|| Slot::new(0, 0)); - let end = end.unwrap_or(match MassaTime::now() { - Ok(now) => get_latest_block_slot_at_timestamp( - self.thread_count, - self.t0, - self.genesis_timestamp, - now, - ) - .unwrap_or_else(|_| Some(Slot::new(0, 0))) - .unwrap_or_else(|| Slot::new(0, 0)), - Err(_) => Slot::new(0, 0), - }); - self.step_history - .iter() - .filter(|item| item.slot >= start && item.slot < end) - .flat_map(|item| { - item.events.get_filtered_sc_output_event( - start, - end, - emitter_address, - original_caller_address, - original_operation_id, - ) - }) - .chain(self.final_events.get_filtered_sc_output_event( - start, - end, - emitter_address, - original_caller_address, - original_operation_id, - )) - .collect() - } - - // clone bootstrap state (final ledger and slot) - pub fn get_sce_ledger_entry_for_addresses( - &self, - addresses: Vec
, - ) -> Map { - let ledger = &self - .execution_context - .lock() - .unwrap() - .ledger_step - .final_ledger_slot - .ledger; - addresses - .into_iter() - .map(|ad| { - let entry = ledger.0.get(&ad).cloned().unwrap_or_default(); - ( - ad, - SCELedgerInfo { - balance: entry.balance, - module: entry.opt_module, - datastore: entry.data, - }, - ) - }) - .collect() - } - - /// runs an SCE-final execution step - /// See https://github.com/massalabs/massa/wiki/vm_ledger_interaction - /// - /// # Parameters - /// * step: execution step to run - /// * max_final_events: max number of events kept in cache (todo should be removed when config become static) - pub(crate) fn run_final_step(&mut self, step: ExecutionStep, max_final_events: usize) { - // check if that step was already executed as the earliest active step - let history_item = if let Some(cached) = self.pop_cached_step(&step) { - // if so, pop it - cached - } else { - // otherwise, clear step history an run it again explicitly - self.step_history.clear(); - self.run_step_internal(&step) - }; - - // apply ledger changes to final ledger - let mut context = self.execution_context.lock().unwrap(); - let mut ledger_step = &mut (*context).ledger_step; - ledger_step - .final_ledger_slot - .ledger - .apply_changes(&history_item.ledger_changes); - ledger_step.final_ledger_slot.slot = step.slot; - - self.final_events.extend(mem::take(&mut context.events)); - self.final_events.prune(max_final_events) - } - - /// check if step already at history front, if so, pop it - fn pop_cached_step(&mut self, step: &ExecutionStep) -> Option { - let found = if let Some(StepHistoryItem { - slot, opt_block_id, .. - }) = self.step_history.front() - { - if *slot == step.slot { - match (&opt_block_id, &step.block) { - // matching miss - (None, None) => true, - - // matching block - (Some(b_id_hist), Some((b_id_step, _b_step))) => (b_id_hist == b_id_step), - - // miss/block mismatch - (None, Some(_)) => false, - - // block/miss mismatch - (Some(_), None) => false, - } - } else { - false // slot mismatch - } - } else { - false // no item - }; - - // rerturn the step if found - if found { - self.step_history.pop_front() - } else { - None - } - } - - /// Tooling function that has to be run before each new step execution, even if we are in read-only - /// - /// Clear all caused changes in the context - /// Set cumulative_hisory_changes = step_history.into_changes - /// Reset the execution call stack and the owned addresses - fn clear_and_update_context(&self) { - let mut context = self.execution_context.lock().unwrap(); - context.ledger_step.caused_changes.clear(); - context.ledger_step.cumulative_history_changes = - SCELedgerChanges::from(self.step_history.clone()); - context.created_addr_index = 0; - context.created_event_index = 0; - context.stack.clear(); - context.events.clear(); - context.read_only = false; - context.origin_operation_id = None; - } - - /// Prepares (updates) the shared context before the new operation. - /// Returns a snapshot of the current caused ledger changes. 
- /// See https://github.com/massalabs/massa/wiki/vm_ledger_interaction - /// TODO: do not ignore the results - /// TODO: consider dispatching gas fees with edorsers/endorsees as well - /// Returns (backup of local ledger changes, backup of created_addr_index, - /// backup of events, backup of created_events_index, backup of unsafe rng) - fn prepare_context( - &self, - data: &ExecutionData, - block_creator_addr: Address, - block_id: BlockId, - slot: Slot, - operation: Option, - ) -> (SCELedgerChanges, u64, EventStore, u64, Xoshiro256PlusPlus) { - let mut context = self.execution_context.lock().unwrap(); - // make context.ledger_step credit Op's sender with Op.coins in the SCE ledger - let _result = context - .ledger_step - .set_balance_delta(data.sender_address, data.coins, true); - - // make context.ledger_step credit the producer of the block B with Op.max_gas * Op.gas_price in the SCE ledger - let _result = context.ledger_step.set_balance_delta( - block_creator_addr, - data.gas_price.saturating_mul_u64(data.max_gas), - true, - ); - - // fill context for execution - // created_addr_index is not reset here (it is used at the slot scale) - context.gas_price = data.gas_price; - context.max_gas = data.max_gas; - context.stack = vec![StackElement { - address: data.sender_address, - coins: data.coins, - owned_addresses: vec![data.sender_address], - }]; - context.slot = slot; - context.opt_block_id = Some(block_id); - context.opt_block_creator_addr = Some(block_creator_addr); - context.origin_operation_id = operation; - - ( - context.ledger_step.caused_changes.clone(), - context.created_addr_index, - context.events.clone(), - context.created_event_index, - context.unsafe_rng.clone(), - ) - } - - /// Run code in read-only mode - pub(crate) fn run_read_only( - &self, - slot: Slot, - max_gas: u64, - simulated_gas_price: Amount, - bytecode: Vec, - address: Option
, - result_sender: oneshot::Sender, - ) { - // Reset active ledger changes history - self.clear_and_update_context(); - - { - let mut context = self.execution_context.lock().unwrap(); - - // Set the call stack, using the provided address, or a random one. - let address = address.unwrap_or_else(|| { - let private_key = generate_random_private_key(); - let public_key = derive_public_key(&private_key); - Address::from_public_key(&public_key) - }); - - context.stack = vec![StackElement { - address, - coins: Amount::zero(), - owned_addresses: vec![address], - }]; - - // Set read-only - context.read_only = true; - - // Set the max gas. - context.max_gas = max_gas; - - // Set the simulated gas price. - context.gas_price = simulated_gas_price; - - // Seed the RNG - let mut seed: Vec = slot.to_bytes_key().to_vec(); - seed.push(0u8); // read-only - let seed = massa_hash::hash::Hash::compute_from(&seed).to_bytes(); - context.unsafe_rng = Xoshiro256PlusPlus::from_seed(seed); - } - - // run in the intepreter - let run_result = massa_sc_runtime::run(&bytecode, max_gas, &*self.execution_interface); - - let mut context = self.execution_context.lock().unwrap(); - // Send result back. - let execution_response = ExecuteReadOnlyResponse { - executed_at: slot, - // TODO: specify result. - result: run_result.map_or_else( - |_| ReadOnlyResult::Error("Failed to run in read-only mode".to_string()), - |_| ReadOnlyResult::Ok, - ), - // integrate with output events. - output_events: mem::take(&mut context.events).export(), - }; - if result_sender.send(execution_response).is_err() { - debug!("Execution: could not send ExecuteReadOnlyResponse."); - } - - // Note: changes are not applied to the ledger. - } - - /// Runs an active step - /// See https://github.com/massalabs/massa/wiki/vm_ledger_interaction - /// - /// 1. Get step history (cache of final ledger changes by slot and block_id history) - /// 2. clear caused changes - /// 3. accumulated step history - /// 4. Execute each block of each operation - /// - /// # Parameters - /// * step: execution step to run - fn run_step_internal(&mut self, step: &ExecutionStep) -> StepHistoryItem { - // reset active ledger changes history - self.clear_and_update_context(); - - { - let mut context = self.execution_context.lock().unwrap(); - - // seed the RNG - let mut seed: Vec = step.slot.to_bytes_key().to_vec(); - seed.push(1u8); // not read-only - if let Some((block_id, _block)) = &step.block { - seed.extend(block_id.to_bytes()); // append block ID - } - let seed = massa_hash::hash::Hash::compute_from(&seed).to_bytes(); - context.unsafe_rng = Xoshiro256PlusPlus::from_seed(seed); - } - - // run implicit and async calls - // TODO - - // run explicit calls within the block (if the slot is not a miss) - // note that total block gas is not checked, because currently Protocol makes the block invalid if it overflows gas - let opt_block_id: Option; - if let Some((block_id, block)) = &step.block { - opt_block_id = Some(*block_id); - - // get block creator addr - let block_creator_addr = Address::from_public_key(&block.header.content.creator); - // run all operations - for (op_idx, operation) in block.operations.iter().enumerate() { - // process ExecuteSC operations only - let execution_data = match ExecutionData::try_from(operation) { - Ok(data) => data, - _ => continue, - }; - - // Prepare context and save the initial ledger changes before execution. - // The returned snapshot takes into account the initial coin credits. 
- // This snapshot will be popped back if bytecode execution fails. - let ( - ledger_changes_backup, - created_addr_index_backup, - events_backup, - event_index_backup, - rng_backup, - ) = self.prepare_context( - &execution_data, - block_creator_addr, - *block_id, - step.slot, - Some(match operation.get_operation_id() { - Ok(id) => id, - Err(_) => continue, - }), - ); - - // run in the intepreter - let run_result = massa_sc_runtime::run( - &execution_data.bytecode, - execution_data.max_gas, - &*self.execution_interface, - ); - if let Err(err) = run_result { - debug!( - "failed running bytecode in operation index {} in block {}: {}", - op_idx, block_id, err - ); - // cancel the effects of execution only, pop back init_changes - let mut context = self.execution_context.lock().unwrap(); - context.ledger_step.caused_changes = ledger_changes_backup; - context.created_addr_index = created_addr_index_backup; - context.events = events_backup; - context.created_event_index = event_index_backup; - context.unsafe_rng = rng_backup; - context.origin_operation_id = None; - } - } - } else { - // There is no block for this step, miss - opt_block_id = None; - } - - // generate history item - let mut context = self.execution_context.lock().unwrap(); - StepHistoryItem { - slot: step.slot, - opt_block_id, - ledger_changes: mem::take(&mut context.ledger_step.caused_changes), - events: mem::take(&mut context.events), - } - } - - /// runs an SCE-active execution step - /// See https://github.com/massalabs/massa/wiki/vm_ledger_interaction - /// - /// # Parameters - /// * step: execution step to run - pub(crate) fn run_active_step(&mut self, step: ExecutionStep) { - // rewind history to optimize execution - if let Some(front_slot) = self.step_history.front().map(|h| h.slot) { - if let Ok(len) = slot_count_in_range(front_slot, step.slot, self.thread_count) { - self.step_history.truncate(len as usize); - } - } - - // run step - let history_item = self.run_step_internal(&step); - - // push step into history - self.step_history.push_back(history_item); - } - - pub fn reset_to_final(&mut self) { - self.step_history.clear(); - } -} diff --git a/massa-execution/src/worker.rs b/massa-execution/src/worker.rs deleted file mode 100644 index 92b00c5b22e..00000000000 --- a/massa-execution/src/worker.rs +++ /dev/null @@ -1,557 +0,0 @@ -use crate::error::ExecutionError; -use crate::sce_ledger::FinalLedger; -use crate::types::{ExecutionQueue, ExecutionRequest}; -use crate::vm::VM; -use crate::BootstrapExecutionState; -use crate::{settings::ExecutionConfigs, types::ExecutionStep}; -use massa_models::api::SCELedgerInfo; -use massa_models::execution::ExecuteReadOnlyResponse; -use massa_models::output_event::SCOutputEvent; -use massa_models::prehash::Map; -use massa_models::timeslots::{get_block_slot_timestamp, get_current_latest_block_slot}; -use massa_models::{Address, Amount, Block, BlockId, OperationId, Slot}; -use std::collections::BTreeMap; -use std::thread::{self, JoinHandle}; -use tokio::sync::{mpsc, oneshot}; -use tokio::time::sleep_until; -use tracing::debug; - -/// Commands sent to the `execution` component. 
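The `ExecutionCommand` enum defined below pairs each query with a `oneshot` sender so the worker can reply with a typed value. A small, self-contained `tokio` sketch of that request/response pattern, with simplified command and state types (illustrative only, not the real API):

```rust
use tokio::sync::{mpsc, oneshot};

/// Simplified command: ask the worker for a value and receive it on a oneshot channel.
enum Command {
    GetState { response_tx: oneshot::Sender<u64> },
}

#[tokio::main]
async fn main() {
    let (cmd_tx, mut cmd_rx) = mpsc::channel::<Command>(16);

    // Worker task: answers each command on its embedded oneshot sender.
    let worker = tokio::spawn(async move {
        let state = 42u64;
        while let Some(cmd) = cmd_rx.recv().await {
            match cmd {
                Command::GetState { response_tx } => {
                    // Ignore the error if the caller dropped its receiver.
                    let _ = response_tx.send(state);
                }
            }
        }
    });

    // Caller side: send a command with a fresh oneshot and await the reply.
    let (response_tx, response_rx) = oneshot::channel();
    cmd_tx.send(Command::GetState { response_tx }).await.unwrap();
    assert_eq!(response_rx.await.unwrap(), 42);

    drop(cmd_tx); // closing the channel ends the worker loop
    worker.await.unwrap();
}
```

Closing the command channel (or an explicit shutdown request, as `ExecutionRequest::Shutdown` does further down) is what lets the worker terminate cleanly.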
-#[derive(Debug)] -pub enum ExecutionCommand { - /// The clique has changed, - /// contains the blocks of the new blockclique - /// and a list of blocks that became final - BlockCliqueChanged { - blockclique: Map<BlockId, Block>, - finalized_blocks: Map<BlockId, Block>, - }, - - /// Get a snapshot of the current state for bootstrap - GetBootstrapState(tokio::sync::oneshot::Sender<BootstrapExecutionState>), - - /// Get events optionally filtered by: - /// * start slot - /// * end slot - /// * emitter address - /// * original caller address - /// * operation id - GetSCOutputEvents { - start: Option<Slot>, - end: Option<Slot>, - emitter_address: Option<Address>
, - original_caller_address: Option<Address>
, - original_operation_id: Option<OperationId>, - response_tx: oneshot::Sender<Vec<SCOutputEvent>>, - }, - - /// Execute bytecode in read-only mode - ExecuteReadOnlyRequest { - /// Maximum gas to spend in the execution. - max_gas: u64, - /// The simulated price of gas for the read-only execution. - simulated_gas_price: Amount, - /// The code to execute. - bytecode: Vec<u8>, - /// The channel used to send the result of execution. - result_sender: oneshot::Sender<ExecuteReadOnlyResponse>, - /// The address, or a default random one if none is provided, - /// which will simulate the sender of the operation. - address: Option<Address>
, - }, - GetSCELedgerForAddresses { - response_tx: oneshot::Sender<Map<Address, SCELedgerInfo>>, - addresses: Vec<Address>
, - }, -} - -// Events produced by the execution component. -pub enum ExecutionEvent { - /// A coin transfer - /// from the SCE ledger to the CSS ledger. - TransferToConsensus, -} - -/// Management commands sent to the `execution` component. -pub enum ExecutionManagementCommand {} - -pub struct ExecutionWorker { - /// Configuration - cfg: ExecutionConfigs, - /// Receiver of commands. - controller_command_rx: mpsc::Receiver, - /// Receiver of management commands. - controller_manager_rx: mpsc::Receiver, - /// Sender of events. - _event_sender: mpsc::UnboundedSender, - /// Time cursors - last_final_slot: Slot, - last_active_slot: Slot, - /// pending CSS final blocks - pending_css_final_blocks: BTreeMap, - /// VM thread - vm_thread: JoinHandle<()>, - /// VM execution requests queue - execution_queue: ExecutionQueue, -} - -impl ExecutionWorker { - pub fn new( - cfg: ExecutionConfigs, - event_sender: mpsc::UnboundedSender, - controller_command_rx: mpsc::Receiver, - controller_manager_rx: mpsc::Receiver, - bootstrap_state: Option, - ) -> Result { - let execution_queue = ExecutionQueue::default(); - let execution_queue_clone = execution_queue.clone(); - - // Check bootstrap - let bootstrap_final_slot; - let bootstrap_ledger; - if let Some(bootstrap_state) = bootstrap_state { - // init from bootstrap - bootstrap_final_slot = bootstrap_state.final_slot; - bootstrap_ledger = Some((bootstrap_state.final_ledger, bootstrap_final_slot)); - } else { - // init without bootstrap - bootstrap_final_slot = Slot::new(0, cfg.thread_count.saturating_sub(1)); - bootstrap_ledger = None; - }; - - // Init VM - let mut vm = VM::new(cfg.clone(), bootstrap_ledger)?; - - // Start VM thread - let vm_thread = thread::spawn(move || { - let (lock, condvar) = &*execution_queue_clone; - let mut requests = lock.lock().unwrap(); - // Run until shutdown. 
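The worker loop that follows holds the queue mutex, pops requests from a `VecDeque`, and parks on the condition variable when the queue is empty. A minimal `std`-only sketch of this mutex + condvar queue pattern, with an illustrative `Request` type rather than the real `ExecutionRequest`:

```rust
use std::collections::VecDeque;
use std::sync::{Arc, Condvar, Mutex};
use std::thread;

enum Request {
    Work(u32),
    Shutdown,
}

type Queue = Arc<(Mutex<VecDeque<Request>>, Condvar)>;

fn main() {
    let queue: Queue = Arc::new((Mutex::new(VecDeque::new()), Condvar::new()));

    // Worker: pop requests, wait on the condvar when the queue is empty.
    let worker_queue = Arc::clone(&queue);
    let worker = thread::spawn(move || {
        let (lock, condvar) = &*worker_queue;
        let mut requests = lock.lock().unwrap();
        loop {
            match requests.pop_front() {
                Some(Request::Work(n)) => println!("processed {}", n),
                Some(Request::Shutdown) => return,
                // wait() releases the lock and reacquires it when notified
                None => requests = condvar.wait(requests).unwrap(),
            }
        }
    });

    // Producer: push requests under the lock, then notify the worker.
    let (lock, condvar) = &*queue;
    {
        let mut requests = lock.lock().unwrap();
        requests.push_back(Request::Work(1));
        requests.push_back(Request::Shutdown);
    }
    condvar.notify_one();

    worker.join().unwrap();
}
```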
- loop { - match requests.pop_front() { - Some(ExecutionRequest::RunFinalStep(step)) => { - vm.run_final_step(step, cfg.settings.max_final_events); // todo make settings static - } - Some(ExecutionRequest::RunActiveStep(step)) => { - vm.run_active_step(step); - } - Some(ExecutionRequest::RunReadOnly { - slot, - max_gas, - simulated_gas_price, - bytecode, - result_sender, - address, - }) => { - vm.run_read_only( - slot, - max_gas, - simulated_gas_price, - bytecode, - address, - result_sender, - ); - } - Some(ExecutionRequest::ResetToFinalState) => vm.reset_to_final(), - Some(ExecutionRequest::GetBootstrapState { response_tx }) => { - let FinalLedger { ledger, slot } = vm.get_bootstrap_state(); - let bootstrap_state = BootstrapExecutionState { - final_ledger: ledger, - final_slot: slot, - }; - if response_tx.send(bootstrap_state).is_err() { - debug!("execution: could not send get_bootstrap_state answer"); - } - } - Some(ExecutionRequest::GetSCOutputEvents { - start, - end, - emitter_address, - original_caller_address, - original_operation_id, - response_tx, - }) => { - if response_tx - .send(vm.get_filtered_sc_output_event( - start, - end, - emitter_address, - original_caller_address, - original_operation_id, - )) - .is_err() - { - debug!("execution: could not send get_sc_output_event_by_caller_address answer"); - } - } - - Some(ExecutionRequest::Shutdown) => return, - Some(ExecutionRequest::GetSCELedgerForAddresses { - addresses, - response_tx, - }) => { - let res = vm.get_sce_ledger_entry_for_addresses(addresses); - if response_tx.send(res).is_err() { - debug!("execution: could not send GetSCELedgerForAddresses response") - } - } - None => { - requests = condvar.wait(requests).unwrap(); - } - }; - } - }); - - // return execution worker - Ok(ExecutionWorker { - cfg, - controller_command_rx, - controller_manager_rx, - _event_sender: event_sender, - last_final_slot: bootstrap_final_slot, - last_active_slot: bootstrap_final_slot, - pending_css_final_blocks: Default::default(), - vm_thread, - execution_queue, - }) - } - - /// asks the VM to reset to its final state - pub fn reset_to_final(&mut self) { - let (queue_lock, condvar) = &*self.execution_queue; - let queue_guard = &mut queue_lock.lock().unwrap(); - // cancel all non-final requests - // Final execution requests are left to maintain final state consistency - queue_guard.retain(|req| { - matches!( - req, - ExecutionRequest::RunFinalStep(..) - | ExecutionRequest::Shutdown - | ExecutionRequest::GetBootstrapState { .. } - ) - }); - // request reset to final state - queue_guard.push_back(ExecutionRequest::ResetToFinalState); - // notify - condvar.notify_one(); - } - - /// sends an arbitrary VM request - fn push_request(&self, request: ExecutionRequest) { - let (queue_lock, condvar) = &*self.execution_queue; - let queue_guard = &mut queue_lock.lock().unwrap(); - queue_guard.push_back(request); - condvar.notify_one(); - } - - fn get_timer_to_next_slot(&self) -> Result { - Ok(sleep_until( - get_block_slot_timestamp( - self.cfg.thread_count, - self.cfg.t0, - self.cfg.genesis_timestamp, - get_current_latest_block_slot( - self.cfg.thread_count, - self.cfg.t0, - self.cfg.genesis_timestamp, - self.cfg.clock_compensation, - )? - .map_or(Ok(Slot::new(0, 0)), |v| { - v.get_next_slot(self.cfg.thread_count) - })?, - )? 
- .estimate_instant(self.cfg.clock_compensation)?, - )) - } - - pub async fn run_loop(mut self) -> Result<(), ExecutionError> { - // set slot timer - let next_slot_timer = self.get_timer_to_next_slot()?; - tokio::pin!(next_slot_timer); - loop { - tokio::select! { - // Process management commands - _ = self.controller_manager_rx.recv() => break, - // Process commands - Some(cmd) = self.controller_command_rx.recv() => self.process_command(cmd)?, - // Process slot timer event - _ = &mut next_slot_timer => { - self.fill_misses_until_now()?; - next_slot_timer.set(self.get_timer_to_next_slot()?); - } - } - } - // Shutdown VM, cancel all pending execution requests - self.push_request(ExecutionRequest::Shutdown); - if self.vm_thread.join().is_err() { - debug!("Failed joining vm thread") - } - Ok(()) - } - - /// Proces a given command. - /// - /// # Argument - /// * cmd: command to process - fn process_command(&mut self, cmd: ExecutionCommand) -> Result<(), ExecutionError> { - match cmd { - ExecutionCommand::BlockCliqueChanged { - blockclique, - finalized_blocks, - } => { - self.blockclique_changed(blockclique, finalized_blocks)?; - } - - ExecutionCommand::GetBootstrapState(response_tx) => { - self.push_request(ExecutionRequest::GetBootstrapState { response_tx }); - } - - ExecutionCommand::ExecuteReadOnlyRequest { - max_gas, - simulated_gas_price, - bytecode, - result_sender, - address, - } => { - // call the VM to execute in read-only mode at the last active slot. - self.push_request(ExecutionRequest::RunReadOnly { - slot: self.last_active_slot, - max_gas, - simulated_gas_price, - bytecode, - result_sender, - address, - }); - } - ExecutionCommand::GetSCOutputEvents { - start, - end, - emitter_address, - original_caller_address, - original_operation_id, - response_tx, - } => self.push_request(ExecutionRequest::GetSCOutputEvents { - start, - end, - emitter_address, - original_caller_address, - original_operation_id, - response_tx, - }), - ExecutionCommand::GetSCELedgerForAddresses { - response_tx, - addresses, - } => self.push_request(ExecutionRequest::GetSCELedgerForAddresses { - response_tx, - addresses, - }), - } - Ok(()) - } - - /// fills the remaining slots until now() with miss executions - /// see step 4 in spec https://github.com/massalabs/massa/wiki/vm-block-feed - fn fill_misses_until_now(&mut self) -> Result<(), ExecutionError> { - /* TODO DISABLED TEMPORARILY https://github.com/massalabs/massa/issues/2101 - let end_step = get_current_latest_block_slot( - self.cfg.thread_count, - self.cfg.t0, - self.cfg.genesis_timestamp, - self.cfg.clock_compensation, - )?; - if let Some(end_step) = end_step { - // slot S - let mut s = self.last_active_slot.get_next_slot(self.cfg.thread_count)?; - - while s <= end_step { - // call the VM to execute an SCE-active miss at slot S - self.push_request(ExecutionRequest::RunActiveStep(ExecutionStep { - slot: self.last_active_slot, - block: None, - })); - - // set last_active_slot = S - self.last_active_slot = s; - - s = s.get_next_slot(self.cfg.thread_count)?; - } - } - */ - Ok(()) - } - - /// checks whether a miss at slot S would be SCE-final by looking up subsequent CSS-final blocks in the same thread - /// see spec at https://github.com/massalabs/massa/wiki/vm-block-feed - /// - /// # Arguments - /// * s: missed slot - /// * max_css_final_slot: maximum lookup slot (included) - fn is_miss_sce_final(&self, s: Slot, max_css_final_slot: Slot) -> bool { - let mut check_slot = Slot::new(s.period + 1, s.thread); - while check_slot <= max_css_final_slot { - if 
self.pending_css_final_blocks.contains_key(&check_slot) { - break; - } - check_slot.period += 1; - } - check_slot <= max_css_final_slot - } - - /// called when the blockclique changes - /// see spec at https://github.com/massalabs/massa/wiki/vm-block-feed - fn blockclique_changed( - &mut self, - blockclique: Map, - finalized_blocks: Map, - ) -> Result<(), ExecutionError> { - // 1 - reset the SCE state back to its latest final state - - // revert the VM to its latest SCE-final state by clearing its active slot history. - // TODO make something more iterative/conservative in the future to reuse unaffected executions - self.reset_to_final(); - - self.last_active_slot = self.last_final_slot; - - // 2 - process CSS-final blocks - - // extend `pending_css_final_blocks` with `new_css_final_blocks` - let new_css_final_blocks = finalized_blocks.into_iter().filter_map(|(b_id, b)| { - if b.header.content.slot <= self.last_active_slot { - // eliminate blocks that are not from a stricly later slot than the current latest SCE-final one - // (this is an optimization) - return None; - } - Some((b.header.content.slot, (b_id, b))) - }); - self.pending_css_final_blocks.extend(new_css_final_blocks); - - if let Some(max_css_final_slot) = self - .pending_css_final_blocks - .last_key_value() - .map(|(s, _v)| *s) - { - // iterate over every slot S starting from `last_final_slot.get_next_slot()` up to the latest slot in `pending_css_final_blocks` (included) - let mut s = self.last_final_slot.get_next_slot(self.cfg.thread_count)?; - while s <= max_css_final_slot { - match self - .pending_css_final_blocks - .first_key_value() - .map(|(s, _v)| *s) - { - // there is a block B at slot S in `pending_css_final_blocks`: - Some(b_slot) if b_slot == s => { - // remove B from `pending_css_final_blocks` - // cannot panic, checked above - let (_s, (b_id, b)) = self - .pending_css_final_blocks - .pop_first() - .expect("pending_css_final_blocks was unexpectedly empty"); - // call the VM to execute the SCE-final block B at slot S - self.push_request(ExecutionRequest::RunFinalStep(ExecutionStep { - slot: s, - block: Some((b_id, b)), - })); - - self.last_active_slot = s; - self.last_final_slot = s; - } - - // there is no CSS-final block at s, but there are CSS-final blocks later - Some(_b_slot) => { - // check whether there is a CSS-final block later in the same thread - if self.is_miss_sce_final(s, max_css_final_slot) { - // subsequent CSS-final block found in the same thread as s - // call the VM to execute an SCE-final miss at slot S - self.push_request(ExecutionRequest::RunFinalStep(ExecutionStep { - slot: s, - block: None, - })); - - self.last_active_slot = s; - self.last_final_slot = s; - } else { - // no subsequent CSS-final block found in the same thread as s - break; - } - } - - // there are no more CSS-final blocks - None => break, - } - - s = s.get_next_slot(self.cfg.thread_count)?; - } - } - - // 3 - process CSS-active blocks - - // define `sce_active_blocks = blockclique_blocks UNION pending_css_final_blocks` - let new_blockclique_blocks = blockclique.iter().filter_map(|(b_id, b)| { - if b.header.content.slot <= self.last_final_slot { - // eliminate blocks that are not from a stricly later slot than the current latest SCE-final one - // (this is an optimization) - return None; - } - Some((b.header.content.slot, (b_id, b))) - }); - let mut sce_active_blocks: BTreeMap = new_blockclique_blocks - .chain( - self.pending_css_final_blocks - .iter() - .map(|(k, (b_id, b))| (*k, (b_id, b))), - ) - .collect(); - - if let 
Some(max_css_active_slot) = sce_active_blocks.last_key_value().map(|(s, _v)| *s) { - // iterate over every slot S starting from `last_active_slot.get_next_slot()` up to the latest slot in `sce_active_blocks` (included) - let mut s = self.last_final_slot.get_next_slot(self.cfg.thread_count)?; - while s <= max_css_active_slot { - let first_sce_active_slot = sce_active_blocks.first_key_value().map(|(s, _v)| *s); - match first_sce_active_slot { - // there is a block B at slot S in `sce_active_blocks`: - Some(b_slot) if b_slot == s => { - // remove the entry from sce_active_blocks (cannot panic, checked above) - let (_b_slot, (_b_id, _block)) = sce_active_blocks - .pop_first() - .expect("sce_active_blocks should not be empty"); - // call the VM to execute the SCE-active block B at slot S - /* TODO DISABLED TEMPORARILY https://github.com/massalabs/massa/issues/2101 - self.push_request(ExecutionRequest::RunActiveStep(ExecutionStep { - slot: s, - block: Some((*b_id, block.clone())), - })); - self.last_active_slot = s; - */ - } - - // otherwise, if there is no CSS-active block at S - Some(b_slot) => { - // make sure b_slot is after s - if b_slot <= s { - panic!("remaining CSS-active blocks should be later than S"); - } - - // call the VM to execute an SCE-active miss at slot S - /* TODO DISABLED TEMPORARILY https://github.com/massalabs/massa/issues/2101 - self.push_request(ExecutionRequest::RunActiveStep(ExecutionStep { - slot: s, - block: None, - })); - self.last_active_slot = s; - */ - } - - // there are no more CSS-active blocks - None => break, - } - - s = s.get_next_slot(self.cfg.thread_count)?; - } - } - - // 4 - fill the remaining slots with misses - self.fill_misses_until_now()?; - - Ok(()) - } -} diff --git a/massa-graph/Cargo.toml b/massa-graph/Cargo.toml index 8508660f309..8e5aaa27a38 100644 --- a/massa-graph/Cargo.toml +++ b/massa-graph/Cargo.toml @@ -20,7 +20,7 @@ thiserror = "1.0" tokio = { version = "1.15", features = ["full"] } tracing = "0.1" # custom modules -massa_execution = { path = "../massa-execution" } +massa_execution_exports = { path = "../massa-execution-exports" } massa_hash = { path = "../massa-hash" } massa_logging = { path = "../massa-logging" } massa_models = { path = "../massa-models" } @@ -34,4 +34,4 @@ serial_test = "0.5" tempfile = "3.2" [features] -instrument = ["tokio/tracing", "massa_execution/instrument", "massa_models/instrument", "massa_proof_of_stake_exports/instrument", "massa_time/instrument"] +instrument = ["tokio/tracing", "massa_models/instrument", "massa_proof_of_stake_exports/instrument", "massa_time/instrument"] diff --git a/massa-graph/src/error.rs b/massa-graph/src/error.rs index 06c1d7a34c9..d6d35efbfdb 100644 --- a/massa-graph/src/error.rs +++ b/massa-graph/src/error.rs @@ -2,7 +2,7 @@ use std::array::TryFromSliceError; // Copyright (c) 2022 MASSA LABS use displaydoc::Display; -use massa_execution::ExecutionError; +use massa_execution_exports::ExecutionError; use massa_models::ModelsError; use massa_proof_of_stake_exports::error::ProofOfStakeError; use thiserror::Error; diff --git a/massa-ledger/Cargo.toml b/massa-ledger/Cargo.toml new file mode 100644 index 00000000000..4b79f6ac844 --- /dev/null +++ b/massa-ledger/Cargo.toml @@ -0,0 +1,29 @@ +[package] +name = "massa_ledger" +version = "0.1.0" +authors = ["Massa Labs "] +edition = "2021" + +[dependencies] +displaydoc = "0.2" +futures = "0.3" +lazy_static = "1.4.0" +num = "0.4" +serde = { version = "1.0", features = ["derive"] } +serde_json = "1.0" +thiserror = "1.0" +tokio = { version = 
"1.11", features = ["full"] } +tracing = {version = "0.1", features = ["max_level_debug", "release_max_level_debug"] } +# custom modules +massa_hash = { path = "../massa-hash" } +massa_logging = { path = "../massa-logging" } +massa_models = { path = "../massa-models" } +massa_signature = { path = "../massa-signature" } +massa_time = { path = "../massa-time" } + +[dev-dependencies] +pretty_assertions = "1.0" +serial_test = "0.5" + +[features] +testing = [] diff --git a/massa-ledger/src/bootstrap.rs b/massa-ledger/src/bootstrap.rs new file mode 100644 index 00000000000..f56980c4b0c --- /dev/null +++ b/massa-ledger/src/bootstrap.rs @@ -0,0 +1,86 @@ +// Copyright (c) 2022 MASSA LABS + +//! Provides serializable strucutres for bootstrapping the FinalLedger + +use crate::LedgerEntry; +use massa_models::{ + array_from_slice, constants::ADDRESS_SIZE_BYTES, Address, DeserializeCompact, + DeserializeVarInt, ModelsError, SerializeCompact, SerializeVarInt, Slot, +}; +use serde::{Deserialize, Serialize}; +use std::collections::BTreeMap; + +/// Represents a snapshot of the ledger state, +/// which is enough to fully bootstrap a FinalLedger +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct FinalLedgerBootstrapState { + /// ledger slot + pub(crate) slot: Slot, + /// sorted ledger + pub(crate) sorted_ledger: BTreeMap, +} + +/// Allows serializing the FinalLedgerBootstrapState to a compact binary representation +impl SerializeCompact for FinalLedgerBootstrapState { + fn to_bytes_compact(&self) -> Result, massa_models::ModelsError> { + let mut res: Vec = Vec::new(); + + // final slot + res.extend(self.slot.to_bytes_compact()?); + + // final ledger size + let ledger_size: u64 = self.sorted_ledger.len().try_into().map_err(|_| { + ModelsError::SerializeError("could not represent ledger size as u64".into()) + })?; + res.extend(ledger_size.to_varint_bytes()); + + // ledger elements + for (addr, entry) in &self.sorted_ledger { + // address + res.extend(addr.to_bytes()); + + // entry + res.extend(entry.to_bytes_compact()?); + } + + Ok(res) + } +} + +/// Allows deserializing a FinalLedgerBootstrapState from its compact binary representation +impl DeserializeCompact for FinalLedgerBootstrapState { + fn from_bytes_compact(buffer: &[u8]) -> Result<(Self, usize), massa_models::ModelsError> { + let mut cursor = 0usize; + + // final slot + let (slot, delta) = Slot::from_bytes_compact(&buffer[cursor..])?; + cursor += delta; + + // ledger size + let (ledger_size, delta) = u64::from_varint_bytes(&buffer[cursor..])?; + // TODO cap the ledger size https://github.com/massalabs/massa/issues/1200 + cursor += delta; + + // final ledger + let mut sorted_ledger: BTreeMap = BTreeMap::new(); + for _ in 0..ledger_size { + // address + let addr = Address::from_bytes(&array_from_slice(&buffer[cursor..])?)?; + cursor += ADDRESS_SIZE_BYTES; + + // entry + let (entry, delta) = LedgerEntry::from_bytes_compact(&buffer[cursor..])?; + cursor += delta; + + sorted_ledger.insert(addr, entry); + } + + Ok(( + FinalLedgerBootstrapState { + slot, + sorted_ledger, + }, + cursor, + )) + } +} diff --git a/massa-ledger/src/config.rs b/massa-ledger/src/config.rs new file mode 100644 index 00000000000..9f654e1d296 --- /dev/null +++ b/massa-ledger/src/config.rs @@ -0,0 +1,16 @@ +// Copyright (c) 2022 MASSA LABS + +//! 
This file defines a config strucutre containing all settings for the ledger system + +use std::path::PathBuf; + +/// Ledger configuration +#[derive(Debug, Clone)] +pub struct LedgerConfig { + /// initial SCE ledger file + pub initial_sce_ledger_path: PathBuf, + /// final changes history length + pub final_history_length: usize, + /// thread count + pub thread_count: u8, +} diff --git a/massa-ledger/src/error.rs b/massa-ledger/src/error.rs new file mode 100644 index 00000000000..5aaee3ec16e --- /dev/null +++ b/massa-ledger/src/error.rs @@ -0,0 +1,17 @@ +// Copyright (c) 2022 MASSA LABS + +//! This file defines all error types for the ledger system + +use displaydoc::Display; +use thiserror::Error; + +#[non_exhaustive] +#[derive(Display, Error, Debug)] +pub enum LedgerError { + /// container iconsistency: {0} + ContainerInconsistency(String), + /// missing entry: {0} + MissingEntry(String), + /// file error: {0} + FileError(String), +} diff --git a/massa-ledger/src/ledger.rs b/massa-ledger/src/ledger.rs new file mode 100644 index 00000000000..450a0a227d4 --- /dev/null +++ b/massa-ledger/src/ledger.rs @@ -0,0 +1,221 @@ +// Copyright (c) 2022 MASSA LABS + +//! This file defines the final ledger associating addresses to their balances, bytecode and data. + +use crate::ledger_changes::LedgerChanges; +use crate::ledger_entry::LedgerEntry; +use crate::types::{Applicable, SetUpdateOrDelete}; +use crate::{FinalLedgerBootstrapState, LedgerConfig, LedgerError}; +use massa_hash::hash::Hash; +use massa_models::{Address, Amount, Slot}; +use std::collections::{BTreeMap, VecDeque}; + +/// Represents a final ledger associating addresses to their balances, bytecode and data. +/// The final ledger is also attached to a final slot, can be boostrapped and allows others to bootstrap. +/// The ledger size can be very high: it can exceed 1TB. +/// To allow for storage on disk, the ledger uses trees and has `O(log(N))` access, insertion and deletion complexity. +/// +/// Note: currently the ledger is stored in RAM. TODO put it on the hard drive with cache. +pub struct FinalLedger { + /// ledger config + config: LedgerConfig, + /// slot at the output of which the final ledger is attached + pub slot: Slot, + /// ledger tree, sorted by address + sorted_ledger: BTreeMap, + /// history of recent final ledger changes, useful for streaming bootstrap + /// front = oldest, back = newest + changes_history: VecDeque<(Slot, LedgerChanges)>, +} + +/// Allows applying LedgerChanges to the final ledger +/// +/// Warning: this does not push the changes in changes_history. +/// Always use FinalLedger::settle_slot to apply bootstrapable changes. 
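The `Applicable` implementation below folds each incoming `SetUpdateOrDelete` into the sorted ledger map. A self-contained, simplified sketch of how these change variants compose, using plain `std` types and a bare balance standing in for a full `LedgerEntry`:

```rust
use std::collections::BTreeMap;

/// Either overwrite a field with a new value or keep the current one.
enum SetOrKeep<T> {
    Set(T),
    Keep,
}

/// Change applied to one ledger entry: replace it, patch it, or delete it.
/// Here an "entry" is reduced to a bare balance for brevity.
enum SetUpdateOrDelete {
    Set(u64),
    Update(SetOrKeep<u64>),
    Delete,
}

/// Fold a batch of per-address changes into a sorted ledger map.
fn apply(
    ledger: &mut BTreeMap<&'static str, u64>,
    changes: BTreeMap<&'static str, SetUpdateOrDelete>,
) {
    for (addr, change) in changes {
        match change {
            // replace (or create) the whole entry
            SetUpdateOrDelete::Set(balance) => {
                ledger.insert(addr, balance);
            }
            // patch the entry, creating a default one if it is absent
            SetUpdateOrDelete::Update(patch) => {
                let balance = ledger.entry(addr).or_insert(0);
                if let SetOrKeep::Set(new_balance) = patch {
                    *balance = new_balance;
                } // SetOrKeep::Keep leaves the current value untouched
            }
            // drop the entry if it exists
            SetUpdateOrDelete::Delete => {
                ledger.remove(addr);
            }
        }
    }
}

fn main() {
    let mut ledger = BTreeMap::from([("alice", 10), ("bob", 20)]);
    let changes = BTreeMap::from([
        ("alice", SetUpdateOrDelete::Delete),
        ("bob", SetUpdateOrDelete::Update(SetOrKeep::Set(25))),
        ("carol", SetUpdateOrDelete::Set(5)),
        ("dave", SetUpdateOrDelete::Update(SetOrKeep::Keep)),
    ]);
    apply(&mut ledger, changes);
    assert_eq!(ledger, BTreeMap::from([("bob", 25), ("carol", 5), ("dave", 0)]));
}
```

In the real `FinalLedger`, `settle_slot` wraps this application step and additionally records the applied changes in a bounded history used for streaming bootstrap.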
+impl Applicable for FinalLedger { + fn apply(&mut self, changes: LedgerChanges) { + // for all incoming changes + for (addr, change) in changes.0 { + match change { + // the incoming change sets a ledger entry to a new one + SetUpdateOrDelete::Set(new_entry) => { + // inserts/overwrites the entry with the incoming one + self.sorted_ledger.insert(addr, new_entry); + } + + // the incoming change updates an existing ledger entry + SetUpdateOrDelete::Update(entry_update) => { + // applies the updates to the entry + // if the entry does not exist, inserts a default one and applies the updates to it + self.sorted_ledger + .entry(addr) + .or_insert_with(Default::default) + .apply(entry_update); + } + + // the incoming change deletes a ledger entry + SetUpdateOrDelete::Delete => { + // delete the entry, if it exists + self.sorted_ledger.remove(&addr); + } + } + } + } +} + +/// Macro used to shorten file error returns +macro_rules! init_file_error { + ($st:expr, $cfg:ident) => { + |err| { + LedgerError::FileError(format!( + "error $st initial ledger file {}: {}", + $cfg.initial_sce_ledger_path + .to_str() + .unwrap_or("(non-utf8 path)"), + err + )) + } + }; +} +pub(crate) use init_file_error; + +impl FinalLedger { + /// Initializes a new FinalLedger by reading its initial state from file. + pub fn new(config: LedgerConfig) -> Result { + // load the ledger tree from file + let sorted_ledger = serde_json::from_str::>( + &std::fs::read_to_string(&config.initial_sce_ledger_path) + .map_err(init_file_error!("loading", config))?, + ) + .map_err(init_file_error!("parsing", config))? + .into_iter() + .map(|(address, balance)| { + ( + address, + LedgerEntry { + parallel_balance: balance, + ..Default::default() + }, + ) + }) + .collect(); + + // the initial ledger is attached to the output of the last genesis block + let slot = Slot::new(0, config.thread_count.saturating_sub(1)); + + // generate the final ledger + Ok(FinalLedger { + slot, + sorted_ledger, + changes_history: Default::default(), + config, + }) + } + + /// Intiialize a FinalLedger from a bootstrap state + /// + /// TODO: This loads the whole ledger in RAM. Switch to streaming in the future + /// + /// # Arguments + /// * config: ledger config + /// * state: bootstrap state + pub fn from_bootstrap_state(config: LedgerConfig, state: FinalLedgerBootstrapState) -> Self { + FinalLedger { + slot: state.slot, + sorted_ledger: state.sorted_ledger, + changes_history: Default::default(), + config, + } + } + + /// Gets a snapshot of the ledger to bootstrap other nodes + /// + /// TODO: This loads the whole ledger in RAM. Switch to streaming in the future + pub fn get_bootstrap_state(&self) -> FinalLedgerBootstrapState { + FinalLedgerBootstrapState { + slot: self.slot, + sorted_ledger: self.sorted_ledger.clone(), + } + } + + /// Gets a copy of a full ledger entry. + /// + /// # Returns + /// A clone of the whole LedgerEntry, or None if not found. + /// + /// TODO: in the future, never manipulate full ledger entries because their datastore can be huge + /// https://github.com/massalabs/massa/issues/2342 + pub fn get_full_entry(&self, addr: &Address) -> Option { + self.sorted_ledger.get(addr).cloned() + } + + /// Applies changes to the ledger, pushes them to the bootstrap history, + /// and sets the ledger's attachment final slot. + /// After this is called, the final ledger is attached to the output of `slot` + /// and ready to bootstrap nodes with this new state. 
+ pub fn settle_slot(&mut self, slot: Slot, changes: LedgerChanges) { + // apply changes + self.apply(changes.clone()); + + // update the attachment final slot + self.slot = slot; + + // update and prune changes history + self.changes_history.push_back((slot, changes)); + while self.changes_history.len() > self.config.final_history_length { + self.changes_history.pop_front(); + } + } + + /// Gets the parallel balance of a ledger entry + /// + /// # Returns + /// The parallel balance, or None if the ledger entry was not found + pub fn get_parallel_balance(&self, addr: &Address) -> Option { + self.sorted_ledger.get(addr).map(|v| v.parallel_balance) + } + + /// Gets a copy of the bytecode of a ledger entry + /// + /// # Returns + /// A copy of the found bytecode, or None if the ledger entry was not found + pub fn get_bytecode(&self, addr: &Address) -> Option> { + self.sorted_ledger.get(addr).map(|v| v.bytecode.clone()) + } + + /// Checks if a ledger entry exists + /// + /// # Returns + /// true if it exists, false otherwise. + pub fn entry_exists(&self, addr: &Address) -> bool { + self.sorted_ledger.contains_key(addr) + } + + /// Gets a copy of the value of a datastore entry for a given address. + /// + /// # Arguments + /// * addr: target address + /// * key: datastore key + /// + /// # Returns + /// A copy of the datastore value, or None if the ledger entry or datastore entry was not found + pub fn get_data_entry(&self, addr: &Address, key: &Hash) -> Option> { + self.sorted_ledger + .get(addr) + .and_then(|v| v.datastore.get(key).cloned()) + } + + /// Checks for the existence of a datastore entry for a given address. + /// + /// # Arguments + /// * addr: target address + /// * key: datastore key + /// + /// # Returns + /// true if the datastore entry was found, or false if the ledger entry or datastore entry was not found + pub fn has_data_entry(&self, addr: &Address, key: &Hash) -> bool { + self.sorted_ledger + .get(addr) + .map_or(false, |v| v.datastore.contains_key(key)) + } +} diff --git a/massa-ledger/src/ledger_changes.rs b/massa-ledger/src/ledger_changes.rs new file mode 100644 index 00000000000..15ee4a60746 --- /dev/null +++ b/massa-ledger/src/ledger_changes.rs @@ -0,0 +1,443 @@ +// Copyright (c) 2022 MASSA LABS + +//! 
This file provides structures representing changes to ledger entries + +use crate::ledger_entry::LedgerEntry; +use crate::types::{Applicable, SetOrDelete, SetOrKeep, SetUpdateOrDelete}; +use massa_hash::hash::Hash; +use massa_models::{prehash::Map, Address, Amount}; +use std::collections::hash_map; + +/// represents an update to one or more fields of a LedgerEntry +#[derive(Default, Debug, Clone)] +pub struct LedgerEntryUpdate { + /// change the parallel balance + pub parallel_balance: SetOrKeep, + /// change the executable bytecode + pub bytecode: SetOrKeep>, + // change datastore entries + pub datastore: Map>>, +} + +impl Applicable for LedgerEntryUpdate { + /// extends the LedgerEntryUpdate with another one + fn apply(&mut self, update: LedgerEntryUpdate) { + self.parallel_balance.apply(update.parallel_balance); + self.bytecode.apply(update.bytecode); + self.datastore.extend(update.datastore); + } +} + +/// represents a list of changes to multiple ledger entries +#[derive(Default, Debug, Clone)] +pub struct LedgerChanges(pub Map>); + +impl Applicable for LedgerChanges { + /// extends the current LedgerChanges with another one + fn apply(&mut self, changes: LedgerChanges) { + for (addr, change) in changes.0 { + match self.0.entry(addr) { + hash_map::Entry::Occupied(mut occ) => { + // apply incoming change if a change on this entry already exists + occ.get_mut().apply(change); + } + hash_map::Entry::Vacant(vac) => { + // otherwise insert the incoming change + vac.insert(change); + } + } + } + } +} + +impl LedgerChanges { + /// get an item from the LedgerChanges + pub fn get( + &self, + addr: &Address, + ) -> Option<&SetUpdateOrDelete> { + self.0.get(addr) + } + + /// Tries to return the parallel balance of an entry + /// or gets it from a function if the entry's status is unknown. + /// + /// This function is used as an optimization: + /// if the value can be deduced unambiguously from the LedgerChanges, + /// no need to dig further (for example in the FinalLedger). + /// + /// # Arguments + /// * addr: address for which to get the value + /// * f: fallback function with no arguments and returning Option + /// + /// # Returns + /// * Some(v) if a value is present, where v is a copy of the value + /// * None if the value is absent + /// * f() if the value is unknown + pub fn get_parallel_balance_or_else Option>( + &self, + addr: &Address, + f: F, + ) -> Option { + // Get the changes for the provided address + match self.0.get(addr) { + // This entry is being replaced by a new one: get the balance from the new entry + Some(SetUpdateOrDelete::Set(v)) => Some(v.parallel_balance), + + // This entry is being updated + Some(SetUpdateOrDelete::Update(LedgerEntryUpdate { + parallel_balance, .. + })) => match parallel_balance { + // The update sets a new balance: return it + SetOrKeep::Set(v) => Some(*v), + // The update keeps the old balance. + // We therefore have no info on the absolute value of the balance. + // We call the fallback function and return its output. + SetOrKeep::Keep => f(), + }, + + // This entry is being deleted: return None. + Some(SetUpdateOrDelete::Delete) => None, + + // This entry is not being changed. + // We therefore have no info on the absolute value of the balance. + // We call the fallback function and return its output. + None => f(), + } + } + + /// Tries to return the executable bytecode of an entry + /// or gets it from a function if the entry's status is unknown. 
+ /// + /// This function is used as an optimization: + /// if the value can be deduced unambiguously from the LedgerChanges, + /// no need to dig further (for example in the FinalLedger). + /// + /// # Arguments + /// * addr: address for which to get the value + /// * f: fallback function with no arguments and returning Option> + /// + /// # Returns + /// * Some(v) if a value is present, where v is a copy of the value + /// * None if the value is absent + /// * f() if the value is unknown + pub fn get_bytecode_or_else Option>>( + &self, + addr: &Address, + f: F, + ) -> Option> { + // Get the changes to the provided address + match self.0.get(addr) { + // This entry is being replaced by a new one: get the bytecode from the new entry + Some(SetUpdateOrDelete::Set(v)) => Some(v.bytecode.clone()), + + // This entry is being updated + Some(SetUpdateOrDelete::Update(LedgerEntryUpdate { bytecode, .. })) => match bytecode { + // The update sets a new bytecode: return it + SetOrKeep::Set(v) => Some(v.clone()), + + // The update keeps the old bytecode. + // We therefore have no info on the absolute value of the bytecode. + // We call the fallback function and return its output. + SetOrKeep::Keep => f(), + }, + + // This entry is being deleted: return None. + Some(SetUpdateOrDelete::Delete) => None, + + // This entry is not being changed. + // We therefore have no info on the absolute contents of the bytecode. + // We call the fallback function and return its output. + None => f(), + } + } + + /// Tries to return whether an entry exists + /// or gets the information from a function if the entry's status is unknown. + /// + /// This function is used as an optimization: + /// if the result can be deduced unambiguously from the LedgerChanges, + /// no need to dig further (for example in the FinalLedger). + /// + /// # Arguments + /// * addr: address to search for + /// * f: fallback function with no arguments and returning bool + /// + /// # Returns + /// * true if the entry exists + /// * false if the value is absent + /// * f() if the value's existence is unknown + pub fn entry_exists_or_else bool>(&self, addr: &Address, f: F) -> bool { + // Get the changes for the provided address + match self.0.get(addr) { + // The entry is being replaced by a new one: it exists + Some(SetUpdateOrDelete::Set(_)) => true, + + // The entry is being updated: + // assume it exists because it will be created on update if it doesn't + Some(SetUpdateOrDelete::Update(_)) => true, + + // The entry is being deleted: it doesn't exist anymore + Some(SetUpdateOrDelete::Delete) => false, + + // This entry is not being changed. + // We therefore have no info on its existence. + // We call the fallback function and return its output. + None => f(), + } + } + + /// Set the parallel balance of an address. + /// If the address doesn't exist, its ledger entry is created. 
+ /// + /// # Arguments + /// * addr: target address + /// * balance: parallel balance to set for the provided address + pub fn set_parallel_balance(&mut self, addr: Address, balance: Amount) { + // Get the changes for the entry associated to the provided address + match self.0.entry(addr) { + // That entry is being changed + hash_map::Entry::Occupied(mut occ) => { + match occ.get_mut() { + // The entry is being replaced by a new one + SetUpdateOrDelete::Set(v) => { + // update the parallel_balance of the replacement entry + v.parallel_balance = balance; + } + + // The entry is being updated + SetUpdateOrDelete::Update(u) => { + // Make sure the update sets the paralell balance of the entry to its new value + u.parallel_balance = SetOrKeep::Set(balance); + } + + // The entry is being deleted + d @ SetUpdateOrDelete::Delete => { + // Replace that deletion with a replacement by a new default entry + // for which the parallel balance was properly set + *d = SetUpdateOrDelete::Set(LedgerEntry { + parallel_balance: balance, + ..Default::default() + }); + } + } + } + + // This entry is not being changed + hash_map::Entry::Vacant(vac) => { + // Induce an Update to the entry that sets the balance to its new value + vac.insert(SetUpdateOrDelete::Update(LedgerEntryUpdate { + parallel_balance: SetOrKeep::Set(balance), + ..Default::default() + })); + } + } + } + + /// Set the executable bytecode of an address. + /// If the address doesn't exist, its ledger entry is created. + /// + /// # Parameters + /// * addr: target address + /// * bytecode: executable bytecode to assign to that address + pub fn set_bytecode(&mut self, addr: Address, bytecode: Vec) { + // Get the current changes being applied to the entry associated to that address + match self.0.entry(addr) { + // There are changes currently being applied to the entry + hash_map::Entry::Occupied(mut occ) => { + match occ.get_mut() { + // The entry is being replaced by a new one + SetUpdateOrDelete::Set(v) => { + // update the bytecode of the replacement entry + v.bytecode = bytecode; + } + + // The entry is being updated + SetUpdateOrDelete::Update(u) => { + // Ensure that the update includes setting the bytecode to its new value + u.bytecode = SetOrKeep::Set(bytecode); + } + + // The entry is being deleted + d @ SetUpdateOrDelete::Delete => { + // Replace that deletion with a replacement by a new default entry + // for which the bytecode was properly set + *d = SetUpdateOrDelete::Set(LedgerEntry { + bytecode, + ..Default::default() + }); + } + } + } + + // This entry is not being changed + hash_map::Entry::Vacant(vac) => { + // Induce an Update to the entry that sets the bytecode to its new value + vac.insert(SetUpdateOrDelete::Update(LedgerEntryUpdate { + bytecode: SetOrKeep::Set(bytecode), + ..Default::default() + })); + } + } + } + + /// Tries to return a datastore entry for a given address, + /// or gets it from a function if the value's status is unknown. + /// + /// This function is used as an optimization: + /// if the result can be deduced unambiguously from the LedgerChanges, + /// no need to dig further (for example in the FinalLedger). 
+    ///
+    /// # Arguments
+    /// * addr: target address
+    /// * key: datastore key
+    /// * f: fallback function with no arguments and returning Option<Vec<u8>>
+    ///
+    /// # Returns
+    /// * Some(v) if the value was found, where v is a copy of the value
+    /// * None if the value is absent
+    /// * f() if the value is unknown
+    pub fn get_data_entry_or_else<F: FnOnce() -> Option<Vec<u8>>>(
+        &self,
+        addr: &Address,
+        key: &Hash,
+        f: F,
+    ) -> Option<Vec<u8>> {
+        // Get the current changes being applied to the ledger entry associated to that address
+        match self.0.get(addr) {
+            // This ledger entry is being replaced by a new one:
+            // get the datastore entry from the new ledger entry
+            Some(SetUpdateOrDelete::Set(v)) => v.datastore.get(key).cloned(),
+
+            // This ledger entry is being updated
+            Some(SetUpdateOrDelete::Update(LedgerEntryUpdate { datastore, .. })) => {
+                // Get the update being applied to that datastore entry
+                match datastore.get(key) {
+                    // A new datastore value is being set: return a clone of it
+                    Some(SetOrDelete::Set(v)) => Some(v.clone()),
+
+                    // This datastore entry is being deleted: return None
+                    Some(SetOrDelete::Delete) => None,
+
+                    // There are no changes to this particular datastore entry.
+                    // We therefore have no info on the absolute contents of the datastore entry.
+                    // We call the fallback function and return its output.
+                    None => f(),
+                }
+            }
+
+            // This ledger entry is being deleted: return None
+            Some(SetUpdateOrDelete::Delete) => None,
+
+            // This ledger entry is not being changed.
+            // We therefore have no info on the absolute contents of its datastore entry.
+            // We call the fallback function and return its output.
+            None => f(),
+        }
+    }
+
+    /// Tries to return whether a datastore entry exists for a given address,
+    /// or gets it from a function if the datastore entry's status is unknown.
+    ///
+    /// This function is used as an optimization:
+    /// if the result can be deduced unambiguously from the LedgerChanges,
+    /// no need to dig further (for example in the FinalLedger).
+    ///
+    /// # Arguments
+    /// * addr: target address
+    /// * key: datastore key
+    /// * f: fallback function with no arguments and returning bool
+    ///
+    /// # Returns
+    /// * true if the ledger entry exists and the key is present in its datastore
+    /// * false if the ledger entry is absent, or if the key is not in its datastore
+    /// * f() if the existence of the ledger entry or datastore entry is unknown
+    pub fn has_data_entry_or_else<F: FnOnce() -> bool>(
+        &self,
+        addr: &Address,
+        key: &Hash,
+        f: F,
+    ) -> bool {
+        // Get the current changes being applied to the ledger entry associated to that address
+        match self.0.get(addr) {
+            // This ledger entry is being replaced by a new one:
+            // check if the replacement ledger entry has the key in its datastore
+            Some(SetUpdateOrDelete::Set(v)) => v.datastore.contains_key(key),
+
+            // This ledger entry is being updated
+            Some(SetUpdateOrDelete::Update(LedgerEntryUpdate { datastore, .. })) => {
+                // Get the update being applied to that datastore entry
+                match datastore.get(key) {
+                    // A new datastore value is being set: the datastore entry exists
+                    Some(SetOrDelete::Set(_)) => true,
+
+                    // The datastore entry is being deleted: it doesn't exist anymore
+                    Some(SetOrDelete::Delete) => false,
+
+                    // There are no changes to this particular datastore entry.
+                    // We therefore have no info on its existence.
+                    // We call the fallback function and return its output.
+                    None => f(),
+                }
+            }
+
+            // This ledger entry is being deleted: it has no datastore anymore
+            Some(SetUpdateOrDelete::Delete) => false,
+
+            // This ledger entry is not being changed.
+            // We therefore have no info on its datastore.
+            // We call the fallback function and return its output.
+            None => f(),
+        }
+    }
+
+    /// Set a datastore entry for a given address.
+    /// If the address doesn't exist, its ledger entry is created.
+    /// If the datastore entry exists, its value is replaced, otherwise it is created.
+    ///
+    /// # Arguments
+    /// * addr: target address
+    /// * key: datastore key
+    /// * data: datastore value to set
+    pub fn set_data_entry(&mut self, addr: Address, key: Hash, data: Vec<u8>) {
+        // Get the changes being applied to the ledger entry associated to that address
+        match self.0.entry(addr) {
+            // There are changes currently being applied to the ledger entry
+            hash_map::Entry::Occupied(mut occ) => {
+                match occ.get_mut() {
+                    // The ledger entry is being replaced by a new one
+                    SetUpdateOrDelete::Set(v) => {
+                        // Insert the value in the datastore of the replacement entry
+                        // Any existing value is overwritten
+                        v.datastore.insert(key, data);
+                    }
+
+                    // The ledger entry is being updated
+                    SetUpdateOrDelete::Update(u) => {
+                        // Ensure that the update includes setting the datastore entry
+                        u.datastore.insert(key, SetOrDelete::Set(data));
+                    }
+
+                    // The ledger entry is being deleted
+                    d @ SetUpdateOrDelete::Delete => {
+                        // Replace that ledger entry deletion with a replacement by a new default ledger entry
+                        // for which the datastore contains the (key, value) to insert.
+                        *d = SetUpdateOrDelete::Set(LedgerEntry {
+                            datastore: vec![(key, data)].into_iter().collect(),
+                            ..Default::default()
+                        });
+                    }
+                }
+            }
+
+            // This ledger entry is not being changed
+            hash_map::Entry::Vacant(vac) => {
+                // Induce an Update to the ledger entry that sets the datastore entry to the desired value
+                vac.insert(SetUpdateOrDelete::Update(LedgerEntryUpdate {
+                    datastore: vec![(key, SetOrDelete::Set(data))].into_iter().collect(),
+                    ..Default::default()
+                }));
+            }
+        }
+    }
+}
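The setters above always leave the change map in a state the *_or_else getters can answer from directly. A sketch of that round trip, assuming LedgerChanges derives Default (an empty change set) and given some addr: Address:

    // write a datastore value into an empty change set, then read it back
    // without ever touching a fallback source
    let mut changes = LedgerChanges::default();
    let key = Hash::compute_from(b"some_key");
    changes.set_data_entry(addr.clone(), key.clone(), vec![1, 2, 3]);

    // both queries are fully determined by the change set,
    // so the fallback closures are never invoked
    assert!(changes.has_data_entry_or_else(&addr, &key, || unreachable!()));
    assert_eq!(
        changes.get_data_entry_or_else(&addr, &key, || unreachable!()),
        Some(vec![1, 2, 3])
    );
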
diff --git a/massa-ledger/src/ledger_entry.rs b/massa-ledger/src/ledger_entry.rs
new file mode 100644
index 00000000000..14abb937641
--- /dev/null
+++ b/massa-ledger/src/ledger_entry.rs
@@ -0,0 +1,173 @@
+// Copyright (c) 2022 MASSA LABS
+
+//! This file defines the structure representing an entry in the FinalLedger
+
+use crate::ledger_changes::LedgerEntryUpdate;
+use crate::types::{Applicable, SetOrDelete};
+use massa_hash::hash::Hash;
+use massa_hash::HASH_SIZE_BYTES;
+use massa_models::{array_from_slice, Amount, DeserializeVarInt, ModelsError, SerializeVarInt};
+use massa_models::{DeserializeCompact, SerializeCompact};
+use serde::{Deserialize, Serialize};
+use std::collections::BTreeMap;
+
+/// Structure defining an entry associated to an address in the FinalLedger
+#[derive(Default, Debug, Clone, Serialize, Deserialize)]
+pub struct LedgerEntry {
+    /// The parallel balance of that entry.
+    /// See lib.rs for an explanation on parallel vs sequential balances.
+    pub parallel_balance: Amount,
+
+    /// Executable bytecode
+    pub bytecode: Vec<u8>,
+
+    /// A key-value store associating a hash to arbitrary bytes
+    pub datastore: BTreeMap<Hash, Vec<u8>>,
+}
+
+/// A LedgerEntryUpdate can be applied to a LedgerEntry
+impl Applicable<LedgerEntryUpdate> for LedgerEntry {
+    fn apply(&mut self, update: LedgerEntryUpdate) {
+        // apply updates to the parallel balance
+        update.parallel_balance.apply_to(&mut self.parallel_balance);
+
+        // apply updates to the executable bytecode
+        update.bytecode.apply_to(&mut self.bytecode);
+
+        // iterate over all datastore updates
+        for (key, value_update) in update.datastore {
+            match value_update {
+                // this update sets a new value to a datastore entry
+                SetOrDelete::Set(v) => {
+                    // insert the new value in the datastore,
+                    // replacing any existing value
+                    self.datastore.insert(key, v);
+                }
+
+                // this update deletes a datastore entry
+                SetOrDelete::Delete => {
+                    // remove that entry from the datastore if it exists
+                    self.datastore.remove(&key);
+                }
+            }
+        }
+    }
+}
+
+/// Allow serializing the LedgerEntry into a compact binary representation
+impl SerializeCompact for LedgerEntry {
+    fn to_bytes_compact(&self) -> Result<Vec<u8>, massa_models::ModelsError> {
+        let mut res: Vec<u8> = Vec::new();
+
+        // parallel balance
+        res.extend(self.parallel_balance.to_bytes_compact()?);
+
+        // bytecode length
+        let bytecode_len: u64 = self.bytecode.len().try_into().map_err(|_| {
+            ModelsError::SerializeError("could not convert bytecode size to u64".into())
+        })?;
+        res.extend(bytecode_len.to_varint_bytes());
+
+        // bytecode
+        res.extend(&self.bytecode);
+
+        // datastore length
+        let datastore_len: u64 = self.datastore.len().try_into().map_err(|_| {
+            ModelsError::SerializeError("could not convert datastore size to u64".into())
+        })?;
+        res.extend(datastore_len.to_varint_bytes());
+
+        // datastore
+        for (key, value) in &self.datastore {
+            // key
+            res.extend(key.to_bytes());
+
+            // value length
+            let value_len: u64 = value.len().try_into().map_err(|_| {
+                ModelsError::SerializeError("could not convert datastore value size to u64".into())
+            })?;
+            res.extend(value_len.to_varint_bytes());
+
+            // value
+            res.extend(value);
+        }
+
+        Ok(res)
+    }
+}
+
+/// Allow deserializing a LedgerEntry from its compact binary representation
+impl DeserializeCompact for LedgerEntry {
+    fn from_bytes_compact(buffer: &[u8]) -> Result<(Self, usize), massa_models::ModelsError> {
+        let mut cursor = 0usize;
+
+        // parallel balance
+        let (parallel_balance, delta) = Amount::from_bytes_compact(&buffer[cursor..])?;
+        cursor += delta;
+
+        // bytecode length
+        let (bytecode_len, delta) = u64::from_varint_bytes(&buffer[cursor..])?;
+        let bytecode_len: usize = bytecode_len.try_into().map_err(|_| {
+            ModelsError::SerializeError("could not convert bytecode size to usize".into())
+        })?;
+        //TODO cap bytecode length https://github.com/massalabs/massa/issues/1200
+        cursor += delta;
+
+        // bytecode
+        let bytecode = if let Some(slice) = buffer.get(cursor..(cursor + (bytecode_len as usize))) {
+            cursor += bytecode_len as usize;
+            slice.to_vec()
+        } else {
+            return Err(ModelsError::DeserializeError(
+                "could not deserialize ledger entry bytecode: buffer too small".into(),
+            ));
+        };
+
+        // datastore length
+        let (datastore_len, delta) = u64::from_varint_bytes(&buffer[cursor..])?;
+        let datastore_len: usize = datastore_len.try_into().map_err(|_| {
+            ModelsError::SerializeError("could not convert datastore size to usize".into())
+        })?;
+        //TODO cap datastore length https://github.com/massalabs/massa/issues/1200
+        cursor += delta;
+
+        // datastore entries
+        let mut datastore: BTreeMap<Hash, Vec<u8>> = BTreeMap::new();
+        for _ in 0..datastore_len {
+            // key
+            let key = Hash::from_bytes(&array_from_slice(&buffer[cursor..])?)?;
+            cursor += HASH_SIZE_BYTES;
+
+            // value length
+            let (value_len, delta) = u64::from_varint_bytes(&buffer[cursor..])?;
+            let value_len: usize = value_len.try_into().map_err(|_| {
+                ModelsError::SerializeError(
+                    "could not convert datastore entry value size to usize".into(),
+                )
+            })?;
+            //TODO cap value length https://github.com/massalabs/massa/issues/1200
+            cursor += delta;
+
+            // value
+            let value = if let Some(slice) = buffer.get(cursor..(cursor + (value_len as usize))) {
+                cursor += value_len as usize;
+                slice.to_vec()
+            } else {
+                return Err(ModelsError::DeserializeError(
+                    "could not deserialize ledger entry datastore value: buffer too small".into(),
+                ));
+            };
+
+            datastore.insert(key, value);
+        }
+
+        Ok((
+            LedgerEntry {
+                parallel_balance,
+                bytecode,
+                datastore,
+            },
+            cursor,
+        ))
+    }
+}
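Because the compact format above is hand-rolled (varint lengths, raw hash bytes), a round trip is the quickest way to check that the two impls agree. A hedged sketch, assuming it runs somewhere LedgerEntry and the compact traits are in scope:

    use massa_models::{DeserializeCompact, SerializeCompact};

    // serialize a small entry and read it back; the cursor returned by
    // from_bytes_compact must land exactly at the end of the buffer
    let entry = LedgerEntry {
        bytecode: vec![0xAA, 0xBB],
        ..Default::default()
    };
    let bytes = entry.to_bytes_compact().expect("serialization should not fail");
    let (decoded, read) =
        LedgerEntry::from_bytes_compact(&bytes).expect("deserialization should not fail");
    assert_eq!(read, bytes.len());
    assert_eq!(decoded.bytecode, entry.bytecode);
    assert_eq!(decoded.parallel_balance, entry.parallel_balance);
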
diff --git a/massa-ledger/src/lib.rs b/massa-ledger/src/lib.rs
new file mode 100644
index 00000000000..abcd825d7d1
--- /dev/null
+++ b/massa-ledger/src/lib.rs
@@ -0,0 +1,67 @@
+// Copyright (c) 2022 MASSA LABS
+
+//! # General description
+//!
+//! This crate implements a ledger matching addresses to balances, executable bytecode and data.
+//! It also provides tools to manipulate ledger entries.
+//!
+//! The FinalLedger represents the ledger at a given slot that was executed as final
+//! (see the massa-execution-worker crate for details on execution).
+//! Only the execution worker writes into the final ledger.
+//!
+//! # A note on parallel vs sequential balance
+//!
+//! The distinctions between the parallel and the sequential balance of a ledger entry are the following:
+//! * the parallel balance can be credited or spent in any slot
+//! * the sequential balance can be credited in any slot but only spent in slots from the address' thread
+//! * block producers are credited fees from the sequential balance,
+//!   and they can ensure that this balance will be available for their block simply
+//!   by looking for sequential balance spendings within the block's thread.
+//!
+//! # Architecture
+//!
+//! ## ledger.rs
+//! Defines the FinalLedger that matches an address to a LedgerEntry (see ledger_entry.rs),
+//! and can be manipulated using LedgerChanges (see ledger_changes.rs).
+//! The FinalLedger is bootstrapped using tooling available in bootstrap.rs
+//!
+//! ## ledger_entry.rs
+//! Represents an entry in the ledger for a given address.
+//! It contains balances, executable bytecode and an arbitrary datastore.
+//!
+//! ## ledger_changes.rs
+//! Represents a list of changes to ledger entries that
+//! can be modified, combined or applied to the final ledger.
+//!
+//! ## bootstrap.rs
+//! Provides serializable structures and tools for bootstrapping the final ledger.
+//!
+//! ## Test exports
+//!
+//! When the crate feature `testing` is enabled, tooling useful for testing purposes is exported.
+//! See test_exports/mod.rs for details.
+
+#![feature(map_first_last)]
+#![feature(async_closure)]
+
+mod bootstrap;
+mod config;
+mod error;
+mod ledger;
+mod ledger_changes;
+mod ledger_entry;
+mod types;
+
+pub use bootstrap::FinalLedgerBootstrapState;
+pub use config::LedgerConfig;
+pub use error::LedgerError;
+pub use ledger::FinalLedger;
+pub use ledger_changes::LedgerChanges;
+pub use ledger_entry::LedgerEntry;
+pub use types::{Applicable, SetOrDelete, SetOrKeep, SetUpdateOrDelete};
+
+#[cfg(test)]
+mod tests;
+
+#[cfg(feature = "testing")]
+pub mod test_exports;
diff --git a/massa-ledger/src/test_exports/bootstrap.rs b/massa-ledger/src/test_exports/bootstrap.rs
new file mode 100644
index 00000000000..2a6aff0c4d7
--- /dev/null
+++ b/massa-ledger/src/test_exports/bootstrap.rs
@@ -0,0 +1,57 @@
+// Copyright (c) 2022 MASSA LABS
+
+use std::collections::BTreeMap;
+
+use massa_models::{Address, Slot};
+
+use crate::{FinalLedgerBootstrapState, LedgerEntry};
+
+/// This file defines tools to test the ledger bootstrap
+
+/// creates a ledger bootstrap state from components
+pub fn make_bootstrap_state(
+    slot: Slot,
+    sorted_ledger: BTreeMap<Address, LedgerEntry>,
+) -> FinalLedgerBootstrapState {
+    FinalLedgerBootstrapState {
+        slot,
+        sorted_ledger,
+    }
+}
+
+/// asserts that two ledger entries are the same
+pub fn assert_eq_ledger_entry(v1: &LedgerEntry, v2: &LedgerEntry) {
+    assert_eq!(
+        v1.parallel_balance, v2.parallel_balance,
+        "parallel balance mismatch"
+    );
+    assert_eq!(v1.bytecode, v2.bytecode, "bytecode mismatch");
+    assert_eq!(
+        v1.datastore.len(),
+        v2.datastore.len(),
+        "datastore len mismatch"
+    );
+    for k in v1.datastore.keys() {
+        let itm1 = v1.datastore.get(k).unwrap();
+        let itm2 = v2.datastore.get(k).expect("datastore key mismatch");
+        assert_eq!(itm1, itm2, "datastore entry mismatch");
+    }
+}
+
+/// asserts that two FinalLedgerBootstrapState are equal
+pub fn assert_eq_ledger_bootstrap_state(
+    v1: &FinalLedgerBootstrapState,
+    v2: &FinalLedgerBootstrapState,
+) {
+    assert_eq!(v1.slot, v2.slot, "final slot mismatch");
+    assert_eq!(
+        v1.sorted_ledger.len(),
+        v2.sorted_ledger.len(),
+        "ledger len mismatch"
+    );
+    for k in v1.sorted_ledger.keys() {
+        let itm1 = v1.sorted_ledger.get(k).unwrap();
+        let itm2 = v2.sorted_ledger.get(k).expect("ledger key mismatch");
+        assert_eq_ledger_entry(itm1, itm2);
+    }
+}
diff --git a/massa-ledger/src/test_exports/config.rs b/massa-ledger/src/test_exports/config.rs
new file mode 100644
index 00000000000..eada136589b
--- /dev/null
+++ b/massa-ledger/src/test_exports/config.rs
@@ -0,0 +1,15 @@
+// Copyright (c) 2022 MASSA LABS
+
+/// This file defines testing tools related to the config
+use crate::LedgerConfig;
+
+/// Default value of LedgerConfig used for tests
+impl Default for LedgerConfig {
+    fn default() -> Self {
+        LedgerConfig {
+            initial_sce_ledger_path: "".into(), // unused by the mock
+            final_history_length: 10,
+            thread_count: 2,
+        }
+    }
+}
diff --git a/massa-ledger/src/test_exports/mod.rs b/massa-ledger/src/test_exports/mod.rs
new file mode 100644
index 00000000000..cf3ced58ef9
--- /dev/null
+++ b/massa-ledger/src/test_exports/mod.rs
@@ -0,0 +1,7 @@
+// exports testing utilities
+
+mod bootstrap;
+mod config;
+
+pub use bootstrap::*;
+pub use config::*;
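A sketch of how the test_exports helpers above are meant to be used from a consuming crate with the testing feature enabled; Slot::new(period, thread) is assumed to be the existing constructor:

    use std::collections::BTreeMap;
    use massa_ledger::test_exports::{assert_eq_ledger_bootstrap_state, make_bootstrap_state};
    use massa_ledger::LedgerEntry;
    use massa_models::{Address, Slot};

    // build two bootstrap states from the same components and check equality
    let ledger: BTreeMap<Address, LedgerEntry> = BTreeMap::new();
    let sent = make_bootstrap_state(Slot::new(0, 0), ledger.clone());
    let received = make_bootstrap_state(Slot::new(0, 0), ledger);
    assert_eq_ledger_bootstrap_state(&sent, &received);
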
diff --git a/massa-execution/src/tests/mod.rs b/massa-ledger/src/tests/mod.rs
similarity index 64%
rename from massa-execution/src/tests/mod.rs
rename to massa-ledger/src/tests/mod.rs
index 7ba4c32332a..ca6fbbd29ca 100644
--- a/massa-execution/src/tests/mod.rs
+++ b/massa-ledger/src/tests/mod.rs
@@ -1,3 +1 @@
 // Copyright (c) 2022 MASSA LABS
-
-mod scenarios_mandatories;
diff --git a/massa-ledger/src/types.rs b/massa-ledger/src/types.rs
new file mode 100644
index 00000000000..cd386577fab
--- /dev/null
+++ b/massa-ledger/src/types.rs
@@ -0,0 +1,113 @@
+// Copyright (c) 2022 MASSA LABS
+
+//! Provides various tools to manipulate ledger entries and changes happening on them.
+
+/// Trait marking a structure that supports another one (V) being applied to it
+pub trait Applicable<V> {
+    fn apply(&mut self, _: V);
+}
+
+/// Enum representing a set/update/delete change on a value T
+#[derive(Debug, Clone)]
+pub enum SetUpdateOrDelete<T: Default + Applicable<V>, V: Applicable<V> + Clone> {
+    /// Sets the value to a new absolute value T
+    Set(T),
+
+    /// Applies an update V to an existing value T.
+    /// If the value T doesn't exist:
+    /// a `new_t = T::default()` is created,
+    /// the update V is applied to it,
+    /// and the enum is changed to `SetUpdateOrDelete::Set(new_t)`
+    Update(V),
+
+    /// Deletes the value T
+    Delete,
+}
+
+/// support applying another SetUpdateOrDelete to self
+impl<T: Default + Applicable<V>, V: Applicable<V>> Applicable<SetUpdateOrDelete<T, V>>
+    for SetUpdateOrDelete<T, V>
+where
+    V: Clone,
+{
+    fn apply(&mut self, other: SetUpdateOrDelete<T, V>) {
+        match other {
+            // the other SetUpdateOrDelete sets a new absolute value => force it on self
+            v @ SetUpdateOrDelete::Set(_) => *self = v,
+
+            // the other SetUpdateOrDelete updates the value
+            SetUpdateOrDelete::Update(u) => match self {
+                // if self currently sets an absolute value, apply other to that value
+                SetUpdateOrDelete::Set(cur) => cur.apply(u),
+
+                // if self currently updates a value, apply the updates of the other to that update
+                SetUpdateOrDelete::Update(cur) => cur.apply(u),
+
+                // if self currently deletes a value,
+                // create a new default value, apply other's updates to it and make self set it as an absolute new value
+                SetUpdateOrDelete::Delete => {
+                    let mut res = T::default();
+                    res.apply(u);
+                    *self = SetUpdateOrDelete::Set(res);
+                }
+            },
+
+            // the other SetUpdateOrDelete deletes a value => force self to delete it as well
+            v @ SetUpdateOrDelete::Delete => *self = v,
+        }
+    }
+}
+
+/// Enum representing a set/delete change on a value T
+#[derive(Debug, Clone)]
+pub enum SetOrDelete<T: Clone> {
+    /// sets a new absolute value T
+    Set(T),
+
+    /// deletes the value
+    Delete,
+}
+
+/// allows applying another SetOrDelete to the current one
+impl<T: Clone> Applicable<SetOrDelete<T>> for SetOrDelete<T> {
+    fn apply(&mut self, other: Self) {
+        *self = other;
+    }
+}
+
+/// represents a set/keep change
+#[derive(Debug, Clone)]
+pub enum SetOrKeep<T: Clone> {
+    /// sets a new absolute value T
+    Set(T),
+
+    /// keeps the existing value
+    Keep,
+}
+
+/// allows applying another SetOrKeep to the current one
+impl<T: Clone> Applicable<SetOrKeep<T>> for SetOrKeep<T> {
+    fn apply(&mut self, other: SetOrKeep<T>) {
+        if let v @ SetOrKeep::Set(..) = other {
+            // update the current value only if the other SetOrKeep sets a new one
+            *self = v;
+        }
+    }
+}
+
+impl<T: Clone> SetOrKeep<T> {
+    /// applies the current SetOrKeep to a target mutable value
+    pub fn apply_to(self, val: &mut T) {
+        if let SetOrKeep::Set(v) = self {
+            // only change the value if self is setting a new one
+            *val = v;
+        }
+    }
+}
+
+/// By default, SetOrKeep keeps the existing value
+impl<T: Clone> Default for SetOrKeep<T> {
+    fn default() -> Self {
+        SetOrKeep::Keep
+    }
+}
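One property of SetUpdateOrDelete::apply above that is easy to miss: applying an Update on top of a Delete does not stay a Delete or an Update, it collapses into Set(T::default() with the update applied). A crate-internal sketch (it assumes LedgerEntryUpdate is reachable, for example from a unit test inside massa-ledger):

    use crate::ledger_changes::LedgerEntryUpdate;
    use crate::types::{Applicable, SetOrKeep, SetUpdateOrDelete};
    use crate::LedgerEntry;

    // start from a pending deletion, then apply an update that sets bytecode
    let mut change: SetUpdateOrDelete<LedgerEntry, LedgerEntryUpdate> = SetUpdateOrDelete::Delete;
    change.apply(SetUpdateOrDelete::Update(LedgerEntryUpdate {
        bytecode: SetOrKeep::Set(vec![1, 2, 3]),
        ..Default::default()
    }));

    // the delete + update pair collapsed into an absolute Set of a default
    // entry whose bytecode was overridden by the update
    match change {
        SetUpdateOrDelete::Set(entry) => assert_eq!(entry.bytecode, vec![1, 2, 3]),
        _ => panic!("expected the change to collapse into Set"),
    }
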
diff --git a/massa-models/src/api.rs b/massa-models/src/api.rs
index 725dc866be5..29fb2ba0d85 100644
--- a/massa-models/src/api.rs
+++ b/massa-models/src/api.rs
@@ -172,8 +172,8 @@ impl std::fmt::Display for AddressInfo {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
         writeln!(f, "Address: {}", self.address)?;
         writeln!(f, "Thread: {}", self.thread)?;
-        writeln!(f, "Parallel balance:\n{}", self.ledger_info)?;
-        writeln!(f, "Sequential balance:\n{}", self.sce_ledger_info)?;
+        writeln!(f, "Sequential balance:\n{}", self.ledger_info)?;
+        writeln!(f, "Parallel balance:\n{}", self.sce_ledger_info)?;
         writeln!(f, "Rolls:\n{}", self.rolls)?;
         writeln!(
             f,
diff --git a/massa-models/src/slot.rs b/massa-models/src/slot.rs
index 828568a60d3..95d320e2e24 100644
--- a/massa-models/src/slot.rs
+++ b/massa-models/src/slot.rs
@@ -42,6 +42,22 @@ impl Slot {
         Slot { period, thread }
     }
 
+    /// returns the minimal slot
+    pub const fn min() -> Slot {
+        Slot {
+            period: 0,
+            thread: 0,
+        }
+    }
+
+    /// returns the maximal slot
+    pub const fn max() -> Slot {
+        Slot {
+            period: u64::MAX,
+            thread: u8::MAX,
+        }
+    }
+
     pub fn get_first_bit(&self) -> bool {
         Hash::compute_from(&self.to_bytes_key()).to_bytes()[0] >> 7 == 1
     }
diff --git a/massa-node/Cargo.toml b/massa-node/Cargo.toml
index 97ef7b008d9..a2cccf14548 100644
--- a/massa-node/Cargo.toml
+++ b/massa-node/Cargo.toml
@@ -13,6 +13,7 @@ console-subscriber = "0.1.1"
 directories = "4.0"
 futures = "0.3"
 lazy_static = "1.4.0"
+parking_lot = "0.12"
 serde = { version = "1.0", features = ["derive"] }
 serde_json = "1.0"
 tokio = { version = "1.15", features = ["full"] }
@@ -25,8 +26,10 @@ massa_bootstrap = { path = "../massa-bootstrap" }
 massa_consensus_exports = { path = "../massa-consensus-exports" }
 massa_graph = { path = "../massa-graph" }
 massa_consensus_worker = { path = "../massa-consensus-worker" }
-massa_execution = { path = "../massa-execution" }
+massa_execution_exports = { path = "../massa-execution-exports" }
+massa_execution_worker = { path = "../massa-execution-worker" }
 massa_logging = { path = "../massa-logging" }
+massa_ledger = { path = "../massa-ledger" }
 massa_models = { path = "../massa-models" }
 massa_network = { path = "../massa-network" }
 massa_pool = { path = "../massa-pool" }
@@ -41,7 +44,7 @@ serial_test = "0.5"
 [features]
 # nightly = ["beta"]
 beta = []
-instrument = ["tokio/tracing", "massa_api/instrument", "massa_bootstrap/instrument", "massa_consensus_exports/instrument", "massa_consensus_worker/instrument", "massa_execution/instrument", "massa_network/instrument", "massa_pool/instrument", "massa_protocol_exports/instrument", "massa_protocol_worker/instrument"]
+instrument = ["tokio/tracing", "massa_api/instrument", "massa_bootstrap/instrument", "massa_consensus_exports/instrument", "massa_consensus_worker/instrument", "massa_network/instrument", "massa_pool/instrument", "massa_protocol_exports/instrument", "massa_protocol_worker/instrument"]
 sandbox = ["massa_consensus_exports/sandbox", "massa_consensus_worker/sandbox", "massa_models/sandbox"]
 
 [build]
diff --git a/massa-node/base_config/config.toml b/massa-node/base_config/config.toml
index a3d57fecb27..755a5a5345b 100644
--- a/massa-node/base_config/config.toml
+++ b/massa-node/base_config/config.toml
@@ -15,10 +15,19 @@ max_arguments = 128
 
 [execution]
-    # path to the initial smart contract balance ledger
-    initial_sce_ledger_path = "base_config/initial_sce_ledger.json"
     # max number of generated events kept in RAM
     max_final_events = 100
+    # maximum length of the read-only execution requests queue
+    readonly_queue_length = 10
+    # by how many milliseconds should the execution lag behind real time
+    # higher values increase speculative execution lag but improve performance
+    cursor_delay = 0
+
+[ledger]
+    # path to the initial smart contract balance ledger
+    initial_sce_ledger_path = "base_config/initial_sce_ledger.json"
+    # length of the changes history. Higher values allow bootstrapping nodes with slower connections
+    final_history_length = 100
 
 [consensus]
     # max number of previously discarded blocks kept in RAM
diff --git a/massa-node/src/main.rs b/massa-node/src/main.rs
index e6bb8c69bd7..34d3c988ca7 100644
--- a/massa-node/src/main.rs
+++ b/massa-node/src/main.rs
@@ -12,12 +12,14 @@ use massa_consensus_exports::{
     ConsensusEventReceiver, ConsensusManager,
 };
 use massa_consensus_worker::start_consensus_controller;
-use massa_execution::{ExecutionConfigs, ExecutionManager};
-
+use massa_execution_exports::{ExecutionConfig, ExecutionManager};
+use massa_execution_worker::start_execution_worker;
+use massa_ledger::{FinalLedger, LedgerConfig};
 use massa_logging::massa_trace;
 use massa_models::{
     constants::{
-        END_TIMESTAMP, GENESIS_TIMESTAMP, MAX_GAS_PER_BLOCK, OPERATION_VALIDITY_PERIODS, VERSION,
+        END_TIMESTAMP, GENESIS_TIMESTAMP, MAX_GAS_PER_BLOCK, OPERATION_VALIDITY_PERIODS, T0,
+        THREAD_COUNT, VERSION,
     },
     init_serialization_context, SerializationContext,
 };
@@ -26,7 +28,8 @@ use massa_pool::{start_pool_controller, PoolCommandSender, PoolManager};
 use massa_protocol_exports::ProtocolManager;
 use massa_protocol_worker::start_protocol_controller;
 use massa_time::MassaTime;
-use std::process;
+use parking_lot::RwLock;
+use std::{process, sync::Arc};
 use tokio::signal;
 use tokio::sync::mpsc;
 use tracing::{error, info, warn};
@@ -42,7 +45,7 @@ async fn launch() -> (
     NetworkCommandSender,
     Option<BootstrapManager>,
     ConsensusManager,
-    ExecutionManager,
+    Box<dyn ExecutionManager>,
     PoolManager,
     ProtocolManager,
     NetworkManager,
@@ -117,17 +120,29 @@ async fn launch() -> (
         .await
         .expect("could not start pool controller");
 
-    let execution_config = ExecutionConfigs {
-        settings: SETTINGS.execution.clone(),
+    // init ledger
+    let ledger_config = LedgerConfig {
+        initial_sce_ledger_path: SETTINGS.ledger.initial_sce_ledger_path.clone(),
+        final_history_length: SETTINGS.ledger.final_history_length,
+        thread_count: THREAD_COUNT,
+    };
+    let final_ledger = Arc::new(RwLock::new(match bootstrap_state.final_ledger {
+        Some(l) => FinalLedger::from_bootstrap_state(ledger_config, l),
+        None => FinalLedger::new(ledger_config).expect("could not init final ledger"),
+    }));
+
+    // launch execution module
+    let execution_config = ExecutionConfig {
+        max_final_events: SETTINGS.execution.max_final_events,
+        readonly_queue_length: SETTINGS.execution.readonly_queue_length,
+        cursor_delay: SETTINGS.execution.cursor_delay,
         clock_compensation: bootstrap_state.compensation_millis,
-        ..Default::default()
+        thread_count: THREAD_COUNT,
+        t0: T0,
+        genesis_timestamp: *GENESIS_TIMESTAMP,
     };
-
-    // launch execution controller
-    let (execution_command_sender, execution_event_receiver, execution_manager) =
-        massa_execution::start_controller(execution_config, bootstrap_state.execution)
-            .await
-            .expect("could not start execution controller");
+    let (execution_manager, execution_controller) =
+        start_execution_worker(execution_config, final_ledger.clone());
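The ledger is now shared rather than owned by the execution module: it sits behind Arc<RwLock<...>> (parking_lot) and each consumer (execution worker, bootstrap server) gets a clone of the Arc. A minimal sketch of that sharing pattern; ledger_config is assumed to be built as above and the read-only access is illustrative:

    use parking_lot::RwLock;
    use std::sync::Arc;

    // one place creates the ledger, every consumer clones the Arc
    let final_ledger = Arc::new(RwLock::new(
        FinalLedger::new(ledger_config).expect("could not init final ledger"),
    ));
    let ledger_for_bootstrap = final_ledger.clone();

    // readers take a shared lock; the parking_lot guard unlocks on drop
    {
        let _ledger = ledger_for_bootstrap.read();
        // ... perform read-only queries through `_ledger` here ...
    }
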
 
     let consensus_config = ConsensusConfig::from(&SETTINGS.consensus);
     // launch consensus controller
@@ -135,8 +150,7 @@
         start_consensus_controller(
             consensus_config.clone(),
             ConsensusChannels {
-                execution_command_sender: execution_command_sender.clone(),
-                execution_event_receiver,
+                execution_controller: execution_controller.clone(),
                 protocol_command_sender: protocol_command_sender.clone(),
                 protocol_event_receiver,
                 pool_command_sender: pool_command_sender.clone(),
@@ -152,7 +166,7 @@
     let bootstrap_manager = start_bootstrap_server(
         consensus_command_sender.clone(),
         network_command_sender.clone(),
-        execution_command_sender.clone(),
+        final_ledger.clone(),
         &SETTINGS.bootstrap,
         massa_bootstrap::Establisher::new(),
         private_key,
@@ -166,7 +180,7 @@
     let (api_private, api_private_stop_rx) = API::<Private>::new(
         consensus_command_sender.clone(),
         network_command_sender.clone(),
-        execution_command_sender.clone(),
+        execution_controller.clone(),
         &SETTINGS.api,
         consensus_config.clone(),
     );
@@ -175,7 +189,7 @@
     // spawn public API
     let api_public = API::<Public>::new(
         consensus_command_sender.clone(),
-        execution_command_sender,
+        execution_controller.clone(),
         &SETTINGS.api,
         consensus_config,
         pool_command_sender.clone(),
@@ -207,7 +221,7 @@
 struct Managers {
     bootstrap_manager: Option<BootstrapManager>,
     consensus_manager: ConsensusManager,
-    execution_manager: ExecutionManager,
+    execution_manager: Box<dyn ExecutionManager>,
     pool_manager: PoolManager,
     protocol_manager: ProtocolManager,
     network_manager: NetworkManager,
@@ -218,7 +232,7 @@ async fn stop(
     Managers {
         bootstrap_manager,
         consensus_manager,
-        execution_manager,
+        mut execution_manager,
         pool_manager,
         protocol_manager,
         network_manager,
@@ -241,16 +255,13 @@
     api_private_handle.stop();
 
     // stop consensus controller
-    let (protocol_event_receiver, _execution_event_receiver) = consensus_manager
+    let protocol_event_receiver = consensus_manager
         .stop(consensus_event_receiver)
         .await
         .expect("consensus shutdown failed");
 
     // Stop execution controller.
-    execution_manager
-        .stop()
-        .await
-        .expect("Failed to shutdown execution.");
+    execution_manager.stop();
 
     // stop pool controller
     let protocol_pool_event_receiver = pool_manager.stop().await.expect("pool shutdown failed");
@@ -266,6 +277,8 @@
         .stop(network_event_receiver)
         .await
         .expect("network shutdown failed");
+
+    // note that FinalLedger gets destroyed as soon as its Arc count goes to zero
 }
 
 /// To instrument `massa-node` with `tokio-console` run
diff --git a/massa-node/src/settings.rs b/massa-node/src/settings.rs
index 8db9b2ce716..2cc6cd36c98 100644
--- a/massa-node/src/settings.rs
+++ b/massa-node/src/settings.rs
@@ -1,9 +1,10 @@
 // Copyright (c) 2022 MASSA LABS
 //!
Build here the default node settings from the config file toml +use std::path::PathBuf; + use massa_bootstrap::settings::BootstrapSettings; use massa_consensus_exports::ConsensusSettings; -use massa_execution::ExecutionSettings; use massa_models::{ api::APISettings, constants::{build_massa_settings, OPERATION_VALIDITY_PERIODS, THREAD_COUNT}, @@ -11,6 +12,7 @@ use massa_models::{ use massa_network::NetworkSettings; use massa_pool::{PoolConfig, PoolSettings}; use massa_protocol_exports::ProtocolSettings; +use massa_time::MassaTime; use serde::Deserialize; lazy_static::lazy_static! { @@ -22,11 +24,24 @@ lazy_static::lazy_static! { }; } -#[derive(Debug, Deserialize, Clone, Copy)] +#[derive(Debug, Deserialize, Clone)] pub struct LoggingSettings { pub level: usize, } +#[derive(Clone, Debug, Deserialize)] +pub struct ExecutionSettings { + pub max_final_events: usize, + pub readonly_queue_length: usize, + pub cursor_delay: MassaTime, +} + +#[derive(Clone, Debug, Deserialize)] +pub struct LedgerSettings { + pub initial_sce_ledger_path: PathBuf, + pub final_history_length: usize, +} + #[derive(Debug, Deserialize, Clone)] pub struct Settings { pub logging: LoggingSettings, @@ -37,4 +52,5 @@ pub struct Settings { pub bootstrap: BootstrapSettings, pub pool: PoolSettings, pub execution: ExecutionSettings, + pub ledger: LedgerSettings, } diff --git a/massa-proof-of-stake-exports/src/proof_of_stake.rs b/massa-proof-of-stake-exports/src/proof_of_stake.rs index 471a7bc1c54..1ffdc757d35 100644 --- a/massa-proof-of-stake-exports/src/proof_of_stake.rs +++ b/massa-proof-of-stake-exports/src/proof_of_stake.rs @@ -12,7 +12,7 @@ use massa_signature::derive_public_key; use num::rational::Ratio; use rand::{distributions::Uniform, Rng, SeedableRng}; use rand_xoshiro::Xoshiro256PlusPlus; -use tracing::log::warn; +use tracing::warn; use crate::{ error::POSResult, error::ProofOfStakeError, export_pos::ExportProofOfStake,