From e85fca35bccf9fd22b50c8933726c0a2a41bdd1c Mon Sep 17 00:00:00 2001 From: Saketh Are Date: Tue, 14 Nov 2023 16:11:25 -0500 Subject: [PATCH 01/30] fix flaky test snapshot_hosts::invalid_signature (#10174) Follow up to #10123, which fixed the previous line in this test: https://github.com/near/nearcore/blob/5b3e69aef58cf0274c080264a60692d889089aaa/chain/network/src/snapshot_hosts/tests.rs#L91-L94 The test was still flaky because the line modified in this PR needs to be fixed as well. --- chain/network/src/snapshot_hosts/tests.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/chain/network/src/snapshot_hosts/tests.rs b/chain/network/src/snapshot_hosts/tests.rs index a36e859e47e..90bdab9e7a1 100644 --- a/chain/network/src/snapshot_hosts/tests.rs +++ b/chain/network/src/snapshot_hosts/tests.rs @@ -93,7 +93,7 @@ async fn invalid_signature() { // due to parallelization, so we check for superset rather than strict equality. assert_is_superset(&[&info1].as_set(), &res.0.as_set()); // Partial update should match the state. - assert_eq!([&info1].as_set(), cache.get_hosts().iter().collect::>()); + assert_eq!(res.0.as_set(), cache.get_hosts().iter().collect::>()); } #[tokio::test] From 7e93c6e5f472c3635b3eac10035c154c047ed514 Mon Sep 17 00:00:00 2001 From: Andrei <122784628+andrei-near@users.noreply.github.com> Date: Wed, 15 Nov 2023 09:21:54 +0000 Subject: [PATCH 02/30] Docker push GHA workflow (#10171) Last missing bit for moving buildkite pipelines to GHA. 
Test run: https://github.com/near/nearcore/actions/runs/6866048800/job/18671396688 Docker image [published](https://hub.docker.com/layers/nearprotocol/nearcore/docker-push-wf/images/sha256-ead845ba8f8f02213d97d875a8add9649117c0d444e3bf101089072b985b26a8?context=repo) --- .github/workflows/neard_linux_binary.yml | 25 ++++++++++++++- Dockerfile | 39 ++---------------------- 2 files changed, 27 insertions(+), 37 deletions(-) diff --git a/.github/workflows/neard_linux_binary.yml b/.github/workflows/neard_linux_binary.yml index da1fcea72e5..e0461f7bc1e 100644 --- a/.github/workflows/neard_linux_binary.yml +++ b/.github/workflows/neard_linux_binary.yml @@ -1,4 +1,4 @@ -name: Neard Linux binary release +name: Neard Linux binary and Docker image release on: # Run when a new release or rc is created @@ -50,3 +50,26 @@ jobs: echo $(git rev-parse HEAD) > latest BRANCH=$(git branch --show-current) aws s3 cp --acl public-read latest s3://build.nearprotocol.com/nearcore/$(uname)/${BRANCH}/latest + + - name: Login to Docker Hub + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKERHUB_USERNAME }} + password: ${{ secrets.DOCKER_PAT_TOKEN }} + + - name: Build and push Docker image to nearprotocol/nearcore + run: | + COMMIT=$(git rev-parse HEAD) + BRANCH=${{ github.ref_name }} + cp target/release/neard neard + docker build -t nearcore -f Dockerfile --progress=plain . 
+ docker tag nearcore nearprotocol/nearcore:${BRANCH}-${COMMIT} + docker tag nearcore nearprotocol/nearcore:${BRANCH} + + docker push nearprotocol/nearcore:${BRANCH}-${COMMIT} + docker push nearprotocol/nearcore:${BRANCH} + if [[ ${BRANCH} == "master" ]]; + then + docker tag nearcore nearprotocol/nearcore:latest + docker push nearprotocol/nearcore:latest + fi \ No newline at end of file diff --git a/Dockerfile b/Dockerfile index 90ea4c23c5e..df9e342468a 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,38 +1,5 @@ -# syntax=docker/dockerfile-upstream:experimental - -FROM ubuntu:18.04 as build - -RUN apt-get update -qq && apt-get install -y \ - git \ - cmake \ - g++ \ - pkg-config \ - libssl-dev \ - curl \ - llvm \ - clang \ - && rm -rf /var/lib/apt/lists/* - -COPY ./rust-toolchain.toml /tmp/rust-toolchain.toml - -ENV RUSTUP_HOME=/usr/local/rustup \ - CARGO_HOME=/usr/local/cargo \ - PATH=/usr/local/cargo/bin:$PATH - -RUN curl https://sh.rustup.rs -sSf | \ - sh -s -- -y --no-modify-path --default-toolchain none - -VOLUME [ /near ] -WORKDIR /near -COPY . . - -ENV PORTABLE=ON -ARG make_target= -RUN make CARGO_TARGET_DIR=/tmp/target \ - "${make_target:?make_target not set}" - -# Actual image -FROM ubuntu:18.04 +# Docker image +FROM ubuntu:22.04 EXPOSE 3030 24567 @@ -41,6 +8,6 @@ RUN apt-get update -qq && apt-get install -y \ && rm -rf /var/lib/apt/lists/* COPY scripts/run_docker.sh /usr/local/bin/run.sh -COPY --from=build /tmp/target/release/neard /usr/local/bin/ +COPY neard /usr/local/bin/ CMD ["/usr/local/bin/run.sh"] From 2b3ba917e4508ec81827f2613afd6268df0399e8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Adam=20Chuda=C5=9B?= <18039094+staffik@users.noreply.github.com> Date: Wed, 15 Nov 2023 10:47:57 +0100 Subject: [PATCH 03/30] Fix test related documentation issues (#10168) Summary: - Update Runner Test Results URL. - Fix `cargo nextest` command for running `cross_shard_tx`test target. - Update outdated / not working links in the documentation. 
`python_tests.md` file already has a guidance about cargo build command for local test runs. Feature guards are test-specific. Depending on what test do we want to run, we should modify the script below accordingly. > ## Compiling the client for tests > The local tests by default expect the binary to be in the default location for a > debug build (`{nearcore}/target/debug`). Some tests might also expect > test-specific features guarded by a feature flag to be available. To compile the > binary with such features run: > ``` > cargo build -p neard --features=adversarial > ``` --- docs/practices/testing/README.md | 21 ++++++++++----------- docs/practices/testing/python_tests.md | 8 ++++---- 2 files changed, 14 insertions(+), 15 deletions(-) diff --git a/docs/practices/testing/README.md b/docs/practices/testing/README.md index 265025fc3d1..20ce69817bc 100644 --- a/docs/practices/testing/README.md +++ b/docs/practices/testing/README.md @@ -36,19 +36,18 @@ It requires nextest harness which can be installed by running `cargo install car such tests is to enable all the features by passing `--all-features` to `cargo nextest run`, e.g: -`cargo nextest run --package near-client --test cross_shard_tx -tests::test_cross_shard_tx --all-features` +`cargo nextest run --package near-client -E 'test(=tests::cross_shard_tx::test_cross_shard_tx)' --all-features` 3. **Python tests:** We have an infrastructure to spin up nodes, both locally and remotely, in python, and interact with them using RPC. The infrastructure and the tests are located in the `pytest` folder. The infrastructure is relatively straightforward, see for example `block_production.py` - [here](https://github.com/nearprotocol/nearcore/blob/master/pytest/tests/sanity/block_production.py). + [here](https://github.com/near/nearcore/blob/master/pytest/tests/sanity/block_production.py). See the `Test infrastructure` section below for details. 
Expensive and python tests are not part of CI, and are run by a custom nightly runner. The results of the latest runs are available -[here](http://nightly.neartest.com/). Today, test runs launch approximately +[here](https://nayduck.near.org/#/). Today, test runs launch approximately every 5-6 hours. For the latest results look at the **second** run, since the first one has some tests still scheduled to run. @@ -71,7 +70,7 @@ predefined timeout. For the most basic example of using this infrastructure see `produce_two_blocks` in -[`tests/process_blocks.rs`](https://github.com/nearprotocol/nearcore/blob/master/chain/client/tests/process_blocks.rs). +[`tests/process_blocks.rs`](https://github.com/near/nearcore/blob/master/chain/client/src/tests/process_blocks.rs). 1. The callback (`Box::new(move |msg, _ctx, _| { ...`) is what is executed whenever the client sends a message. The return value of the callback is sent @@ -84,13 +83,13 @@ in For an example of a test that launches multiple nodes, see `chunks_produced_and_distributed_common` in -[tests/chunks_management.rs](https://github.com/nearprotocol/nearcore/blob/master/chain/client/tests/chunks_management.rs). +[tests/chunks_management.rs](https://github.com/near/nearcore/blob/master/chain/client/src/tests/chunks_management.rs). The `setup_mock_all_validators` function is the key piece of infrastructure here. ## Runtime Tests for Runtime are listed in -[tests/test_cases_runtime.rs](https://github.com/near/nearcore/blob/master/tests/test_cases_runtime.rs). +[tests/test_cases_runtime.rs](https://github.com/near/nearcore/blob/master/integration-tests/src/tests/standard_cases/runtime.rs). To run a test, usually, a mock `RuntimeNode` is created via `create_runtime_node()`. In its constructor, the `Runtime` is created in the @@ -114,16 +113,16 @@ make sure to build new components sufficiently abstract so that they can be test without relying on other components. 
For example, see tests for doomslug -[here](https://github.com/nearprotocol/nearcore/blob/master/chain/chain/tests/doomslug.rs), +[here](https://github.com/near/nearcore/blob/master/chain/chain/src/tests/doomslug.rs), for network cache -[here](https://github.com/nearprotocol/nearcore/blob/master/chain/network/tests/cache_edges.rs), +[here](https://github.com/near/nearcore/blob/master/chain/network/src/routing/edge_cache/tests.rs), or for promises in runtime -[here](https://github.com/nearprotocol/nearcore/blob/master/runtime/near-vm-logic/tests/test_promises.rs). +[here](https://github.com/near/nearcore/blob/master/runtime/near-vm-runner/src/logic/tests/promises.rs). ## Python tests See -[this page](https://github.com/nearprotocol/nearcore/wiki/Writing-integration-tests-for-nearcore) +[this page](python_tests.md) for detailed coverage of how to write a python test. We have a python library that allows one to create and run python tests. diff --git a/docs/practices/testing/python_tests.md b/docs/practices/testing/python_tests.md index 95ebb9d8b77..d1922eba288 100644 --- a/docs/practices/testing/python_tests.md +++ b/docs/practices/testing/python_tests.md @@ -63,7 +63,7 @@ want to run tests against a release build, you can create a file with the following config: ```json -{"local": True, "near_root": "../target/release/"} +{"local": true, "near_root": "../target/release/"} ``` and run the test with the following command: @@ -116,7 +116,7 @@ Note that `start_cluster` spins up all the nodes right away. Some tests (e.g. tests that test syncing) might want to configure the nodes but delay their start. 
In such a case you will initialize the cluster by calling `init_cluster` and will run the nodes manually, for example, see -[`state_sync.py`](https://github.com/nearprotocol/nearcore/blob/master/pytest/tests/sanity/state_sync.py) +[`state_sync.py`](https://github.com/near/nearcore/blob/master/pytest/tests/sanity/state_sync.py) ## Connecting to a mocknet @@ -234,7 +234,7 @@ if want_async: ``` See -[rpc_tx_forwarding.py](https://github.com/nearprotocol/nearcore/blob/master/pytest/tests/sanity/rpc_tx_forwarding.py) +[rpc_tx_forwarding.py](https://github.com/near/nearcore/blob/master/pytest/tests/sanity/rpc_tx_forwarding.py) for an example of signing and submitting a transaction. ## Adversarial behavior @@ -269,6 +269,6 @@ See the tests that match `tests/sanity/proxy_*.py` for examples. We always welcome new tests, especially python tests that use the above infrastructure. We have a list of test requests -[here](https://github.com/nearprotocol/nearcore/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+test%22+), +[here](https://github.com/nearprotocol/nearcore/issues?q=is%3Aissue+is%3Aopen+label%3A%22A-testing%22), but also welcome any other tests that test aspects of the network we haven't thought about. From e1cec998e44b2a4c59ec4c67b22f3c44ea0f1694 Mon Sep 17 00:00:00 2001 From: Ekleog-NEAR <96595974+Ekleog-NEAR@users.noreply.github.com> Date: Wed, 15 Nov 2023 10:56:34 +0100 Subject: [PATCH 04/30] use a hack to make fuzzer building actually work (#10173) See discussion at [1]. Here, the solution is what would be a bad solution for bolero upstream (because it no longer works with un-harnessed fuzzers), but is fine enough for our use case. 
[1] https://github.com/camshaft/bolero/issues/196 --- .github/workflows/master_fuzzer_binaries.yml | 5 ++-- .../workflows/ondemand_fuzzer_binaries.yml | 7 +++--- Cargo.lock | 24 +++++++------------ Cargo.toml | 2 +- .../near-vm-runner/src/prepare/prepare_v1.rs | 2 +- .../near-vm-runner/src/prepare/prepare_v2.rs | 4 ++-- runtime/near-vm-runner/src/tests/fuzzers.rs | 11 ++++----- runtime/near-vm/types/Cargo.toml | 1 - .../types/tests/partial-sum-map/main.rs | 3 ++- test-utils/runtime-tester/src/fuzzing.rs | 2 +- 10 files changed, 27 insertions(+), 34 deletions(-) diff --git a/.github/workflows/master_fuzzer_binaries.yml b/.github/workflows/master_fuzzer_binaries.yml index 02b9e4f33b7..b960c7114a2 100644 --- a/.github/workflows/master_fuzzer_binaries.yml +++ b/.github/workflows/master_fuzzer_binaries.yml @@ -29,8 +29,9 @@ jobs: with: crate: cargo-bolero # TODO: remove the below once https://github.com/camshaft/bolero/pull/195 is released on crates.io - git: https://github.com/camshaft/bolero - rev: 8c5a50a57b0e4c4cc8111cfd95670dc75cd2dea7 + # and https://github.com/camshaft/bolero/pull/196 has a proper fix + git: https://github.com/Ekleog-NEAR/bolero + rev: 8f4e49d65c702a2f9858ed3c217b1cb52ce91243 - run: rustup target add --toolchain nightly wasm32-unknown-unknown diff --git a/.github/workflows/ondemand_fuzzer_binaries.yml b/.github/workflows/ondemand_fuzzer_binaries.yml index ec0dd7fedce..4f20d0a2acd 100644 --- a/.github/workflows/ondemand_fuzzer_binaries.yml +++ b/.github/workflows/ondemand_fuzzer_binaries.yml @@ -44,9 +44,10 @@ jobs: - uses: baptiste0928/cargo-install@21a18ba3bf4a184d1804e8b759930d3471b1c941 with: crate: cargo-bolero - # TODO: remove the below once https://github.com/camshaft/bolero/pull/195 lands - git: https://github.com/camshaft/bolero - rev: 8c5a50a57b0e4c4cc8111cfd95670dc75cd2dea7 + # TODO: remove the below once https://github.com/camshaft/bolero/pull/195 is released on crates.io + # and https://github.com/camshaft/bolero/pull/196 has a proper 
fix + git: https://github.com/Ekleog-NEAR/bolero + rev: 8f4e49d65c702a2f9858ed3c217b1cb52ce91243 - run: rustup target add --toolchain nightly wasm32-unknown-unknown diff --git a/Cargo.lock b/Cargo.lock index 5e265309f63..8bb01fb394e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -726,8 +726,7 @@ dependencies = [ [[package]] name = "bolero" version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f9fec67acd9afcd579067cc506c537da49751b8b81c98d5a5e15ba1e853aa3c" +source = "git+https://github.com/Ekleog-NEAR/bolero?rev=8f4e49d65c702a2f9858ed3c217b1cb52ce91243#8f4e49d65c702a2f9858ed3c217b1cb52ce91243" dependencies = [ "bolero-afl", "bolero-engine", @@ -742,8 +741,7 @@ dependencies = [ [[package]] name = "bolero-afl" version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1b34f05de1527425bb05287da09ff1ff1612538648824db49e16d9693b24065" +source = "git+https://github.com/Ekleog-NEAR/bolero?rev=8f4e49d65c702a2f9858ed3c217b1cb52ce91243#8f4e49d65c702a2f9858ed3c217b1cb52ce91243" dependencies = [ "bolero-engine", "cc", @@ -752,8 +750,7 @@ dependencies = [ [[package]] name = "bolero-engine" version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7ddcfa4c2aa7d57b1785c6e258f612e74c96afa078300d0f811dee73592d7ca" +source = "git+https://github.com/Ekleog-NEAR/bolero?rev=8f4e49d65c702a2f9858ed3c217b1cb52ce91243#8f4e49d65c702a2f9858ed3c217b1cb52ce91243" dependencies = [ "anyhow", "backtrace", @@ -766,8 +763,7 @@ dependencies = [ [[package]] name = "bolero-generator" version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8efabd99549391e8b372e8afe566e8236ca4be6be379c1b6bf81b027c472fe7" +source = "git+https://github.com/Ekleog-NEAR/bolero?rev=8f4e49d65c702a2f9858ed3c217b1cb52ce91243#8f4e49d65c702a2f9858ed3c217b1cb52ce91243" dependencies = [ "arbitrary", "bolero-generator-derive", @@ -778,8 +774,7 @@ dependencies = [ 
[[package]] name = "bolero-generator-derive" version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53397bfda19ccb48527faa14025048fc4bb76f090ccdeef1e5a355bfe4a94467" +source = "git+https://github.com/Ekleog-NEAR/bolero?rev=8f4e49d65c702a2f9858ed3c217b1cb52ce91243#8f4e49d65c702a2f9858ed3c217b1cb52ce91243" dependencies = [ "proc-macro-crate 1.3.1", "proc-macro2", @@ -790,8 +785,7 @@ dependencies = [ [[package]] name = "bolero-honggfuzz" version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf78581db1a7263620a8767e645b93ad287c70122ae76f5bd67040c7f06ff8e3" +source = "git+https://github.com/Ekleog-NEAR/bolero?rev=8f4e49d65c702a2f9858ed3c217b1cb52ce91243#8f4e49d65c702a2f9858ed3c217b1cb52ce91243" dependencies = [ "bolero-engine", ] @@ -799,8 +793,7 @@ dependencies = [ [[package]] name = "bolero-kani" version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e55cec272a617f5ae4ce670db035108eb97c10cd4f67de851a3c8d3f18f19cb" +source = "git+https://github.com/Ekleog-NEAR/bolero?rev=8f4e49d65c702a2f9858ed3c217b1cb52ce91243#8f4e49d65c702a2f9858ed3c217b1cb52ce91243" dependencies = [ "bolero-engine", ] @@ -808,8 +801,7 @@ dependencies = [ [[package]] name = "bolero-libfuzzer" version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb42f66ee3ec89b9c411994de59d4710ced19df96fea2059feea1c2d73904c5b" +source = "git+https://github.com/Ekleog-NEAR/bolero?rev=8f4e49d65c702a2f9858ed3c217b1cb52ce91243#8f4e49d65c702a2f9858ed3c217b1cb52ce91243" dependencies = [ "bolero-engine", "cc", diff --git a/Cargo.toml b/Cargo.toml index 51af476acfc..683aa306db9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -109,7 +109,7 @@ bencher = "0.1.5" bitflags = "1.2" blake2 = "0.9.1" bn = { package = "zeropool-bn", version = "0.5.11" } -bolero = { version = "0.10.0", features = ["arbitrary"] } +bolero = { version = "0.10.0", git = 
"https://github.com/Ekleog-NEAR/bolero", rev = "8f4e49d65c702a2f9858ed3c217b1cb52ce91243", features = ["arbitrary"] } borsh = { version = "1.0.0", features = ["derive", "rc"] } bs58 = "0.4" bytes = "1" diff --git a/runtime/near-vm-runner/src/prepare/prepare_v1.rs b/runtime/near-vm-runner/src/prepare/prepare_v1.rs index 36ed2a4ff4d..6a29dadfb4f 100644 --- a/runtime/near-vm-runner/src/prepare/prepare_v1.rs +++ b/runtime/near-vm-runner/src/prepare/prepare_v1.rs @@ -237,7 +237,7 @@ mod test { use crate::logic::{Config, ContractPrepareVersion}; #[test] - fn v1_preparation_generates_valid_contract() { + fn v1_preparation_generates_valid_contract_fuzzer() { let mut config = Config::test(); let prepare_version = ContractPrepareVersion::V1; config.limit_config.contract_prepare_version = prepare_version; diff --git a/runtime/near-vm-runner/src/prepare/prepare_v2.rs b/runtime/near-vm-runner/src/prepare/prepare_v2.rs index c624235e3a8..3e4a854ac1e 100644 --- a/runtime/near-vm-runner/src/prepare/prepare_v2.rs +++ b/runtime/near-vm-runner/src/prepare/prepare_v2.rs @@ -366,7 +366,7 @@ mod test { use crate::VMKind; #[test] - fn v2_preparation_wasmtime_generates_valid_contract() { + fn v2_preparation_wasmtime_generates_valid_contract_fuzzer() { let mut config = Config::test(); let prepare_version = ContractPrepareVersion::V2; config.limit_config.contract_prepare_version = prepare_version; @@ -393,7 +393,7 @@ mod test { } #[test] - fn v2_preparation_near_vm_generates_valid_contract() { + fn v2_preparation_near_vm_generates_valid_contract_fuzzer() { let mut config = Config::test(); let prepare_version = ContractPrepareVersion::V2; config.limit_config.contract_prepare_version = prepare_version; diff --git a/runtime/near-vm-runner/src/tests/fuzzers.rs b/runtime/near-vm-runner/src/tests/fuzzers.rs index e2420d04fe3..b5827cee422 100644 --- a/runtime/near-vm-runner/src/tests/fuzzers.rs +++ b/runtime/near-vm-runner/src/tests/fuzzers.rs @@ -6,7 +6,6 @@ use crate::runner::VMResult; use 
crate::ContractCode; use crate::VMKind; use arbitrary::Arbitrary; -use bolero::check; use core::fmt; use near_primitives_core::runtime::fees::RuntimeFeesConfig; @@ -145,8 +144,8 @@ fn run_fuzz(code: &ContractCode, vm_kind: VMKind) -> VMResult { } #[test] -fn current_vm_does_not_crash() { - check!().with_arbitrary::().for_each(|module: &ArbitraryModule| { +fn current_vm_does_not_crash_fuzzer() { + bolero::check!().with_arbitrary::().for_each(|module: &ArbitraryModule| { let code = ContractCode::new(module.0.module.to_bytes(), None); let config = Config::test(); let _result = run_fuzz(&code, config.vm_kind); @@ -155,8 +154,8 @@ fn current_vm_does_not_crash() { #[test] #[cfg_attr(not(all(feature = "near_vm", target_arch = "x86_64")), ignore)] -fn near_vm_and_wasmtime_agree() { - check!().with_arbitrary::().for_each(|module: &ArbitraryModule| { +fn near_vm_and_wasmtime_agree_fuzzer() { + bolero::check!().with_arbitrary::().for_each(|module: &ArbitraryModule| { let code = ContractCode::new(module.0.module.to_bytes(), None); let near_vm = run_fuzz(&code, VMKind::NearVm).expect("fatal failure"); let wasmtime = run_fuzz(&code, VMKind::Wasmtime).expect("fatal failure"); @@ -166,7 +165,7 @@ fn near_vm_and_wasmtime_agree() { #[test] #[cfg(all(feature = "near_vm", target_arch = "x86_64"))] -fn near_vm_is_reproducible() { +fn near_vm_is_reproducible_fuzzer() { use crate::near_vm_runner::NearVM; use near_primitives::hash::CryptoHash; diff --git a/runtime/near-vm/types/Cargo.toml b/runtime/near-vm/types/Cargo.toml index 6f5def1999d..769a9b0ccae 100644 --- a/runtime/near-vm/types/Cargo.toml +++ b/runtime/near-vm/types/Cargo.toml @@ -23,4 +23,3 @@ bolero.workspace = true [[test]] name = "partial-sum-map" -harness = false diff --git a/runtime/near-vm/types/tests/partial-sum-map/main.rs b/runtime/near-vm/types/tests/partial-sum-map/main.rs index 92d58a8aaf0..dc86898c415 100644 --- a/runtime/near-vm/types/tests/partial-sum-map/main.rs +++ 
b/runtime/near-vm/types/tests/partial-sum-map/main.rs @@ -1,6 +1,7 @@ use near_vm_types::partial_sum_map::{Error, PartialSumMap}; -fn main() { +#[test] +fn partial_sum_map_fuzzer() { bolero::check!().with_type::<(Vec<(u32, u32)>, Vec)>().for_each(|input| { let adds = &input.0; let tests = &input.1; diff --git a/test-utils/runtime-tester/src/fuzzing.rs b/test-utils/runtime-tester/src/fuzzing.rs index 3a1a8db71ec..f228613b157 100644 --- a/test-utils/runtime-tester/src/fuzzing.rs +++ b/test-utils/runtime-tester/src/fuzzing.rs @@ -789,7 +789,7 @@ mod tests { } #[test] - fn fuzz_scenario() { + fn scenario_fuzzer() { bolero::check!() .with_iterations(100) // Limit to 100 iterations, the default of 1000 would be too slow .with_arbitrary::() From 9211120372bdf0fdcbfc00c8a1b8333e9fcdb7ed Mon Sep 17 00:00:00 2001 From: ecp88 <109925246+ecp88@users.noreply.github.com> Date: Wed, 15 Nov 2023 05:33:17 -0600 Subject: [PATCH 05/30] [Snyk] Upgrade @types/react-dom from 18.2.11 to 18.2.12 (#10051) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit

This PR was automatically created by Snyk using the credentials of a real user.


Snyk has created this PR to upgrade @types/react-dom from 18.2.11 to 18.2.12.

:information_source: Keep your dependencies up-to-date. This makes it easier to fix existing vulnerabilities and to more quickly identify and fix newly disclosed vulnerabilities when they affect your project.
- The recommended version is **1 version** ahead of your current version. - The recommended version was released **22 days ago**, on 2023-10-09.
Release notes
Package name: @types/react-dom
  • 18.2.12 - 2023-10-09
  • 18.2.11 - 2023-10-06
from @types/react-dom GitHub release notes

**Note:** *You are seeing this because you or someone else with access to this repository has authorized Snyk to open upgrade PRs.* For more information: 🧐 [View latest project report](https://app.snyk.io/org/ecp88/project/98480bdc-d80b-4fd1-89d7-c4c56a706763?utm_source=github&utm_medium=referral&page=upgrade-pr) 🛠 [Adjust upgrade PR settings](https://app.snyk.io/org/ecp88/project/98480bdc-d80b-4fd1-89d7-c4c56a706763/settings/integration?utm_source=github&utm_medium=referral&page=upgrade-pr) 🔕 [Ignore this dependency or unsubscribe from future upgrade PRs](https://app.snyk.io/org/ecp88/project/98480bdc-d80b-4fd1-89d7-c4c56a706763/settings/integration?pkg=@types/react-dom&utm_source=github&utm_medium=referral&page=upgrade-pr#auto-dep-upgrades) Co-authored-by: snyk-bot --- tools/debug-ui/package-lock.json | 14 +++++++------- tools/debug-ui/package.json | 2 +- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/tools/debug-ui/package-lock.json b/tools/debug-ui/package-lock.json index 04c4d79dbca..9e86a861be6 100644 --- a/tools/debug-ui/package-lock.json +++ b/tools/debug-ui/package-lock.json @@ -11,7 +11,7 @@ "@patternfly/react-log-viewer": "^4.87.101", "@types/node": "^16.18.3", "@types/react": "^18.2.25", - "@types/react-dom": "^18.2.11", + "@types/react-dom": "^18.2.12", "react": "^18.2.0", "react-dom": "^18.2.0", "react-query": "^3.39.3", @@ -3954,9 +3954,9 @@ } }, "node_modules/@types/react-dom": { - "version": "18.2.11", - "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-18.2.11.tgz", - "integrity": "sha512-zq6Dy0EiCuF9pWFW6I6k6W2LdpUixLE4P6XjXU1QHLfak3GPACQfLwEuHzY5pOYa4hzj1d0GxX/P141aFjZsyg==", + "version": "18.2.12", + "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-18.2.12.tgz", + "integrity": "sha512-QWZuiA/7J/hPIGocXreCRbx7wyoeet9ooxfbSA+zbIWqyQEE7GMtRn4A37BdYyksnN+/NDnWgfxZH9UVGDw1hg==", "dependencies": { "@types/react": "*" } @@ -21011,9 +21011,9 @@ } }, "@types/react-dom": { - "version": "18.2.11", - 
"resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-18.2.11.tgz", - "integrity": "sha512-zq6Dy0EiCuF9pWFW6I6k6W2LdpUixLE4P6XjXU1QHLfak3GPACQfLwEuHzY5pOYa4hzj1d0GxX/P141aFjZsyg==", + "version": "18.2.12", + "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-18.2.12.tgz", + "integrity": "sha512-QWZuiA/7J/hPIGocXreCRbx7wyoeet9ooxfbSA+zbIWqyQEE7GMtRn4A37BdYyksnN+/NDnWgfxZH9UVGDw1hg==", "requires": { "@types/react": "*" } diff --git a/tools/debug-ui/package.json b/tools/debug-ui/package.json index f64cbc8d4f6..92c4aa7e54f 100644 --- a/tools/debug-ui/package.json +++ b/tools/debug-ui/package.json @@ -6,7 +6,7 @@ "@patternfly/react-log-viewer": "^4.87.101", "@types/node": "^16.18.3", "@types/react": "^18.2.25", - "@types/react-dom": "^18.2.11", + "@types/react-dom": "^18.2.12", "react": "^18.2.0", "react-dom": "^18.2.0", "react-query": "^3.39.3", From ffa32f836de3a2e445713ba028aff7cbbb028f03 Mon Sep 17 00:00:00 2001 From: Andrei <122784628+andrei-near@users.noreply.github.com> Date: Wed, 15 Nov 2023 13:21:41 +0000 Subject: [PATCH 06/30] Ubuntu 20 distro for GHA binary builds (#10178) neard binaries built on Ubuntu 22 are no compatible with Ubuntu 20 libs: $ ./neard --version ./neard: /lib/x86_64-linux-gnu/libc.so.6: version `GLIBC_2.34' not found (required by ./neard) ./neard: /lib/x86_64-linux-gnu/libc.so.6: version `GLIBC_2.32' not found (required by ./neard) ./neard: /lib/x86_64-linux-gnu/libc.so.6: version `GLIBC_2.33' not found (required by ./neard) --- .github/workflows/neard_assertion_binary.yml | 2 +- .github/workflows/neard_linux_binary.yml | 2 +- .github/workflows/neard_nightly_binary.yml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/neard_assertion_binary.yml b/.github/workflows/neard_assertion_binary.yml index b5d6837e456..e1251792d56 100644 --- a/.github/workflows/neard_assertion_binary.yml +++ b/.github/workflows/neard_assertion_binary.yml @@ -15,7 +15,7 @@ on: jobs: binary-release: 
name: "Build and publish neard binary" - runs-on: "ubuntu-22.04-16core" + runs-on: "ubuntu-20.04-16core" environment: deploy permissions: id-token: write # required to use OIDC authentication diff --git a/.github/workflows/neard_linux_binary.yml b/.github/workflows/neard_linux_binary.yml index e0461f7bc1e..dd02dc990db 100644 --- a/.github/workflows/neard_linux_binary.yml +++ b/.github/workflows/neard_linux_binary.yml @@ -18,7 +18,7 @@ on: jobs: binary-release: name: "Build and publish neard binary" - runs-on: "ubuntu-22.04-16core" + runs-on: "ubuntu-20.04-16core" environment: deploy permissions: id-token: write # required to use OIDC authentication diff --git a/.github/workflows/neard_nightly_binary.yml b/.github/workflows/neard_nightly_binary.yml index 4010cf4d870..9fea5c4cdf9 100644 --- a/.github/workflows/neard_nightly_binary.yml +++ b/.github/workflows/neard_nightly_binary.yml @@ -12,7 +12,7 @@ on: jobs: binary-release: name: "Build and publish neard binary" - runs-on: "ubuntu-22.04-16core" + runs-on: "ubuntu-20.04-16core" environment: deploy permissions: id-token: write # required to use OIDC authentication From 1040452f92eb3cbe62ffd8ce019fb1b3a2b4297d Mon Sep 17 00:00:00 2001 From: Ekleog-NEAR <96595974+Ekleog-NEAR@users.noreply.github.com> Date: Wed, 15 Nov 2023 14:32:31 +0100 Subject: [PATCH 07/30] remove overcommit settings from CI (#10181) Issue #9634 has been solved since. --- .github/workflows/ci.yml | 4 ---- 1 file changed, 4 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index d35f972db2e..352875776d8 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -50,10 +50,6 @@ jobs: flags: "--exclude integration-tests --exclude node-runtime --exclude runtime-params-estimator --exclude near-network --exclude estimator-warehouse" timeout-minutes: 90 steps: - # Some of the tests allocate really sparse maps, so heuristic-based overcommit limits are not - # appropriate here. 
- # FIXME(#9634): remove this once the issue is resolved. - - run: sudo sysctl vm.overcommit_memory=1 || true - uses: actions/checkout@v4 - uses: baptiste0928/cargo-install@21a18ba3bf4a184d1804e8b759930d3471b1c941 with: From e7f879f7f393c1729c4d1fcb3ddd63b845778ce8 Mon Sep 17 00:00:00 2001 From: Shreyan Gupta Date: Wed, 15 Nov 2023 19:04:32 +0530 Subject: [PATCH 08/30] [tracing] Add tracing for Garbage Collection (#10177) Adding logs for generic GC. Tracing for GC during the last block of the epoch during which resharding happened. --- chain/chain/src/chain.rs | 2 +- chain/chain/src/store.rs | 15 ++++++++++++++- core/store/src/flat/manager.rs | 1 + 3 files changed, 16 insertions(+), 2 deletions(-) diff --git a/chain/chain/src/chain.rs b/chain/chain/src/chain.rs index 22c9183c3c8..9895328b84b 100644 --- a/chain/chain/src/chain.rs +++ b/chain/chain/src/chain.rs @@ -1006,7 +1006,7 @@ impl Chain { tries: ShardTries, gc_config: &near_chain_configs::GCConfig, ) -> Result<(), Error> { - let _span = tracing::debug_span!(target: "chain", "clear_data").entered(); + let _span = tracing::debug_span!(target: "garbage_collection", "clear_data").entered(); let head = self.store.head()?; let tail = self.store.tail()?; diff --git a/chain/chain/src/store.rs b/chain/chain/src/store.rs index bef2be9082a..9324c38d46b 100644 --- a/chain/chain/src/store.rs +++ b/chain/chain/src/store.rs @@ -1,6 +1,6 @@ use std::collections::hash_map::Entry; use std::collections::{HashMap, HashSet}; -use std::io; +use std::{fmt, io}; use borsh::{BorshDeserialize, BorshSerialize}; use chrono::Utc; @@ -74,6 +74,16 @@ pub enum GCMode { StateSync { clear_block_info: bool }, } +impl fmt::Debug for GCMode { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + GCMode::Fork(_) => write!(f, "GCMode::Fork"), + GCMode::Canonical(_) => write!(f, "GCMode::Canonical"), + GCMode::StateSync { .. } => write!(f, "GCMode::StateSync"), + } + } +} + /// Accesses the chain store. 
Used to create atomic editable views that can be reverted. pub trait ChainStoreAccess { /// Returns underlaying store. @@ -2344,6 +2354,7 @@ impl<'a> ChainStoreUpdate<'a> { // Now we can proceed to removing the trie state and flat state let mut store_update = self.store().store_update(); for shard_uid in prev_shard_layout.get_shard_uids() { + tracing::info!(target: "garbage_collection", ?block_hash, ?shard_uid, "GC resharding"); runtime.get_tries().delete_trie_for_shard(shard_uid, &mut store_update); runtime .get_flat_storage_manager() @@ -2364,6 +2375,8 @@ impl<'a> ChainStoreUpdate<'a> { ) -> Result<(), Error> { let mut store_update = self.store().store_update(); + tracing::info!(target: "garbage_collection", ?gc_mode, ?block_hash, "GC block_hash"); + // 1. Apply revert insertions or deletions from DBCol::TrieChanges for Trie { let shard_uids_to_gc: Vec<_> = self.get_shard_uids_to_gc(epoch_manager, &block_hash); diff --git a/core/store/src/flat/manager.rs b/core/store/src/flat/manager.rs index ab574b4ca32..77ffc57b203 100644 --- a/core/store/src/flat/manager.rs +++ b/core/store/src/flat/manager.rs @@ -226,6 +226,7 @@ impl FlatStorageManager { let mut flat_storages = self.0.flat_storages.lock().expect(POISONED_LOCK_ERR); if let Some(flat_store) = flat_storages.remove(&shard_uid) { flat_store.clear_state(store_update)?; + tracing::info!(target: "store", ?shard_uid, "remove_flat_storage_for_shard successful"); Ok(true) } else { Ok(false) From 83fe943e4e270db87a89535037d2b3a8909a2c6d Mon Sep 17 00:00:00 2001 From: Aleksandr Logunov Date: Wed, 15 Nov 2023 19:43:47 +0400 Subject: [PATCH 09/30] fix: disable in-memory trie in master (#10183) Until we see that it works on our testing infrastructure. 
cc @robin-near --------- Co-authored-by: Longarithm --- core/store/src/config.rs | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/core/store/src/config.rs b/core/store/src/config.rs index 2e31c560822..ec3fd5fae1a 100644 --- a/core/store/src/config.rs +++ b/core/store/src/config.rs @@ -269,7 +269,12 @@ impl Default for StoreConfig { "sweat_the_oracle.testnet".to_owned(), ], - load_mem_tries_for_shards: vec![ShardUId { shard_id: 3, version: 1 }], + // TODO(#9511): Consider adding here shard id 3 or all shards after + // this feature will be tested. Until that, use at your own risk. + // Doesn't work for resharding. + // It will speed up processing of shards where it is enabled, but + // requires more RAM and takes several minutes on startup. + load_mem_tries_for_shards: Default::default(), load_mem_tries_for_all_shards: false, migration_snapshot: Default::default(), From 4afba2ada5dd6a627b7f55142115abc7e7814bc4 Mon Sep 17 00:00:00 2001 From: Anton Puhach Date: Wed, 15 Nov 2023 17:37:23 +0100 Subject: [PATCH 10/30] refactor: clean up post-state-root code (#10184) This PR removes all the code which was introduced as part of the post-state-root effort. 
--- chain/chain/src/chain.rs | 69 ------- chain/client/src/client.rs | 186 +----------------- core/primitives-core/src/version.rs | 4 - core/primitives/src/block.rs | 3 - core/primitives/src/block_header.rs | 116 ----------- core/primitives/src/sharding.rs | 33 ---- .../src/sharding/shard_chunk_header_inner.rs | 56 ------ core/primitives/src/test_utils.rs | 12 -- core/primitives/src/views.rs | 7 - .../src/tests/client/block_corruption.rs | 1 - .../src/tests/client/process_blocks.rs | 1 - test-utils/testlib/src/process_blocks.rs | 14 -- 12 files changed, 8 insertions(+), 494 deletions(-) diff --git a/chain/chain/src/chain.rs b/chain/chain/src/chain.rs index 9895328b84b..4044b02e5a8 100644 --- a/chain/chain/src/chain.rs +++ b/chain/chain/src/chain.rs @@ -62,7 +62,6 @@ use near_primitives::state_sync::{ use near_primitives::static_clock::StaticClock; use near_primitives::transaction::{ExecutionOutcomeWithIdAndProof, SignedTransaction}; use near_primitives::types::chunk_extra::ChunkExtra; -use near_primitives::types::validator_stake::ValidatorStakeIter; use near_primitives::types::{ AccountId, Balance, BlockExtra, BlockHeight, BlockHeightDelta, EpochId, Gas, MerkleHash, NumBlocks, NumShards, ShardId, StateChangesForSplitStates, StateRoot, @@ -831,73 +830,6 @@ impl Chain { Ok(()) } - pub fn apply_chunk_for_post_state_root( - &self, - shard_id: ShardId, - prev_state_root: StateRoot, - block_height: BlockHeight, - prev_block: &Block, - transactions: &[SignedTransaction], - last_validator_proposals: ValidatorStakeIter, - gas_limit: Gas, - last_chunk_height_included: BlockHeight, - ) -> Result { - let prev_block_hash = prev_block.hash(); - let is_first_block_with_chunk_of_version = check_if_block_is_first_with_chunk_of_version( - self.store(), - self.epoch_manager.as_ref(), - prev_block_hash, - shard_id, - )?; - // TODO(post-state-root): - // This misses outgoing receipts from the last non-post-state-root block B. 
- // Before post-state-root incoming receipts store receipts that are supposed to be applied - // in this block, which corresponds to the outgoing receipts from the previous block. - // After post-state-root incoming receipts store receipts that are the result of executing - // that block, which corresponds to the outgoing receipts from the current block. - // So considering which outgoing receipts correspond to the incoming receipts for the blocks: - // * ... - // * pre-state-root block B-1: outgoing B-2 -> incoming B-1 - // * pre-state-root block B: outgoing B-1 -> incoming B - // * post-state-root block B+1: outgoing B+1 -> incoming B+1 - // * post-state-root block B+2: outgoing B+2 -> incoming B+2 - // * ... - // We can see that outgoing receipts of block B are not stored anywhere in the incoming receipts. - // These receipts can be obtained from the db using get_outgoing_receipts_for_shard since we - // currently track all shard. This will be implemented later along with an intergation test - // to reproduce the issue. 
- let receipts = - collect_receipts_from_response(&self.store.get_incoming_receipts_for_shard( - self.epoch_manager.as_ref(), - shard_id, - *prev_block_hash, - last_chunk_height_included, - )?); - // TODO(post-state-root): block-level fields, take values from the previous block for now - let block_timestamp = prev_block.header().raw_timestamp(); - let block_hash = prev_block_hash; - let random_seed = *prev_block.header().random_value(); - let next_gas_price = prev_block.header().next_gas_price(); - - self.runtime_adapter.apply_transactions( - shard_id, - RuntimeStorageConfig::new(prev_state_root, true), - block_height, - block_timestamp, - prev_block_hash, - &block_hash, - &receipts, - transactions, - last_validator_proposals, - next_gas_price, - gas_limit, - &vec![], - random_seed, - true, - is_first_block_with_chunk_of_version, - ) - } - pub fn save_orphan( &mut self, block: MaybeValidated, @@ -3067,7 +2999,6 @@ impl Chain { (chunk, prev_chunk_header) } chunk @ ShardChunk::V2(_) => (chunk, prev_chunk_header), - ShardChunk::V3(_) => todo!("#9535"), }; let shard_state_header = ShardStateSyncResponseHeaderV2 { diff --git a/chain/client/src/client.rs b/chain/client/src/client.rs index ea793cbeff1..4c5ff5900cc 100644 --- a/chain/client/src/client.rs +++ b/chain/client/src/client.rs @@ -25,7 +25,6 @@ use near_chain::flat_storage_creator::FlatStorageCreator; use near_chain::resharding::StateSplitRequest; use near_chain::state_snapshot_actor::SnapshotCallbacks; use near_chain::test_utils::format_hash; -use near_chain::types::ApplyTransactionResult; use near_chain::types::RuntimeAdapter; use near_chain::types::{ChainConfig, LatestKnown}; use near_chain::{ @@ -61,11 +60,6 @@ use near_primitives::hash::CryptoHash; use near_primitives::merkle::{merklize, MerklePath, PartialMerkleTree}; use near_primitives::network::PeerId; use near_primitives::receipt::Receipt; -use near_primitives::sharding::shard_chunk_header_inner::ShardChunkHeaderInnerV3; -use 
near_primitives::sharding::EncodedShardChunkBody; -use near_primitives::sharding::EncodedShardChunkV2; -use near_primitives::sharding::ShardChunkHeaderInner; -use near_primitives::sharding::ShardChunkHeaderV3; use near_primitives::sharding::StateSyncInfo; use near_primitives::sharding::{ ChunkHash, EncodedShardChunk, PartialEncodedChunk, ReedSolomonWrapper, ShardChunk, @@ -73,7 +67,6 @@ use near_primitives::sharding::{ }; use near_primitives::static_clock::StaticClock; use near_primitives::transaction::SignedTransaction; -use near_primitives::types::validator_stake::ValidatorStakeIter; use near_primitives::types::Gas; use near_primitives::types::StateRoot; use near_primitives::types::{AccountId, ApprovalStake, BlockHeight, EpochId, NumBlocks, ShardId}; @@ -827,35 +820,6 @@ impl Client { validator_signer.validator_id() ); - let ret = self.produce_pre_state_root_chunk( - validator_signer.as_ref(), - prev_block_hash, - epoch_id, - last_header, - next_height, - shard_id, - )?; - - metrics::CHUNK_PRODUCED_TOTAL.inc(); - self.chunk_production_info.put( - (next_height, shard_id), - ChunkProduction { - chunk_production_time: Some(StaticClock::utc()), - chunk_production_duration_millis: Some(timer.elapsed().as_millis() as u64), - }, - ); - Ok(Some(ret)) - } - - fn produce_pre_state_root_chunk( - &mut self, - validator_signer: &dyn ValidatorSigner, - prev_block_hash: CryptoHash, - epoch_id: &EpochId, - last_header: ShardChunkHeader, - next_height: BlockHeight, - shard_id: ShardId, - ) -> Result<(EncodedShardChunk, Vec, Vec), Error> { let shard_uid = self.epoch_manager.shard_id_to_uid(shard_id, epoch_id)?; let chunk_extra = self .chain @@ -917,150 +881,16 @@ impl Client { outgoing_receipts.len(), ); - Ok((encoded_chunk, merkle_paths, outgoing_receipts)) - } - - #[allow(dead_code)] - fn produce_post_state_root_chunk( - &mut self, - validator_signer: &dyn ValidatorSigner, - prev_block_hash: CryptoHash, - epoch_id: &EpochId, - last_header: ShardChunkHeader, - next_height: 
BlockHeight, - shard_id: ShardId, - ) -> Result<(EncodedShardChunk, Vec, Vec), Error> { - let shard_uid = self.epoch_manager.shard_id_to_uid(shard_id, epoch_id)?; - let prev_block = self.chain.get_block(&prev_block_hash)?; - let prev_block_header = prev_block.header(); - let gas_limit; - let prev_gas_used; - let prev_state_root; - let prev_validator_proposals; - let prev_outcome_root; - let prev_balance_burnt; - let prev_outgoing_receipts_root; - match &last_header { - ShardChunkHeader::V3(ShardChunkHeaderV3 { - inner: ShardChunkHeaderInner::V3(last_header_inner), - .. - }) => { - gas_limit = last_header_inner.next_gas_limit; - prev_gas_used = last_header_inner.gas_used; - prev_state_root = last_header_inner.post_state_root; - prev_validator_proposals = - ValidatorStakeIter::new(&last_header_inner.validator_proposals) - .collect::>(); - prev_outcome_root = last_header_inner.outcome_root; - prev_balance_burnt = last_header_inner.balance_burnt; - prev_outgoing_receipts_root = last_header_inner.outgoing_receipts_root; - } - _ => { - let chunk_extra = - self.chain.get_chunk_extra(&prev_block_hash, &shard_uid).map_err(|err| { - Error::ChunkProducer(format!("No chunk extra available: {}", err)) - })?; - gas_limit = chunk_extra.gas_limit(); - prev_gas_used = chunk_extra.gas_used(); - prev_state_root = *chunk_extra.state_root(); - prev_validator_proposals = chunk_extra.validator_proposals().collect(); - prev_outcome_root = *chunk_extra.outcome_root(); - prev_balance_burnt = chunk_extra.balance_burnt(); - let prev_outgoing_receipts = self.chain.get_outgoing_receipts_for_shard( - prev_block_hash, - shard_id, - last_header.height_included(), - )?; - prev_outgoing_receipts_root = - self.calculate_receipts_root(epoch_id, &prev_outgoing_receipts)?; - } - } - #[cfg(feature = "test_features")] - let prev_gas_used = - if self.produce_invalid_chunks { prev_gas_used + 1 } else { prev_gas_used }; - - let transactions = - self.prepare_transactions(shard_uid, gas_limit, prev_state_root, 
prev_block_header)?; - #[cfg(feature = "test_features")] - let transactions = Self::maybe_insert_invalid_transaction( - transactions, - prev_block_hash, - self.produce_invalid_tx_in_chunks, - ); - let num_filtered_transactions = transactions.len(); - let (tx_root, _) = merklize(&transactions); - - // TODO(post-state-root): applying the chunk can be time consuming, so probably - // we should not block the client thread here. - let apply_result = self.chain.apply_chunk_for_post_state_root( - shard_id, - prev_state_root, - // TODO(post-state-root): block-level field, need to double check if using next_height is correct here - next_height, - &prev_block, - &transactions, - ValidatorStakeIter::new(&prev_validator_proposals), - gas_limit, - last_header.height_included(), - )?; - - let (transaction_receipts_parts, encoded_length) = - EncodedShardChunk::encode_transaction_receipts( - &mut self.rs_for_chunk_production, - transactions, - &apply_result.outgoing_receipts, - ) - .map_err(|err| Error::Chunk(err.into()))?; - let mut content = EncodedShardChunkBody { parts: transaction_receipts_parts }; - content.reconstruct(&mut self.rs_for_chunk_production).unwrap(); - let (encoded_merkle_root, merkle_paths) = content.get_merkle_hash_and_paths(); - - let (outcome_root, _) = - ApplyTransactionResult::compute_outcomes_proof(&apply_result.outcomes); - let header_inner = ShardChunkHeaderInnerV3 { - prev_block_hash, - prev_state_root, - prev_outcome_root, - encoded_merkle_root, - encoded_length, - height_created: next_height, - shard_id, - prev_gas_used, - gas_limit, - prev_balance_burnt, - prev_outgoing_receipts_root, - tx_root, - prev_validator_proposals, - post_state_root: apply_result.new_root, - // Currently we don't change gas limit, also with pre-state-root - next_gas_limit: gas_limit, - gas_used: apply_result.total_gas_burnt, - validator_proposals: apply_result.validator_proposals, - outcome_root, - balance_burnt: apply_result.total_balance_burnt, - outgoing_receipts_root: 
self - .calculate_receipts_root(epoch_id, &apply_result.outgoing_receipts)?, - }; - let header = ShardChunkHeaderV3::from_inner( - ShardChunkHeaderInner::V3(header_inner), - validator_signer, - ); - let encoded_chunk = EncodedShardChunk::V2(EncodedShardChunkV2 { - header: ShardChunkHeader::V3(header), - content, - }); - - debug!( - target: "client", - me=%validator_signer.validator_id(), - chunk_hash=%encoded_chunk.chunk_hash().0, - %prev_block_hash, - "Produced post-state-root chunk with {} txs and {} receipts", - num_filtered_transactions, - apply_result.outgoing_receipts.len(), + metrics::CHUNK_PRODUCED_TOTAL.inc(); + self.chunk_production_info.put( + (next_height, shard_id), + ChunkProduction { + chunk_production_time: Some(StaticClock::utc()), + chunk_production_duration_millis: Some(timer.elapsed().as_millis() as u64), + }, ); - Ok((encoded_chunk, merkle_paths, apply_result.outgoing_receipts)) + Ok(Some((encoded_chunk, merkle_paths, outgoing_receipts))) } /// Calculates the root of receipt proofs. diff --git a/core/primitives-core/src/version.rs b/core/primitives-core/src/version.rs index 67a685a5950..45c57cafd5f 100644 --- a/core/primitives-core/src/version.rs +++ b/core/primitives-core/src/version.rs @@ -122,9 +122,6 @@ pub enum ProtocolFeature { RejectBlocksWithOutdatedProtocolVersions, SimpleNightshadeV2, RestrictTla, - /// Enables block production with post-state-root. - /// NEP: https://github.com/near/NEPs/pull/507 - PostStateRoot, /// Increases the number of chunk producers. TestnetFewerBlockProducers, /// Enables chunk validation which is introduced with stateless validation. 
@@ -184,7 +181,6 @@ impl ProtocolFeature { #[cfg(feature = "protocol_feature_reject_blocks_with_outdated_protocol_version")] ProtocolFeature::RejectBlocksWithOutdatedProtocolVersions => 132, ProtocolFeature::SimpleNightshadeV2 => 135, - ProtocolFeature::PostStateRoot => 136, #[cfg(feature = "protocol_feature_chunk_validation")] ProtocolFeature::ChunkValidation => 137, } diff --git a/core/primitives/src/block.rs b/core/primitives/src/block.rs index 34ac2930445..590fcfe773f 100644 --- a/core/primitives/src/block.rs +++ b/core/primitives/src/block.rs @@ -289,9 +289,6 @@ impl Block { BlockHeader::BlockHeaderV4(_) => { debug_assert_eq!(prev.block_ordinal() + 1, block_ordinal) } - BlockHeader::BlockHeaderV5(_) => { - debug_assert_eq!(prev.block_ordinal() + 1, block_ordinal) - } }; let body = BlockBody { chunks, challenges, vrf_value, vrf_proof }; diff --git a/core/primitives/src/block_header.rs b/core/primitives/src/block_header.rs index 715a83dab5f..d9e57cab976 100644 --- a/core/primitives/src/block_header.rs +++ b/core/primitives/src/block_header.rs @@ -196,51 +196,6 @@ pub struct BlockHeaderInnerRestV4 { pub latest_protocol_version: ProtocolVersion, } -/// V4 -> V5: Switch to post-state-root -#[derive(BorshSerialize, BorshDeserialize, serde::Serialize, Debug, Clone, Eq, PartialEq)] -pub struct BlockHeaderInnerRestV5 { - /// Hash of block body - pub block_body_hash: CryptoHash, - /// Root hash of the previous chunks' outgoing receipts in the given block. - pub prev_chunk_outgoing_receipts_root: MerkleHash, - /// Root hash of the chunk headers in the given block. - pub chunk_headers_root: MerkleHash, - /// Root hash of the chunk transactions in the given block. - pub chunk_tx_root: MerkleHash, - /// Root hash of the challenges in the given block. - pub challenges_root: MerkleHash, - /// The output of the randomness beacon - pub random_value: CryptoHash, - /// Validator proposals from the previous chunks. 
- pub prev_validator_proposals: Vec, - /// Mask for new chunks included in the block - pub chunk_mask: Vec, - /// Gas price for chunks in the next block. - pub next_gas_price: Balance, - /// Total supply of tokens in the system - pub total_supply: Balance, - /// List of challenges result from previous block. - pub challenges_result: ChallengesResult, - - /// Last block that has full BFT finality - pub last_final_block: CryptoHash, - /// Last block that has doomslug finality - pub last_ds_final_block: CryptoHash, - - /// The ordinal of the Block on the Canonical Chain - pub block_ordinal: NumBlocks, - - pub prev_height: BlockHeight, - - pub epoch_sync_data_hash: Option, - - /// All the approvals included in this block - pub approvals: Vec>>, - - /// Latest protocol version that this block producer has. - pub latest_protocol_version: ProtocolVersion, -} - /// The part of the block approval that is different for endorsements and skips #[derive(BorshSerialize, BorshDeserialize, serde::Serialize, Debug, Clone, PartialEq, Eq, Hash)] pub enum ApprovalInner { @@ -394,25 +349,6 @@ pub struct BlockHeaderV4 { pub hash: CryptoHash, } -/// V4 -> V5: Switch to post-state-root -#[derive(BorshSerialize, BorshDeserialize, serde::Serialize, Debug, Clone, Eq, PartialEq)] -#[borsh(init=init)] -pub struct BlockHeaderV5 { - pub prev_hash: CryptoHash, - - /// Inner part of the block header that gets hashed, split into two parts, one that is sent - /// to light clients, and the rest - pub inner_lite: BlockHeaderInnerLite, - pub inner_rest: BlockHeaderInnerRestV5, - - /// Signature of the block producer. 
- pub signature: Signature, - - /// Cached value of hash for this block - #[borsh(skip)] - pub hash: CryptoHash, -} - impl BlockHeaderV2 { pub fn init(&mut self) { self.hash = BlockHeader::compute_hash( @@ -443,16 +379,6 @@ impl BlockHeaderV4 { } } -impl BlockHeaderV5 { - pub fn init(&mut self) { - self.hash = BlockHeader::compute_hash( - self.prev_hash, - &borsh::to_vec(&self.inner_lite).expect("Failed to serialize"), - &borsh::to_vec(&self.inner_rest).expect("Failed to serialize"), - ); - } -} - /// Versioned BlockHeader data structure. /// For each next version, document what are the changes between versions. #[derive(BorshSerialize, BorshDeserialize, serde::Serialize, Debug, Clone, Eq, PartialEq)] @@ -461,7 +387,6 @@ pub enum BlockHeader { BlockHeaderV2(Arc), BlockHeaderV3(Arc), BlockHeaderV4(Arc), - BlockHeaderV5(Arc), } impl BlockHeader { @@ -821,7 +746,6 @@ impl BlockHeader { BlockHeader::BlockHeaderV2(header) => &header.hash, BlockHeader::BlockHeaderV3(header) => &header.hash, BlockHeader::BlockHeaderV4(header) => &header.hash, - BlockHeader::BlockHeaderV5(header) => &header.hash, } } @@ -832,7 +756,6 @@ impl BlockHeader { BlockHeader::BlockHeaderV2(header) => &header.prev_hash, BlockHeader::BlockHeaderV3(header) => &header.prev_hash, BlockHeader::BlockHeaderV4(header) => &header.prev_hash, - BlockHeader::BlockHeaderV5(header) => &header.prev_hash, } } @@ -843,7 +766,6 @@ impl BlockHeader { BlockHeader::BlockHeaderV2(header) => &header.signature, BlockHeader::BlockHeaderV3(header) => &header.signature, BlockHeader::BlockHeaderV4(header) => &header.signature, - BlockHeader::BlockHeaderV5(header) => &header.signature, } } @@ -854,7 +776,6 @@ impl BlockHeader { BlockHeader::BlockHeaderV2(header) => header.inner_lite.height, BlockHeader::BlockHeaderV3(header) => header.inner_lite.height, BlockHeader::BlockHeaderV4(header) => header.inner_lite.height, - BlockHeader::BlockHeaderV5(header) => header.inner_lite.height, } } @@ -865,7 +786,6 @@ impl BlockHeader { 
BlockHeader::BlockHeaderV2(_) => None, BlockHeader::BlockHeaderV3(header) => Some(header.inner_rest.prev_height), BlockHeader::BlockHeaderV4(header) => Some(header.inner_rest.prev_height), - BlockHeader::BlockHeaderV5(header) => Some(header.inner_rest.prev_height), } } @@ -876,7 +796,6 @@ impl BlockHeader { BlockHeader::BlockHeaderV2(header) => &header.inner_lite.epoch_id, BlockHeader::BlockHeaderV3(header) => &header.inner_lite.epoch_id, BlockHeader::BlockHeaderV4(header) => &header.inner_lite.epoch_id, - BlockHeader::BlockHeaderV5(header) => &header.inner_lite.epoch_id, } } @@ -887,7 +806,6 @@ impl BlockHeader { BlockHeader::BlockHeaderV2(header) => &header.inner_lite.next_epoch_id, BlockHeader::BlockHeaderV3(header) => &header.inner_lite.next_epoch_id, BlockHeader::BlockHeaderV4(header) => &header.inner_lite.next_epoch_id, - BlockHeader::BlockHeaderV5(header) => &header.inner_lite.next_epoch_id, } } @@ -898,7 +816,6 @@ impl BlockHeader { BlockHeader::BlockHeaderV2(header) => &header.inner_lite.prev_state_root, BlockHeader::BlockHeaderV3(header) => &header.inner_lite.prev_state_root, BlockHeader::BlockHeaderV4(header) => &header.inner_lite.prev_state_root, - BlockHeader::BlockHeaderV5(header) => &header.inner_lite.prev_state_root, } } @@ -917,9 +834,6 @@ impl BlockHeader { BlockHeader::BlockHeaderV4(header) => { &header.inner_rest.prev_chunk_outgoing_receipts_root } - BlockHeader::BlockHeaderV5(header) => { - &header.inner_rest.prev_chunk_outgoing_receipts_root - } } } @@ -930,7 +844,6 @@ impl BlockHeader { BlockHeader::BlockHeaderV2(header) => &header.inner_rest.chunk_headers_root, BlockHeader::BlockHeaderV3(header) => &header.inner_rest.chunk_headers_root, BlockHeader::BlockHeaderV4(header) => &header.inner_rest.chunk_headers_root, - BlockHeader::BlockHeaderV5(header) => &header.inner_rest.chunk_headers_root, } } @@ -941,7 +854,6 @@ impl BlockHeader { BlockHeader::BlockHeaderV2(header) => &header.inner_rest.chunk_tx_root, BlockHeader::BlockHeaderV3(header) => 
&header.inner_rest.chunk_tx_root, BlockHeader::BlockHeaderV4(header) => &header.inner_rest.chunk_tx_root, - BlockHeader::BlockHeaderV5(header) => &header.inner_rest.chunk_tx_root, } } @@ -951,7 +863,6 @@ impl BlockHeader { BlockHeader::BlockHeaderV2(header) => &header.inner_rest.chunk_mask, BlockHeader::BlockHeaderV3(header) => &header.inner_rest.chunk_mask, BlockHeader::BlockHeaderV4(header) => &header.inner_rest.chunk_mask, - BlockHeader::BlockHeaderV5(header) => &header.inner_rest.chunk_mask, }; mask.iter().map(|&x| u64::from(x)).sum::() } @@ -963,7 +874,6 @@ impl BlockHeader { BlockHeader::BlockHeaderV2(header) => &header.inner_rest.challenges_root, BlockHeader::BlockHeaderV3(header) => &header.inner_rest.challenges_root, BlockHeader::BlockHeaderV4(header) => &header.inner_rest.challenges_root, - BlockHeader::BlockHeaderV5(header) => &header.inner_rest.challenges_root, } } @@ -974,7 +884,6 @@ impl BlockHeader { BlockHeader::BlockHeaderV2(header) => &header.inner_lite.prev_outcome_root, BlockHeader::BlockHeaderV3(header) => &header.inner_lite.prev_outcome_root, BlockHeader::BlockHeaderV4(header) => &header.inner_lite.prev_outcome_root, - BlockHeader::BlockHeaderV5(header) => &header.inner_lite.prev_outcome_root, } } @@ -985,7 +894,6 @@ impl BlockHeader { BlockHeader::BlockHeaderV2(_) => None, BlockHeader::BlockHeaderV3(_) => None, BlockHeader::BlockHeaderV4(header) => Some(header.inner_rest.block_body_hash), - BlockHeader::BlockHeaderV5(header) => Some(header.inner_rest.block_body_hash), } } @@ -996,7 +904,6 @@ impl BlockHeader { BlockHeader::BlockHeaderV2(header) => header.inner_lite.timestamp, BlockHeader::BlockHeaderV3(header) => header.inner_lite.timestamp, BlockHeader::BlockHeaderV4(header) => header.inner_lite.timestamp, - BlockHeader::BlockHeaderV5(header) => header.inner_lite.timestamp, } } @@ -1015,9 +922,6 @@ impl BlockHeader { BlockHeader::BlockHeaderV4(header) => { ValidatorStakeIter::new(&header.inner_rest.prev_validator_proposals) } - 
BlockHeader::BlockHeaderV5(header) => { - ValidatorStakeIter::new(&header.inner_rest.prev_validator_proposals) - } } } @@ -1028,7 +932,6 @@ impl BlockHeader { BlockHeader::BlockHeaderV2(header) => &header.inner_rest.chunk_mask, BlockHeader::BlockHeaderV3(header) => &header.inner_rest.chunk_mask, BlockHeader::BlockHeaderV4(header) => &header.inner_rest.chunk_mask, - BlockHeader::BlockHeaderV5(header) => &header.inner_rest.chunk_mask, } } @@ -1039,7 +942,6 @@ impl BlockHeader { BlockHeader::BlockHeaderV2(_) => 0, // not applicable BlockHeader::BlockHeaderV3(header) => header.inner_rest.block_ordinal, BlockHeader::BlockHeaderV4(header) => header.inner_rest.block_ordinal, - BlockHeader::BlockHeaderV5(header) => header.inner_rest.block_ordinal, } } @@ -1050,7 +952,6 @@ impl BlockHeader { BlockHeader::BlockHeaderV2(header) => header.inner_rest.next_gas_price, BlockHeader::BlockHeaderV3(header) => header.inner_rest.next_gas_price, BlockHeader::BlockHeaderV4(header) => header.inner_rest.next_gas_price, - BlockHeader::BlockHeaderV5(header) => header.inner_rest.next_gas_price, } } @@ -1061,7 +962,6 @@ impl BlockHeader { BlockHeader::BlockHeaderV2(header) => header.inner_rest.total_supply, BlockHeader::BlockHeaderV3(header) => header.inner_rest.total_supply, BlockHeader::BlockHeaderV4(header) => header.inner_rest.total_supply, - BlockHeader::BlockHeaderV5(header) => header.inner_rest.total_supply, } } @@ -1072,7 +972,6 @@ impl BlockHeader { BlockHeader::BlockHeaderV2(header) => &header.inner_rest.random_value, BlockHeader::BlockHeaderV3(header) => &header.inner_rest.random_value, BlockHeader::BlockHeaderV4(header) => &header.inner_rest.random_value, - BlockHeader::BlockHeaderV5(header) => &header.inner_rest.random_value, } } @@ -1083,7 +982,6 @@ impl BlockHeader { BlockHeader::BlockHeaderV2(header) => &header.inner_rest.last_final_block, BlockHeader::BlockHeaderV3(header) => &header.inner_rest.last_final_block, BlockHeader::BlockHeaderV4(header) => 
&header.inner_rest.last_final_block, - BlockHeader::BlockHeaderV5(header) => &header.inner_rest.last_final_block, } } @@ -1094,7 +992,6 @@ impl BlockHeader { BlockHeader::BlockHeaderV2(header) => &header.inner_rest.last_ds_final_block, BlockHeader::BlockHeaderV3(header) => &header.inner_rest.last_ds_final_block, BlockHeader::BlockHeaderV4(header) => &header.inner_rest.last_ds_final_block, - BlockHeader::BlockHeaderV5(header) => &header.inner_rest.last_ds_final_block, } } @@ -1105,7 +1002,6 @@ impl BlockHeader { BlockHeader::BlockHeaderV2(header) => &header.inner_rest.challenges_result, BlockHeader::BlockHeaderV3(header) => &header.inner_rest.challenges_result, BlockHeader::BlockHeaderV4(header) => &header.inner_rest.challenges_result, - BlockHeader::BlockHeaderV5(header) => &header.inner_rest.challenges_result, } } @@ -1116,7 +1012,6 @@ impl BlockHeader { BlockHeader::BlockHeaderV2(header) => &header.inner_lite.next_bp_hash, BlockHeader::BlockHeaderV3(header) => &header.inner_lite.next_bp_hash, BlockHeader::BlockHeaderV4(header) => &header.inner_lite.next_bp_hash, - BlockHeader::BlockHeaderV5(header) => &header.inner_lite.next_bp_hash, } } @@ -1127,7 +1022,6 @@ impl BlockHeader { BlockHeader::BlockHeaderV2(header) => &header.inner_lite.block_merkle_root, BlockHeader::BlockHeaderV3(header) => &header.inner_lite.block_merkle_root, BlockHeader::BlockHeaderV4(header) => &header.inner_lite.block_merkle_root, - BlockHeader::BlockHeaderV5(header) => &header.inner_lite.block_merkle_root, } } @@ -1138,7 +1032,6 @@ impl BlockHeader { BlockHeader::BlockHeaderV2(_) => None, BlockHeader::BlockHeaderV3(header) => header.inner_rest.epoch_sync_data_hash, BlockHeader::BlockHeaderV4(header) => header.inner_rest.epoch_sync_data_hash, - BlockHeader::BlockHeaderV5(header) => header.inner_rest.epoch_sync_data_hash, } } @@ -1149,7 +1042,6 @@ impl BlockHeader { BlockHeader::BlockHeaderV2(header) => &header.inner_rest.approvals, BlockHeader::BlockHeaderV3(header) => 
&header.inner_rest.approvals, BlockHeader::BlockHeaderV4(header) => &header.inner_rest.approvals, - BlockHeader::BlockHeaderV5(header) => &header.inner_rest.approvals, } } @@ -1175,7 +1067,6 @@ impl BlockHeader { BlockHeader::BlockHeaderV2(_header) => true, BlockHeader::BlockHeaderV3(_header) => true, BlockHeader::BlockHeaderV4(_header) => true, - BlockHeader::BlockHeaderV5(_header) => true, } } @@ -1186,7 +1077,6 @@ impl BlockHeader { BlockHeader::BlockHeaderV2(header) => header.inner_rest.latest_protocol_version, BlockHeader::BlockHeaderV3(header) => header.inner_rest.latest_protocol_version, BlockHeader::BlockHeaderV4(header) => header.inner_rest.latest_protocol_version, - BlockHeader::BlockHeaderV5(header) => header.inner_rest.latest_protocol_version, } } @@ -1204,9 +1094,6 @@ impl BlockHeader { BlockHeader::BlockHeaderV4(header) => { borsh::to_vec(&header.inner_lite).expect("Failed to serialize") } - BlockHeader::BlockHeaderV5(header) => { - borsh::to_vec(&header.inner_lite).expect("Failed to serialize") - } } } @@ -1224,9 +1111,6 @@ impl BlockHeader { BlockHeader::BlockHeaderV4(header) => { borsh::to_vec(&header.inner_rest).expect("Failed to serialize") } - BlockHeader::BlockHeaderV5(header) => { - borsh::to_vec(&header.inner_rest).expect("Failed to serialize") - } } } } diff --git a/core/primitives/src/sharding.rs b/core/primitives/src/sharding.rs index 815884df2fc..cabe9c1201c 100644 --- a/core/primitives/src/sharding.rs +++ b/core/primitives/src/sharding.rs @@ -663,20 +663,10 @@ pub struct ShardChunkV2 { pub prev_outgoing_receipts: Vec, } -// V2 -> V3: Switch to post-state-root -#[derive(BorshSerialize, BorshDeserialize, Debug, Clone, Eq, PartialEq)] -pub struct ShardChunkV3 { - pub chunk_hash: ChunkHash, - pub header: ShardChunkHeader, - pub transactions: Vec, - pub outgoing_receipts: Vec, -} - #[derive(BorshSerialize, BorshDeserialize, Debug, Clone, Eq, PartialEq)] pub enum ShardChunk { V1(ShardChunkV1), V2(ShardChunkV2), - V3(ShardChunkV3), } impl 
ShardChunk { @@ -698,11 +688,6 @@ impl ShardChunk { transactions: chunk.transactions, prev_outgoing_receipts: chunk.prev_outgoing_receipts, })), - Self::V3(chunk) => Some(ShardChunk::V3(ShardChunkV3 { - chunk_hash: header.chunk_hash(), - header, - ..chunk - })), } } @@ -710,7 +695,6 @@ impl ShardChunk { match self { Self::V1(chunk) => chunk.header.height_included = height, Self::V2(chunk) => *chunk.header.height_included_mut() = height, - Self::V3(chunk) => *chunk.header.height_included_mut() = height, } } @@ -719,7 +703,6 @@ impl ShardChunk { match self { Self::V1(chunk) => chunk.header.height_included, Self::V2(chunk) => chunk.header.height_included(), - Self::V3(chunk) => chunk.header.height_included(), } } @@ -728,7 +711,6 @@ impl ShardChunk { match self { Self::V1(chunk) => chunk.header.inner.height_created, Self::V2(chunk) => chunk.header.height_created(), - Self::V3(chunk) => chunk.header.height_created(), } } @@ -737,7 +719,6 @@ impl ShardChunk { match &self { ShardChunk::V1(chunk) => &chunk.header.inner.prev_block_hash, ShardChunk::V2(chunk) => chunk.header.prev_block_hash(), - ShardChunk::V3(chunk) => chunk.header.prev_block_hash(), } } @@ -746,7 +727,6 @@ impl ShardChunk { match self { Self::V1(chunk) => chunk.header.inner.prev_state_root, Self::V2(chunk) => chunk.header.prev_state_root(), - Self::V3(chunk) => chunk.header.prev_state_root(), } } @@ -755,7 +735,6 @@ impl ShardChunk { match self { Self::V1(chunk) => chunk.header.inner.tx_root, Self::V2(chunk) => chunk.header.tx_root(), - Self::V3(chunk) => chunk.header.tx_root(), } } @@ -764,7 +743,6 @@ impl ShardChunk { match self { Self::V1(chunk) => chunk.header.inner.prev_outgoing_receipts_root, Self::V2(chunk) => chunk.header.prev_outgoing_receipts_root(), - Self::V3(chunk) => chunk.header.prev_outgoing_receipts_root(), } } @@ -773,7 +751,6 @@ impl ShardChunk { match self { Self::V1(chunk) => chunk.header.inner.shard_id, Self::V2(chunk) => chunk.header.shard_id(), - Self::V3(chunk) => 
chunk.header.shard_id(), } } @@ -782,7 +759,6 @@ impl ShardChunk { match self { Self::V1(chunk) => chunk.chunk_hash.clone(), Self::V2(chunk) => chunk.chunk_hash.clone(), - Self::V3(chunk) => chunk.chunk_hash.clone(), } } @@ -791,9 +767,6 @@ impl ShardChunk { match self { Self::V1(chunk) => &chunk.prev_outgoing_receipts, Self::V2(chunk) => &chunk.prev_outgoing_receipts, - Self::V3(_) => { - panic!("post-state-root chunk does not contain previous chunk's outgoing receipts") - } } } @@ -802,7 +775,6 @@ impl ShardChunk { match self { Self::V1(chunk) => &chunk.transactions, Self::V2(chunk) => &chunk.transactions, - Self::V3(chunk) => &chunk.transactions, } } @@ -811,7 +783,6 @@ impl ShardChunk { match self { Self::V1(chunk) => chunk.header.chunk_hash(), Self::V2(chunk) => chunk.header.chunk_hash(), - Self::V3(chunk) => chunk.header.chunk_hash(), } } @@ -820,7 +791,6 @@ impl ShardChunk { match self { Self::V1(chunk) => chunk.header.inner.prev_block_hash, Self::V2(chunk) => *chunk.header.prev_block_hash(), - Self::V3(chunk) => *chunk.header.prev_block_hash(), } } @@ -829,7 +799,6 @@ impl ShardChunk { match self { Self::V1(chunk) => ShardChunkHeader::V1(chunk.header), Self::V2(chunk) => chunk.header, - Self::V3(chunk) => chunk.header, } } @@ -837,7 +806,6 @@ impl ShardChunk { match self { Self::V1(chunk) => ShardChunkHeader::V1(chunk.header.clone()), Self::V2(chunk) => chunk.header.clone(), - Self::V3(chunk) => chunk.header.clone(), } } @@ -845,7 +813,6 @@ impl ShardChunk { match self { Self::V1(chunk) => ShardChunkHeaderV1::compute_hash(&chunk.header.inner), Self::V2(chunk) => chunk.header.compute_hash(), - Self::V3(chunk) => chunk.header.compute_hash(), } } } diff --git a/core/primitives/src/sharding/shard_chunk_header_inner.rs b/core/primitives/src/sharding/shard_chunk_header_inner.rs index 4a267229a48..bd3550df230 100644 --- a/core/primitives/src/sharding/shard_chunk_header_inner.rs +++ b/core/primitives/src/sharding/shard_chunk_header_inner.rs @@ -8,7 +8,6 @@ use 
near_primitives_core::types::{Balance, BlockHeight, Gas, ShardId}; pub enum ShardChunkHeaderInner { V1(ShardChunkHeaderInnerV1), V2(ShardChunkHeaderInnerV2), - V3(ShardChunkHeaderInnerV3), } impl ShardChunkHeaderInner { @@ -17,7 +16,6 @@ impl ShardChunkHeaderInner { match self { Self::V1(inner) => &inner.prev_state_root, Self::V2(inner) => &inner.prev_state_root, - Self::V3(inner) => &inner.prev_state_root, } } @@ -26,7 +24,6 @@ impl ShardChunkHeaderInner { match self { Self::V1(inner) => &inner.prev_block_hash, Self::V2(inner) => &inner.prev_block_hash, - Self::V3(inner) => &inner.prev_block_hash, } } @@ -35,7 +32,6 @@ impl ShardChunkHeaderInner { match self { Self::V1(inner) => inner.gas_limit, Self::V2(inner) => inner.gas_limit, - Self::V3(inner) => inner.gas_limit, } } @@ -44,7 +40,6 @@ impl ShardChunkHeaderInner { match self { Self::V1(inner) => inner.prev_gas_used, Self::V2(inner) => inner.prev_gas_used, - Self::V3(inner) => inner.prev_gas_used, } } @@ -53,7 +48,6 @@ impl ShardChunkHeaderInner { match self { Self::V1(inner) => ValidatorStakeIter::v1(&inner.prev_validator_proposals), Self::V2(inner) => ValidatorStakeIter::new(&inner.prev_validator_proposals), - Self::V3(inner) => ValidatorStakeIter::new(&inner.prev_validator_proposals), } } @@ -62,7 +56,6 @@ impl ShardChunkHeaderInner { match self { Self::V1(inner) => inner.height_created, Self::V2(inner) => inner.height_created, - Self::V3(inner) => inner.height_created, } } @@ -71,7 +64,6 @@ impl ShardChunkHeaderInner { match self { Self::V1(inner) => inner.shard_id, Self::V2(inner) => inner.shard_id, - Self::V3(inner) => inner.shard_id, } } @@ -80,7 +72,6 @@ impl ShardChunkHeaderInner { match self { Self::V1(inner) => &inner.prev_outcome_root, Self::V2(inner) => &inner.prev_outcome_root, - Self::V3(inner) => &inner.prev_outcome_root, } } @@ -89,7 +80,6 @@ impl ShardChunkHeaderInner { match self { Self::V1(inner) => &inner.encoded_merkle_root, Self::V2(inner) => &inner.encoded_merkle_root, - Self::V3(inner) 
=> &inner.encoded_merkle_root, } } @@ -98,7 +88,6 @@ impl ShardChunkHeaderInner { match self { Self::V1(inner) => inner.encoded_length, Self::V2(inner) => inner.encoded_length, - Self::V3(inner) => inner.encoded_length, } } @@ -107,7 +96,6 @@ impl ShardChunkHeaderInner { match self { Self::V1(inner) => inner.prev_balance_burnt, Self::V2(inner) => inner.prev_balance_burnt, - Self::V3(inner) => inner.prev_balance_burnt, } } @@ -116,7 +104,6 @@ impl ShardChunkHeaderInner { match self { Self::V1(inner) => &inner.prev_outgoing_receipts_root, Self::V2(inner) => &inner.prev_outgoing_receipts_root, - Self::V3(inner) => &inner.prev_outgoing_receipts_root, } } @@ -125,7 +112,6 @@ impl ShardChunkHeaderInner { match self { Self::V1(inner) => &inner.tx_root, Self::V2(inner) => &inner.tx_root, - Self::V3(inner) => &inner.tx_root, } } } @@ -182,45 +168,3 @@ pub struct ShardChunkHeaderInnerV2 { /// Validator proposals from the previous chunk. pub prev_validator_proposals: Vec, } - -// V2 -> V3: Switch to post-state-root -#[derive(BorshSerialize, BorshDeserialize, Clone, PartialEq, Eq, Debug)] -pub struct ShardChunkHeaderInnerV3 { - /// Previous block hash. - pub prev_block_hash: CryptoHash, - pub prev_state_root: StateRoot, - /// Root of the outcomes from execution transactions and results of the previous chunk. - pub prev_outcome_root: CryptoHash, - pub encoded_merkle_root: CryptoHash, - pub encoded_length: u64, - pub height_created: BlockHeight, - /// Shard index. - pub shard_id: ShardId, - /// Gas used in the previous chunk. - pub prev_gas_used: Gas, - /// Gas limit voted by validators. - pub gas_limit: Gas, - /// Total balance burnt in the previous chunk. - pub prev_balance_burnt: Balance, - /// Previous chunk's outgoing receipts merkle root. - pub prev_outgoing_receipts_root: CryptoHash, - /// Tx merkle root. - pub tx_root: CryptoHash, - /// Validator proposals from the previous chunk. - pub prev_validator_proposals: Vec, - - /// State root after applying this chunk. 
- pub post_state_root: StateRoot, - /// Gas limit for the next chunk. - pub next_gas_limit: Gas, - /// Gas used in this chunk. - pub gas_used: Gas, - /// Validator proposals from this chunk. - pub validator_proposals: Vec, - /// Root of the outcomes from execution transactions and results of this chunk. - pub outcome_root: CryptoHash, - /// Total balance burnt in this chunk. - pub balance_burnt: Balance, - /// This chunk's outgoing receipts merkle root. - pub outgoing_receipts_root: CryptoHash, -} diff --git a/core/primitives/src/test_utils.rs b/core/primitives/src/test_utils.rs index 18d111e127a..0aaf94ec599 100644 --- a/core/primitives/src/test_utils.rs +++ b/core/primitives/src/test_utils.rs @@ -281,9 +281,6 @@ impl BlockHeader { panic!("old header should not appear in tests") } BlockHeader::BlockHeaderV4(header) => Arc::make_mut(header), - BlockHeader::BlockHeaderV5(_) => { - panic!("post-state-root header should not appear in tests yet") - } } } @@ -305,10 +302,6 @@ impl BlockHeader { let header = Arc::make_mut(header); header.inner_rest.latest_protocol_version = latest_protocol_version; } - BlockHeader::BlockHeaderV5(header) => { - let header = Arc::make_mut(header); - header.inner_rest.latest_protocol_version = latest_protocol_version; - } } } @@ -339,11 +332,6 @@ impl BlockHeader { header.hash = hash; header.signature = signature; } - BlockHeader::BlockHeaderV5(header) => { - let header = Arc::make_mut(header); - header.hash = hash; - header.signature = signature; - } } } } diff --git a/core/primitives/src/views.rs b/core/primitives/src/views.rs index 1a42d891406..580bed3c3d9 100644 --- a/core/primitives/src/views.rs +++ b/core/primitives/src/views.rs @@ -995,7 +995,6 @@ impl From for BlockHeaderInnerLiteView { BlockHeader::BlockHeaderV2(header) => &header.inner_lite, BlockHeader::BlockHeaderV3(header) => &header.inner_lite, BlockHeader::BlockHeaderV4(header) => &header.inner_lite, - BlockHeader::BlockHeaderV5(header) => &header.inner_lite, }; 
BlockHeaderInnerLiteView { height: inner_lite.height, @@ -1153,12 +1152,6 @@ impl ChunkView { transactions: chunk.transactions.into_iter().map(Into::into).collect(), receipts: chunk.prev_outgoing_receipts.into_iter().map(Into::into).collect(), }, - ShardChunk::V3(chunk) => Self { - author, - header: chunk.header.into(), - transactions: chunk.transactions.into_iter().map(Into::into).collect(), - receipts: chunk.outgoing_receipts.into_iter().map(Into::into).collect(), - }, } } } diff --git a/integration-tests/src/tests/client/block_corruption.rs b/integration-tests/src/tests/client/block_corruption.rs index fcfdcce1948..af4d4d19f59 100644 --- a/integration-tests/src/tests/client/block_corruption.rs +++ b/integration-tests/src/tests/client/block_corruption.rs @@ -74,7 +74,6 @@ fn change_shard_id_to_invalid() { ShardChunkHeader::V3(new_chunk) => match &mut new_chunk.inner { ShardChunkHeaderInner::V1(inner) => inner.shard_id = 100, ShardChunkHeaderInner::V2(inner) => inner.shard_id = 100, - ShardChunkHeaderInner::V3(inner) => inner.shard_id = 100, }, }; new_chunks.push(new_chunk); diff --git a/integration-tests/src/tests/client/process_blocks.rs b/integration-tests/src/tests/client/process_blocks.rs index cc5bbac24e3..0f332c5471e 100644 --- a/integration-tests/src/tests/client/process_blocks.rs +++ b/integration-tests/src/tests/client/process_blocks.rs @@ -1200,7 +1200,6 @@ fn test_bad_orphan() { match &mut chunk.inner { ShardChunkHeaderInner::V1(inner) => inner.prev_outcome_root = CryptoHash([1; 32]), ShardChunkHeaderInner::V2(inner) => inner.prev_outcome_root = CryptoHash([1; 32]), - ShardChunkHeaderInner::V3(inner) => inner.prev_outcome_root = CryptoHash([1; 32]), } chunk.hash = ShardChunkHeaderV3::compute_hash(&chunk.inner); } diff --git a/test-utils/testlib/src/process_blocks.rs b/test-utils/testlib/src/process_blocks.rs index 4a0a3e9c489..19915f12767 100644 --- a/test-utils/testlib/src/process_blocks.rs +++ b/test-utils/testlib/src/process_blocks.rs @@ -66,20 
+66,6 @@ pub fn set_no_chunk_in_block(block: &mut Block, prev_block: &Block) { header.inner_rest.total_supply += balance_burnt; header.inner_rest.block_body_hash = block_body_hash.unwrap(); } - BlockHeader::BlockHeaderV5(header) => { - let header = Arc::make_mut(header); - header.inner_rest.chunk_headers_root = - Block::compute_chunk_headers_root(&chunk_headers).0; - header.inner_rest.chunk_tx_root = Block::compute_chunk_tx_root(&chunk_headers); - header.inner_rest.prev_chunk_outgoing_receipts_root = - Block::compute_chunk_prev_outgoing_receipts_root(&chunk_headers); - header.inner_lite.prev_state_root = Block::compute_state_root(&chunk_headers); - header.inner_lite.prev_outcome_root = Block::compute_outcome_root(&chunk_headers); - header.inner_rest.chunk_mask = vec![false]; - header.inner_rest.next_gas_price = prev_block.header().next_gas_price(); - header.inner_rest.total_supply += balance_burnt; - header.inner_rest.block_body_hash = block_body_hash.unwrap(); - } } let validator_signer = create_test_signer("test0"); block.mut_header().resign(&validator_signer); From d782f038ea5b0e5f35781e38d0b65c449485cb4f Mon Sep 17 00:00:00 2001 From: wacban Date: Wed, 15 Nov 2023 17:22:10 +0000 Subject: [PATCH 11/30] feat(resharding): implemented error handling and test for it (#10179) - added a new ReshardingStatus - Failed - and use it when resharding fails - made a number of delays and timeouts configurable so that it's testable - added a new nayduck test for checking the error handling - added a bunch of new debug, warn and error logs https://nayduck.near.org/#/run/3270 --- chain/chain/src/chain.rs | 2 - chain/chain/src/metrics.rs | 3 + chain/chain/src/resharding.rs | 47 +++-- chain/client/src/client.rs | 1 + chain/client/src/sync/state.rs | 2 +- chain/client/src/sync_jobs_actor.rs | 24 ++- core/chain-configs/src/client_config.rs | 14 ++ nightly/pytest-sanity.txt | 4 + pytest/lib/cluster.py | 25 +++ pytest/lib/resharding_lib.py | 9 + pytest/lib/utils.py | 38 ++-- 
pytest/tests/sanity/resharding.py | 9 +- .../tests/sanity/resharding_error_handling.py | 173 ++++++++++++++++++ tools/database/src/commands.rs | 5 + tools/database/src/corrupt.rs | 52 ++++++ tools/database/src/lib.rs | 1 + tools/database/src/utils.rs | 27 ++- 17 files changed, 395 insertions(+), 41 deletions(-) create mode 100644 pytest/tests/sanity/resharding_error_handling.py create mode 100644 tools/database/src/corrupt.rs diff --git a/chain/chain/src/chain.rs b/chain/chain/src/chain.rs index 4044b02e5a8..f1c5d7157ec 100644 --- a/chain/chain/src/chain.rs +++ b/chain/chain/src/chain.rs @@ -2078,8 +2078,6 @@ impl Chain { "start_process_block_impl", height = block_height) .entered(); - - tracing::debug!(target: "chain", "start process block"); // 0) Before we proceed with any further processing, we first check that the block // hash and signature matches to make sure the block is indeed produced by the assigned // block producer. If not, we drop the block immediately diff --git a/chain/chain/src/metrics.rs b/chain/chain/src/metrics.rs index 57f3c3ff39d..b3163bde6a8 100644 --- a/chain/chain/src/metrics.rs +++ b/chain/chain/src/metrics.rs @@ -148,6 +148,8 @@ pub(crate) enum ReshardingStatus { BuildingState, /// The resharding is finished. Finished, + /// The resharding failed. Manual recovery is necessary! + Failed, } impl From for i64 { @@ -158,6 +160,7 @@ impl From for i64 { ReshardingStatus::Scheduled => 0, ReshardingStatus::BuildingState => 1, ReshardingStatus::Finished => 2, + ReshardingStatus::Failed => -1, } } } diff --git a/chain/chain/src/resharding.rs b/chain/chain/src/resharding.rs index 820f68822b5..486fd856d73 100644 --- a/chain/chain/src/resharding.rs +++ b/chain/chain/src/resharding.rs @@ -31,8 +31,6 @@ use std::sync::Arc; use std::time::Duration; use tracing::debug; -const MAX_RESHARDING_POLL_TIME: Duration = Duration::from_secs(5 * 60 * 60); // 5 hrs - /// StateSplitRequest has all the information needed to start a resharding job. 
This message is sent /// from ClientActor to SyncJobsActor. We do not want to stall the ClientActor with a long running /// resharding job. The SyncJobsActor is helpful for handling such long running jobs. @@ -68,7 +66,8 @@ impl Debug for StateSplitRequest { .field("prev_prev_hash", &self.prev_prev_hash) .field("shard_uid", &self.shard_uid) .field("state_root", &self.state_root) - .field("next_epoch_shard_layout", &self.next_epoch_shard_layout) + .field("next_epoch_shard_layout_version", &self.next_epoch_shard_layout.version()) + .field("curr_poll_time", &self.curr_poll_time) .finish() } } @@ -200,6 +199,7 @@ impl Chain { shard_id: ShardId, state_split_scheduler: &dyn Fn(StateSplitRequest), ) -> Result<(), Error> { + tracing::debug!(target: "resharding", ?shard_id, ?sync_hash, "preprocessing started"); let block_header = self.get_block_header(sync_hash)?; let shard_layout = self.epoch_manager.get_shard_layout(block_header.epoch_id())?; let next_epoch_shard_layout = @@ -233,26 +233,47 @@ impl Chain { /// Function to check whether the snapshot is ready for resharding or not. We return true if the snapshot is not /// ready and we need to retry/reschedule the resharding job. pub fn retry_build_state_for_split_shards(state_split_request: &StateSplitRequest) -> bool { - let StateSplitRequest { tries, prev_prev_hash, curr_poll_time, .. } = state_split_request; - // Do not retry if we have spent more than MAX_RESHARDING_POLL_TIME + let StateSplitRequest { tries, prev_prev_hash, curr_poll_time, config, .. 
} = + state_split_request; + + // Do not retry if we have spent more than max_poll_time // The error would be caught in build_state_for_split_shards and propagated to client actor - if curr_poll_time > &MAX_RESHARDING_POLL_TIME { + if curr_poll_time > &config.max_poll_time { + tracing::warn!(target: "resharding", ?curr_poll_time, ?config.max_poll_time, "exceeded max poll time while waiting for snapsthot"); return false; } - tries.get_state_snapshot(prev_prev_hash).is_err_and(|err| match err { - SnapshotError::SnapshotNotFound(_) => true, - SnapshotError::LockWouldBlock => true, - SnapshotError::IncorrectSnapshotRequested(_, _) => false, - SnapshotError::Other(_) => false, - }) + + let state_snapshot = tries.get_state_snapshot(prev_prev_hash); + if let Err(err) = state_snapshot { + tracing::debug!(target: "resharding", ?err, "state snapshot is not ready"); + return match err { + SnapshotError::SnapshotNotFound(_) => true, + SnapshotError::LockWouldBlock => true, + SnapshotError::IncorrectSnapshotRequested(_, _) => false, + SnapshotError::Other(_) => false, + }; + } + + // The snapshot is Ok, no need to retry. 
+ return false; } pub fn build_state_for_split_shards( state_split_request: StateSplitRequest, ) -> StateSplitResponse { - let shard_id = state_split_request.shard_uid.shard_id(); + let shard_uid = state_split_request.shard_uid; + let shard_id = shard_uid.shard_id(); let sync_hash = state_split_request.sync_hash; let new_state_roots = Self::build_state_for_split_shards_impl(state_split_request); + match &new_state_roots { + Ok(_) => {} + Err(err) => { + tracing::error!(target: "resharding", ?shard_uid, ?err, "Resharding failed, manual recovery is necessary!"); + RESHARDING_STATUS + .with_label_values(&[&shard_uid.to_string()]) + .set(ReshardingStatus::Failed.into()); + } + } StateSplitResponse { shard_id, sync_hash, new_state_roots } } diff --git a/chain/client/src/client.rs b/chain/client/src/client.rs index 4c5ff5900cc..bfcbcf4c7f5 100644 --- a/chain/client/src/client.rs +++ b/chain/client/src/client.rs @@ -2218,6 +2218,7 @@ impl Client { let (state_sync, shards_to_split, blocks_catch_up_state) = self.catchup_state_syncs.entry(sync_hash).or_insert_with(|| { + tracing::debug!(target: "client", ?sync_hash, "inserting new state sync"); notify_state_sync = true; ( StateSync::new( diff --git a/chain/client/src/sync/state.rs b/chain/client/src/sync/state.rs index 32aab9dde6c..8650e276f41 100644 --- a/chain/client/src/sync/state.rs +++ b/chain/client/src/sync/state.rs @@ -1041,7 +1041,7 @@ impl StateSync { shard_id, state_split_scheduler, )?; - tracing::debug!(target: "sync", %shard_id, %sync_hash, ?me, "State sync split scheduled"); + tracing::debug!(target: "sync", %shard_id, %sync_hash, ?me, "resharding scheduled"); *shard_sync_download = ShardSyncDownload { downloads: vec![], status: ShardSyncStatus::StateSplitApplying }; Ok(()) diff --git a/chain/client/src/sync_jobs_actor.rs b/chain/client/src/sync_jobs_actor.rs index 2b36b3f1231..a16887339bf 100644 --- a/chain/client/src/sync_jobs_actor.rs +++ b/chain/client/src/sync_jobs_actor.rs @@ -1,5 +1,6 @@ use 
crate::ClientActor; use actix::AsyncContext; +use std::time::Duration; use near_chain::chain::{ do_apply_chunks, ApplyStatePartsRequest, ApplyStatePartsResponse, BlockCatchUpRequest, @@ -155,18 +156,29 @@ impl actix::Handler> for SyncJobsActor { msg: WithSpanContext, context: &mut Self::Context, ) -> Self::Result { - let (_span, mut state_split_request) = handler_debug_span!(target: "client", msg); + let (_span, mut state_split_request) = handler_debug_span!(target: "resharding", msg); + + // Wait for the initial delay. It should only be used in tests. + let initial_delay = state_split_request.config.initial_delay; + if state_split_request.curr_poll_time == Duration::ZERO && initial_delay > Duration::ZERO { + tracing::debug!(target: "resharding", ?state_split_request, ?initial_delay, "Waiting for the initial delay"); + state_split_request.curr_poll_time += initial_delay; + context.notify_later(state_split_request.with_span_context(), initial_delay); + return; + } + if Chain::retry_build_state_for_split_shards(&state_split_request) { // Actix implementation let's us send message to ourselves with a delay. // In case snapshots are not ready yet, we will retry resharding later. 
- tracing::debug!(target: "client", ?state_split_request, "Snapshot missing, retrying resharding later"); let retry_delay = state_split_request.config.retry_delay; + tracing::debug!(target: "resharding", ?state_split_request, ?retry_delay, "Snapshot missing, retrying resharding later"); state_split_request.curr_poll_time += retry_delay; context.notify_later(state_split_request.with_span_context(), retry_delay); - } else { - tracing::debug!(target: "client", ?state_split_request, "Starting resharding"); - let response = Chain::build_state_for_split_shards(state_split_request); - self.client_addr.do_send(response.with_span_context()); + return; } + + tracing::debug!(target: "resharding", ?state_split_request, "Starting resharding"); + let response = Chain::build_state_for_split_shards(state_split_request); + self.client_addr.do_send(response.with_span_context()); } } diff --git a/core/chain-configs/src/client_config.rs b/core/chain-configs/src/client_config.rs index 8b7a639685e..a85c0f62e39 100644 --- a/core/chain-configs/src/client_config.rs +++ b/core/chain-configs/src/client_config.rs @@ -177,6 +177,15 @@ pub struct StateSplitConfig { /// The delay between attempts to start resharding while waiting for the /// state snapshot to become available. pub retry_delay: Duration, + + /// The delay between the resharding request is received and when the actor + /// actually starts working on it. This delay should only be used in tests. + pub initial_delay: Duration, + + /// The maximum time that the actor will wait for the snapshot to be ready, + /// before starting resharding. Do not wait indefinitely since we want to + /// report error early enough for the node maintainer to have time to recover. 
+ pub max_poll_time: Duration, } impl Default for StateSplitConfig { @@ -187,6 +196,11 @@ impl Default for StateSplitConfig { batch_size: bytesize::ByteSize::kb(500), batch_delay: Duration::from_millis(100), retry_delay: Duration::from_secs(10), + initial_delay: Duration::from_secs(0), + // The snapshot typically is available within a minute from the + // epoch start. Set the default higher in case we need to wait for + // state sync. + max_poll_time: Duration::from_secs(2 * 60 * 60), // 2 hours } } } diff --git a/nightly/pytest-sanity.txt b/nightly/pytest-sanity.txt index f23f8204bb1..81948339ba8 100644 --- a/nightly/pytest-sanity.txt +++ b/nightly/pytest-sanity.txt @@ -150,3 +150,7 @@ pytest --timeout=600 sanity/split_storage.py --features nightly # Test for resharding pytest --timeout=120 sanity/resharding.py pytest --timeout=120 sanity/resharding.py --features nightly + +# Test for resharding error handling +pytest --timeout=120 sanity/resharding_error_handling.py +pytest --timeout=120 sanity/resharding_error_handling.py --features nightly diff --git a/pytest/lib/cluster.py b/pytest/lib/cluster.py index b685c9f313c..b902883005c 100644 --- a/pytest/lib/cluster.py +++ b/pytest/lib/cluster.py @@ -825,6 +825,7 @@ def apply_config_changes(node_dir, client_config_change): 'consensus.block_fetch_horizon', 'consensus.min_block_production_delay', 'consensus.state_sync_timeout', + 'expected_shutdown', 'log_summary_period', 'max_gas_burnt_view', 'rosetta_rpc', @@ -982,3 +983,27 @@ def get_binary_protocol_version(config) -> typing.Optional[int]: if tokens[i] == "protocol" and i + 1 < n: return int(tokens[i + 1]) return None + + +def corrupt_state_snapshot(config, node_dir, shard_layout_version): + near_root = config['near_root'] + binary_name = config.get('binary_name', 'neard') + binary_path = os.path.join(near_root, binary_name) + + cmd = [ + binary_path, + "--home", + node_dir, + "database", + "corrupt-state-snapshot", + "--shard-layout-version", + 
str(shard_layout_version), + ] + + env = os.environ.copy() + env["RUST_BACKTRACE"] = "1" + env["RUST_LOG"] = "db=warn,db_opener=warn," + env.get("RUST_LOG", "debug") + + out = subprocess.check_output(cmd, text=True, env=env) + + return out diff --git a/pytest/lib/resharding_lib.py b/pytest/lib/resharding_lib.py index ff2dc471370..41d19e12a92 100644 --- a/pytest/lib/resharding_lib.py +++ b/pytest/lib/resharding_lib.py @@ -113,3 +113,12 @@ def get_target_num_shards(binary_protocol_version): return 4 assert False + + +def get_epoch_offset(binary_protocol_version): + if binary_protocol_version >= V2_PROTOCOL_VERSION: + return 1 + if binary_protocol_version >= V1_PROTOCOL_VERSION: + return 0 + + assert False diff --git a/pytest/lib/utils.py b/pytest/lib/utils.py index 3c087a2791c..42b0113e0ba 100644 --- a/pytest/lib/utils.py +++ b/pytest/lib/utils.py @@ -144,25 +144,33 @@ def get_all_metrics(self) -> str: f"Could not fetch metrics from {self.addr}: {response}") return response.content.decode('utf-8') + def get_metric_all_values( + self, metric_name: str) -> typing.List[typing.Tuple[str, str]]: + for family in text_string_to_metric_families(self.get_all_metrics()): + if family.name == metric_name: + return [ + (sample.labels, sample.value) for sample in family.samples + ] + return [] + def get_metric_value( self, metric_name: str, labels: typing.Optional[typing.Dict[str, str]] = None ) -> typing.Optional[str]: - for family in text_string_to_metric_families(self.get_all_metrics()): - if family.name == metric_name: - all_samples = [sample for sample in family.samples] - if not labels: - if len(all_samples) > 1: - raise AssertionError( - f"Too many metric values ({len(all_samples)}) for {metric_name} - please specify a label" - ) - if not all_samples: - return None - return all_samples[0].value - for sample in all_samples: - if sample.labels == labels: - return sample.value + all_samples = self.get_metric_all_values(metric_name) + if not labels: + if len(all_samples) > 1: + 
raise AssertionError( + f"Too many metric values ({len(all_samples)}) for {metric_name} - please specify a label" + ) + if not all_samples: + return None + (sample_labels, sample_value) = all_samples[0] + return sample_value + for (sample_labels, sample_value) in all_samples: + if sample_labels == labels: + return sample_value return None def get_int_metric_value( @@ -172,7 +180,7 @@ def get_int_metric_value( ) -> typing.Optional[int]: """Helper function to return the integer value of the metric (as function above returns strings).""" value = self.get_metric_value(metric_name, labels) - if not value: + if value is None: return None return round(float(value)) diff --git a/pytest/tests/sanity/resharding.py b/pytest/tests/sanity/resharding.py index 97af3b0765c..9715cbbf963 100644 --- a/pytest/tests/sanity/resharding.py +++ b/pytest/tests/sanity/resharding.py @@ -15,7 +15,7 @@ from configured_logger import logger from cluster import get_binary_protocol_version, init_cluster, load_config, spin_up_node from utils import MetricsTracker, poll_blocks -from resharding_lib import append_shard_layout_config_changes, get_genesis_num_shards, get_genesis_shard_layout_version, get_target_num_shards, get_target_shard_layout_version +from resharding_lib import append_shard_layout_config_changes, get_epoch_offset, get_genesis_num_shards, get_genesis_shard_layout_version, get_target_num_shards, get_target_shard_layout_version class ReshardingTest(unittest.TestCase): @@ -36,6 +36,8 @@ def setUp(self) -> None: self.target_num_shards = get_target_num_shards( self.binary_protocol_version) + self.epoch_offset = get_epoch_offset(self.binary_protocol_version) + def __get_genesis_config_changes(self): genesis_config_changes = [ ["epoch_length", self.epoch_length], @@ -62,7 +64,7 @@ def __get_client_config_changes(self, num_nodes): # retry often to start resharding as fast as possible "retry_delay": { "secs": 0, - "nanos": 500_000_000 + "nanos": 100_000_000 } } } @@ -114,7 +116,8 @@ def 
test_resharding(self): # after the block is processed. If there is some delay the shard # layout may change and the assertions below will fail. - if height <= 2 * self.epoch_length + 1: + # TODO(resharding) Why is epoch offset needed here? + if height <= 2 * self.epoch_length + self.epoch_offset: self.assertEqual(version, self.genesis_shard_layout_version) self.assertEqual(num_shards, self.genesis_num_shards) else: diff --git a/pytest/tests/sanity/resharding_error_handling.py b/pytest/tests/sanity/resharding_error_handling.py new file mode 100644 index 00000000000..82424812e97 --- /dev/null +++ b/pytest/tests/sanity/resharding_error_handling.py @@ -0,0 +1,173 @@ +#!/usr/bin/env python3 + +# Test for checking error handling during resharding. Spins up a few nodes from +# genesis with the previous shard layout. Stops the nodes in the middle of the +# epoch before resharding and corrupts the state snapshot. Resumes the nodes and +# verifies that the error is reported correctly. + +# Usage: +# python3 pytest/tests/sanity/resharding_error_handling.py + +import unittest +import sys +import pathlib + +sys.path.append(str(pathlib.Path(__file__).resolve().parents[2] / 'lib')) + +from configured_logger import logger +from cluster import corrupt_state_snapshot, get_binary_protocol_version, init_cluster, load_config, spin_up_node +from utils import MetricsTracker, poll_blocks, wait_for_blocks +from resharding_lib import append_shard_layout_config_changes, get_genesis_num_shards, get_genesis_shard_layout_version, get_target_num_shards, get_target_shard_layout_version + + +# TODO(resharding): refactor the resharding tests to re-use the common logic +class ReshardingErrorHandlingTest(unittest.TestCase): + + def setUp(self) -> None: + self.epoch_length = 10 + self.config = load_config() + self.binary_protocol_version = get_binary_protocol_version(self.config) + assert self.binary_protocol_version is not None + + self.genesis_shard_layout_version = get_genesis_shard_layout_version( 
+ self.binary_protocol_version) + self.target_shard_layout_version = get_target_shard_layout_version( + self.binary_protocol_version) + + self.genesis_num_shards = get_genesis_num_shards( + self.binary_protocol_version) + self.target_num_shards = get_target_num_shards( + self.binary_protocol_version) + + def __get_genesis_config_changes(self): + genesis_config_changes = [ + ["epoch_length", self.epoch_length], + ] + + append_shard_layout_config_changes( + genesis_config_changes, + self.binary_protocol_version, + logger, + ) + + return genesis_config_changes + + def __get_client_config_changes(self, num_nodes): + single = { + "tracked_shards": [0], + # arbitrary long initial delay to not trigger resharding + # will get overwritten before restarting the node + "state_split_config": self.__get_state_split_config(10) + } + return {i: single for i in range(num_nodes)} + + def __get_state_split_config(self, initial_delay): + return { + "batch_size": 1000000, + # don't throttle resharding + "batch_delay": { + "secs": 0, + "nanos": 0, + }, + # retry often to start resharding as fast as possible + "retry_delay": { + "secs": 0, + "nanos": 100_000_000 + }, + "initial_delay": { + "secs": initial_delay, + "nanos": 0 + }, + } + + # timeline by block number + # epoch_length + 2 - snapshot is requested + # epoch_length + 3 - snapshot is finished + # epoch_length + 4 - stop the nodes, corrupt the snapshot, start nodes + # epoch_length + 4 - resharding starts and fails + # epoch_length * 2 + 1 - last block while node is still healthy before chain + # upgrades to the new shard layout + def test_resharding(self): + logger.info("The resharding test is starting.") + num_nodes = 2 + + genesis_config_changes = self.__get_genesis_config_changes() + client_config_changes = self.__get_client_config_changes(num_nodes) + + near_root, [node0_dir, node1_dir] = init_cluster( + num_nodes=num_nodes, + num_observers=0, + num_shards=1, + config=self.config, + 
genesis_config_changes=genesis_config_changes, + client_config_changes=client_config_changes, + ) + + node0 = spin_up_node( + self.config, + near_root, + node0_dir, + 0, + ) + node1 = spin_up_node( + self.config, + near_root, + node1_dir, + 1, + boot_node=node0, + ) + + logger.info("wait until the snapshot is ready") + wait_for_blocks(node0, target=self.epoch_length + 4) + wait_for_blocks(node1, target=self.epoch_length + 4) + + logger.info("the snapshot should be ready, stopping nodes") + node0.kill(gentle=True) + node1.kill(gentle=True) + + logger.info("corrupting the state snapshot of node0") + output = corrupt_state_snapshot( + self.config, + node0_dir, + self.genesis_shard_layout_version, + ) + logger.info(f"corrupted state snapshot\n{output}") + + # Update the initial delay to start resharding as soon as possible. + client_config_changes = { + "state_split_config": self.__get_state_split_config(initial_delay=0) + } + node0.change_config(client_config_changes) + node1.change_config(client_config_changes) + + logger.info("restarting nodes") + node0.start() + node1.start(boot_node=node0) + + all_failed_observed = False + + metrics = MetricsTracker(node0) + for height, _ in poll_blocks(node0): + status = metrics.get_metric_all_values("near_resharding_status") + logger.info(f"#{height} resharding status {status}") + + if len(status) > 0: + all_failed = all([s == -1.0 for (_, s) in status]) + all_failed_observed = all_failed_observed or all_failed + + # The node should be able to survive until the end of the epoch even + # though resharding is broken. Only break after the last block of epoch. + if height >= self.epoch_length * 2: + break + + node0.kill(gentle=True) + node1.kill(gentle=True) + + # Resharding should fail for all shards. 
+ self.assertTrue(all_failed_observed) + + logger.info("The resharding error handling test is finished.") + + +if __name__ == '__main__': + unittest.main() diff --git a/tools/database/src/commands.rs b/tools/database/src/commands.rs index e0fabe763bf..f3e656e68e7 100644 --- a/tools/database/src/commands.rs +++ b/tools/database/src/commands.rs @@ -1,6 +1,7 @@ use crate::adjust_database::ChangeDbKindCommand; use crate::analyse_data_size_distribution::AnalyseDataSizeDistributionCommand; use crate::compact::RunCompactionCommand; +use crate::corrupt::CorruptStateSnapshotCommand; use crate::make_snapshot::MakeSnapshotCommand; use crate::memtrie::LoadMemTrieCommand; use crate::run_migrations::RunMigrationsCommand; @@ -26,6 +27,9 @@ enum SubCommand { /// Run SST file compaction on database CompactDatabase(RunCompactionCommand), + /// Corrupt the state snapshot. + CorruptStateSnapshot(CorruptStateSnapshotCommand), + /// Make snapshot of the database MakeSnapshot(MakeSnapshotCommand), @@ -46,6 +50,7 @@ impl DatabaseCommand { SubCommand::AnalyseDataSizeDistribution(cmd) => cmd.run(home), SubCommand::ChangeDbKind(cmd) => cmd.run(home), SubCommand::CompactDatabase(cmd) => cmd.run(home), + SubCommand::CorruptStateSnapshot(cmd) => cmd.run(home), SubCommand::MakeSnapshot(cmd) => { let near_config = nearcore::config::load_config( &home, diff --git a/tools/database/src/corrupt.rs b/tools/database/src/corrupt.rs new file mode 100644 index 00000000000..d172cc52019 --- /dev/null +++ b/tools/database/src/corrupt.rs @@ -0,0 +1,52 @@ +use crate::utils::open_state_snapshot; +use anyhow::anyhow; +use clap::Parser; +use near_primitives::shard_layout::{ShardLayout, ShardVersion}; +use near_store::{flat::FlatStorageManager, ShardUId, StoreUpdate}; +use std::path::PathBuf; + +#[derive(Parser)] +pub(crate) struct CorruptStateSnapshotCommand { + #[clap(short, long)] + shard_layout_version: ShardVersion, +} + +impl CorruptStateSnapshotCommand { + pub(crate) fn run(&self, home: &PathBuf) -> 
anyhow::Result<()> { + let store = open_state_snapshot(home, near_store::Mode::ReadWrite)?; + let flat_storage_manager = FlatStorageManager::new(store.clone()); + + let mut store_update = store.store_update(); + // TODO(resharding) automatically detect the shard version + let shard_uids = match self.shard_layout_version { + 0 => ShardLayout::v0(1, 0).get_shard_uids(), + 1 => ShardLayout::get_simple_nightshade_layout().get_shard_uids(), + 2 => ShardLayout::get_simple_nightshade_layout_v2().get_shard_uids(), + _ => { + return Err(anyhow!( + "Unsupported shard layout version! {}", + self.shard_layout_version + )) + } + }; + for shard_uid in shard_uids { + corrupt(&mut store_update, &flat_storage_manager, shard_uid)?; + } + store_update.commit().unwrap(); + + println!("corrupted the state snapshot"); + + Ok(()) + } +} + +fn corrupt( + store_update: &mut StoreUpdate, + flat_storage_manager: &FlatStorageManager, + shard_uid: ShardUId, +) -> Result<(), anyhow::Error> { + flat_storage_manager.create_flat_storage_for_shard(shard_uid)?; + let result = flat_storage_manager.remove_flat_storage_for_shard(shard_uid, store_update)?; + println!("removed flat storage for shard {shard_uid:?} result is {result}"); + Ok(()) +} diff --git a/tools/database/src/lib.rs b/tools/database/src/lib.rs index c575892b5a5..6d7e70f1691 100644 --- a/tools/database/src/lib.rs +++ b/tools/database/src/lib.rs @@ -2,6 +2,7 @@ mod adjust_database; mod analyse_data_size_distribution; pub mod commands; mod compact; +mod corrupt; mod make_snapshot; mod memtrie; mod run_migrations; diff --git a/tools/database/src/utils.rs b/tools/database/src/utils.rs index 6d6a18493a9..558d6b86d9c 100644 --- a/tools/database/src/utils.rs +++ b/tools/database/src/utils.rs @@ -1,10 +1,11 @@ +use std::fs; use std::path::Path; use anyhow::anyhow; use near_primitives::hash::CryptoHash; use near_primitives::shard_layout::get_block_shard_uid; use near_store::flat::{store_helper, BlockInfo}; -use near_store::{DBCol, ShardUId, 
Store}; +use near_store::{DBCol, NodeStorage, ShardUId, Store}; use strum::IntoEnumIterator; pub(crate) fn open_rocksdb( @@ -21,6 +22,30 @@ pub(crate) fn open_rocksdb( Ok(rocksdb) } +pub(crate) fn open_state_snapshot(home: &Path, mode: near_store::Mode) -> anyhow::Result { + let config = nearcore::config::Config::from_file_skip_validation( + &home.join(nearcore::config::CONFIG_FILENAME), + )?; + let store_config = &config.store; + let db_path = store_config.path.as_ref().cloned().unwrap_or_else(|| home.join("data")); + + let state_snapshot_dir = db_path.join("state_snapshot"); + let snapshots: Result, _> = fs::read_dir(state_snapshot_dir)?.into_iter().collect(); + let snapshots = snapshots?; + let &[snapshot_dir] = &snapshots.as_slice() else { + return Err(anyhow!("found more than one snapshot")); + }; + + let path = snapshot_dir.path(); + println!("state snapshot path {path:?}"); + + let opener = NodeStorage::opener(&path, false, &store_config, None); + let storage = opener.open_in_mode(mode)?; + let store = storage.get_hot_store(); + + Ok(store) +} + pub(crate) fn resolve_column(col_name: &str) -> anyhow::Result { DBCol::iter() .filter(|db_col| <&str>::from(db_col) == col_name) From dbfb0df3a0b3ab26c0a2c41620129cadd2650c49 Mon Sep 17 00:00:00 2001 From: robin-near <111538878+robin-near@users.noreply.github.com> Date: Wed, 15 Nov 2023 14:20:23 -0800 Subject: [PATCH 12/30] [fork-network] Optimize amend-access-keys using in-memory tries. 
(#10176) https://near.zulipchat.com/#narrow/stream/297873-pagoda.2Fnode/topic/New.20betanet.20infra --- Cargo.lock | 1 + tools/fork-network/Cargo.toml | 1 + tools/fork-network/src/cli.rs | 70 ++++++---- .../src/single_shard_storage_mutator.rs | 120 +++++++++--------- tools/fork-network/src/storage_mutator.rs | 15 +-- 5 files changed, 115 insertions(+), 92 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8bb01fb394e..b50052585dc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3763,6 +3763,7 @@ dependencies = [ "anyhow", "chrono", "clap 4.2.4", + "hex", "near-chain", "near-chain-configs", "near-crypto", diff --git a/tools/fork-network/Cargo.toml b/tools/fork-network/Cargo.toml index c69b48c6294..cbb535d0dae 100644 --- a/tools/fork-network/Cargo.toml +++ b/tools/fork-network/Cargo.toml @@ -13,6 +13,7 @@ actix.workspace = true anyhow.workspace = true chrono.workspace = true clap.workspace = true +hex.workspace = true rayon.workspace = true serde.workspace = true serde_json.workspace = true diff --git a/tools/fork-network/src/cli.rs b/tools/fork-network/src/cli.rs index 3217f94575c..0ea52130688 100644 --- a/tools/fork-network/src/cli.rs +++ b/tools/fork-network/src/cli.rs @@ -1,6 +1,6 @@ use crate::single_shard_storage_mutator::SingleShardStorageMutator; use crate::storage_mutator::StorageMutator; -use near_chain::types::Tip; +use near_chain::types::{RuntimeAdapter, Tip}; use near_chain::{ChainStore, ChainStoreAccess}; use near_chain_configs::{Genesis, GenesisConfig, GenesisValidationMode}; use near_crypto::PublicKey; @@ -87,7 +87,7 @@ struct FinalizeCmd; #[derive(clap::Parser)] struct AmendAccessKeysCmd { - #[arg(short, long, default_value = "100000")] + #[arg(short, long, default_value = "2000000")] batch_size: u64, } @@ -121,7 +121,7 @@ struct Validator { } type MakeSingleShardStorageMutatorFn = - Arc anyhow::Result + Send + Sync>; + Arc anyhow::Result + Send + Sync>; impl ForkNetworkCommand { pub fn run( @@ -306,10 +306,11 @@ impl ForkNetworkCommand { home_dir: 
&Path, ) -> anyhow::Result> { // Open storage with migration + near_config.config.store.load_mem_tries_for_all_shards = true; let storage = open_storage(&home_dir, near_config).unwrap(); let store = storage.get_hot_store(); - let (prev_state_roots, prev_hash, epoch_id, _block_height) = + let (prev_state_roots, prev_hash, epoch_id, block_height) = self.get_state_roots_and_hash(store.clone())?; tracing::info!(?prev_state_roots, ?epoch_id, ?prev_hash); @@ -321,15 +322,11 @@ impl ForkNetworkCommand { .collect(); let runtime = NightshadeRuntime::from_config(home_dir, store.clone(), &near_config, epoch_manager); + runtime.load_mem_tries_on_startup(&all_shard_uids).unwrap(); let make_storage_mutator: MakeSingleShardStorageMutatorFn = - Arc::new(move |shard_id, prev_state_root| { - SingleShardStorageMutator::new( - shard_id, - &runtime.clone(), - prev_hash, - prev_state_root, - ) + Arc::new(move |prev_state_root| { + SingleShardStorageMutator::new(&runtime.clone(), prev_state_root) }); let new_state_roots = self.prepare_state( @@ -337,6 +334,7 @@ impl ForkNetworkCommand { &all_shard_uids, store, &prev_state_roots, + block_height, make_storage_mutator.clone(), )?; Ok(new_state_roots) @@ -358,7 +356,7 @@ impl ForkNetworkCommand { let storage = open_storage(&home_dir, near_config).unwrap(); let store = storage.get_hot_store(); - let (prev_state_roots, prev_hash, epoch_id, block_height) = + let (prev_state_roots, _prev_hash, epoch_id, block_height) = self.get_state_roots_and_hash(store.clone())?; let epoch_manager = @@ -374,7 +372,6 @@ impl ForkNetworkCommand { epoch_manager.clone(), &runtime, epoch_id.clone(), - prev_hash, prev_state_roots, )?; let (new_state_roots, new_validator_accounts) = @@ -477,13 +474,12 @@ impl ForkNetworkCommand { shard_uid: ShardUId, store: Store, prev_state_root: StateRoot, + block_height: BlockHeight, make_storage_mutator: MakeSingleShardStorageMutatorFn, ) -> anyhow::Result { // Doesn't support secrets. 
tracing::info!(?shard_uid); - let shard_id = shard_uid.shard_id as ShardId; - let mut storage_mutator: SingleShardStorageMutator = - make_storage_mutator(shard_id, prev_state_root)?; + let mut storage_mutator: SingleShardStorageMutator = make_storage_mutator(prev_state_root)?; // Keeps track of accounts that have a full access key. let mut has_full_key = HashSet::new(); @@ -492,6 +488,7 @@ impl ForkNetworkCommand { // Iterate over the whole flat storage and do the necessary changes to have access to all accounts. let mut index_delayed_receipt = 0; + let mut ref_keys_retrieved = 0; let mut records_not_parsed = 0; let mut records_parsed = 0; let mut access_keys_updated = 0; @@ -501,9 +498,11 @@ impl ForkNetworkCommand { let mut postponed_receipts_updated = 0; let mut delayed_receipts_updated = 0; let mut received_data_updated = 0; + let mut fake_block_height = block_height + 1; for item in store_helper::iter_flat_state_entries(shard_uid, &store, None, None) { let (key, value) = match item { Ok((key, FlatStateValue::Ref(ref_value))) => { + ref_keys_retrieved += 1; (key, trie_storage.retrieve_raw_bytes(&ref_value.hash)?.to_vec()) } Ok((key, FlatStateValue::Inlined(value))) => (key, value), @@ -609,13 +608,27 @@ impl ForkNetworkCommand { records_not_parsed += 1; } if storage_mutator.should_commit(batch_size) { - let state_root = storage_mutator.commit(&shard_uid)?; - storage_mutator = make_storage_mutator(shard_id, state_root)?; + tracing::info!( + ?shard_uid, + ref_keys_retrieved, + records_parsed, + updated = access_keys_updated + + accounts_implicit_updated + + contract_data_updated + + contract_code_updated + + postponed_receipts_updated + + delayed_receipts_updated + + received_data_updated, + ); + let state_root = storage_mutator.commit(&shard_uid, fake_block_height)?; + fake_block_height += 1; + storage_mutator = make_storage_mutator(state_root)?; } } tracing::info!( ?shard_uid, + ref_keys_retrieved, records_parsed, records_not_parsed, accounts_implicit_updated, 
@@ -638,7 +651,17 @@ impl ForkNetworkCommand { if let Ok((key, _)) = item { if key[0] == col::ACCOUNT { num_accounts += 1; - let account_id = parse_account_id_from_account_key(&key).unwrap(); + let account_id = match parse_account_id_from_account_key(&key) { + Ok(account_id) => account_id, + Err(err) => { + tracing::error!( + ?err, + "Failed to parse account id {}", + hex::encode(&key) + ); + continue; + } + }; if has_full_key.contains(&account_id) { continue; } @@ -649,15 +672,16 @@ impl ForkNetworkCommand { )?; num_added += 1; if storage_mutator.should_commit(batch_size) { - let state_root = storage_mutator.commit(&shard_uid)?; - storage_mutator = make_storage_mutator(shard_id, state_root)?; + let state_root = storage_mutator.commit(&shard_uid, fake_block_height)?; + fake_block_height += 1; + storage_mutator = make_storage_mutator(state_root)?; } } } } tracing::info!(?shard_uid, num_accounts, num_added, "Pass 2 done"); - let state_root = storage_mutator.commit(&shard_uid)?; + let state_root = storage_mutator.commit(&shard_uid, fake_block_height)?; tracing::info!(?shard_uid, "Commit done"); Ok(state_root) @@ -669,6 +693,7 @@ impl ForkNetworkCommand { all_shard_uids: &[ShardUId], store: Store, prev_state_roots: &[StateRoot], + block_height: BlockHeight, make_storage_mutator: MakeSingleShardStorageMutatorFn, ) -> anyhow::Result> { let state_roots = all_shard_uids @@ -680,6 +705,7 @@ impl ForkNetworkCommand { *shard_uid, store.clone(), prev_state_roots[shard_uid.shard_id as usize], + block_height, make_storage_mutator.clone(), ) .unwrap(); diff --git a/tools/fork-network/src/single_shard_storage_mutator.rs b/tools/fork-network/src/single_shard_storage_mutator.rs index 01b1752a084..9c2368b3237 100644 --- a/tools/fork-network/src/single_shard_storage_mutator.rs +++ b/tools/fork-network/src/single_shard_storage_mutator.rs @@ -7,33 +7,31 @@ use near_primitives::receipt::Receipt; use near_primitives::shard_layout::ShardUId; use near_primitives::trie_key::TrieKey; use 
near_primitives::types::{AccountId, StateRoot}; -use near_primitives::types::{ShardId, StoreKey, StoreValue}; -use near_store::{flat::FlatStateChanges, DBCol, ShardTries, TrieUpdate}; +use near_primitives::types::{StoreKey, StoreValue}; +use near_store::{flat::FlatStateChanges, DBCol, ShardTries}; use nearcore::NightshadeRuntime; /// Object that updates the existing state. Combines all changes, commits them /// and returns new state roots. pub(crate) struct SingleShardStorageMutator { - trie_update: TrieUpdate, + updates: Vec<(Vec, Option>)>, + state_root: StateRoot, shard_tries: ShardTries, - num_changes: u64, } impl SingleShardStorageMutator { - pub(crate) fn new( - shard_id: ShardId, - runtime: &NightshadeRuntime, - prev_block_hash: CryptoHash, - state_root: StateRoot, - ) -> anyhow::Result { - let trie = runtime.get_trie_for_shard(shard_id, &prev_block_hash, state_root, false)?; - let trie_update = TrieUpdate::new(trie); - Ok(Self { trie_update, shard_tries: runtime.get_tries(), num_changes: 0 }) + pub(crate) fn new(runtime: &NightshadeRuntime, state_root: StateRoot) -> anyhow::Result { + Ok(Self { updates: Vec::new(), state_root, shard_tries: runtime.get_tries() }) } - fn trie_update(&mut self) -> &mut TrieUpdate { - self.num_changes += 1; - &mut self.trie_update + fn set(&mut self, key: TrieKey, value: Vec) -> anyhow::Result<()> { + self.updates.push((key.to_vec(), Some(value))); + Ok(()) + } + + fn remove(&mut self, key: TrieKey) -> anyhow::Result<()> { + self.updates.push((key.to_vec(), None)); + Ok(()) } pub(crate) fn set_account( @@ -41,13 +39,11 @@ impl SingleShardStorageMutator { account_id: AccountId, value: Account, ) -> anyhow::Result<()> { - self.trie_update().set(TrieKey::Account { account_id }, borsh::to_vec(&value)?); - Ok(()) + self.set(TrieKey::Account { account_id }, borsh::to_vec(&value)?) 
} pub(crate) fn delete_account(&mut self, account_id: AccountId) -> anyhow::Result<()> { - self.trie_update().remove(TrieKey::Account { account_id }); - Ok(()) + self.remove(TrieKey::Account { account_id }) } pub(crate) fn set_access_key( @@ -56,9 +52,7 @@ impl SingleShardStorageMutator { public_key: PublicKey, access_key: AccessKey, ) -> anyhow::Result<()> { - self.trie_update() - .set(TrieKey::AccessKey { account_id, public_key }, borsh::to_vec(&access_key)?); - Ok(()) + self.set(TrieKey::AccessKey { account_id, public_key }, borsh::to_vec(&access_key)?) } pub(crate) fn delete_access_key( @@ -66,8 +60,7 @@ impl SingleShardStorageMutator { account_id: AccountId, public_key: PublicKey, ) -> anyhow::Result<()> { - self.trie_update().remove(TrieKey::AccessKey { account_id, public_key }); - Ok(()) + self.remove(TrieKey::AccessKey { account_id, public_key }) } pub(crate) fn set_data( @@ -76,11 +69,10 @@ impl SingleShardStorageMutator { data_key: &StoreKey, value: StoreValue, ) -> anyhow::Result<()> { - self.trie_update().set( + self.set( TrieKey::ContractData { account_id, key: data_key.to_vec() }, borsh::to_vec(&value)?, - ); - Ok(()) + ) } pub(crate) fn delete_data( @@ -88,37 +80,32 @@ impl SingleShardStorageMutator { account_id: AccountId, data_key: &StoreKey, ) -> anyhow::Result<()> { - self.trie_update().remove(TrieKey::ContractData { account_id, key: data_key.to_vec() }); - Ok(()) + self.remove(TrieKey::ContractData { account_id, key: data_key.to_vec() }) } pub(crate) fn set_code(&mut self, account_id: AccountId, value: Vec) -> anyhow::Result<()> { - self.trie_update().set(TrieKey::ContractCode { account_id }, value); - Ok(()) + self.set(TrieKey::ContractCode { account_id }, value) } pub(crate) fn delete_code(&mut self, account_id: AccountId) -> anyhow::Result<()> { - self.trie_update().remove(TrieKey::ContractCode { account_id }); - Ok(()) + self.remove(TrieKey::ContractCode { account_id }) } pub(crate) fn set_postponed_receipt(&mut self, receipt: &Receipt) -> 
anyhow::Result<()> { - self.trie_update().set( + self.set( TrieKey::PostponedReceipt { receiver_id: receipt.receiver_id.clone(), receipt_id: receipt.receipt_id, }, borsh::to_vec(&receipt)?, - ); - Ok(()) + ) } pub(crate) fn delete_postponed_receipt(&mut self, receipt: Box) -> anyhow::Result<()> { - self.trie_update().remove(TrieKey::PostponedReceipt { + self.remove(TrieKey::PostponedReceipt { receiver_id: receipt.receiver_id, receipt_id: receipt.receipt_id, - }); - Ok(()) + }) } pub(crate) fn set_received_data( @@ -127,9 +114,7 @@ impl SingleShardStorageMutator { data_id: CryptoHash, data: &Option>, ) -> anyhow::Result<()> { - self.trie_update() - .set(TrieKey::ReceivedData { receiver_id: account_id, data_id }, borsh::to_vec(data)?); - Ok(()) + self.set(TrieKey::ReceivedData { receiver_id: account_id, data_id }, borsh::to_vec(data)?) } pub(crate) fn delete_received_data( @@ -137,8 +122,7 @@ impl SingleShardStorageMutator { account_id: AccountId, data_id: CryptoHash, ) -> anyhow::Result<()> { - self.trie_update().remove(TrieKey::ReceivedData { receiver_id: account_id, data_id }); - Ok(()) + self.remove(TrieKey::ReceivedData { receiver_id: account_id, data_id }) } pub(crate) fn set_delayed_receipt( @@ -146,28 +130,46 @@ impl SingleShardStorageMutator { index: u64, receipt: &Receipt, ) -> anyhow::Result<()> { - self.trie_update().set(TrieKey::DelayedReceipt { index }, borsh::to_vec(receipt)?); - Ok(()) + self.set(TrieKey::DelayedReceipt { index }, borsh::to_vec(receipt)?) 
} pub(crate) fn delete_delayed_receipt(&mut self, index: u64) -> anyhow::Result<()> { - self.trie_update().remove(TrieKey::DelayedReceipt { index }); - Ok(()) + self.remove(TrieKey::DelayedReceipt { index }) } pub(crate) fn should_commit(&self, batch_size: u64) -> bool { - self.num_changes >= batch_size - } - - pub(crate) fn commit(mut self, shard_uid: &ShardUId) -> anyhow::Result { - tracing::info!(?shard_uid, num_changes = ?self.num_changes, "commit"); + self.updates.len() >= batch_size as usize + } + + /// The fake block height is used to allow memtries to garbage collect. + /// Otherwise it would take significantly more memory holding old nodes. + pub(crate) fn commit( + self, + shard_uid: &ShardUId, + fake_block_height: u64, + ) -> anyhow::Result { + let num_updates = self.updates.len(); + tracing::info!(?shard_uid, num_updates, "commit"); let mut update = self.shard_tries.store_update(); - self.trie_update.commit(near_primitives::types::StateChangeCause::Migration); - let (_, trie_updates, raw_changes) = self.trie_update.finalize()?; - let state_root = self.shard_tries.apply_all(&trie_updates, *shard_uid, &mut update); - let flat_state_changes = FlatStateChanges::from_state_changes(&raw_changes); + let flat_state_changes = FlatStateChanges::from_raw_key_value(&self.updates); flat_state_changes.apply_to_flat_state(&mut update, *shard_uid); - tracing::info!(?shard_uid, num_changes = ?self.num_changes, "committing"); + + let trie_changes = self + .shard_tries + .get_trie_for_shard(*shard_uid, self.state_root) + .update(self.updates)?; + tracing::info!( + ?shard_uid, + num_trie_node_insertions = trie_changes.insertions().len(), + num_trie_node_deletions = trie_changes.deletions().len() + ); + let state_root = self.shard_tries.apply_all(&trie_changes, *shard_uid, &mut update); + self.shard_tries.apply_memtrie_changes(&trie_changes, *shard_uid, fake_block_height); + // We may not have loaded memtries (some commands don't need to), so check. 
+ if let Some(mem_tries) = self.shard_tries.get_mem_tries(*shard_uid) { + mem_tries.write().unwrap().delete_until_height(fake_block_height - 1); + } + tracing::info!(?shard_uid, num_updates, "committing"); update.set_ser( DBCol::Misc, format!("FORK_TOOL_SHARD_ID:{}", shard_uid.shard_id).as_bytes(), diff --git a/tools/fork-network/src/storage_mutator.rs b/tools/fork-network/src/storage_mutator.rs index 743aad4aa63..00f292bf8dc 100644 --- a/tools/fork-network/src/storage_mutator.rs +++ b/tools/fork-network/src/storage_mutator.rs @@ -2,8 +2,7 @@ use crate::single_shard_storage_mutator::SingleShardStorageMutator; use near_crypto::PublicKey; use near_epoch_manager::EpochManagerAdapter; use near_primitives::account::{AccessKey, Account}; -use near_primitives::hash::CryptoHash; -use near_primitives::types::{AccountId, EpochId, ShardId, StateRoot}; +use near_primitives::types::{AccountId, EpochId, StateRoot}; use nearcore::NightshadeRuntime; use std::sync::Arc; @@ -20,20 +19,14 @@ impl StorageMutator { epoch_manager: Arc, runtime: &NightshadeRuntime, epoch_id: EpochId, - prev_block_hash: CryptoHash, state_roots: Vec, ) -> anyhow::Result { let shard_layout = epoch_manager.get_shard_layout(&epoch_id)?; assert_eq!(shard_layout.num_shards(), state_roots.len() as u64); let mut mutators = vec![]; - for (shard_id, state_root) in state_roots.iter().enumerate() { - mutators.push(SingleShardStorageMutator::new( - shard_id as ShardId, - runtime, - prev_block_hash, - *state_root, - )?); + for state_root in state_roots { + mutators.push(SingleShardStorageMutator::new(runtime, state_root)?); } Ok(Self { epoch_manager, epoch_id, mutators }) } @@ -68,7 +61,7 @@ impl StorageMutator { let all_shard_uids = shard_layout.get_shard_uids(); let mut state_roots = vec![]; for (mutator, shard_uid) in self.mutators.into_iter().zip(all_shard_uids.into_iter()) { - let state_root = mutator.commit(&shard_uid)?; + let state_root = mutator.commit(&shard_uid, 0)?; state_roots.push(state_root); } 
Ok(state_roots) From ee9c6ea1735887da1b30411935112c7f0e86c769 Mon Sep 17 00:00:00 2001 From: Andrei <122784628+andrei-near@users.noreply.github.com> Date: Thu, 16 Nov 2023 12:24:46 +0000 Subject: [PATCH 13/30] Standalone dockerfile (#10186) In this PR I split the neard release WF into 2 jobs: 1 for binary and second for docker image. In addition, I make the Dockerfile standalone so it can function independent of the binary build job. test run [here](https://github.com/near/nearcore/actions/runs/6889456250/job/18740477267) --- ...ard_linux_binary.yml => neard_release.yml} | 25 +++++++++---- Dockerfile | 35 ++++++++++++++++++- 2 files changed, 53 insertions(+), 7 deletions(-) rename .github/workflows/{neard_linux_binary.yml => neard_release.yml} (77%) diff --git a/.github/workflows/neard_linux_binary.yml b/.github/workflows/neard_release.yml similarity index 77% rename from .github/workflows/neard_linux_binary.yml rename to .github/workflows/neard_release.yml index dd02dc990db..d4fb54c44e8 100644 --- a/.github/workflows/neard_linux_binary.yml +++ b/.github/workflows/neard_release.yml @@ -1,4 +1,4 @@ -name: Neard Linux binary and Docker image release +name: Neard binary and Docker image release on: # Run when a new release or rc is created @@ -39,8 +39,6 @@ jobs: - name: Checkout nearcore repository if: ${{ github.event_name != 'workflow_dispatch'}} uses: actions/checkout@v4 - with: - ref: ${{ github.ref_name }} - name: Neard binary build and upload to S3 run: ./scripts/binary_release.sh @@ -51,23 +49,38 @@ jobs: BRANCH=$(git branch --show-current) aws s3 cp --acl public-read latest s3://build.nearprotocol.com/nearcore/$(uname)/${BRANCH}/latest + docker-release: + name: "Build and publish nearcore Docker image" + runs-on: "ubuntu-20.04-16core" + environment: deploy + steps: + - name: Checkout ${{ github.event.inputs.branch }} branch + if: ${{ github.event_name == 'workflow_dispatch'}} + uses: actions/checkout@v4 + with: + ref: ${{ github.event.inputs.branch }} + + - 
name: Checkout nearcore repository + if: ${{ github.event_name != 'workflow_dispatch'}} + uses: actions/checkout@v4 + - name: Login to Docker Hub uses: docker/login-action@v3 with: username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKER_PAT_TOKEN }} - - name: Build and push Docker image to nearprotocol/nearcore + - name: Build and push Docker image to Dockerhub run: | COMMIT=$(git rev-parse HEAD) BRANCH=${{ github.ref_name }} - cp target/release/neard neard - docker build -t nearcore -f Dockerfile --progress=plain . + make docker-nearcore docker tag nearcore nearprotocol/nearcore:${BRANCH}-${COMMIT} docker tag nearcore nearprotocol/nearcore:${BRANCH} docker push nearprotocol/nearcore:${BRANCH}-${COMMIT} docker push nearprotocol/nearcore:${BRANCH} + if [[ ${BRANCH} == "master" ]]; then docker tag nearcore nearprotocol/nearcore:latest diff --git a/Dockerfile b/Dockerfile index df9e342468a..69ee815db26 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,3 +1,36 @@ +# syntax=docker/dockerfile-upstream:experimental + +FROM ubuntu:22.04 as build + +RUN apt-get update -qq && apt-get install -y \ + git \ + cmake \ + g++ \ + pkg-config \ + libssl-dev \ + curl \ + llvm \ + clang \ + && rm -rf /var/lib/apt/lists/* + +COPY ./rust-toolchain.toml /tmp/rust-toolchain.toml + +ENV RUSTUP_HOME=/usr/local/rustup \ + CARGO_HOME=/usr/local/cargo \ + PATH=/usr/local/cargo/bin:$PATH + +RUN curl https://sh.rustup.rs -sSf | \ + sh -s -- -y --no-modify-path --default-toolchain none + +VOLUME [ /near ] +WORKDIR /near +COPY . . 
+ +ENV PORTABLE=ON +ARG make_target= +RUN make CARGO_TARGET_DIR=/tmp/target \ + "${make_target:?make_target not set}" + # Docker image FROM ubuntu:22.04 @@ -8,6 +41,6 @@ RUN apt-get update -qq && apt-get install -y \ && rm -rf /var/lib/apt/lists/* COPY scripts/run_docker.sh /usr/local/bin/run.sh -COPY neard /usr/local/bin/ +COPY --from=build /tmp/target/release/neard /usr/local/bin/ CMD ["/usr/local/bin/run.sh"] From bfb3b586fb3c41c7c8ef0a834428ebcbd39b1f64 Mon Sep 17 00:00:00 2001 From: Jan Ciolek <149345204+jancionear@users.noreply.github.com> Date: Thu, 16 Nov 2023 16:21:13 +0100 Subject: [PATCH 14/30] feat: add a debug page with basic information about split store (#10182) Add a new debug page: `/debug/pages/split_store` This page provides basic information about the state of split store. An example report looks like this: ![image](https://github.com/near/nearcore/assets/149345204/2ec1cfc1-7e31-4a1d-a37c-e843fbf89c81) Fixes: https://github.com/near/nearcore/issues/9549 --- .../src/types/split_storage.rs | 15 +++++++++ chain/jsonrpc-primitives/src/types/status.rs | 4 ++- chain/jsonrpc/res/debug.html | 1 + chain/jsonrpc/res/split_store.html | 33 +++++++++++++++++++ chain/jsonrpc/src/lib.rs | 12 ++++++- 5 files changed, 63 insertions(+), 2 deletions(-) create mode 100644 chain/jsonrpc/res/split_store.html diff --git a/chain/jsonrpc-primitives/src/types/split_storage.rs b/chain/jsonrpc-primitives/src/types/split_storage.rs index 4aeed41a196..7399b00dba7 100644 --- a/chain/jsonrpc-primitives/src/types/split_storage.rs +++ b/chain/jsonrpc-primitives/src/types/split_storage.rs @@ -2,6 +2,8 @@ use near_primitives::views::SplitStorageInfoView; use serde::{Deserialize, Serialize}; use serde_json::Value; +use crate::types::status::RpcStatusError; + #[derive(Serialize, Deserialize, Debug)] pub struct RpcSplitStorageInfoRequest {} @@ -39,3 +41,16 @@ impl From for crate::errors::RpcError { Self::new_internal_or_handler_error(error_data, error_data_value) } } + +impl 
RpcSplitStorageInfoError { + // Implementing From for RpcStatusError causes cargo to spit out hundreds + // of lines of compilation errors. I don't want to spend time debugging this, so let's use this function instead. + // It's good enough. + pub fn into_rpc_status_error(self) -> RpcStatusError { + match self { + RpcSplitStorageInfoError::InternalError { error_message } => { + RpcStatusError::InternalError { error_message } + } + } + } +} diff --git a/chain/jsonrpc-primitives/src/types/status.rs b/chain/jsonrpc-primitives/src/types/status.rs index fe116a24aa5..c321da3f90c 100644 --- a/chain/jsonrpc-primitives/src/types/status.rs +++ b/chain/jsonrpc-primitives/src/types/status.rs @@ -5,7 +5,8 @@ use near_client_primitives::debug::{ #[cfg(feature = "debug_types")] use near_primitives::views::{ CatchupStatusView, ChainProcessingInfo, NetworkGraphView, NetworkRoutesView, PeerStoreView, - RecentOutboundConnectionsView, RequestedStatePartsView, SnapshotHostsView, SyncStatusView, + RecentOutboundConnectionsView, RequestedStatePartsView, SnapshotHostsView, + SplitStorageInfoView, SyncStatusView, }; #[derive(Debug, serde::Serialize, serde::Deserialize)] @@ -34,6 +35,7 @@ pub enum DebugStatusResponse { RecentOutboundConnections(RecentOutboundConnectionsView), Routes(NetworkRoutesView), SnapshotHosts(SnapshotHostsView), + SplitStoreStatus(SplitStorageInfoView), } #[cfg(feature = "debug_types")] diff --git a/chain/jsonrpc/res/debug.html b/chain/jsonrpc/res/debug.html index 48963b1883a..532c792606e 100644 --- a/chain/jsonrpc/res/debug.html +++ b/chain/jsonrpc/res/debug.html @@ -67,6 +67,7 @@

Chain & Chunk info

Sync info

Validator info

Client Config

+

Split Store

diff --git a/chain/jsonrpc/res/split_store.html b/chain/jsonrpc/res/split_store.html new file mode 100644 index 00000000000..2b671b33ddf --- /dev/null +++ b/chain/jsonrpc/res/split_store.html @@ -0,0 +1,33 @@ + + + + Split Store + + + +

+ Split Store +

+ +
    +
  • Head height:
  • +
  • Cold head height:
  • +
  • Final head height:
  • +
  • Hot db kind:
  • +
+ + + + + diff --git a/chain/jsonrpc/src/lib.rs b/chain/jsonrpc/src/lib.rs index f8d20bd96ba..9e1c60aa004 100644 --- a/chain/jsonrpc/src/lib.rs +++ b/chain/jsonrpc/src/lib.rs @@ -24,7 +24,9 @@ use near_jsonrpc_primitives::message::{Message, Request}; use near_jsonrpc_primitives::types::config::RpcProtocolConfigResponse; use near_jsonrpc_primitives::types::entity_debug::{EntityDebugHandler, EntityQuery}; use near_jsonrpc_primitives::types::query::RpcQueryRequest; -use near_jsonrpc_primitives::types::split_storage::RpcSplitStorageInfoResponse; +use near_jsonrpc_primitives::types::split_storage::{ + RpcSplitStorageInfoRequest, RpcSplitStorageInfoResponse, +}; use near_jsonrpc_primitives::types::transactions::{ RpcSendTransactionRequest, RpcTransactionResponse, }; @@ -771,6 +773,13 @@ impl JsonRpcHandler { .peer_manager_send(near_network::debug::GetDebugStatus::SnapshotHosts) .await? .rpc_into(), + "/debug/api/split_store_info" => { + let split_storage_info: RpcSplitStorageInfoResponse = self + .split_storage_info(RpcSplitStorageInfoRequest {}) + .await + .map_err(|e| e.into_rpc_status_error())?; + near_jsonrpc_primitives::types::status::DebugStatusResponse::SplitStoreStatus(split_storage_info.result) + } _ => return Ok(None), }; Ok(Some(near_jsonrpc_primitives::types::status::RpcDebugStatusResponse { @@ -1444,6 +1453,7 @@ async fn display_debug_html( "sync.css" => Some(debug_page_string!("sync.css", handler)), "validator" => Some(debug_page_string!("validator.html", handler)), "validator.css" => Some(debug_page_string!("validator.css", handler)), + "split_store" => Some(debug_page_string!("split_store.html", handler)), _ => None, }; From 9a7b03a689f28d724d19ef67b7d9dd5030200ee3 Mon Sep 17 00:00:00 2001 From: Jan Ciolek <149345204+jancionear@users.noreply.github.com> Date: Thu, 16 Nov 2023 16:28:14 +0100 Subject: [PATCH 15/30] feat: add error type label to the `near_num_invalid_blocks` metric (#10164) The metric `near_num_invalid_blocks` counts the number of invalid 
blocks processed by neard. Up until now there was no information why the blocks are invalid. Let's add a label that describes what kind of error caused the block to be invalid. This will make it easier to diagnose what's wrong when there are lots of invalid blocks. The label is called "error", an example prometheus report looks like this: ``` near_num_invalid_blocks{error="chunks_missing"} 1234 ``` Fixes: https://github.com/near/nearcore/issues/9661 --- chain/chain-primitives/src/error.rs | 64 +++++++++++++++++++++++++++++ chain/chain/src/chain.rs | 2 +- chain/chain/src/metrics.rs | 5 ++- 3 files changed, 68 insertions(+), 3 deletions(-) diff --git a/chain/chain-primitives/src/error.rs b/chain/chain-primitives/src/error.rs index dbcac2f4ee9..069ca56f5a2 100644 --- a/chain/chain-primitives/src/error.rs +++ b/chain/chain-primitives/src/error.rs @@ -304,6 +304,70 @@ impl Error { _ => false, } } + + /// Some blockchain errors are reported in the prometheus metrics. In such cases a report might + /// contain a label that specifies the type of error that has occurred. For example when the node + /// receives a block with an invalid signature this would be reported as: + /// `near_num_invalid_blocks{error="invalid_signature"}`. + /// This function returns the value of the error label for a specific instance of Error. 
+ pub fn prometheus_label_value(&self) -> &'static str { + match self { + Error::BlockKnown(_) => "block_known", + Error::TooManyProcessingBlocks => "too_many_processing_blocks", + Error::Orphan => "orphan", + Error::ChunkMissing(_) => "chunk_missing", + Error::ChunksMissing(_) => "chunks_missing", + Error::InvalidChunkHeight => "invalid_chunk_height", + Error::IOErr(_) => "io_err", + Error::Other(_) => "other", + Error::ValidatorError(_) => "validator_error", + Error::EpochOutOfBounds(_) => "epoch_out_of_bounds", + Error::ChallengedBlockOnChain => "challenged_block_on_chain", + Error::CannotBeFinalized => "cannot_be_finalized", + Error::StorageError(_) => "storage_error", + Error::GCError(_) => "gc_error", + Error::DBNotFoundErr(_) => "db_not_found_err", + Error::InvalidBlockPastTime(_, _) => "invalid_block_past_time", + Error::InvalidBlockFutureTime(_) => "invalid_block_future_time", + Error::InvalidBlockHeight(_) => "invalid_block_height", + Error::InvalidBlockProposer => "invalid_block_proposer", + Error::InvalidChunk => "invalid_chunk", + Error::InvalidChunkProofs(_) => "invalid_chunk_proofs", + Error::InvalidChunkState(_) => "invalid_chunk_state", + Error::InvalidChunkMask => "invalid_chunk_mask", + Error::InvalidStateRoot => "invalid_state_root", + Error::InvalidTxRoot => "invalid_tx_root", + Error::InvalidChunkReceiptsRoot => "invalid_chunk_receipts_root", + Error::InvalidOutcomesProof => "invalid_outcomes_proof", + Error::InvalidChunkHeadersRoot => "invalid_chunk_headers_root", + Error::InvalidChunkTxRoot => "invalid_chunk_tx_root", + Error::InvalidReceiptsProof => "invalid_receipts_proof", + Error::InvalidStatePayload => "invalid_state_payload", + Error::InvalidTransactions => "invalid_transactions", + Error::InvalidChallenge => "invalid_challenge", + Error::InvalidSplitShardsIds(_, _) => "invalid_split_shard_ids", + Error::MaliciousChallenge => "malicious_challenge", + Error::IncorrectNumberOfChunkHeaders => "incorrect_number_of_chunk_headers", + 
Error::InvalidEpochHash => "invalid_epoch_hash", + Error::InvalidNextBPHash => "invalid_next_bp_hash", + Error::NotEnoughApprovals => "not_enough_approvals", + Error::InvalidFinalityInfo => "invalid_finality_info", + Error::InvalidValidatorProposals => "invalid_validator_proposals", + Error::InvalidSignature => "invalid_signature", + Error::InvalidApprovals => "invalid_approvals", + Error::InvalidGasLimit => "invalid_gas_limit", + Error::InvalidGasPrice => "invalid_gas_price", + Error::InvalidGasUsed => "invalid_gas_used", + Error::InvalidBalanceBurnt => "invalid_balance_burnt", + Error::InvalidShardId(_) => "invalid_shard_id", + Error::InvalidStateRequest(_) => "invalid_state_request", + Error::InvalidRandomnessBeaconOutput => "invalid_randomness_beacon_output", + Error::InvalidBlockMerkleRoot => "invalid_block_merkele_root", + Error::InvalidProtocolVersion => "invalid_protocol_version", + Error::NotAValidator => "not_a_validator", + Error::InvalidChallengeRoot => "invalid_challenge_root", + } + } } impl From for Error { diff --git a/chain/chain/src/chain.rs b/chain/chain/src/chain.rs index f1c5d7157ec..e0b364a2142 100644 --- a/chain/chain/src/chain.rs +++ b/chain/chain/src/chain.rs @@ -1095,7 +1095,7 @@ impl Chain { } fn maybe_mark_block_invalid(&mut self, block_hash: CryptoHash, error: &Error) { - metrics::NUM_INVALID_BLOCKS.inc(); + metrics::NUM_INVALID_BLOCKS.with_label_values(&[error.prometheus_label_value()]).inc(); // We only mark the block as invalid if the block has bad data (not for other errors that would // not be the fault of the block itself), except when the block has a bad signature which means // the block might not have been what the block producer originally produced. 
Either way, it's diff --git a/chain/chain/src/metrics.rs b/chain/chain/src/metrics.rs index b3163bde6a8..724829e9e2e 100644 --- a/chain/chain/src/metrics.rs +++ b/chain/chain/src/metrics.rs @@ -102,8 +102,9 @@ pub static STATE_PART_ELAPSED: Lazy = Lazy::new(|| { ) .unwrap() }); -pub static NUM_INVALID_BLOCKS: Lazy = Lazy::new(|| { - try_create_int_gauge("near_num_invalid_blocks", "Number of invalid blocks").unwrap() +pub static NUM_INVALID_BLOCKS: Lazy = Lazy::new(|| { + try_create_int_gauge_vec("near_num_invalid_blocks", "Number of invalid blocks", &["error"]) + .unwrap() }); pub(crate) static SCHEDULED_CATCHUP_BLOCK: Lazy = Lazy::new(|| { try_create_int_gauge( From 811635f820d1636f82c3d15482f617c89eada996 Mon Sep 17 00:00:00 2001 From: ecp88 <109925246+ecp88@users.noreply.github.com> Date: Thu, 16 Nov 2023 12:07:54 -0600 Subject: [PATCH 16/30] [Snyk] Upgrade @types/react-dom from 18.2.12 to 18.2.14 (#10190) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit

This PR was automatically created by Snyk using the credentials of a real user.


Snyk has created this PR to upgrade @types/react-dom from 18.2.12 to 18.2.14.

:information_source: Keep your dependencies up-to-date. This makes it easier to fix existing vulnerabilities and to more quickly identify and fix newly disclosed vulnerabilities when they affect your project.
- The recommended version is **2 versions** ahead of your current version. - The recommended version was released **a month ago**, on 2023-10-18.
Release notes
Package name: @types/react-dom
  • 18.2.14 - 2023-10-18
  • 18.2.13 - 2023-10-10
  • 18.2.12 - 2023-10-09
from @types/react-dom GitHub release notes

**Note:** *You are seeing this because you or someone else with access to this repository has authorized Snyk to open upgrade PRs.* For more information: 🧐 [View latest project report](https://app.snyk.io/org/ecp88/project/98480bdc-d80b-4fd1-89d7-c4c56a706763?utm_source=github&utm_medium=referral&page=upgrade-pr) 🛠 [Adjust upgrade PR settings](https://app.snyk.io/org/ecp88/project/98480bdc-d80b-4fd1-89d7-c4c56a706763/settings/integration?utm_source=github&utm_medium=referral&page=upgrade-pr) 🔕 [Ignore this dependency or unsubscribe from future upgrade PRs](https://app.snyk.io/org/ecp88/project/98480bdc-d80b-4fd1-89d7-c4c56a706763/settings/integration?pkg=@types/react-dom&utm_source=github&utm_medium=referral&page=upgrade-pr#auto-dep-upgrades) Co-authored-by: snyk-bot --- tools/debug-ui/package-lock.json | 14 +++++++------- tools/debug-ui/package.json | 2 +- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/tools/debug-ui/package-lock.json b/tools/debug-ui/package-lock.json index 9e86a861be6..9605a0241e3 100644 --- a/tools/debug-ui/package-lock.json +++ b/tools/debug-ui/package-lock.json @@ -11,7 +11,7 @@ "@patternfly/react-log-viewer": "^4.87.101", "@types/node": "^16.18.3", "@types/react": "^18.2.25", - "@types/react-dom": "^18.2.12", + "@types/react-dom": "^18.2.14", "react": "^18.2.0", "react-dom": "^18.2.0", "react-query": "^3.39.3", @@ -3954,9 +3954,9 @@ } }, "node_modules/@types/react-dom": { - "version": "18.2.12", - "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-18.2.12.tgz", - "integrity": "sha512-QWZuiA/7J/hPIGocXreCRbx7wyoeet9ooxfbSA+zbIWqyQEE7GMtRn4A37BdYyksnN+/NDnWgfxZH9UVGDw1hg==", + "version": "18.2.14", + "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-18.2.14.tgz", + "integrity": "sha512-V835xgdSVmyQmI1KLV2BEIUgqEuinxp9O4G6g3FqO/SqLac049E53aysv0oEFD2kHfejeKU+ZqL2bcFWj9gLAQ==", "dependencies": { "@types/react": "*" } @@ -21011,9 +21011,9 @@ } }, "@types/react-dom": { - "version": "18.2.12", - 
"resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-18.2.12.tgz", - "integrity": "sha512-QWZuiA/7J/hPIGocXreCRbx7wyoeet9ooxfbSA+zbIWqyQEE7GMtRn4A37BdYyksnN+/NDnWgfxZH9UVGDw1hg==", + "version": "18.2.14", + "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-18.2.14.tgz", + "integrity": "sha512-V835xgdSVmyQmI1KLV2BEIUgqEuinxp9O4G6g3FqO/SqLac049E53aysv0oEFD2kHfejeKU+ZqL2bcFWj9gLAQ==", "requires": { "@types/react": "*" } diff --git a/tools/debug-ui/package.json b/tools/debug-ui/package.json index 92c4aa7e54f..a138c151ce9 100644 --- a/tools/debug-ui/package.json +++ b/tools/debug-ui/package.json @@ -6,7 +6,7 @@ "@patternfly/react-log-viewer": "^4.87.101", "@types/node": "^16.18.3", "@types/react": "^18.2.25", - "@types/react-dom": "^18.2.12", + "@types/react-dom": "^18.2.14", "react": "^18.2.0", "react-dom": "^18.2.0", "react-query": "^3.39.3", From e39b82a2fb31fc4c84100834bd83642225e8f21e Mon Sep 17 00:00:00 2001 From: Ekleog-NEAR <96595974+Ekleog-NEAR@users.noreply.github.com> Date: Fri, 17 Nov 2023 12:42:39 +0100 Subject: [PATCH 17/30] bump wasmtime to 14.0.4 (#10189) --- Cargo.lock | 348 +++++++++++++++++++++++++++++++++++------------------ Cargo.toml | 2 +- deny.toml | 26 ++-- 3 files changed, 240 insertions(+), 136 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b50052585dc..7e033e99a74 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -259,7 +259,16 @@ version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a76fd60b23679b7d19bd066031410fb7e458ccc5e958eb5c325888ce4baedc97" dependencies = [ - "gimli", + "gimli 0.27.2", +] + +[[package]] +name = "addr2line" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a30b2e23b9e17a9f90641c7ab1549cd9b44f296d3ccbf309d2863cfe398a0cb" +dependencies = [ + "gimli 0.28.0", ] [[package]] @@ -429,7 +438,7 @@ checksum = "0e97ce7de6cf12de5d7226c73f5ba9811622f4db3a5b91b55c53e987e5f91cba" dependencies = [ 
"proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.32", ] [[package]] @@ -451,7 +460,7 @@ checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.32", ] [[package]] @@ -462,7 +471,7 @@ checksum = "b9ccdd8f2a161be9bd5c023df56f1b2a0bd1d83872ae53b71a84a12c9bf6e842" dependencies = [ "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.32", ] [[package]] @@ -564,12 +573,12 @@ version = "0.3.67" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "233d376d6d185f2a3093e58f283f60f880315b6c60075b01f36b3b85154564ca" dependencies = [ - "addr2line", + "addr2line 0.19.0", "cc", "cfg-if 1.0.0", "libc", "miniz_oxide 0.6.2", - "object", + "object 0.30.4", "rustc-demangle", ] @@ -633,7 +642,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.15", + "syn 2.0.32", ] [[package]] @@ -644,9 +653,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.3.1" +version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6776fc96284a0bb647b615056fc496d1fe1644a7ab01829818a6d91cae888b84" +checksum = "327762f6e5a765692301e5bb513e0d9fef63be86bbc14528052b1cd3e6f03e07" [[package]] name = "bitmaps" @@ -850,7 +859,7 @@ dependencies = [ "proc-macro-crate 1.3.1", "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.32", "syn_derive", ] @@ -1162,7 +1171,7 @@ dependencies = [ "heck 0.4.0", "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.32", ] [[package]] @@ -1328,18 +1337,18 @@ dependencies = [ [[package]] name = "cranelift-bforest" -version = "0.96.3" +version = "0.101.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c064a534a914eb6709d198525321a386dad50627aecfaf64053f369993a3e5a" +checksum = "2b5bb9245ec7dcc04d03110e538d31f0969d301c9d673145f4b4d5c3478539a3" dependencies = [ "cranelift-entity", ] [[package]] name = "cranelift-codegen" 
-version = "0.96.3" +version = "0.101.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "619ed4d24acef0bd58b16a1be39077c0b36c65782e6c933892439af5e799110e" +checksum = "ebb18d10e5ddac43ba4ca8fd4e310938569c3e484cc01b6372b27dc5bb4dfd28" dependencies = [ "bumpalo", "cranelift-bforest", @@ -1348,8 +1357,8 @@ dependencies = [ "cranelift-control", "cranelift-entity", "cranelift-isle", - "gimli", - "hashbrown 0.13.2", + "gimli 0.28.0", + "hashbrown 0.14.0", "log", "regalloc2", "smallvec", @@ -1358,42 +1367,43 @@ dependencies = [ [[package]] name = "cranelift-codegen-meta" -version = "0.96.3" +version = "0.101.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c777ce22678ae1869f990b2f31e0cd7ca109049213bfc0baf3e2205a18b21ebb" +checksum = "7a3ce6d22982c1b9b6b012654258bab1a13947bb12703518bef06b1a4867c3d6" dependencies = [ "cranelift-codegen-shared", ] [[package]] name = "cranelift-codegen-shared" -version = "0.96.3" +version = "0.101.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb65884d17a1fa55990dd851c43c140afb4c06c3312cf42cfa1222c3b23f9561" +checksum = "47220fd4f9a0ce23541652b6f16f83868d282602c600d14934b2a4c166b4bd80" [[package]] name = "cranelift-control" -version = "0.96.3" +version = "0.101.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a0cea8abc90934d0a7ee189a29fd35fecd5c40f59ae7e6aab1805e8ab1a535e" +checksum = "ed5a4c42672aea9b6e820046b52e47a1c05d3394a6cdf4cb3c3c4b702f954bd2" dependencies = [ "arbitrary", ] [[package]] name = "cranelift-entity" -version = "0.96.3" +version = "0.101.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2e50bebc05f2401a1320169314b62f91ad811ef20163cac00151d78e0684d4c" +checksum = "0b4e9a3296fc827f9d35135dc2c0c8dd8d8359eb1ef904bae2d55d5bcb0c9f94" dependencies = [ "serde", + "serde_derive", ] [[package]] name = "cranelift-frontend" -version = "0.96.3" +version = "0.101.4" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b82ccfe704d53f669791399d417928410785132d809ec46f5e2ce069e9d17c8" +checksum = "33ec537d0f0b8e084517f3e7bfa1d89af343d7c7df455573fca9f272d4e01267" dependencies = [ "cranelift-codegen", "log", @@ -1403,15 +1413,15 @@ dependencies = [ [[package]] name = "cranelift-isle" -version = "0.96.3" +version = "0.101.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2515d8e7836f9198b160b2c80aaa1f586d7749d57d6065af86223fb65b7e2c3" +checksum = "45bab6d69919d210a50331d35cc6ce111567bc040aebac63a8ae130d0400a075" [[package]] name = "cranelift-native" -version = "0.96.3" +version = "0.101.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bcb47ffdcdac7e9fed6e4a618939773a4dc4a412fa7da9e701ae667431a10af3" +checksum = "f32e81605f352cf37af5463f11cd7deec7b6572741931a8d372f7fdd4a744f5d" dependencies = [ "cranelift-codegen", "libc", @@ -1420,9 +1430,9 @@ dependencies = [ [[package]] name = "cranelift-wasm" -version = "0.96.3" +version = "0.101.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "852390f92c3eaa457e42be44d174ff5abbbcd10062d5963bda8ffb2505e73a71" +checksum = "0edaa4cbec1bc787395c074233df2652dd62f3e29d3ee60329514a0a51e6b045" dependencies = [ "cranelift-codegen", "cranelift-entity", @@ -1430,7 +1440,7 @@ dependencies = [ "itertools", "log", "smallvec", - "wasmparser 0.103.0", + "wasmparser 0.115.0", "wasmtime-types", ] @@ -1681,7 +1691,7 @@ dependencies = [ "proc-macro2", "quote", "scratch", - "syn 2.0.15", + "syn 2.0.32", ] [[package]] @@ -1698,7 +1708,7 @@ checksum = "2345488264226bf682893e25de0769f3360aac9957980ec49361b083ddaa5bc5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.32", ] [[package]] @@ -1722,7 +1732,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.15", + "syn 2.0.32", ] [[package]] @@ -1733,7 +1743,7 @@ checksum = 
"29a358ff9f12ec09c3e61fef9b5a9902623a695a46a917b07f269bff1445611a" dependencies = [ "darling_core", "quote", - "syn 2.0.15", + "syn 2.0.32", ] [[package]] @@ -1764,7 +1774,7 @@ checksum = "53e0efad4403bfc52dc201159c4b842a246a14b98c64b55dfd0f2d89729dfeb8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.32", ] [[package]] @@ -2003,7 +2013,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.32", ] [[package]] @@ -2077,6 +2087,12 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4443176a9f2c162692bd3d352d745ef9413eec5782a80d8fd6f8a1ac692a07f7" +[[package]] +name = "fallible-iterator" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2acce4a10f12dc2fb14a218589d4f1f62ef011b2d0cc4b3cb1bba8e94da14649" + [[package]] name = "fallible-streaming-iterator" version = "0.1.9" @@ -2303,7 +2319,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "27d12c0aed7f1e24276a241aadc4cb8ea9f83000f34bc062b7cc2d51e3b0fabd" dependencies = [ - "bitflags 2.3.1", + "bitflags 2.4.1", "debugid", "fxhash", "serde", @@ -2392,9 +2408,15 @@ name = "gimli" version = "0.27.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ad0a93d233ebf96623465aad4046a8d3aa4da22d4f4beba5388838c8a434bbb4" + +[[package]] +name = "gimli" +version = "0.28.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6fb8d784f27acf97159b40fc4db5ecd8aa23b9ad5ef69cdd136d3bc80665f0c0" dependencies = [ - "fallible-iterator", - "indexmap 1.9.2", + "fallible-iterator 0.3.0", + "indexmap 2.0.0", "stable_deref_trait", ] @@ -2461,6 +2483,9 @@ name = "hashbrown" version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2c6201b9ff9fd90a5a3bac2e56a830d0caa509576f0e503818ee82c181b3437a" +dependencies = [ + "ahash 0.8.3", +] [[package]] name = "hashlink" @@ 
-2738,6 +2763,7 @@ checksum = "d5477fe2230a79769d8dc68e0eabf5437907c0457a5614a9e8dddb67f65eb65d" dependencies = [ "equivalent", "hashbrown 0.14.0", + "serde", ] [[package]] @@ -2960,9 +2986,9 @@ checksum = "884e2677b40cc8c339eaefcb701c32ef1fd2493d71118dc0ca4b6a736c93bd67" [[package]] name = "libc" -version = "0.2.146" +version = "0.2.150" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f92be4933c13fd498862a9e02a3055f8a8d9c039ce33db97306fd5a6caa7f29b" +checksum = "89d92a4743f9a61002fae18374ed11e7973f530cb3a3255fb354818118b2203c" [[package]] name = "libfuzzer-sys" @@ -3051,6 +3077,12 @@ version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" +[[package]] +name = "linux-raw-sys" +version = "0.4.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "969488b55f8ac402214f3f5fd243ebb7206cf82de60d3172994707a4bcc2b829" + [[package]] name = "local-channel" version = "0.1.3" @@ -3249,6 +3281,15 @@ dependencies = [ "autocfg", ] +[[package]] +name = "memoffset" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a634b1c61a95585bd15607c6ab0c4e5b226e695ff2800ba0cdccddf208c406c" +dependencies = [ + "autocfg", +] + [[package]] name = "mime" version = "0.3.16" @@ -4083,7 +4124,7 @@ name = "near-performance-metrics-macros" version = "0.0.0" dependencies = [ "quote", - "syn 2.0.15", + "syn 2.0.32", ] [[package]] @@ -4219,7 +4260,7 @@ dependencies = [ "quote", "serde", "serde_json", - "syn 2.0.15", + "syn 2.0.32", ] [[package]] @@ -4230,7 +4271,7 @@ dependencies = [ "near-rpc-error-core", "serde", "serde_json", - "syn 2.0.15", + "syn 2.0.32", ] [[package]] @@ -4420,7 +4461,7 @@ dependencies = [ "pretty_assertions", "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.32", "trybuild", ] @@ -4868,10 +4909,19 @@ name = "object" version = "0.30.4" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "03b4680b86d9cfafba8fc491dc9b6df26b68cf40e9e6cd73909194759a63c385" +dependencies = [ + "memchr", +] + +[[package]] +name = "object" +version = "0.32.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9cf5f9dd3933bd50a9e1f149ec995f39ae2c496d31fd772c1fd45ebc27e902b0" dependencies = [ "crc32fast", - "hashbrown 0.13.2", - "indexmap 1.9.2", + "hashbrown 0.14.0", + "indexmap 2.0.0", "memchr", ] @@ -4899,7 +4949,7 @@ version = "0.10.57" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bac25ee399abb46215765b1cb35bc0212377e58a061560d8b29b024fd0430e7c" dependencies = [ - "bitflags 2.3.1", + "bitflags 2.4.1", "cfg-if 1.0.0", "foreign-types", "libc", @@ -4916,7 +4966,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.32", ] [[package]] @@ -5342,7 +5392,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1ceca8aaf45b5c46ec7ed39fff75f57290368c1846d33d24a122ca81416ab058" dependencies = [ "proc-macro2", - "syn 2.0.15", + "syn 2.0.32", ] [[package]] @@ -5580,9 +5630,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.26" +version = "1.0.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4424af4bf778aae2051a77b60283332f386554255d722233d09fbfc7e30da2fc" +checksum = "5907a1b7c277254a8b15170f6e7c97cfa60ee7872a3217663bb81151e48184bb" dependencies = [ "proc-macro2", ] @@ -5766,9 +5816,9 @@ dependencies = [ [[package]] name = "regalloc2" -version = "0.8.1" +version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4a52e724646c6c0800fc456ec43b4165d2f91fba88ceaca06d9e0b400023478" +checksum = "ad156d539c879b7a24a363a2016d77961786e71f48f2e2fc8302a92abd2429a6" dependencies = [ "hashbrown 0.13.2", "log", @@ -6034,9 +6084,9 @@ version = "0.29.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "549b9d036d571d42e6e85d1c1425e2ac83491075078ca9a15be021c56b1641f2" dependencies = [ - "bitflags 2.3.1", + "bitflags 2.4.1", "chrono", - "fallible-iterator", + "fallible-iterator 0.2.0", "fallible-streaming-iterator", "hashlink", "libsqlite3-sys", @@ -6149,6 +6199,19 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "rustix" +version = "0.38.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ad981d6c340a49cdc40a1028d9c6084ec7e9fa33fcb839cab656a267071e234" +dependencies = [ + "bitflags 2.4.1", + "errno 0.3.1", + "libc", + "linux-raw-sys 0.4.11", + "windows-sys 0.48.0", +] + [[package]] name = "rustversion" version = "1.0.6" @@ -6285,9 +6348,9 @@ checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" [[package]] name = "serde" -version = "1.0.160" +version = "1.0.192" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb2f3770c8bce3bcda7e149193a069a0f4365bda1fa5cd88e03bca26afc1216c" +checksum = "bca2a08484b285dcb282d0f67b26cadc0df8b19f8c12502c13d966bf9482f001" dependencies = [ "serde_derive", ] @@ -6335,13 +6398,13 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.160" +version = "1.0.192" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "291a097c63d8497e00160b166a967a4a79c64f3facdd01cbd7502231688d77df" +checksum = "d6c7207fbec9faa48073f3e3074cbe553af6ea512d7c21ba46e434e70ea9fbc1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.32", ] [[package]] @@ -6413,7 +6476,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.32", ] [[package]] @@ -6668,6 +6731,12 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" +[[package]] +name = "sptr" +version = "0.3.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b9b39299b249ad65f3b7e96443bad61c02ca5cd3589f46cb6d610a0fd6c0d6a" + [[package]] name = "stable_deref_trait" version = "1.2.0" @@ -6826,9 +6895,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.15" +version = "2.0.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a34fcf3e8b60f57e6a14301a2e916d323af98b0ea63c599441eec8558660c822" +checksum = "239814284fd6f1a4ffe4ca893952cdd93c224b6a1571c9a9eadd670295c0c9e2" dependencies = [ "proc-macro2", "quote", @@ -6844,7 +6913,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.32", ] [[package]] @@ -6961,22 +7030,22 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.32" +version = "1.0.50" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5f6586b7f764adc0231f4c79be7b920e766bb2f3e51b3661cdb263828f19994" +checksum = "f9a7210f5c9a7156bb50aa36aed4c95afb51df0df00713949448cf9e97d382d2" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.32" +version = "1.0.50" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12bafc5b54507e0149cdf1b145a5d80ab80a90bcd9275df43d4fff68460f6c21" +checksum = "266b2e40bc00e5a6c09c3584011e08b06f123c00362c92b975ba9843aaaa14b8" dependencies = [ "proc-macro2", "quote", - "syn 1.0.103", + "syn 2.0.32", ] [[package]] @@ -7110,7 +7179,7 @@ checksum = "630bdcf245f78637c13ec01ffae6187cca34625e8c63150d424b59e55af2675e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.15", + "syn 2.0.32", ] [[package]] @@ -7708,6 +7777,15 @@ dependencies = [ "leb128", ] +[[package]] +name = "wasm-encoder" +version = "0.35.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ca90ba1b5b0a70d3d49473c5579951f3bddc78d47b59256d2f9d4922b150aca" +dependencies = [ + "leb128", +] + [[package]] name = "wasm-smith" version = "0.10.0" @@ -7936,9 +8014,9 @@ 
dependencies = [ [[package]] name = "wasmparser" -version = "0.103.0" +version = "0.105.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c437373cac5ea84f1113d648d51f71751ffbe3d90c00ae67618cf20d0b5ee7b" +checksum = "83be9e0b3f9570dc1979a33ae7b89d032c73211564232b99976553e5c155ec32" dependencies = [ "indexmap 1.9.2", "url", @@ -7946,12 +8024,12 @@ dependencies = [ [[package]] name = "wasmparser" -version = "0.105.0" +version = "0.115.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83be9e0b3f9570dc1979a33ae7b89d032c73211564232b99976553e5c155ec32" +checksum = "e06c0641a4add879ba71ccb3a1e4278fd546f76f1eafb21d8f7b07733b547cd5" dependencies = [ - "indexmap 1.9.2", - "url", + "indexmap 2.0.0", + "semver 1.0.9", ] [[package]] @@ -7966,26 +8044,28 @@ dependencies = [ [[package]] name = "wasmtime" -version = "9.0.3" +version = "14.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa0f72886c3264eb639f50188d1eb98b975564130292fea8deb4facf91ca7258" +checksum = "ca54f6090ce46973f33a79f265924b204f248f91aec09229bce53d19d567c1a6" dependencies = [ "anyhow", "bincode", "bumpalo", "cfg-if 1.0.0", "fxprof-processed-profile", - "indexmap 1.9.2", + "indexmap 2.0.0", "libc", "log", - "object", + "object 0.32.1", "once_cell", "paste", "psm", "serde", + "serde_derive", "serde_json", "target-lexicon 0.12.3", - "wasmparser 0.103.0", + "wasm-encoder 0.35.0", + "wasmparser 0.115.0", "wasmtime-cranelift", "wasmtime-environ", "wasmtime-jit", @@ -7995,87 +8075,92 @@ dependencies = [ [[package]] name = "wasmtime-asm-macros" -version = "9.0.3" +version = "14.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a18391ed41ca957eecdbe64c51879b75419cbc52e2d8663fe82945b28b4f19da" +checksum = "54984bc0b5689da87a43d7c181d23092b4d5cfcbb7ae3eb6b917dd55865d95e6" dependencies = [ "cfg-if 1.0.0", ] [[package]] name = "wasmtime-cranelift" -version = "9.0.3" +version = "14.0.4" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2495036d05eb1e79ecf22e092eeacd279dcf24b4fcab77fb4cf8ef9bd42c3ea" +checksum = "1cf3cee8be02f5006d21b773ffd6802f96a0b7d661ff2ad8a01fb93df458b1aa" dependencies = [ "anyhow", + "cfg-if 1.0.0", "cranelift-codegen", "cranelift-control", "cranelift-entity", "cranelift-frontend", "cranelift-native", "cranelift-wasm", - "gimli", + "gimli 0.28.0", "log", - "object", + "object 0.32.1", "target-lexicon 0.12.3", "thiserror", - "wasmparser 0.103.0", + "wasmparser 0.115.0", "wasmtime-cranelift-shared", "wasmtime-environ", + "wasmtime-versioned-export-macros", ] [[package]] name = "wasmtime-cranelift-shared" -version = "9.0.3" +version = "14.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef677f7b0d3f3b73275675486d791f1e85e7c24afe8dd367c6b9950028906330" +checksum = "420fd2a69bc162957f4c94f21c7fa08ecf60d916f4e87b56332507c555da381d" dependencies = [ "anyhow", "cranelift-codegen", "cranelift-control", "cranelift-native", - "gimli", - "object", + "gimli 0.28.0", + "object 0.32.1", "target-lexicon 0.12.3", "wasmtime-environ", ] [[package]] name = "wasmtime-environ" -version = "9.0.3" +version = "14.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d03356374ffafa881c5f972529d2bb11ce48fe2736285e2b0ad72c6d554257b" +checksum = "fb6a445ce2b2810127caee6c1b79b8da4ae57712b05556a674592c18b7500a14" dependencies = [ "anyhow", "cranelift-entity", - "gimli", - "indexmap 1.9.2", + "gimli 0.28.0", + "indexmap 2.0.0", "log", - "object", + "object 0.32.1", "serde", + "serde_derive", "target-lexicon 0.12.3", "thiserror", - "wasmparser 0.103.0", + "wasmparser 0.115.0", "wasmtime-types", ] [[package]] name = "wasmtime-jit" -version = "9.0.3" +version = "14.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5374f0d2ee0069391dd9348f148802846b2b3e0af650385f9c56b3012d3c5d1" +checksum = 
"1f0f6586c61125fbfc13c3108c3dd565d21f314dd5bac823b9a5b7ab576d21f1" dependencies = [ - "addr2line", + "addr2line 0.21.0", "anyhow", "bincode", "cfg-if 1.0.0", "cpp_demangle", - "gimli", + "gimli 0.28.0", "log", - "object", + "object 0.32.1", "rustc-demangle", + "rustix 0.38.24", "serde", + "serde_derive", "target-lexicon 0.12.3", "wasmtime-environ", "wasmtime-jit-icache-coherence", @@ -8085,18 +8170,19 @@ dependencies = [ [[package]] name = "wasmtime-jit-debug" -version = "9.0.3" +version = "14.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "102653b177225bfdd2da41cc385965d4bf6bc10cf14ec7b306bc9b015fb01c22" +checksum = "109a9e46afe33580b952b14a4207354355f19bcdf0b47485b397b68409eaf553" dependencies = [ "once_cell", + "wasmtime-versioned-export-macros", ] [[package]] name = "wasmtime-jit-icache-coherence" -version = "9.0.3" +version = "14.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "374ff63b3eb41db57c56682a9ef7737d2c9efa801f5dbf9da93941c9dd436a06" +checksum = "f67e6be36375c39cff57ed3b137ab691afbf2d9ba8ee1c01f77888413f218749" dependencies = [ "cfg-if 1.0.0", "libc", @@ -8105,40 +8191,62 @@ dependencies = [ [[package]] name = "wasmtime-runtime" -version = "9.0.3" +version = "14.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b1b832f19099066ebd26e683121d331f12cf98f158eac0f889972854413b46f" +checksum = "1d07986b2327b5e7f535ed638fbde25990fc8f85400194fda0d26db71c7b685e" dependencies = [ "anyhow", "cc", "cfg-if 1.0.0", - "indexmap 1.9.2", + "indexmap 2.0.0", "libc", "log", "mach", "memfd", - "memoffset 0.8.0", + "memoffset 0.9.0", "paste", "rand 0.8.5", - "rustix 0.37.20", + "rustix 0.38.24", + "sptr", + "wasm-encoder 0.35.0", "wasmtime-asm-macros", "wasmtime-environ", "wasmtime-jit-debug", + "wasmtime-versioned-export-macros", + "wasmtime-wmemcheck", "windows-sys 0.48.0", ] [[package]] name = "wasmtime-types" -version = "9.0.3" +version = "14.0.4" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c574221440e05bbb04efa09786d049401be2eb10081ecf43eb72fbd637bd12f" +checksum = "e810a0d2e869abd1cb42bd232990f6bd211672b3d202d2ae7e70ffb97ed70ea3" dependencies = [ "cranelift-entity", "serde", + "serde_derive", "thiserror", - "wasmparser 0.103.0", + "wasmparser 0.115.0", ] +[[package]] +name = "wasmtime-versioned-export-macros" +version = "14.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09b5575a75e711ca6c36bb9ad647c93541cdc8e34218031acba5da3f35919dd3" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.32", +] + +[[package]] +name = "wasmtime-wmemcheck" +version = "14.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9dafab2db172a53e23940e0fa3078c202f567ee5f13f4b42f66b694fab43c658" + [[package]] name = "wast" version = "40.0.0" diff --git a/Cargo.toml b/Cargo.toml index 683aa306db9..45e2829995a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -338,7 +338,7 @@ wasmer-vm = { package = "wasmer-vm-near", version = "=2.4.1" } wasmparser = "0.78" # TODO: unify at least the versions of wasmparser we have in our codebase wasmprinter = "0.2" wasm-smith = "0.10" -wasmtime = { version = "9.0.3", default-features = false, features = ["cranelift"] } +wasmtime = { version = "14.0.4", default-features = false, features = ["cranelift"] } wast = "40.0" wat = "1.0.40" webrtc-util = "0.7" diff --git a/deny.toml b/deny.toml index 7283057c457..bb49890cb27 100644 --- a/deny.toml +++ b/deny.toml @@ -37,12 +37,6 @@ deny = [ ] skip = [ - { name = "clap", version = "=2.34.0" }, - # criterion uses clap=2.34.0 which relies on an older textwrap - { name = "textwrap", version = "=0.11.0" }, - - # wasmer 0.17 and wasmtime 0.17 use conflicting versions of those - { name = "wasmparser", version = "=0.51.4" }, # wasmer 0.17 and wasmtime 0.17 uses older versions of some crates { name = "generic-array", version = "=0.12.4" }, @@ -54,8 +48,6 @@ skip = [ { name = 
"digest", version = "=0.9.0" }, { name = "sha2", version = "=0.9.9" }, - # near-vm-runner and wasmer-compiler-near use 0.78.2 - { name = "wasmparser", version = "=0.78.2" }, # Need this specific version of pwasm-utils for backwards-compatible # stack limiting. { name = "pwasm-utils", version = "=0.12.0" }, @@ -66,12 +58,18 @@ skip = [ # wasmer and wasmtime { name = "target-lexicon", version = "=0.10.0" }, + { name = "wasmparser", version = "=0.51.4" }, + { name = "wasmparser", version = "=0.78.2" }, { name = "wasmparser", version = "=0.84.0" }, { name = "wasmparser", version = "=0.99.0" }, - - # wasmtime v9 - { name = "wasmparser", version = "=0.103.0" }, - + { name = "wasmparser", version = "=0.105.0" }, + { name = "wasm-encoder", version = "=0.27.0" }, + { name = "object", version = "=0.30.4" }, + { name = "memoffset", version = "=0.6.5" }, + { name = "memoffset", version = "=0.8.0" }, + { name = "linux-raw-sys", version = "=0.3.8" }, + { name = "addr2line", version = "=0.19.0" }, + { name = "gimli", version = "=0.27.2" }, # wasmer 0.17.x { name = "parking_lot", version = "=0.10.2" }, @@ -93,6 +91,7 @@ skip = [ # old version of rustix, wasmtime, is-terminal, etc. { name = "rustix", version = "0.36.6" }, + { name = "rustix", version = "0.37.20" }, { name = "linux-raw-sys", version = "0.1.4" }, { name = "windows-sys", version = "=0.42.0" }, { name = "windows-sys", version = "=0.45.0" }, @@ -161,9 +160,6 @@ skip = [ # actix-http hasn't upgraded iminz_oxide/flate2 yet. 
{ name = "miniz_oxide", version = "=0.5.1" }, - # webrtc-util depends on old version of nix depends on old version of memoffset - { name = "memoffset", version = "=0.6.5" }, - # cloud-storage { name = "base64", version = "=0.12.3" }, { name = "num-bigint", version = "=0.2.6" }, From 3eff9716f10895bf80021f853ee56682b57e783f Mon Sep 17 00:00:00 2001 From: Shreyan Gupta Date: Fri, 17 Nov 2023 17:41:59 +0530 Subject: [PATCH 18/30] [garbage_collection] Fix crash in clear_resharding_data while fetching prev_epoch_id (#10195) We had an issue in the `clear_resharding_data` while fetching the `prev_epoch_id`. The implementation of `prev_epoch_id` relied on fetching the block_info of the last block of prev_epoch to get the epoch_id. This unfortunately failed for the case of GC as the block_info was already garbage collected. The new implementation here relies on using the block_header to get the epoch_id instead of block_info. This was unfortunately only caught in mocknet and not integration tests as having a small enough epoch_length led to the block_info being cached in the epoch_manager (even though it was GC'd) Zulip post: https://near.zulipchat.com/#narrow/stream/295558-pagoda.2Fcore/topic/Master.20binary.20Can't.20clear.20old.20data/near/402240517 Pending: Figure out a way to include test from this PR: https://github.com/near/nearcore/pull/10193 We would need to enable no_cache feature for the test. --- chain/chain/src/store.rs | 23 +++++++++++++++++++---- chain/chain/src/test_utils/kv_runtime.rs | 7 ------- chain/epoch-manager/src/adapter.rs | 7 ------- 3 files changed, 19 insertions(+), 18 deletions(-) diff --git a/chain/chain/src/store.rs b/chain/chain/src/store.rs index 9324c38d46b..3b433f316c9 100644 --- a/chain/chain/src/store.rs +++ b/chain/chain/src/store.rs @@ -2343,10 +2343,25 @@ impl<'a> ChainStoreUpdate<'a> { if !epoch_manager.is_next_block_epoch_start(&block_hash)?
{ return Ok(()); } - let epoch_id = epoch_manager.get_epoch_id(&block_hash)?; - let shard_layout = epoch_manager.get_shard_layout(&epoch_id)?; - let prev_epoch_id = epoch_manager.get_prev_epoch_id(&block_hash)?; - let prev_shard_layout = epoch_manager.get_shard_layout(&prev_epoch_id)?; + + // Since this code is related to GC, we need to be careful about accessing block_infos. Note + // that the BlockInfo exists for the current block_hash as it's not been GC'd yet. + // However, we need to use the block header to get the epoch_id and shard_layout for + // first_block_epoch_header and last_block_prev_epoch_hash as BlockInfo for these blocks is + // already GC'd while BlockHeader isn't GC'd. + let block_info = epoch_manager.get_block_info(&block_hash)?; + let first_block_epoch_hash = block_info.epoch_first_block(); + if first_block_epoch_hash == &CryptoHash::default() { + return Ok(()); + } + let first_block_epoch_header = self.get_block_header(first_block_epoch_hash)?; + let last_block_prev_epoch_header = + self.get_block_header(first_block_epoch_header.prev_hash())?; + + let epoch_id = first_block_epoch_header.epoch_id(); + let shard_layout = epoch_manager.get_shard_layout(epoch_id)?; + let prev_epoch_id = last_block_prev_epoch_header.epoch_id(); + let prev_shard_layout = epoch_manager.get_shard_layout(prev_epoch_id)?; if shard_layout == prev_shard_layout { return Ok(()); } diff --git a/chain/chain/src/test_utils/kv_runtime.rs b/chain/chain/src/test_utils/kv_runtime.rs index f6e2cc44355..e27a1bf7a16 100644 --- a/chain/chain/src/test_utils/kv_runtime.rs +++ b/chain/chain/src/test_utils/kv_runtime.rs @@ -615,13 +615,6 @@ impl EpochManagerAdapter for MockEpochManager { } } - fn get_prev_epoch_id(&self, block_hash: &CryptoHash) -> Result { - let header = self - .get_block_header(block_hash)? 
- .ok_or_else(|| EpochError::MissingBlock(*block_hash))?; - self.get_prev_epoch_id_from_prev_block(header.prev_hash()) - } - fn get_prev_epoch_id_from_prev_block( &self, prev_block_hash: &CryptoHash, diff --git a/chain/epoch-manager/src/adapter.rs b/chain/epoch-manager/src/adapter.rs index 42fe23e31e4..481c9787491 100644 --- a/chain/epoch-manager/src/adapter.rs +++ b/chain/epoch-manager/src/adapter.rs @@ -128,8 +128,6 @@ pub trait EpochManagerAdapter: Send + Sync { /// Get epoch start from a block belonging to the epoch. fn get_epoch_start_height(&self, block_hash: &CryptoHash) -> Result; - fn get_prev_epoch_id(&self, block_hash: &CryptoHash) -> Result; - /// Get previous epoch id by hash of previous block. fn get_prev_epoch_id_from_prev_block( &self, @@ -561,11 +559,6 @@ impl EpochManagerAdapter for EpochManagerHandle { epoch_manager.get_epoch_start_height(block_hash) } - fn get_prev_epoch_id(&self, block_hash: &CryptoHash) -> Result { - let epoch_manager = self.read(); - epoch_manager.get_prev_epoch_id(block_hash) - } - fn get_prev_epoch_id_from_prev_block( &self, prev_block_hash: &CryptoHash, From cac2d74f2d892978e76b79764928b939e349a413 Mon Sep 17 00:00:00 2001 From: wacban Date: Fri, 17 Nov 2023 12:17:40 +0000 Subject: [PATCH 19/30] feat(resharding): added state split statistics to the state stats tool (#10197) todo --- tools/state-viewer/src/commands.rs | 31 ++++++++++++++++++++++++------ 1 file changed, 25 insertions(+), 6 deletions(-) diff --git a/tools/state-viewer/src/commands.rs b/tools/state-viewer/src/commands.rs index 630c0069221..b9aa4a68be1 100644 --- a/tools/state-viewer/src/commands.rs +++ b/tools/state-viewer/src/commands.rs @@ -1107,11 +1107,13 @@ fn print_state_stats_for_shard_uid( let iter = get_state_stats_account_iter(&group_by); let mut current_size = ByteSize::default(); for state_stats_account in iter { - current_size += state_stats_account.size; - if 2 * current_size.as_u64() > state_stats.total_size.as_u64() { - 
state_stats.middle_state_record = Some(state_stats_account); + let new_size = current_size + state_stats_account.size; + if 2 * new_size.as_u64() > state_stats.total_size.as_u64() { + state_stats.middle_account = Some(state_stats_account); + state_stats.middle_account_leading_size = Some(current_size); break; } + current_size = new_size; } tracing::info!(target: "state_viewer", "{shard_uid:?}"); @@ -1209,7 +1211,12 @@ pub struct StateStats { pub total_size: ByteSize, pub total_count: usize, - pub middle_state_record: Option, + // The account that is in the middle of the state with respect to storage. + pub middle_account: Option, + // The total size of all accounts leading to the middle account. + // Can be used to determine how the middle account splits the state. + pub middle_account_leading_size: Option, + pub top_accounts: BinaryHeap, } @@ -1221,11 +1228,23 @@ impl core::fmt::Debug for StateStats { .checked_div(self.total_count as u64) .map(ByteSize::b) .unwrap_or_default(); + + let left_size = self.middle_account_leading_size.unwrap_or_default(); + let middle_size = self.middle_account.as_ref().map(|a| a.size).unwrap_or_default(); + let right_size = self.total_size.as_u64() - left_size.as_u64() - middle_size.as_u64(); + let right_size = ByteSize::b(right_size); + + let left_percent = 100 * left_size.as_u64() / self.total_size.as_u64(); + let middle_percent = 100 * middle_size.as_u64() / self.total_size.as_u64(); + let right_percent = 100 * right_size.as_u64() / self.total_size.as_u64(); + f.debug_struct("StateStats") .field("total_size", &self.total_size) .field("total_count", &self.total_count) .field("average_size", &average_size) - .field("middle_state_record", &self.middle_state_record.as_ref().unwrap()) + .field("middle_account", &self.middle_account.as_ref().unwrap()) + .field("split_size", &format!("{left_size:?} : {middle_size:?} : {right_size:?}")) + .field("split_percent", &format!("{left_percent}:{middle_percent}:{right_percent}"))
.field("top_accounts", &self.top_accounts) .finish() } @@ -1286,7 +1305,7 @@ impl PartialOrd for StateStatsAccount { impl std::fmt::Debug for StateStatsAccount { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("StateStatsStateRecord") + f.debug_struct("StateStatsAccount") .field("account_id", &self.account_id.as_str()) .field("size", &self.size) .finish() From 2534fdb386741a248cc453231a9ee15b86f5b273 Mon Sep 17 00:00:00 2001 From: Simonas Kazlauskas Date: Fri, 17 Nov 2023 12:25:55 +0000 Subject: [PATCH 20/30] toolchain: 1.73.0 -> 1.74.0 (#10188) I think discussion is warranted on whether we want to use the lint inheritance. The previous way was definitely quite a bit more terse, but it was also much less composable (e.g. it was less straightforward to exclude or include a specific crate from a specific lint) --- Cargo.toml | 14 ++++++++++- chain/chain-primitives/Cargo.toml | 3 ++- chain/chain/Cargo.toml | 3 +++ chain/chunks-primitives/Cargo.toml | 3 +++ chain/chunks/Cargo.toml | 3 +++ chain/client-primitives/Cargo.toml | 3 +++ chain/client/Cargo.toml | 3 +++ chain/epoch-manager/Cargo.toml | 3 +++ chain/indexer-primitives/Cargo.toml | 3 ++- chain/indexer/Cargo.toml | 3 +++ .../jsonrpc-adversarial-primitives/Cargo.toml | 3 +++ chain/jsonrpc-primitives/Cargo.toml | 3 +++ chain/jsonrpc/Cargo.toml | 3 +++ chain/jsonrpc/client/Cargo.toml | 3 +++ chain/jsonrpc/fuzz/Cargo.toml | 3 +++ chain/jsonrpc/jsonrpc-tests/Cargo.toml | 3 +++ chain/network/Cargo.toml | 3 +++ chain/pool/Cargo.toml | 3 +++ chain/rosetta-rpc/Cargo.toml | 3 +++ chain/telemetry/Cargo.toml | 3 +++ core/async/Cargo.toml | 3 +++ core/chain-configs/Cargo.toml | 3 +++ core/crypto/Cargo.toml | 3 +++ core/dyn-configs/Cargo.toml | 3 +++ core/o11y/Cargo.toml | 3 +++ core/primitives-core/Cargo.toml | 3 +++ core/primitives/Cargo.toml | 3 +++ core/store/Cargo.toml | 3 +++ genesis-tools/genesis-csv-to-json/Cargo.toml | 3 +++ genesis-tools/genesis-populate/Cargo.toml | 3 +++ 
genesis-tools/keypair-generator/Cargo.toml | 3 +++ integration-tests/Cargo.toml | 3 +++ .../src/tests/client/sync_state_nodes.rs | 3 +++ nearcore/Cargo.toml | 3 +++ neard/Cargo.toml | 3 +++ pytest/tests/loadtest/contract/Cargo.toml | 3 +++ runtime/near-test-contracts/Cargo.toml | 3 +++ runtime/near-vm-runner/Cargo.toml | 3 +++ runtime/near-vm-runner/fuzz/Cargo.toml | 3 +++ .../near-vm/compiler-singlepass/Cargo.toml | 3 +++ .../near-vm/compiler-test-derive/Cargo.toml | 3 +++ runtime/near-vm/compiler/Cargo.toml | 3 +++ runtime/near-vm/engine/Cargo.toml | 3 +++ runtime/near-vm/test-api/Cargo.toml | 3 +++ runtime/near-vm/test-generator/Cargo.toml | 3 +++ runtime/near-vm/types/Cargo.toml | 3 +++ runtime/near-vm/vm/Cargo.toml | 3 +++ runtime/near-vm/wast/Cargo.toml | 3 +++ runtime/runtime-params-estimator/Cargo.toml | 3 +++ .../emu-cost/Dockerfile | 2 +- .../estimator-warehouse/Cargo.toml | 3 +++ runtime/runtime/Cargo.toml | 3 +++ rust-toolchain.toml | 2 +- test-utils/actix-test-utils/Cargo.toml | 3 +++ test-utils/runtime-tester/Cargo.toml | 3 +++ test-utils/runtime-tester/fuzz/Cargo.toml | 3 +++ test-utils/store-validator/Cargo.toml | 3 +++ test-utils/style/Cargo.toml | 3 +++ test-utils/style/src/lib.rs | 12 +-------- test-utils/testlib/Cargo.toml | 3 +++ tools/amend-genesis/Cargo.toml | 3 +++ tools/chainsync-loadtest/Cargo.toml | 3 +++ tools/cold-store/Cargo.toml | 3 +++ tools/database/Cargo.toml | 3 +++ tools/flat-storage/Cargo.toml | 3 +++ tools/fork-network/Cargo.toml | 3 +++ tools/indexer/example/Cargo.toml | 3 +++ tools/mirror/Cargo.toml | 3 +++ tools/mock-node/Cargo.toml | 3 +++ tools/ping/Cargo.toml | 3 +++ tools/restaked/Cargo.toml | 3 +++ tools/rpctypegen/core/Cargo.toml | 3 +++ tools/rpctypegen/macro/Cargo.toml | 3 +++ tools/speedy_sync/Cargo.toml | 3 +++ tools/state-parts-dump-check/Cargo.toml | 3 ++- tools/state-parts/Cargo.toml | 3 +++ tools/state-viewer/Cargo.toml | 3 +++ .../storage-usage-delta-calculator/Cargo.toml | 3 +++ tools/themis/Cargo.toml | 3 
+++ tools/themis/src/main.rs | 1 + tools/themis/src/rules.rs | 25 +++++++++++++++++++ tools/undo-block/Cargo.toml | 3 +++ utils/config/Cargo.toml | 3 +++ utils/fmt/Cargo.toml | 3 +++ utils/mainnet-res/Cargo.toml | 3 +++ utils/near-cache/Cargo.toml | 3 +++ .../Cargo.toml | 3 +++ utils/near-performance-metrics/Cargo.toml | 3 +++ utils/near-stable-hasher/Cargo.toml | 3 +++ utils/stdx/Cargo.toml | 3 +++ 90 files changed, 291 insertions(+), 17 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 45e2829995a..58265670f24 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -2,7 +2,7 @@ version = "0.0.0" # managed by cargo-workspaces, see below authors = ["Near Inc "] edition = "2021" -rust-version = "1.73.0" +rust-version = "1.74.0" repository = "https://github.com/near/nearcore" license = "MIT OR Apache-2.0" @@ -88,6 +88,18 @@ members = [ "utils/stdx", ] +[workspace.lints.rust] +warnings = "deny" + +[workspace.lints.clippy] +all = { level = "allow", priority = -1 } +clone_on_copy = "deny" +correctness = "deny" +derivable_impls = "deny" +redundant_clone = "deny" +suspicious = "deny" +len_zero = "deny" + [workspace.dependencies] actix = "0.13.0" actix-cors = "0.6.1" diff --git a/chain/chain-primitives/Cargo.toml b/chain/chain-primitives/Cargo.toml index 659bf09b52e..cd86f5811c8 100644 --- a/chain/chain-primitives/Cargo.toml +++ b/chain/chain-primitives/Cargo.toml @@ -9,7 +9,8 @@ repository.workspace = true license.workspace = true publish = true -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html +[lints] +workspace = true [dependencies] chrono.workspace = true diff --git a/chain/chain/Cargo.toml b/chain/chain/Cargo.toml index e06c05c9762..6df4b42c01d 100644 --- a/chain/chain/Cargo.toml +++ b/chain/chain/Cargo.toml @@ -5,6 +5,9 @@ authors.workspace = true edition.workspace = true publish = false +[lints] +workspace = true + [dependencies] actix.workspace = true ansi_term.workspace = true diff --git 
a/chain/chunks-primitives/Cargo.toml b/chain/chunks-primitives/Cargo.toml index 4ece0a9a6f0..8b102524e01 100644 --- a/chain/chunks-primitives/Cargo.toml +++ b/chain/chunks-primitives/Cargo.toml @@ -9,6 +9,9 @@ repository.workspace = true license.workspace = true publish = true +[lints] +workspace = true + [dependencies] near-chain-primitives.workspace = true near-primitives.workspace = true diff --git a/chain/chunks/Cargo.toml b/chain/chunks/Cargo.toml index 1a611e79966..c49b903dc81 100644 --- a/chain/chunks/Cargo.toml +++ b/chain/chunks/Cargo.toml @@ -8,6 +8,9 @@ repository.workspace = true license.workspace = true publish = false +[lints] +workspace = true + [dependencies] actix.workspace = true borsh.workspace = true diff --git a/chain/client-primitives/Cargo.toml b/chain/client-primitives/Cargo.toml index 02f0e6b91b2..32984ddc037 100644 --- a/chain/client-primitives/Cargo.toml +++ b/chain/client-primitives/Cargo.toml @@ -9,6 +9,9 @@ repository.workspace = true license.workspace = true publish = true +[lints] +workspace = true + [dependencies] actix.workspace = true ansi_term.workspace = true diff --git a/chain/client/Cargo.toml b/chain/client/Cargo.toml index 9ebe33f88d9..9157816b6ed 100644 --- a/chain/client/Cargo.toml +++ b/chain/client/Cargo.toml @@ -8,6 +8,9 @@ repository.workspace = true license.workspace = true publish = false +[lints] +workspace = true + [dependencies] actix-rt.workspace = true actix.workspace = true diff --git a/chain/epoch-manager/Cargo.toml b/chain/epoch-manager/Cargo.toml index 670531bc892..9135ff25a36 100644 --- a/chain/epoch-manager/Cargo.toml +++ b/chain/epoch-manager/Cargo.toml @@ -8,6 +8,9 @@ repository.workspace = true license.workspace = true publish = false +[lints] +workspace = true + [dependencies] borsh.workspace = true chrono = { workspace = true, optional = true } diff --git a/chain/indexer-primitives/Cargo.toml b/chain/indexer-primitives/Cargo.toml index 75d0c383d3c..2b962a793bb 100644 --- 
a/chain/indexer-primitives/Cargo.toml +++ b/chain/indexer-primitives/Cargo.toml @@ -9,7 +9,8 @@ repository.workspace = true license.workspace = true publish = true -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html +[lints] +workspace = true [dependencies] serde.workspace = true diff --git a/chain/indexer/Cargo.toml b/chain/indexer/Cargo.toml index 1870f1388c6..7f3e9d4b333 100644 --- a/chain/indexer/Cargo.toml +++ b/chain/indexer/Cargo.toml @@ -8,6 +8,9 @@ repository.workspace = true license.workspace = true publish = false +[lints] +workspace = true + [dependencies] actix.workspace = true anyhow.workspace = true diff --git a/chain/jsonrpc-adversarial-primitives/Cargo.toml b/chain/jsonrpc-adversarial-primitives/Cargo.toml index 5d56f6d1fe6..ff5cb74ac79 100644 --- a/chain/jsonrpc-adversarial-primitives/Cargo.toml +++ b/chain/jsonrpc-adversarial-primitives/Cargo.toml @@ -8,6 +8,9 @@ repository.workspace = true license.workspace = true publish = false +[lints] +workspace = true + [dependencies] serde.workspace = true diff --git a/chain/jsonrpc-primitives/Cargo.toml b/chain/jsonrpc-primitives/Cargo.toml index b2501364d2e..0f1e5b2e423 100644 --- a/chain/jsonrpc-primitives/Cargo.toml +++ b/chain/jsonrpc-primitives/Cargo.toml @@ -9,6 +9,9 @@ repository.workspace = true license.workspace = true publish = true +[lints] +workspace = true + [dependencies] arbitrary.workspace = true serde.workspace = true diff --git a/chain/jsonrpc/Cargo.toml b/chain/jsonrpc/Cargo.toml index e802d10b7e3..15427e71e06 100644 --- a/chain/jsonrpc/Cargo.toml +++ b/chain/jsonrpc/Cargo.toml @@ -8,6 +8,9 @@ repository.workspace = true license.workspace = true publish = false +[lints] +workspace = true + [dependencies] actix-cors.workspace = true actix-web.workspace = true diff --git a/chain/jsonrpc/client/Cargo.toml b/chain/jsonrpc/client/Cargo.toml index 4cac0499a5d..eecf28c60f9 100644 --- a/chain/jsonrpc/client/Cargo.toml +++ 
b/chain/jsonrpc/client/Cargo.toml @@ -8,6 +8,9 @@ repository.workspace = true license.workspace = true publish = false +[lints] +workspace = true + [dependencies] actix-http.workspace = true awc.workspace = true diff --git a/chain/jsonrpc/fuzz/Cargo.toml b/chain/jsonrpc/fuzz/Cargo.toml index 15feb6a6b56..3645ef68abf 100644 --- a/chain/jsonrpc/fuzz/Cargo.toml +++ b/chain/jsonrpc/fuzz/Cargo.toml @@ -8,6 +8,9 @@ repository.workspace = true license.workspace = true publish = false +[lints] +workspace = true + [package.metadata] cargo-fuzz = true diff --git a/chain/jsonrpc/jsonrpc-tests/Cargo.toml b/chain/jsonrpc/jsonrpc-tests/Cargo.toml index c1be52958c4..fad79cb2470 100644 --- a/chain/jsonrpc/jsonrpc-tests/Cargo.toml +++ b/chain/jsonrpc/jsonrpc-tests/Cargo.toml @@ -8,6 +8,9 @@ repository.workspace = true license.workspace = true publish = false +[lints] +workspace = true + [dependencies] actix.workspace = true awc.workspace = true diff --git a/chain/network/Cargo.toml b/chain/network/Cargo.toml index 42800f8756b..a9fd0063e25 100644 --- a/chain/network/Cargo.toml +++ b/chain/network/Cargo.toml @@ -8,6 +8,9 @@ repository.workspace = true license.workspace = true publish = false +[lints] +workspace = true + [build-dependencies] anyhow.workspace = true protobuf-codegen.workspace = true diff --git a/chain/pool/Cargo.toml b/chain/pool/Cargo.toml index 77f2f52a4e9..117c4650361 100644 --- a/chain/pool/Cargo.toml +++ b/chain/pool/Cargo.toml @@ -8,6 +8,9 @@ repository.workspace = true license.workspace = true publish = false +[lints] +workspace = true + [dependencies] borsh.workspace = true once_cell.workspace = true diff --git a/chain/rosetta-rpc/Cargo.toml b/chain/rosetta-rpc/Cargo.toml index 331f06d5ac0..fde635c820b 100644 --- a/chain/rosetta-rpc/Cargo.toml +++ b/chain/rosetta-rpc/Cargo.toml @@ -8,6 +8,9 @@ repository.workspace = true license.workspace = true publish = false +[lints] +workspace = true + [dependencies] actix-cors.workspace = true actix-http.workspace = true 
diff --git a/chain/telemetry/Cargo.toml b/chain/telemetry/Cargo.toml index a86a1730d35..0ce6fc127d1 100644 --- a/chain/telemetry/Cargo.toml +++ b/chain/telemetry/Cargo.toml @@ -8,6 +8,9 @@ repository.workspace = true license.workspace = true publish = false +[lints] +workspace = true + [dependencies] actix.workspace = true awc.workspace = true diff --git a/core/async/Cargo.toml b/core/async/Cargo.toml index e28051a0b82..7734de74d40 100644 --- a/core/async/Cargo.toml +++ b/core/async/Cargo.toml @@ -9,6 +9,9 @@ repository.workspace = true license.workspace = true publish = false +[lints] +workspace = true + [dependencies] actix.workspace = true derive-enum-from-into.workspace = true diff --git a/core/chain-configs/Cargo.toml b/core/chain-configs/Cargo.toml index 1b32dd9c817..b61d88082bc 100644 --- a/core/chain-configs/Cargo.toml +++ b/core/chain-configs/Cargo.toml @@ -9,6 +9,9 @@ repository.workspace = true license.workspace = true publish = true +[lints] +workspace = true + [dependencies] anyhow.workspace = true bytesize.workspace = true diff --git a/core/crypto/Cargo.toml b/core/crypto/Cargo.toml index ae5ead4ddb9..ebb3dfa349f 100644 --- a/core/crypto/Cargo.toml +++ b/core/crypto/Cargo.toml @@ -9,6 +9,9 @@ repository.workspace = true license.workspace = true publish = true +[lints] +workspace = true + [dependencies] blake2.workspace = true borsh.workspace = true diff --git a/core/dyn-configs/Cargo.toml b/core/dyn-configs/Cargo.toml index 597e868e8fc..57c12dc3646 100644 --- a/core/dyn-configs/Cargo.toml +++ b/core/dyn-configs/Cargo.toml @@ -9,6 +9,9 @@ repository.workspace = true license.workspace = true publish = true +[lints] +workspace = true + [dependencies] anyhow.workspace = true once_cell.workspace = true diff --git a/core/o11y/Cargo.toml b/core/o11y/Cargo.toml index 907b5f2c5a6..585c0de3470 100644 --- a/core/o11y/Cargo.toml +++ b/core/o11y/Cargo.toml @@ -9,6 +9,9 @@ repository.workspace = true license.workspace = true publish = true +[lints] +workspace = 
true + [dependencies] near-crypto.workspace = true near-fmt.workspace = true diff --git a/core/primitives-core/Cargo.toml b/core/primitives-core/Cargo.toml index dc0a9875e57..860774b3ec9 100644 --- a/core/primitives-core/Cargo.toml +++ b/core/primitives-core/Cargo.toml @@ -9,6 +9,9 @@ repository.workspace = true license.workspace = true publish = true +[lints] +workspace = true + [dependencies] arbitrary.workspace = true base64.workspace = true diff --git a/core/primitives/Cargo.toml b/core/primitives/Cargo.toml index 8fb4761a712..c56527abde8 100644 --- a/core/primitives/Cargo.toml +++ b/core/primitives/Cargo.toml @@ -9,6 +9,9 @@ repository.workspace = true license.workspace = true publish = true +[lints] +workspace = true + [dependencies] arbitrary.workspace = true base64.workspace = true diff --git a/core/store/Cargo.toml b/core/store/Cargo.toml index 6401e51a708..a95e47a3f31 100644 --- a/core/store/Cargo.toml +++ b/core/store/Cargo.toml @@ -8,6 +8,9 @@ repository.workspace = true license.workspace = true publish = false +[lints] +workspace = true + [dependencies] actix-rt.workspace = true actix.workspace = true diff --git a/genesis-tools/genesis-csv-to-json/Cargo.toml b/genesis-tools/genesis-csv-to-json/Cargo.toml index 186c6e04bdf..68620e8656e 100644 --- a/genesis-tools/genesis-csv-to-json/Cargo.toml +++ b/genesis-tools/genesis-csv-to-json/Cargo.toml @@ -8,6 +8,9 @@ repository.workspace = true license.workspace = true publish = false +[lints] +workspace = true + [dependencies] chrono.workspace = true clap.workspace = true diff --git a/genesis-tools/genesis-populate/Cargo.toml b/genesis-tools/genesis-populate/Cargo.toml index 10903d9145d..e253eec3127 100644 --- a/genesis-tools/genesis-populate/Cargo.toml +++ b/genesis-tools/genesis-populate/Cargo.toml @@ -8,6 +8,9 @@ repository.workspace = true license.workspace = true publish = false +[lints] +workspace = true + [dependencies] borsh.workspace = true clap.workspace = true diff --git 
a/genesis-tools/keypair-generator/Cargo.toml b/genesis-tools/keypair-generator/Cargo.toml index 27f9ab7a109..bdab716f7b8 100644 --- a/genesis-tools/keypair-generator/Cargo.toml +++ b/genesis-tools/keypair-generator/Cargo.toml @@ -8,6 +8,9 @@ repository.workspace = true license.workspace = true publish = false +[lints] +workspace = true + [dependencies] clap.workspace = true diff --git a/integration-tests/Cargo.toml b/integration-tests/Cargo.toml index 49ebe01b5a1..7463a1af386 100644 --- a/integration-tests/Cargo.toml +++ b/integration-tests/Cargo.toml @@ -8,6 +8,9 @@ repository.workspace = true license.workspace = true publish = false +[lints] +workspace = true + [dependencies] actix-rt.workspace = true actix.workspace = true diff --git a/integration-tests/src/tests/client/sync_state_nodes.rs b/integration-tests/src/tests/client/sync_state_nodes.rs index af7aa5a213e..f822efe6331 100644 --- a/integration-tests/src/tests/client/sync_state_nodes.rs +++ b/integration-tests/src/tests/client/sync_state_nodes.rs @@ -413,6 +413,9 @@ fn sync_empty_state() { #[test] #[cfg_attr(not(feature = "expensive_tests"), ignore)] +// FIXME(#9650): locks should not be held across await points, allowed currently only because the +// lint started triggering during a toolchain bump. +#[allow(clippy::await_holding_lock)] /// Runs one node for some time, which dumps state to a temp directory. /// Start the second node which gets state parts from that temp directory. 
fn sync_state_dump() { diff --git a/nearcore/Cargo.toml b/nearcore/Cargo.toml index 87c5938a6ea..06d0e13d6b2 100644 --- a/nearcore/Cargo.toml +++ b/nearcore/Cargo.toml @@ -8,6 +8,9 @@ repository.workspace = true license.workspace = true publish = false +[lints] +workspace = true + [dependencies] actix-rt.workspace = true actix-web.workspace = true diff --git a/neard/Cargo.toml b/neard/Cargo.toml index c9e00bbbab7..a7616a679b1 100644 --- a/neard/Cargo.toml +++ b/neard/Cargo.toml @@ -8,6 +8,9 @@ repository.workspace = true license.workspace = true publish = false +[lints] +workspace = true + [[bin]] path = "src/main.rs" name = "neard" diff --git a/pytest/tests/loadtest/contract/Cargo.toml b/pytest/tests/loadtest/contract/Cargo.toml index 183bf7a3a38..c0e52d83616 100644 --- a/pytest/tests/loadtest/contract/Cargo.toml +++ b/pytest/tests/loadtest/contract/Cargo.toml @@ -4,6 +4,9 @@ version = "0.1.0" authors = ["Near Inc "] edition = "2018" +[lints] +workspace = true + [workspace] members = [] diff --git a/runtime/near-test-contracts/Cargo.toml b/runtime/near-test-contracts/Cargo.toml index 241ef60627f..c45d5b64fe3 100644 --- a/runtime/near-test-contracts/Cargo.toml +++ b/runtime/near-test-contracts/Cargo.toml @@ -9,6 +9,9 @@ repository.workspace = true license.workspace = true publish = false +[lints] +workspace = true + [dependencies] once_cell.workspace = true wat.workspace = true diff --git a/runtime/near-vm-runner/Cargo.toml b/runtime/near-vm-runner/Cargo.toml index 2798b6c68bd..edbe3494290 100644 --- a/runtime/near-vm-runner/Cargo.toml +++ b/runtime/near-vm-runner/Cargo.toml @@ -10,6 +10,9 @@ license.workspace = true categories = ["wasm"] publish = true +[lints] +workspace = true + [dependencies] anyhow = { workspace = true, optional = true } base64.workspace = true diff --git a/runtime/near-vm-runner/fuzz/Cargo.toml b/runtime/near-vm-runner/fuzz/Cargo.toml index 9127527c5dd..e7ce44a5d0d 100644 --- a/runtime/near-vm-runner/fuzz/Cargo.toml +++ 
b/runtime/near-vm-runner/fuzz/Cargo.toml @@ -11,6 +11,9 @@ publish = false [package.metadata] cargo-fuzz = true +[lints] +workspace = true + [dependencies] arbitrary.workspace = true libfuzzer-sys.workspace = true diff --git a/runtime/near-vm/compiler-singlepass/Cargo.toml b/runtime/near-vm/compiler-singlepass/Cargo.toml index a9e4cbdf10c..17c1a5e5ce1 100644 --- a/runtime/near-vm/compiler-singlepass/Cargo.toml +++ b/runtime/near-vm/compiler-singlepass/Cargo.toml @@ -12,6 +12,9 @@ edition = "2021" publish = true rust-version.workspace = true +[lints] +workspace = true + [dependencies] finite-wasm.workspace = true near-vm-compiler.workspace = true diff --git a/runtime/near-vm/compiler-test-derive/Cargo.toml b/runtime/near-vm/compiler-test-derive/Cargo.toml index e3bcabaadfd..c15ad76628c 100644 --- a/runtime/near-vm/compiler-test-derive/Cargo.toml +++ b/runtime/near-vm/compiler-test-derive/Cargo.toml @@ -12,6 +12,9 @@ keywords = ["unsafe", "body", "fn", "safety", "hygiene"] categories = ["rust-patterns"] publish = false +[lints] +workspace = true + [dependencies] proc-macro2.workspace = true quote.workspace = true diff --git a/runtime/near-vm/compiler/Cargo.toml b/runtime/near-vm/compiler/Cargo.toml index d7d501230b8..3f9d13a4689 100644 --- a/runtime/near-vm/compiler/Cargo.toml +++ b/runtime/near-vm/compiler/Cargo.toml @@ -12,6 +12,9 @@ edition = "2021" publish = true rust-version.workspace = true +[lints] +workspace = true + [dependencies] finite-wasm.workspace = true near-vm-vm.workspace = true diff --git a/runtime/near-vm/engine/Cargo.toml b/runtime/near-vm/engine/Cargo.toml index 3a346cfb1e5..27b727d3fe9 100644 --- a/runtime/near-vm/engine/Cargo.toml +++ b/runtime/near-vm/engine/Cargo.toml @@ -12,6 +12,9 @@ edition = "2021" publish = true rust-version.workspace = true +[lints] +workspace = true + [dependencies] backtrace.workspace = true enumset.workspace = true diff --git a/runtime/near-vm/test-api/Cargo.toml b/runtime/near-vm/test-api/Cargo.toml index 
e789bba7bb5..00a4b3cb92e 100644 --- a/runtime/near-vm/test-api/Cargo.toml +++ b/runtime/near-vm/test-api/Cargo.toml @@ -12,6 +12,9 @@ edition = "2021" publish = false rust-version.workspace = true +[lints] +workspace = true + # Shared dependencies. [dependencies] # - Mandatory shared dependencies. diff --git a/runtime/near-vm/test-generator/Cargo.toml b/runtime/near-vm/test-generator/Cargo.toml index 805ae8a9936..f03a27c7836 100644 --- a/runtime/near-vm/test-generator/Cargo.toml +++ b/runtime/near-vm/test-generator/Cargo.toml @@ -6,6 +6,9 @@ authors = ["Wasmer Engineering Team ", "Near Inc "] +[lints] +workspace = true + [dependencies] diff --git a/test-utils/style/src/lib.rs b/test-utils/style/src/lib.rs index fc3f93ed774..125f9fa16b0 100644 --- a/test-utils/style/src/lib.rs +++ b/test-utils/style/src/lib.rs @@ -50,17 +50,7 @@ fn clippy() { let cargo = std::env::var_os("CARGO").unwrap_or(OsString::from("cargo")); let mut cmd = Command::new(cargo); cargo_env(&mut cmd); - cmd.args(&["clippy", "--all-targets", "--all-features", "--locked", "--"]); - cmd.args(&[ - "-Aclippy::all", - "-Dwarnings", - "-Dclippy::clone_on_copy", - "-Dclippy::correctness", - "-Dclippy::derivable_impls", - "-Dclippy::redundant_clone", - "-Dclippy::suspicious", - "-Dclippy::len_zero", - ]); + cmd.args(&["clippy", "--all-targets", "--all-features", "--locked"]); ensure_success(cmd); } diff --git a/test-utils/testlib/Cargo.toml b/test-utils/testlib/Cargo.toml index 1ba085379f7..36be0ad0c5d 100644 --- a/test-utils/testlib/Cargo.toml +++ b/test-utils/testlib/Cargo.toml @@ -8,6 +8,9 @@ repository.workspace = true license.workspace = true publish = false +[lints] +workspace = true + [dependencies] once_cell.workspace = true diff --git a/tools/amend-genesis/Cargo.toml b/tools/amend-genesis/Cargo.toml index aaddc473d95..70d7b98aaef 100644 --- a/tools/amend-genesis/Cargo.toml +++ b/tools/amend-genesis/Cargo.toml @@ -8,6 +8,9 @@ repository.workspace = true license.workspace = true publish = false 
+[lints] +workspace = true + [dependencies] anyhow.workspace = true borsh.workspace = true diff --git a/tools/chainsync-loadtest/Cargo.toml b/tools/chainsync-loadtest/Cargo.toml index 3971db9766c..ff6351de154 100644 --- a/tools/chainsync-loadtest/Cargo.toml +++ b/tools/chainsync-loadtest/Cargo.toml @@ -8,6 +8,9 @@ repository.workspace = true license.workspace = true publish = false +[lints] +workspace = true + [[bin]] path = "src/main.rs" name = "chainsync-loadtest" diff --git a/tools/cold-store/Cargo.toml b/tools/cold-store/Cargo.toml index b09689d99f9..c1c35103958 100644 --- a/tools/cold-store/Cargo.toml +++ b/tools/cold-store/Cargo.toml @@ -8,6 +8,9 @@ repository.workspace = true license.workspace = true publish = false +[lints] +workspace = true + [dependencies] anyhow.workspace = true borsh.workspace = true diff --git a/tools/database/Cargo.toml b/tools/database/Cargo.toml index 9c9d9fc6fc9..90dcd4b537d 100644 --- a/tools/database/Cargo.toml +++ b/tools/database/Cargo.toml @@ -8,6 +8,9 @@ repository.workspace = true license.workspace = true publish = false +[lints] +workspace = true + [dependencies] anyhow.workspace = true borsh.workspace = true diff --git a/tools/flat-storage/Cargo.toml b/tools/flat-storage/Cargo.toml index 9bfeeb82e98..5853aaba4e3 100644 --- a/tools/flat-storage/Cargo.toml +++ b/tools/flat-storage/Cargo.toml @@ -8,6 +8,9 @@ repository.workspace = true license.workspace = true publish = false +[lints] +workspace = true + [dependencies] anyhow.workspace = true borsh.workspace = true diff --git a/tools/fork-network/Cargo.toml b/tools/fork-network/Cargo.toml index cbb535d0dae..1f7a3efaea2 100644 --- a/tools/fork-network/Cargo.toml +++ b/tools/fork-network/Cargo.toml @@ -8,6 +8,9 @@ repository.workspace = true license.workspace = true publish = false +[lints] +workspace = true + [dependencies] actix.workspace = true anyhow.workspace = true diff --git a/tools/indexer/example/Cargo.toml b/tools/indexer/example/Cargo.toml index 
1baea8d9e20..72e71654b6d 100644 --- a/tools/indexer/example/Cargo.toml +++ b/tools/indexer/example/Cargo.toml @@ -8,6 +8,9 @@ repository.workspace = true license.workspace = true publish = false +[lints] +workspace = true + [dependencies] actix.workspace = true anyhow.workspace = true diff --git a/tools/mirror/Cargo.toml b/tools/mirror/Cargo.toml index e6aaaba5833..71411868d6b 100644 --- a/tools/mirror/Cargo.toml +++ b/tools/mirror/Cargo.toml @@ -8,6 +8,9 @@ repository.workspace = true license.workspace = true publish = false +[lints] +workspace = true + [dependencies] actix.workspace = true anyhow.workspace = true diff --git a/tools/mock-node/Cargo.toml b/tools/mock-node/Cargo.toml index ac7953d32f1..ef4501f6d59 100644 --- a/tools/mock-node/Cargo.toml +++ b/tools/mock-node/Cargo.toml @@ -8,6 +8,9 @@ repository.workspace = true license.workspace = true publish = false +[lints] +workspace = true + [dependencies] actix-rt.workspace = true actix.workspace = true diff --git a/tools/ping/Cargo.toml b/tools/ping/Cargo.toml index 2f2d6f146cf..690e933aa82 100644 --- a/tools/ping/Cargo.toml +++ b/tools/ping/Cargo.toml @@ -8,6 +8,9 @@ repository.workspace = true license.workspace = true publish = false +[lints] +workspace = true + [dependencies] actix-web.workspace = true anyhow.workspace = true diff --git a/tools/restaked/Cargo.toml b/tools/restaked/Cargo.toml index d64cef619de..8cc8eb2aee7 100644 --- a/tools/restaked/Cargo.toml +++ b/tools/restaked/Cargo.toml @@ -8,6 +8,9 @@ repository.workspace = true license.workspace = true publish = false +[lints] +workspace = true + [dependencies] clap.workspace = true tokio.workspace = true diff --git a/tools/rpctypegen/core/Cargo.toml b/tools/rpctypegen/core/Cargo.toml index a9f260dd889..c9dc8b47539 100644 --- a/tools/rpctypegen/core/Cargo.toml +++ b/tools/rpctypegen/core/Cargo.toml @@ -9,6 +9,9 @@ repository.workspace = true license.workspace = true publish = true +[lints] +workspace = true + [dependencies] quote.workspace = true 
serde.workspace = true diff --git a/tools/rpctypegen/macro/Cargo.toml b/tools/rpctypegen/macro/Cargo.toml index 776d80bf437..8cfd58f2f2d 100644 --- a/tools/rpctypegen/macro/Cargo.toml +++ b/tools/rpctypegen/macro/Cargo.toml @@ -9,6 +9,9 @@ repository.workspace = true license.workspace = true publish = true +[lints] +workspace = true + [lib] proc-macro = true diff --git a/tools/speedy_sync/Cargo.toml b/tools/speedy_sync/Cargo.toml index bf7b1efb3b7..76d196488cd 100644 --- a/tools/speedy_sync/Cargo.toml +++ b/tools/speedy_sync/Cargo.toml @@ -8,6 +8,9 @@ repository.workspace = true license.workspace = true publish = false +[lints] +workspace = true + [dependencies] near-store.workspace = true near-chain-primitives.workspace = true diff --git a/tools/state-parts-dump-check/Cargo.toml b/tools/state-parts-dump-check/Cargo.toml index 6ef455d2255..8c363ed7258 100644 --- a/tools/state-parts-dump-check/Cargo.toml +++ b/tools/state-parts-dump-check/Cargo.toml @@ -8,7 +8,8 @@ repository.workspace = true license.workspace = true publish = false -# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html +[lints] +workspace = true [dependencies] actix-web.workspace = true diff --git a/tools/state-parts/Cargo.toml b/tools/state-parts/Cargo.toml index 92a94007fd2..39e7fd0fc91 100644 --- a/tools/state-parts/Cargo.toml +++ b/tools/state-parts/Cargo.toml @@ -8,6 +8,9 @@ repository.workspace = true license.workspace = true publish = false +[lints] +workspace = true + [dependencies] anyhow.workspace = true chrono.workspace = true diff --git a/tools/state-viewer/Cargo.toml b/tools/state-viewer/Cargo.toml index 7c2e03b9e53..9e66d4586a9 100644 --- a/tools/state-viewer/Cargo.toml +++ b/tools/state-viewer/Cargo.toml @@ -8,6 +8,9 @@ repository.workspace = true license.workspace = true publish = false +[lints] +workspace = true + [dependencies] actix.workspace = true ansi_term.workspace = true diff --git a/tools/storage-usage-delta-calculator/Cargo.toml 
b/tools/storage-usage-delta-calculator/Cargo.toml index aaa033db0f4..a0e8a98490f 100644 --- a/tools/storage-usage-delta-calculator/Cargo.toml +++ b/tools/storage-usage-delta-calculator/Cargo.toml @@ -8,6 +8,9 @@ repository.workspace = true license.workspace = true publish = false +[lints] +workspace = true + [dependencies] anyhow.workspace = true serde_json.workspace = true diff --git a/tools/themis/Cargo.toml b/tools/themis/Cargo.toml index 7765058e54a..aa43cdf07ae 100644 --- a/tools/themis/Cargo.toml +++ b/tools/themis/Cargo.toml @@ -9,6 +9,9 @@ repository.workspace = true license.workspace = true publish = false +[lints] +workspace = true + [dependencies] toml.workspace = true serde.workspace = true diff --git a/tools/themis/src/main.rs b/tools/themis/src/main.rs index e5d94fc5986..a6ed42f5279 100644 --- a/tools/themis/src/main.rs +++ b/tools/themis/src/main.rs @@ -12,6 +12,7 @@ fn main() -> anyhow::Result<()> { rules::is_unversioned, rules::has_publish_spec, rules::has_rust_version, + rules::has_lint_inheritance, rules::rust_version_matches_toolchain, rules::has_unified_rust_edition, rules::author_is_near, diff --git a/tools/themis/src/rules.rs b/tools/themis/src/rules.rs index 6e20c1c8c6d..e36916e305f 100644 --- a/tools/themis/src/rules.rs +++ b/tools/themis/src/rules.rs @@ -49,6 +49,31 @@ pub fn has_rust_version(workspace: &Workspace) -> anyhow::Result<()> { Ok(()) } +/// Ensure all crates inherit workspace-wide lint definitions +pub fn has_lint_inheritance(workspace: &Workspace) -> anyhow::Result<()> { + let outliers: Vec<_> = workspace + .members + .iter() + .filter(|pkg| match pkg.manifest.read(&["lints", "workspace"]) { + None | Some(&toml::Value::Boolean(false)) => { + pkg.manifest.read(&["workspace"]).is_some() + } + Some(_) => false, + }) + .map(|pkg| Outlier { path: pkg.parsed.manifest_path.clone(), found: None, extra: None }) + .collect(); + + if !outliers.is_empty() { + bail!(ComplianceError { + msg: "These packages should specify `lints.workspace = 
true`".to_string(), + expected: None, + outliers, + }); + } + + Ok(()) +} + /// Ensure rust-version is the same in Cargo.toml and rust-toolchain.toml pub fn rust_version_matches_toolchain(workspace: &Workspace) -> anyhow::Result<()> { fn get<'a>(mut val: &'a toml::Value, indexes: &[&str]) -> anyhow::Result<&'a toml::Value> { diff --git a/tools/undo-block/Cargo.toml b/tools/undo-block/Cargo.toml index d7519bfc7f9..18707f6f313 100644 --- a/tools/undo-block/Cargo.toml +++ b/tools/undo-block/Cargo.toml @@ -8,6 +8,9 @@ repository.workspace = true license.workspace = true publish = false +[lints] +workspace = true + [dependencies] anyhow.workspace = true clap.workspace = true diff --git a/utils/config/Cargo.toml b/utils/config/Cargo.toml index 6fd62fc75e3..f08e15a1356 100644 --- a/utils/config/Cargo.toml +++ b/utils/config/Cargo.toml @@ -9,6 +9,9 @@ repository.workspace = true license.workspace = true publish = true +[lints] +workspace = true + [dependencies] anyhow.workspace = true json_comments.workspace = true diff --git a/utils/fmt/Cargo.toml b/utils/fmt/Cargo.toml index 6714a07c0ec..1cf0b0c94ee 100644 --- a/utils/fmt/Cargo.toml +++ b/utils/fmt/Cargo.toml @@ -9,6 +9,9 @@ repository.workspace = true license.workspace = true publish = true +[lints] +workspace = true + [dependencies] near-primitives-core.workspace = true diff --git a/utils/mainnet-res/Cargo.toml b/utils/mainnet-res/Cargo.toml index 648800eb9e1..6a5d3694b28 100644 --- a/utils/mainnet-res/Cargo.toml +++ b/utils/mainnet-res/Cargo.toml @@ -8,6 +8,9 @@ repository.workspace = true license.workspace = true publish = false +[lints] +workspace = true + [dependencies] serde_json.workspace = true diff --git a/utils/near-cache/Cargo.toml b/utils/near-cache/Cargo.toml index 9b36edd403d..f14347753d8 100644 --- a/utils/near-cache/Cargo.toml +++ b/utils/near-cache/Cargo.toml @@ -9,6 +9,9 @@ repository.workspace = true license.workspace = true publish = true +[lints] +workspace = true + [dependencies] lru.workspace = 
true diff --git a/utils/near-performance-metrics-macros/Cargo.toml b/utils/near-performance-metrics-macros/Cargo.toml index 2079f6d4faa..77a622d1046 100644 --- a/utils/near-performance-metrics-macros/Cargo.toml +++ b/utils/near-performance-metrics-macros/Cargo.toml @@ -8,6 +8,9 @@ repository.workspace = true license.workspace = true publish = false +[lints] +workspace = true + [dependencies] quote.workspace = true syn.workspace = true diff --git a/utils/near-performance-metrics/Cargo.toml b/utils/near-performance-metrics/Cargo.toml index 140fff972ad..79a0def6707 100644 --- a/utils/near-performance-metrics/Cargo.toml +++ b/utils/near-performance-metrics/Cargo.toml @@ -8,6 +8,9 @@ repository.workspace = true license.workspace = true publish = false +[lints] +workspace = true + [dependencies] actix.workspace = true bitflags.workspace = true diff --git a/utils/near-stable-hasher/Cargo.toml b/utils/near-stable-hasher/Cargo.toml index 4734081b84d..d9c67aef477 100644 --- a/utils/near-stable-hasher/Cargo.toml +++ b/utils/near-stable-hasher/Cargo.toml @@ -8,3 +8,6 @@ description = "`near-stable-hasher` is a library that is essentially a wrapper a repository.workspace = true license.workspace = true publish = true + +[lints] +workspace = true diff --git a/utils/stdx/Cargo.toml b/utils/stdx/Cargo.toml index 2a94cef2dba..281b90cee41 100644 --- a/utils/stdx/Cargo.toml +++ b/utils/stdx/Cargo.toml @@ -9,6 +9,9 @@ repository.workspace = true license.workspace = true publish = true +[lints] +workspace = true + [dependencies] # Absolutely must not depend on any crates from nearcore workspace, # and should have as few dependencies as possible otherwise. 
From 1b0bfca16859feab5e57f0d4d5b7669cdbf2720d Mon Sep 17 00:00:00 2001 From: wacban Date: Fri, 17 Nov 2023 12:54:02 +0000 Subject: [PATCH 21/30] feat(resharding): set the shard boundary in the new shard layout (#10196) based on the analysis done in #10119 it's not yet set in stone but it's the most likely candidate so let's set it and test it --- core/primitives/src/shard_layout.rs | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/core/primitives/src/shard_layout.rs b/core/primitives/src/shard_layout.rs index 7f2252b63c7..cce0b8bf031 100644 --- a/core/primitives/src/shard_layout.rs +++ b/core/primitives/src/shard_layout.rs @@ -160,10 +160,7 @@ impl ShardLayout { /// This is work in progress and the exact way of splitting is yet to be determined. pub fn get_simple_nightshade_layout_v2() -> ShardLayout { ShardLayout::v1( - // TODO(resharding) - find the right boundary to split shards in - // place of just "sweat". Likely somewhere in between near.social - // and sweatcoin. - vec!["aurora", "aurora-0", "kkuuue2akv_1630967379.near", "sweat"] + vec!["aurora", "aurora-0", "kkuuue2akv_1630967379.near", "tge-lockup.sweat"] .into_iter() .map(|s| s.parse().unwrap()) .collect(), @@ -611,7 +608,7 @@ mod tests { "aurora", "aurora-0", "kkuuue2akv_1630967379.near", - "sweat" + "tge-lockup.sweat" ], "shards_split_map": [ [ From 4eb4d0dae4b59bcfdde02eed4d827cc1aa85b005 Mon Sep 17 00:00:00 2001 From: Simonas Kazlauskas Date: Fri, 17 Nov 2023 13:08:53 +0000 Subject: [PATCH 22/30] style: improve how CARGO_TARGET_DIR is handled (#10200) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Previously cargo themis (builds code) and cargo clippy (checks entire crate) condended with each other for the target directory lock and most likely invalidated each other’s cache, at least to some extent. Keeping target directories between the two separate resolves this problem altogether. 
--- test-utils/style/src/lib.rs | 52 +++++++++++++++++++++---------------- 1 file changed, 29 insertions(+), 23 deletions(-) diff --git a/test-utils/style/src/lib.rs b/test-utils/style/src/lib.rs index 125f9fa16b0..d0ce3832e94 100644 --- a/test-utils/style/src/lib.rs +++ b/test-utils/style/src/lib.rs @@ -6,23 +6,37 @@ use std::{ }; /// Add common cargo arguments for tests run by this code. -fn cargo_env(cmd: &mut Command) { +fn cargo_env(cmd: &mut Command, target_dir: Option<&str>) { // Set the working directory to the project root, rather than using whatever default nextest // gives us. let style_root = std::env::var_os("CARGO_MANIFEST_DIR").unwrap_or(OsString::from("./")); let wp_root: PathBuf = [&style_root, OsStr::new(".."), OsStr::new("..")].into_iter().collect(); cmd.current_dir(&wp_root); - // Use a different target directory to avoid invalidating any cache after tests are run (so - // that running `cargo nextest` twice does not rebuild half of the workspace on the 2nd - // rebuild. Unfortunately cargo itself does not readily expose this information to us, so we - // have to guess a little as to where this directory might end up. - // - // NB: We aren't using a temporary directory proper here in order to *allow* keeping cache - // between individual `clippy` runs and such. - let target_dir: PathBuf = - [wp_root.as_os_str(), OsStr::new("target"), OsStr::new("style")].into_iter().collect(); - cmd.env("CARGO_TARGET_DIR", target_dir.as_path()); + if let Some(tgt_dir) = target_dir { + // Use a different target directory to avoid invalidating any cache after tests are run (so + // that running `cargo nextest` twice does not rebuild half of the workspace on the 2nd + // rebuild. Unfortunately cargo itself does not readily expose this information to us, so + // we have to guess a little as to where this directory might end up. 
+ // + // NB: We aren't using a temporary directory proper here in order to *allow* keeping cache + // between individual `clippy` runs and such. + let target_dir: PathBuf = + [wp_root.as_os_str(), OsStr::new("target"), OsStr::new(tgt_dir)].into_iter().collect(); + cmd.env("CARGO_TARGET_DIR", target_dir.as_path()); + } +} + +/// Create a cargo command. +/// +/// You will want to set `target_dir` to some unique `Some` value whenever there’s a chance that +/// this invocation of `cargo` will build any project code. Setting unique values avoids lock +/// contention and unintentional cache invalidation. +fn cargo(target_dir: Option<&str>) -> Command { + let cargo = std::env::var_os("CARGO").unwrap_or(OsString::from("cargo")); + let mut cmd = Command::new(cargo); + cargo_env(&mut cmd, target_dir); + cmd } fn ensure_success(mut cmd: std::process::Command) { @@ -38,36 +52,28 @@ fn ensure_success(mut cmd: std::process::Command) { #[test] fn rustfmt() { - let cargo = std::env::var_os("CARGO").unwrap_or(OsString::from("cargo")); - let mut cmd = Command::new(cargo); - cargo_env(&mut cmd); + let mut cmd = cargo(None); cmd.args(&["fmt", "--", "--check"]); ensure_success(cmd); } #[test] fn clippy() { - let cargo = std::env::var_os("CARGO").unwrap_or(OsString::from("cargo")); - let mut cmd = Command::new(cargo); - cargo_env(&mut cmd); + let mut cmd = cargo(Some("style")); cmd.args(&["clippy", "--all-targets", "--all-features", "--locked"]); ensure_success(cmd); } #[test] fn deny() { - let cargo = std::env::var_os("CARGO").unwrap_or(OsString::from("cargo")); - let mut cmd = Command::new(cargo); - cargo_env(&mut cmd); + let mut cmd = cargo(None); cmd.args(&["deny", "--all-features", "--locked", "check", "bans"]); ensure_success(cmd); } #[test] fn themis() { - let cargo = std::env::var_os("CARGO").unwrap_or(OsString::from("cargo")); - let mut cmd = Command::new(cargo); - cargo_env(&mut cmd); + let mut cmd = cargo(Some("themis")); cmd.args(&["run", "--locked", "-p", "themis"]); 
ensure_success(cmd); } From 2c7243ddca0801fb94e402ba9f1655b928fd73d3 Mon Sep 17 00:00:00 2001 From: wacban Date: Fri, 17 Nov 2023 14:08:24 +0000 Subject: [PATCH 23/30] feat(resharding): set defaults for state split config (#10202) Without the individual defaults for fields it was impossible to set only some fields while leaving others out. Now by setting only some fields the rest will get the sensible defaults. It was annoying for mocknet and would be for node maintainers if they need to adjust this config. --- core/chain-configs/src/client_config.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/core/chain-configs/src/client_config.rs b/core/chain-configs/src/client_config.rs index a85c0f62e39..76d9184f1f3 100644 --- a/core/chain-configs/src/client_config.rs +++ b/core/chain-configs/src/client_config.rs @@ -163,6 +163,7 @@ impl SyncConfig { } #[derive(serde::Serialize, serde::Deserialize, Clone, Copy, Debug)] +#[serde(default)] pub struct StateSplitConfig { /// The soft limit on the size of a single batch. The batch size can be /// decreased if resharding is consuming too many resources and interfering From 23aa2ca948b7c9fda6b491aa6728151b4dee5619 Mon Sep 17 00:00:00 2001 From: Shreyan Gupta Date: Fri, 17 Nov 2023 20:13:58 +0530 Subject: [PATCH 24/30] [cleanup] Minor cleanup in serde default (#10203) Discovered simplified way to have default values in serde for structs only when ALL fields have a default. We can have a #[serde(default)] attribute for the whole struct and any missing fields are picked up from the Default::default() implementation of the struct. 
Playground example ``` #[derive(Serialize, Deserialize, Debug)] #[serde(default)] struct Point { x: i32, y: i32, } impl Default for Point { fn default() -> Self { Self { x: 1, y: 2 } } } #[test] fn serde() { let point = Point { x: 10, y: 20 }; let serialized = serde_json::to_string(&point).unwrap(); println!("serialized = {}", serialized); let deserialized: Point = serde_json::from_str(&serialized).unwrap(); println!("deserialized = {:?}", deserialized); // I get Point { x: 10, y: 20 } let serialized = "{\"x\":15}"; let deserialized: Point = serde_json::from_str(&serialized).unwrap(); println!("deserialized = {:?}", deserialized); // I get Point { x: 15, y: 2 } } ``` Bit more context here: https://github.com/near/nearcore/pull/10202 --- chain/network/src/config_json.rs | 38 ++++--------------------- core/chain-configs/src/client_config.rs | 16 +---------- nearcore/src/config.rs | 13 +++------ 3 files changed, 10 insertions(+), 57 deletions(-) diff --git a/chain/network/src/config_json.rs b/chain/network/src/config_json.rs index a1c10453da0..1d69a722a1f 100644 --- a/chain/network/src/config_json.rs +++ b/chain/network/src/config_json.rs @@ -60,11 +60,6 @@ fn default_peer_expiration_duration() -> Duration { Duration::from_secs(7 * 24 * 60 * 60) } -/// If non-zero - we'll skip sending tombstones during initial sync and for that many seconds after start. -fn default_skip_tombstones() -> i64 { - 0 -} - /// This is a list of public STUN servers provided by Google, /// which are known to have good availability. 
To avoid trusting /// a centralized entity (and DNS used for domain resolution), @@ -201,28 +196,11 @@ pub struct Config { pub experimental: ExperimentalConfig, } -fn default_tier1_enable_inbound() -> bool { - true -} -fn default_tier1_enable_outbound() -> bool { - true -} - -fn default_tier1_connect_interval() -> Duration { - Duration::from_secs(60) -} - -fn default_tier1_new_connections_per_attempt() -> u64 { - 50 -} - #[derive(serde::Serialize, serde::Deserialize, Clone, Debug)] pub struct ExperimentalConfig { // If true - don't allow any inbound connections. - #[serde(default)] pub inbound_disabled: bool, // If true - connect only to the boot nodes. - #[serde(default)] pub connect_only_to_boot_nodes: bool, // If greater than 0, then system will no longer send or receive tombstones @@ -230,28 +208,22 @@ pub struct ExperimentalConfig { // // The better name is `skip_tombstones_seconds`, but we keep send for // compatibility. - #[serde(default = "default_skip_tombstones")] pub skip_sending_tombstones_seconds: i64, /// See `near_network::config::Tier1::enable_inbound`. - #[serde(default = "default_tier1_enable_inbound")] pub tier1_enable_inbound: bool, /// See `near_network::config::Tier1::enable_outbound`. - #[serde(default = "default_tier1_enable_outbound")] pub tier1_enable_outbound: bool, /// See `near_network::config::Tier1::connect_interval`. - #[serde(default = "default_tier1_connect_interval")] pub tier1_connect_interval: Duration, /// See `near_network::config::Tier1::new_connections_per_attempt`. - #[serde(default = "default_tier1_new_connections_per_attempt")] pub tier1_new_connections_per_attempt: u64, /// See `NetworkConfig`. /// Fields set here will override the NetworkConfig fields. 
- #[serde(default)] pub network_config_overrides: NetworkConfigOverrides, } @@ -277,11 +249,11 @@ impl Default for ExperimentalConfig { ExperimentalConfig { inbound_disabled: false, connect_only_to_boot_nodes: false, - skip_sending_tombstones_seconds: default_skip_tombstones(), - tier1_enable_inbound: default_tier1_enable_inbound(), - tier1_enable_outbound: default_tier1_enable_outbound(), - tier1_connect_interval: default_tier1_connect_interval(), - tier1_new_connections_per_attempt: default_tier1_new_connections_per_attempt(), + skip_sending_tombstones_seconds: 0, + tier1_enable_inbound: true, + tier1_enable_outbound: true, + tier1_connect_interval: Duration::from_secs(60), + tier1_new_connections_per_attempt: 50, network_config_overrides: Default::default(), } } diff --git a/core/chain-configs/src/client_config.rs b/core/chain-configs/src/client_config.rs index 76d9184f1f3..b61e64a2b57 100644 --- a/core/chain-configs/src/client_config.rs +++ b/core/chain-configs/src/client_config.rs @@ -30,19 +30,17 @@ pub const DEFAULT_STATE_SYNC_NUM_CONCURRENT_REQUESTS_ON_CATCHUP_EXTERNAL: u32 = /// Configuration for garbage collection. #[derive(Clone, Debug, serde::Serialize, serde::Deserialize, PartialEq)] +#[serde(default)] pub struct GCConfig { /// Maximum number of blocks to garbage collect at every garbage collection /// call. - #[serde(default = "default_gc_blocks_limit")] pub gc_blocks_limit: NumBlocks, /// Maximum number of height to go through at each garbage collection step /// when cleaning forks during garbage collection. - #[serde(default = "default_gc_fork_clean_step")] pub gc_fork_clean_step: u64, /// Number of epochs for which we keep store data. 
- #[serde(default = "default_gc_num_epochs_to_keep")] pub gc_num_epochs_to_keep: u64, } @@ -56,18 +54,6 @@ impl Default for GCConfig { } } -fn default_gc_blocks_limit() -> NumBlocks { - GCConfig::default().gc_blocks_limit -} - -fn default_gc_fork_clean_step() -> u64 { - GCConfig::default().gc_fork_clean_step -} - -fn default_gc_num_epochs_to_keep() -> u64 { - GCConfig::default().gc_num_epochs_to_keep() -} - impl GCConfig { pub fn gc_num_epochs_to_keep(&self) -> u64 { max(MIN_GC_NUM_EPOCHS_TO_KEEP, self.gc_num_epochs_to_keep) diff --git a/nearcore/src/config.rs b/nearcore/src/config.rs index 0418c57cad6..e9a4023522e 100644 --- a/nearcore/src/config.rs +++ b/nearcore/src/config.rs @@ -302,19 +302,15 @@ pub struct Config { #[serde(skip_serializing_if = "Option::is_none")] pub save_trie_changes: Option, pub log_summary_style: LogSummaryStyle, - #[serde(default = "default_log_summary_period")] pub log_summary_period: Duration, // Allows more detailed logging, for example a list of orphaned blocks. pub enable_multiline_logging: Option, /// Garbage collection configuration. - #[serde(default, flatten)] + #[serde(flatten)] pub gc: GCConfig, - #[serde(default = "default_view_client_threads")] pub view_client_threads: usize, pub epoch_sync_enabled: bool, - #[serde(default = "default_view_client_throttle_period")] pub view_client_throttle_period: Duration, - #[serde(default = "default_trie_viewer_state_size_limit")] pub trie_viewer_state_size_limit: Option, /// If set, overrides value in genesis configuration. #[serde(skip_serializing_if = "Option::is_none")] @@ -323,14 +319,14 @@ pub struct Config { pub store: near_store::StoreConfig, /// Different parameters to configure underlying cold storage. /// This feature is under development, do not use in production. - #[serde(default, skip_serializing_if = "Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub cold_store: Option, /// Configuration for the split storage. 
- #[serde(default, skip_serializing_if = "Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub split_storage: Option, /// The node will stop after the head exceeds this height. /// The node usually stops within several seconds after reaching the target height. - #[serde(default, skip_serializing_if = "Option::is_none")] + #[serde(skip_serializing_if = "Option::is_none")] pub expected_shutdown: Option, /// Whether to use state sync (unreliable and corrupts the DB if fails) or do a block sync instead. #[serde(skip_serializing_if = "Option::is_none")] @@ -344,7 +340,6 @@ pub struct Config { /// guarantees that the node will use bounded resources to store incoming transactions. /// Setting this value too low (<1MB) on the validator might lead to production of smaller /// chunks and underutilizing the capacity of the network. - #[serde(default = "default_transaction_pool_size_limit")] pub transaction_pool_size_limit: Option, pub state_split_config: StateSplitConfig, } From 1e4c2fdfa615a5dcf2878124cc407ac1ec65ddee Mon Sep 17 00:00:00 2001 From: Marcelo Diop-Gonzalez Date: Fri, 17 Nov 2023 10:20:43 -0500 Subject: [PATCH 25/30] fix(pytests): fix rpc_state_changes.py (#10194) https://github.com/near/nearcore/pull/9658 stabilized a protocol feature that restricts creating top level accounts unless you're the registrar, and in this test we were trying to create one. change it to be a subaccount of the tx signer --- pytest/tests/sanity/rpc_state_changes.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pytest/tests/sanity/rpc_state_changes.py b/pytest/tests/sanity/rpc_state_changes.py index bf20b2be5a5..009bc9d08f7 100755 --- a/pytest/tests/sanity/rpc_state_changes.py +++ b/pytest/tests/sanity/rpc_state_changes.py @@ -54,9 +54,10 @@ def test_changes_with_new_account_with_access_key(): 4. Observe the changes in the block where the receipt lands. 
""" + base_account_id = nodes[0].signer_key.account_id # re-use the key as a new account access key new_key = Key( - account_id='rpc_key_value_changes_full_access', + account_id=f'rpc_key_value_changes.{base_account_id}', pk=nodes[1].signer_key.pk, sk=nodes[1].signer_key.sk, ) From 9816a75aa853e9deecf3cebc3506dd67bdf3d1b9 Mon Sep 17 00:00:00 2001 From: Andrei <122784628+andrei-near@users.noreply.github.com> Date: Fri, 17 Nov 2023 15:24:57 +0000 Subject: [PATCH 26/30] Monthly nearcore issue metrics (#10199) --- .github/workflows/issue-metrics.yml | 40 +++++++++++++++++++++++++++++ gh_metrics/.gitkeep | 0 2 files changed, 40 insertions(+) create mode 100644 .github/workflows/issue-metrics.yml create mode 100644 gh_metrics/.gitkeep diff --git a/.github/workflows/issue-metrics.yml b/.github/workflows/issue-metrics.yml new file mode 100644 index 00000000000..99e79c00307 --- /dev/null +++ b/.github/workflows/issue-metrics.yml @@ -0,0 +1,40 @@ +name: Monthly issue metrics + +on: + workflow_dispatch: + schedule: + - cron: '3 2 1 * *' + +permissions: + issues: write + +jobs: + build: + name: issue metrics + runs-on: ubuntu-latest + steps: + - name: Get dates for last month + shell: bash + id: last-month + run: | + # Calculate the first day of the previous month + first_day=$(date -d "last month" +%Y-%m-01) + + # Calculate the last day of the previous month + last_day=$(date -d "$first_day +1 month -1 day" +%Y-%m-%d) + + #Set an environment variable with the date range + echo "$first_day..$last_day" + echo "LAST_MONTH=$first_day..$last_day" >> $GITHUB_OUTPUT + + - name: Run issue-metrics tool + uses: github/issue-metrics@v2 + env: + SEARCH_QUERY: 'repo:owner/repo is:issue created: ${{ steps.last-month.outputs.LAST_MONTH }} -reason:"not planned"' + + - name: Create issue + uses: peter-evans/create-issue-from-file@v4 + with: + title: Monthly issue metrics report + token: ${{ secrets.GITHUB_TOKEN }} + content-filepath: ./gh_metrics/issue_metrics.md diff --git 
a/gh_metrics/.gitkeep b/gh_metrics/.gitkeep new file mode 100644 index 00000000000..e69de29bb2d From ed3dc7eb7b2237ea3215c816cfd6c687a7219a37 Mon Sep 17 00:00:00 2001 From: Shreyan Gupta Date: Fri, 17 Nov 2023 22:48:59 +0530 Subject: [PATCH 27/30] [resharding] Revert call to clear_resharding_data (#10207) Reverting the GC code for resharding till we figure out what's wrong... Context: https://near.zulipchat.com/#narrow/stream/295558-pagoda.2Fcore/topic/Master.20binary.20Can't.20clear.20old.20data/near/402240517 --- chain/chain/src/chain.rs | 11 ++++++----- integration-tests/src/tests/client/resharding.rs | 2 ++ 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/chain/chain/src/chain.rs b/chain/chain/src/chain.rs index e0b364a2142..a4216689100 100644 --- a/chain/chain/src/chain.rs +++ b/chain/chain/src/chain.rs @@ -1003,11 +1003,12 @@ impl Chain { *block_hash, GCMode::Canonical(tries.clone()), )?; - chain_store_update.clear_resharding_data( - self.runtime_adapter.as_ref(), - self.epoch_manager.as_ref(), - *block_hash, - )?; + // TODO(resharding): Call clear_resharding_data once we figure out what's wrong + // chain_store_update.clear_resharding_data( + // self.runtime_adapter.as_ref(), + // self.epoch_manager.as_ref(), + // *block_hash, + // )?; gc_blocks_remaining -= 1; } else { return Err(Error::GCError( diff --git a/integration-tests/src/tests/client/resharding.rs b/integration-tests/src/tests/client/resharding.rs index 97cc92d2404..e8433ca398b 100644 --- a/integration-tests/src/tests/client/resharding.rs +++ b/integration-tests/src/tests/client/resharding.rs @@ -1059,11 +1059,13 @@ fn test_shard_layout_upgrade_gc_impl(resharding_type: ReshardingType, rng_seed: } #[test] +#[ignore] fn test_shard_layout_upgrade_gc() { test_shard_layout_upgrade_gc_impl(ReshardingType::V1, 44); } #[test] +#[ignore] fn test_shard_layout_upgrade_gc_v2() { // TODO(resharding) remove those checks once rolled out if checked_feature!("stable", SimpleNightshadeV2, 
PROTOCOL_VERSION) { From 87cca9a78e9ca32303dc99391f1317c1410cc118 Mon Sep 17 00:00:00 2001 From: Michal Nazarewicz Date: Fri, 17 Nov 2023 21:24:11 +0100 Subject: [PATCH 28/30] Take advantage of a new std::io::Error::other (#10198) Now that std::io::Error::other has stabilised, use it to simplify a bunch of code. --- core/store/src/db/colddb.rs | 2 +- core/store/src/db/rocksdb.rs | 24 +++++---------- core/store/src/db/rocksdb/instance_tracker.rs | 8 ++--- core/store/src/db/rocksdb/snapshot.rs | 4 +-- core/store/src/flat/store_helper.rs | 12 +++----- core/store/src/metadata.rs | 25 ++++++---------- core/store/src/opener.rs | 2 +- runtime/near-test-contracts/build.rs | 30 +++++++++---------- 8 files changed, 42 insertions(+), 65 deletions(-) diff --git a/core/store/src/db/colddb.rs b/core/store/src/db/colddb.rs index 90aa9e7ac5d..57c2a2903d9 100644 --- a/core/store/src/db/colddb.rs +++ b/core/store/src/db/colddb.rs @@ -29,7 +29,7 @@ impl ColdDB { // Checks if the column is is the cold db and returns an error if not. 
fn check_is_in_colddb(col: DBCol) -> std::io::Result<()> { if !col.is_in_colddb() { - return Err(std::io::Error::new(std::io::ErrorKind::Other, Self::err_msg(col))); + return Err(std::io::Error::other(Self::err_msg(col))); } Ok(()) } diff --git a/core/store/src/db/rocksdb.rs b/core/store/src/db/rocksdb.rs index 35e73a6c156..0a6e0a12771 100644 --- a/core/store/src/db/rocksdb.rs +++ b/core/store/src/db/rocksdb.rs @@ -114,7 +114,7 @@ impl RocksDB { columns: &[DBCol], ) -> io::Result { let counter = instance_tracker::InstanceTracker::try_new(store_config.max_open_files) - .map_err(other_error)?; + .map_err(io::Error::other)?; let (db, db_opt) = Self::open_db(path, store_config, mode, temp, columns)?; let cf_handles = Self::get_cf_handles(&db, columns); Ok(Self { db, db_opt, cf_handles, _instance_tracker: counter }) @@ -144,7 +144,7 @@ impl RocksDB { } else { DB::open_cf_descriptors(&options, path, cf_descriptors) } - .map_err(into_other)?; + .map_err(io::Error::other)?; if cfg!(feature = "single_thread_rocksdb") { // These have to be set after open db let mut env = Env::new().unwrap(); @@ -200,7 +200,7 @@ impl RocksDB { } else if cfg!(debug_assertions) { panic!("The database instance isn’t setup to access {col}"); } else { - Err(other_error(format!("{col}: no such column"))) + Err(io::Error::other(format!("{col}: no such column"))) } } @@ -269,7 +269,7 @@ impl<'a> Iterator for RocksDBIterator<'a> { type Item = io::Result<(Box<[u8]>, Box<[u8]>)>; fn next(&mut self) -> Option { - Some(self.0.next()?.map_err(into_other)) + Some(self.0.next()?.map_err(io::Error::other)) } } @@ -314,7 +314,7 @@ impl Database for RocksDB { let result = self .db .get_pinned_cf_opt(self.cf_handle(col)?, key, &read_options) - .map_err(into_other)? + .map_err(io::Error::other)? 
.map(DBSlice::from_rocksdb_slice); timer.observe_duration(); Ok(result) @@ -366,7 +366,7 @@ impl Database for RocksDB { } DBOp::DeleteAll { col } => { let cf_handle = self.cf_handle(col)?; - let range = self.get_cf_key_range(cf_handle).map_err(into_other)?; + let range = self.get_cf_key_range(cf_handle).map_err(io::Error::other)?; if let Some(range) = range { batch.delete_range_cf(cf_handle, range.start(), range.end()); // delete_range_cf deletes ["begin_key", "end_key"), so need one more delete @@ -378,7 +378,7 @@ impl Database for RocksDB { } } } - self.db.write(batch).map_err(into_other) + self.db.write(batch).map_err(io::Error::other) } fn compact(&self) -> io::Result<()> { @@ -392,7 +392,7 @@ impl Database for RocksDB { // Need to iterator over all CFs because the normal `flush()` only // flushes the default column family. for col in DBCol::iter() { - self.db.flush_cf(self.cf_handle(col)?).map_err(into_other)?; + self.db.flush_cf(self.cf_handle(col)?).map_err(io::Error::other)?; } Ok(()) } @@ -640,14 +640,6 @@ fn parse_statistics( Ok(()) } -fn other_error(msg: String) -> io::Error { - io::Error::new(io::ErrorKind::Other, msg) -} - -fn into_other(error: rocksdb::Error) -> io::Error { - io::Error::new(io::ErrorKind::Other, error.into_string()) -} - /// Returns name of a RocksDB column family corresponding to given column. /// /// Historically we used `col##` names (with `##` being index of the column). diff --git a/core/store/src/db/rocksdb/instance_tracker.rs b/core/store/src/db/rocksdb/instance_tracker.rs index 784bbfd65c5..31d18880224 100644 --- a/core/store/src/db/rocksdb/instance_tracker.rs +++ b/core/store/src/db/rocksdb/instance_tracker.rs @@ -205,17 +205,13 @@ impl NoFile for RealNoFile { #[test] fn test_ensure_max_open_files_limit() { - fn other_error(msg: &str) -> std::io::Error { - super::other_error(msg.to_string()) - } - /// Mock implementation of NoFile interface. 
struct MockNoFile<'a>(&'a mut (u64, u64)); impl<'a> NoFile for MockNoFile<'a> { fn get(&self) -> std::io::Result<(u64, u64)> { if self.0 .0 == 666 { - Err(other_error("error")) + Err(std::io::ErrorKind::Other.into()) } else { Ok(*self.0) } @@ -224,7 +220,7 @@ fn test_ensure_max_open_files_limit() { fn set(&mut self, soft: u64, hard: u64) -> std::io::Result<()> { let (old_soft, old_hard) = self.get().unwrap(); if old_hard == 666000 { - Err(other_error("error")) + Err(std::io::ErrorKind::Other.into()) } else { assert!(soft != old_soft, "Pointless call to set"); *self.0 = (soft, hard); diff --git a/core/store/src/db/rocksdb/snapshot.rs b/core/store/src/db/rocksdb/snapshot.rs index d36c27ad95f..76be0806b55 100644 --- a/core/store/src/db/rocksdb/snapshot.rs +++ b/core/store/src/db/rocksdb/snapshot.rs @@ -51,7 +51,7 @@ impl std::convert::From for SnapshotError { impl std::convert::From<::rocksdb::Error> for SnapshotError { fn from(err: ::rocksdb::Error) -> Self { - super::into_other(err).into() + io::Error::other(err).into() } } @@ -94,7 +94,7 @@ impl Snapshot { } let db = super::RocksDB::open(db_path, config, crate::Mode::ReadWriteExisting, temp)?; - let cp = Checkpoint::new(&db.db).map_err(super::into_other)?; + let cp = Checkpoint::new(&db.db).map_err(io::Error::other)?; cp.create_checkpoint(&snapshot_path)?; Ok(Self(Some(snapshot_path))) diff --git a/core/store/src/flat/store_helper.rs b/core/store/src/flat/store_helper.rs index d09b9300469..3c4510634db 100644 --- a/core/store/src/flat/store_helper.rs +++ b/core/store/src/flat/store_helper.rs @@ -139,17 +139,13 @@ pub fn encode_flat_state_db_key(shard_uid: ShardUId, key: &[u8]) -> Vec { pub fn decode_flat_state_db_key(key: &[u8]) -> io::Result<(ShardUId, Vec)> { if key.len() < 8 { - return Err(io::Error::new( - io::ErrorKind::Other, - format!("expected FlatState key length to be at least 8: {key:?}"), - )); + return Err(io::Error::other(format!( + "expected FlatState key length to be at least 8: {key:?}" + ))); } let 
(shard_uid_bytes, trie_key) = key.split_at(8); let shard_uid = shard_uid_bytes.try_into().map_err(|err| { - io::Error::new( - io::ErrorKind::Other, - format!("failed to decode shard_uid as part of FlatState key: {err}"), - ) + io::Error::other(format!("failed to decode shard_uid as part of FlatState key: {err}")) })?; Ok((shard_uid, trie_key.to_vec())) } diff --git a/core/store/src/metadata.rs b/core/store/src/metadata.rs index eab2c05041c..30a25a20ef3 100644 --- a/core/store/src/metadata.rs +++ b/core/store/src/metadata.rs @@ -103,7 +103,7 @@ fn read( match result { Some(value) => Ok(value), - None => Err(other_error(format!("missing {what}; {msg}"))), + None => Err(std::io::Error::other(format!("missing {what}; {msg}"))), } } @@ -118,19 +118,12 @@ fn maybe_read( key: &[u8], ) -> std::io::Result> { let msg = "it’s not a neard database or database is corrupted"; - if let Some(bytes) = db.get_raw_bytes(crate::DBCol::DbVersion, key)? { - let value = std::str::from_utf8(&bytes) - .map_err(|_err| format!("invalid {what}: {bytes:?}; {msg}")) - .map_err(other_error)?; - let value = T::from_str(value) - .map_err(|_err| format!("invalid {what}: ‘{value}’; {msg}")) - .map_err(other_error)?; - Ok(Some(value)) - } else { - Ok(None) - } -} - -fn other_error(msg: String) -> std::io::Error { - std::io::Error::new(std::io::ErrorKind::Other, msg) + db.get_raw_bytes(crate::DBCol::DbVersion, key)? 
+ .map(|bytes| { + let value = std::str::from_utf8(&bytes) + .map_err(|_err| format!("invalid {what}: {bytes:?}; {msg}"))?; + T::from_str(value).map_err(|_err| format!("invalid {what}: ‘{value}’; {msg}")) + }) + .transpose() + .map_err(std::io::Error::other) } diff --git a/core/store/src/opener.rs b/core/store/src/opener.rs index 6a5926758e5..a4c59e4637f 100644 --- a/core/store/src/opener.rs +++ b/core/store/src/opener.rs @@ -524,7 +524,7 @@ impl<'a> DBOpener<'a> { let metadata = DbMetadata::read(&db)?; if want_version != metadata.version { let msg = format!("unexpected DbVersion {}; expected {want_version}", metadata.version); - Err(std::io::Error::new(std::io::ErrorKind::Other, msg)) + Err(std::io::Error::other(msg)) } else { Ok((db, metadata)) } diff --git a/runtime/near-test-contracts/build.rs b/runtime/near-test-contracts/build.rs index e551f5ff07b..41e062e8bb6 100644 --- a/runtime/near-test-contracts/build.rs +++ b/runtime/near-test-contracts/build.rs @@ -1,14 +1,11 @@ -use std::path::Path; -use std::path::PathBuf; use std::process::Command; -use std::{env, fs, io, process}; type Error = Box; fn main() { if let Err(err) = try_main() { eprintln!("{}", err); - process::exit(1); + std::process::exit(1); } } @@ -39,14 +36,14 @@ fn build_contract(dir: &str, args: &[&str], output: &str) -> Result<(), Error> { let src = target_dir.join(format!("wasm32-unknown-unknown/release/{}.wasm", dir.replace('-', "_"))); - fs::copy(&src, format!("./res/{}.wasm", output)) + std::fs::copy(&src, format!("./res/{}.wasm", output)) .map_err(|err| format!("failed to copy `{}`: {}", src.display(), err))?; println!("cargo:rerun-if-changed=./{}/src/lib.rs", dir); println!("cargo:rerun-if-changed=./{}/Cargo.toml", dir); Ok(()) } -fn cargo_build_cmd(target_dir: &Path) -> Command { +fn cargo_build_cmd(target_dir: &std::path::Path) -> Command { let mut res = Command::new("cargo"); res.env_remove("CARGO_BUILD_RUSTFLAGS"); @@ -61,15 +58,18 @@ fn cargo_build_cmd(target_dir: &Path) -> Command { 
} fn check_status(mut cmd: Command) -> Result<(), Error> { - let status = cmd.status().map_err(|err| { - io::Error::new(io::ErrorKind::Other, format!("command `{:?}` failed to run: {}", cmd, err)) - })?; - if !status.success() { - return Err(format!("command `{:?}` exited with non-zero status: {:?}", cmd, status).into()); - } - Ok(()) + cmd.status() + .map_err(|err| format!("command `{cmd:?}` failed to run: {err}")) + .and_then(|status| { + if status.success() { + Ok(()) + } else { + Err(format!("command `{cmd:?}` exited with non-zero status: {status:?}")) + } + }) + .map_err(Error::from) } -fn out_dir() -> PathBuf { - env::var("OUT_DIR").unwrap().into() +fn out_dir() -> std::path::PathBuf { + std::env::var("OUT_DIR").unwrap().into() } From 65fa82cf566efcbe6148df626fa335774eedfbb0 Mon Sep 17 00:00:00 2001 From: Andrei <122784628+andrei-near@users.noreply.github.com> Date: Sat, 18 Nov 2023 10:14:15 +0000 Subject: [PATCH 29/30] GH metrics adjustments (#10209) Add GH_TOKEN environment. Required by the action even though the repo is public Report example: https://github.com/near/nearcore/issues/10208 --- .github/workflows/issue-metrics.yml | 9 +++++---- gh_metrics/.gitkeep | 0 2 files changed, 5 insertions(+), 4 deletions(-) delete mode 100644 gh_metrics/.gitkeep diff --git a/.github/workflows/issue-metrics.yml b/.github/workflows/issue-metrics.yml index 99e79c00307..1065168cc56 100644 --- a/.github/workflows/issue-metrics.yml +++ b/.github/workflows/issue-metrics.yml @@ -9,8 +9,8 @@ permissions: issues: write jobs: - build: - name: issue metrics + monthly-issue-metrics: + name: past month issue metrics runs-on: ubuntu-latest steps: - name: Get dates for last month @@ -30,11 +30,12 @@ jobs: - name: Run issue-metrics tool uses: github/issue-metrics@v2 env: - SEARCH_QUERY: 'repo:owner/repo is:issue created: ${{ steps.last-month.outputs.LAST_MONTH }} -reason:"not planned"' + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + SEARCH_QUERY: 'repo:near/nearcore is:issue created:${{ 
steps.last-month.outputs.LAST_MONTH }}' - name: Create issue uses: peter-evans/create-issue-from-file@v4 with: title: Monthly issue metrics report token: ${{ secrets.GITHUB_TOKEN }} - content-filepath: ./gh_metrics/issue_metrics.md + content-filepath: ./issue_metrics.md diff --git a/gh_metrics/.gitkeep b/gh_metrics/.gitkeep deleted file mode 100644 index e69de29bb2d..00000000000 From 4158229de53972631f27bb063e615491af2efdf8 Mon Sep 17 00:00:00 2001 From: Ekleog-NEAR <96595974+Ekleog-NEAR@users.noreply.github.com> Date: Mon, 20 Nov 2023 09:21:13 +0100 Subject: [PATCH 30/30] remove the currently omnipresent red mark (#10218) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Considering in four weeks of always seeing red on all PRs, we still have not fixed the nayduck tests… I’m suggesting we remove it so that we get the signal again, of whether a PR passes tests or not. Indeed, this failing means that we no longer see the actual value added by other tests, and especially the green checkmark that would otherwise be a good indicator that a PR is ready to be merged: we now need to manually check and scroll the statuses list in order to figure out whether a PR is actually ready for merge. Currently, this is a net negative in developer experience. I’m not questioning the nayduck problem, but the solution seemed wrong to me, and time has shown it does not actually work. We need another solution, like a round-robin of everyone having to look at nayduck once a week and figure out a way to make it green, even if it is by (temporarily) disabling tests (with a plan for re-enabling them). So, I’m petitioning to remove this check from PRs (but keep it on master CI), it would at least make the status quo more livable for all the developers who have literally no idea how to even check which nayduck tests are failing, and whose code is unrelated to these tests anyway. 
--- .github/workflows/nightly_nayduck.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/nightly_nayduck.yml b/.github/workflows/nightly_nayduck.yml index 6ddba8a473d..f692428f16b 100644 --- a/.github/workflows/nightly_nayduck.yml +++ b/.github/workflows/nightly_nayduck.yml @@ -1,6 +1,5 @@ name: Nightly Nayduck tests check on: - pull_request: merge_group: jobs: