From 4db483c0483ed08eff43b1bcf0d4ffd3b198eefe Mon Sep 17 00:00:00 2001 From: yorickdowne <71337066+yorickdowne@users.noreply.github.com> Date: Sat, 10 Aug 2024 16:27:17 -0400 Subject: [PATCH 01/48] Caplin announce MEV and Checkpoint (#1899) --- erigon/docker-entrypoint.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/erigon/docker-entrypoint.sh b/erigon/docker-entrypoint.sh index cb294ad4..35cc44ff 100755 --- a/erigon/docker-entrypoint.sh +++ b/erigon/docker-entrypoint.sh @@ -77,12 +77,14 @@ if [[ "${DOCKER_TAG}" =~ "v3" || "${DOCKER_TAG}" = "latest" ]]; then __caplin+=" --beacon.api.addr=0.0.0.0 --beacon.api.port=${CL_REST_PORT} --beacon.api.cors.allow-origins=*" if [ "${MEV_BOOST}" = "true" ]; then __caplin+=" --caplin.mev-relay-url=${MEV_NODE}" + echo "MEV Boost enabled" fi if [ "${ARCHIVE_NODE}" = "true" ]; then __caplin+=" --caplin.archive=true" fi if [ -n "${RAPID_SYNC_URL}" ]; then __caplin+=" --caplin.checkpoint-sync-url=${RAPID_SYNC_URL}" + echo "Checkpoint sync enabled" else __caplin+=" --caplin.checkpoint-sync.disable=true" fi From af7a45083e36de6804d8ef956a62173bac5f6098 Mon Sep 17 00:00:00 2001 From: yorickdowne <71337066+yorickdowne@users.noreply.github.com> Date: Sat, 10 Aug 2024 16:50:40 -0400 Subject: [PATCH 02/48] Version 2.12.1.0 (#1900) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index f51d5c79..b28ba0a4 100644 --- a/README.md +++ b/README.md @@ -34,4 +34,4 @@ Eth Docker uses a "semver-ish" scheme. large. - Second through fourth digit, [semver](https://semver.org/). -This is Eth Docker v2.12.0.0 +This is Eth Docker v2.12.1.0 From 292f698d5bf8c00625f5d6d0224e826152ca70cb Mon Sep 17 00:00:00 2001 From: yorickdowne <71337066+yorickdowne@users.noreply.github.com> Date: Sat, 10 Aug 2024 20:09:19 -0400 Subject: [PATCH 03/48] Build Teku and Besu on Noble Numbat (#1901) --- besu/Dockerfile.source | 5 +++-- default.env | 2 +- teku/Dockerfile.source | 5 +++-- 3 files changed, 7 insertions(+), 5 deletions(-) diff --git a/besu/Dockerfile.source b/besu/Dockerfile.source index 054236d3..34f0768f 100644 --- a/besu/Dockerfile.source +++ b/besu/Dockerfile.source @@ -1,5 +1,5 @@ # Build Besu in a stock Ubuntu container -FROM eclipse-temurin:21-jdk-jammy AS builder +FROM eclipse-temurin:21-jdk-noble AS builder # This is here to avoid build-time complaints ARG DOCKER_TAG @@ -14,7 +14,7 @@ WORKDIR /usr/src RUN bash -c "git clone --recurse-submodules -j8 ${SRC_REPO} besu && cd besu && git config advice.detachedHead false && git fetch --all --tags && if [[ ${BUILD_TARGET} =~ pr-.+ ]]; then git fetch origin pull/$(echo ${BUILD_TARGET} | cut -d '-' -f 2)/head:besu-pr; git checkout besu-pr; else git checkout ${BUILD_TARGET}; fi && ./gradlew installDist" # Pull all binaries into a second stage deploy Ubuntu container -FROM eclipse-temurin:21-jre-jammy +FROM eclipse-temurin:21-jre-noble ARG USER=besu ARG UID=10001 @@ -28,6 +28,7 @@ RUN apt-get update && DEBIAN_FRONTEND=noninteractive TZ=Etc/UTC apt-get install libjemalloc-dev \ git \ wget \ + adduser \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* diff --git a/default.env b/default.env index d1653952..58c9ce8f 100644 --- a/default.env +++ b/default.env @@ -301,7 +301,7 @@ DEPCLI_SRC_REPO=https://github.com/ethereum/staking-deposit-cli DEPCLI_DOCKER_TAG=nonesuch # traefik and ddns-updater -TRAEFIK_TAG=v3.0 +TRAEFIK_TAG=v3.1 DDNS_TAG=v2 # For the Node Dashboard, define a regex of mount points to ignore for the diskspace check. 
diff --git a/teku/Dockerfile.source b/teku/Dockerfile.source index 073105b3..23de739e 100644 --- a/teku/Dockerfile.source +++ b/teku/Dockerfile.source @@ -1,5 +1,5 @@ # Build Teku in a stock Ubuntu container -FROM eclipse-temurin:21-jdk-jammy AS builder +FROM eclipse-temurin:21-jdk-noble AS builder # This is here to avoid build-time complaints ARG DOCKER_TAG @@ -21,7 +21,7 @@ RUN bash -c "git clone ${SRC_REPO} teku \ && ./gradlew installDist" # Pull all binaries into a second stage deploy Ubuntu container -FROM eclipse-temurin:21-jre-jammy +FROM eclipse-temurin:21-jre-noble ARG USER=teku ARG UID=10002 @@ -30,6 +30,7 @@ RUN apt-get update && DEBIAN_FRONTEND=noninteractive TZ=Etc/UTC apt-get install ca-certificates \ tzdata \ git \ + adduser \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* From ffbe1933b45d3be3bea343f26a5b1eab3626ca4f Mon Sep 17 00:00:00 2001 From: yorickdowne <71337066+yorickdowne@users.noreply.github.com> Date: Sun, 11 Aug 2024 14:17:19 -0400 Subject: [PATCH 04/48] OCD eth1 rename (#1902) --- besu.yml | 6 ++--- besu/Dockerfile.binary | 1 + besu/Dockerfile.source | 1 + besu/docker-entrypoint.sh | 11 ++++++++-- ethd | 29 ++++++++++++++++++++---- geth.yml | 8 +++---- geth/Dockerfile.binary | 3 ++- geth/Dockerfile.source | 3 ++- geth/docker-entrypoint.sh | 39 +++++++++++++++++++-------------- nethermind.yml | 6 ++--- nethermind/Dockerfile.binary | 2 ++ nethermind/Dockerfile.source | 2 ++ nethermind/docker-entrypoint.sh | 9 +++++++- 13 files changed, 85 insertions(+), 35 deletions(-) diff --git a/besu.yml b/besu.yml index a7cd8956..6cb6db41 100644 --- a/besu.yml +++ b/besu.yml @@ -28,7 +28,8 @@ services: - ARCHIVE_NODE=${ARCHIVE_NODE:-} - NETWORK=${NETWORK} volumes: - - besu-eth1-data:/var/lib/besu + - besu-el-data:/var/lib/besu + - besu-eth1-data:/var/lib/besu-og - /etc/localtime:/etc/localtime:ro - jwtsecret:/var/lib/besu/ee-secret ports: @@ -43,8 +44,6 @@ services: entrypoint: - docker-entrypoint.sh - /opt/besu/bin/besu - - --data-path - - /var/lib/besu - --p2p-port - ${EL_P2P_PORT:-30303} - --rpc-http-enabled @@ -92,6 +91,7 @@ services: command: /bin/sh volumes: + besu-el-data: besu-eth1-data: jwtsecret: diff --git a/besu/Dockerfile.binary b/besu/Dockerfile.binary index ff024867..113c2f86 100644 --- a/besu/Dockerfile.binary +++ b/besu/Dockerfile.binary @@ -23,6 +23,7 @@ RUN set -eux; \ gosu nobody true # Create data mount point with permissions +RUN mkdir -p /var/lib/besu-og && chown -R ${USER}:${USER} /var/lib/besu-og && chmod -R 700 /var/lib/besu-og RUN mkdir -p /var/lib/besu/ee-secret && chown -R ${USER}:${USER} /var/lib/besu && chmod -R 700 /var/lib/besu && chmod 777 /var/lib/besu/ee-secret # Cannot assume buildkit, hence no chmod diff --git a/besu/Dockerfile.source b/besu/Dockerfile.source index 34f0768f..9ca521c4 100644 --- a/besu/Dockerfile.source +++ b/besu/Dockerfile.source @@ -47,6 +47,7 @@ RUN adduser \ --ingroup "${USER}" \ "${USER}" +RUN mkdir -p /var/lib/besu-og && chown -R ${USER}:${USER} /var/lib/besu-og && chmod -R 700 /var/lib/besu-og RUN mkdir -p /var/lib/besu/ee-secret && chown -R besu:besu /var/lib/besu && chmod -R 700 /var/lib/besu && chmod 777 /var/lib/besu/ee-secret # Cannot assume buildkit, hence no chmod diff --git a/besu/docker-entrypoint.sh b/besu/docker-entrypoint.sh index aa5ec239..e4a131be 100755 --- a/besu/docker-entrypoint.sh +++ b/besu/docker-entrypoint.sh @@ -65,6 +65,13 @@ else __spec="" fi +# New or old datadir +if [ -d /var/lib/besu-og/database ]; then + __datadir="--data-path /var/lib/besu-og" +else + __datadir="--data-path 
/var/lib/besu" +fi + if [ -f /var/lib/besu/prune-marker ]; then rm -f /var/lib/besu/prune-marker if [ "${ARCHIVE_NODE}" = "true" ]; then @@ -73,9 +80,9 @@ if [ -f /var/lib/besu/prune-marker ]; then fi # Word splitting is desired for the command line parameters # shellcheck disable=SC2086 - exec "$@" ${__network} ${__prune} ${EL_EXTRAS} storage trie-log prune + exec "$@" ${__datadir} ${__network} ${__prune} ${EL_EXTRAS} storage trie-log prune else # Word splitting is desired for the command line parameters # shellcheck disable=SC2086 - exec "$@" ${__network} ${__prune} ${__spec} ${EL_EXTRAS} + exec "$@" ${__datadir} ${__network} ${__prune} ${__spec} ${EL_EXTRAS} fi diff --git a/ethd b/ethd index 61fbdde3..cb610956 100755 --- a/ethd +++ b/ethd @@ -1361,10 +1361,10 @@ resync-execution() { case "${value}" in *erigon.yml* ) __el_volume='erigon-el-data'; __el_client="erigon";; - *geth.yml* ) __el_volume='geth-eth1-data'; __el_client="geth";; + *geth.yml* ) __el_volume='geth-el-data'; __el_client="geth";; *reth.yml* ) __el_volume='reth-el-data'; __el_client="reth";; - *besu.yml* ) __el_volume='besu-eth1-data'; __el_client="besu";; - *nethermind.yml* ) __el_volume='nm-eth1-data'; __el_client="nethermind";; + *besu.yml* ) __el_volume='besu-el-data'; __el_client="besu";; + *nethermind.yml* ) __el_volume='nethermind-el-data'; __el_client="nethermind";; * ) echo "You do not appear to be running an execution layer client. Nothing to do."; return 0;; esac @@ -1384,6 +1384,20 @@ resync-execution() { echo "Stopping ${__el_client} container" docompose stop execution && docompose rm -f execution dodocker volume rm "$(dodocker volume ls -q -f "name=${__el_volume}")" + __volume_id="" + if [[ "${__el_volume}" =~ geth-el-data ]]; then + __legacy_volume="$(basename "$(realpath .)")_geth-eth1-data" + __volume_id="$(dodocker volume ls -q -f "name=${__legacy_volume}")" + elif [[ "${__el_volume}" =~ besu-el-data ]]; then + __legacy_volume="$(basename "$(realpath .)")_besu-eth1-data" + __volume_id="$(dodocker volume ls -q -f "name=${__legacy_volume}")" + elif [[ "${__el_volume}" =~ nethermind-el-data ]]; then + __legacy_volume="$(basename "$(realpath .)")_nm-eth1-data" + __volume_id="$(dodocker volume ls -q -f "name=${__legacy_volume}")" + fi + if [ -n "${__volume_id}" ]; then + dodocker volume rm "${__volume_id}" + fi echo echo "${__el_client} stopped and database deleted." echo @@ -1467,8 +1481,15 @@ attach-geth() { echo "You do not appear to be using Geth, aborting." 
exit 1 fi + __legacy_datadir=$(dodocker run --rm -v "$(dodocker volume ls -q -f \ + "name=$(basename "$(realpath .)")[_-]geth-eth1-data")":"/var/lib/goethereum" \ + alpine:3 sh -c 'if [ -d "/var/lib/goethereum/geth/chaindata" ]; then echo true; else echo false; fi') - docompose exec -it execution bash -c "geth attach /var/lib/goethereum/geth.ipc" + if [ "${__legacy_datadir}" = "true" ]; then + docompose exec -it execution bash -c "geth attach /var/lib/goethereum/geth.ipc" + else + docompose exec -it execution bash -c "geth attach /var/lib/geth/geth.ipc" + fi } diff --git a/geth.yml b/geth.yml index 638f8b1c..b0c7ad5d 100644 --- a/geth.yml +++ b/geth.yml @@ -31,9 +31,10 @@ services: - ANCIENT_DIR=${ANCIENT_DIR:-} volumes: - geth-eth1-data:/var/lib/goethereum + - geth-el-data:/var/lib/geth - ${ANCIENT_DIR:-.nada}:/var/lib/ancient - /etc/localtime:/etc/localtime:ro - - jwtsecret:/var/lib/goethereum/ee-secret + - jwtsecret:/var/lib/geth/ee-secret ports: - ${HOST_IP:-}:${EL_P2P_PORT:-30303}:${EL_P2P_PORT:-30303}/tcp - ${HOST_IP:-}:${EL_P2P_PORT:-30303}:${EL_P2P_PORT:-30303}/udp @@ -51,8 +52,6 @@ services: - 0.0.0.0 - --http.vhosts=* - --http.corsdomain=* - - --datadir - - /var/lib/goethereum - --port - ${EL_P2P_PORT:-30303} - --http.port @@ -68,7 +67,7 @@ services: - --pprof.addr - 0.0.0.0 - --authrpc.jwtsecret - - /var/lib/goethereum/ee-secret/jwtsecret + - /var/lib/geth/ee-secret/jwtsecret - --authrpc.addr - 0.0.0.0 - --authrpc.port @@ -84,6 +83,7 @@ services: - metrics.network=${NETWORK} volumes: + geth-el-data: geth-eth1-data: jwtsecret: diff --git a/geth/Dockerfile.binary b/geth/Dockerfile.binary index 9d2263e2..a30f82a3 100644 --- a/geth/Dockerfile.binary +++ b/geth/Dockerfile.binary @@ -27,7 +27,8 @@ RUN adduser \ --ingroup "${USER}" \ "${USER}" -RUN mkdir -p /var/lib/goethereum/ee-secret && chown -R ${USER}:${USER} /var/lib/goethereum && chmod -R 700 /var/lib/goethereum && chmod 777 /var/lib/goethereum/ee-secret +RUN mkdir -p /var/lib/goethereum && chown -R ${USER}:${USER} /var/lib/goethereum && chmod -R 700 /var/lib/goethereum +RUN mkdir -p /var/lib/geth/ee-secret && chown -R ${USER}:${USER} /var/lib/geth && chmod -R 700 /var/lib/geth && chmod 777 /var/lib/geth/ee-secret # Cannot assume buildkit, hence no chmod COPY --chown=${USER}:${USER} ./docker-entrypoint.sh /usr/local/bin/ diff --git a/geth/Dockerfile.source b/geth/Dockerfile.source index a9ead3c3..c9d96960 100644 --- a/geth/Dockerfile.source +++ b/geth/Dockerfile.source @@ -38,7 +38,8 @@ RUN adduser \ --ingroup "${USER}" \ "${USER}" -RUN mkdir -p /var/lib/goethereum/ee-secret && chown -R ${USER}:${USER} /var/lib/goethereum && chmod -R 700 /var/lib/goethereum && chmod 777 /var/lib/goethereum/ee-secret +RUN mkdir -p /var/lib/goethereum && chown -R ${USER}:${USER} /var/lib/goethereum && chmod -R 700 /var/lib/goethereum +RUN mkdir -p /var/lib/geth/ee-secret && chown -R ${USER}:${USER} /var/lib/geth && chmod -R 700 /var/lib/geth && chmod 777 /var/lib/geth/ee-secret # Cannot assume buildkit, hence no chmod COPY --from=builder --chown=${USER}:${USER} /src/go-ethereum/build/bin/geth /usr/local/bin/ diff --git a/geth/docker-entrypoint.sh b/geth/docker-entrypoint.sh index 1238801c..d7410a4b 100755 --- a/geth/docker-entrypoint.sh +++ b/geth/docker-entrypoint.sh @@ -2,28 +2,28 @@ set -euo pipefail if [ "$(id -u)" = '0' ]; then - chown -R geth:geth /var/lib/goethereum + chown -R geth:geth /var/lib/geth exec su-exec geth docker-entrypoint.sh "$@" fi if [ -n "${JWT_SECRET}" ]; then - echo -n "${JWT_SECRET}" > 
/var/lib/goethereum/ee-secret/jwtsecret + echo -n "${JWT_SECRET}" > /var/lib/geth/ee-secret/jwtsecret echo "JWT secret was supplied in .env" fi -if [[ ! -f /var/lib/goethereum/ee-secret/jwtsecret ]]; then +if [[ ! -f /var/lib/geth/ee-secret/jwtsecret ]]; then echo "Generating JWT secret" __secret1=$(head -c 8 /dev/urandom | od -A n -t u8 | tr -d '[:space:]' | sha256sum | head -c 32) __secret2=$(head -c 8 /dev/urandom | od -A n -t u8 | tr -d '[:space:]' | sha256sum | head -c 32) - echo -n "${__secret1}""${__secret2}" > /var/lib/goethereum/ee-secret/jwtsecret + echo -n "${__secret1}""${__secret2}" > /var/lib/geth/ee-secret/jwtsecret fi -if [[ -O "/var/lib/goethereum/ee-secret" ]]; then +if [[ -O "/var/lib/geth/ee-secret" ]]; then # In case someone specifies JWT_SECRET but it's not a distributed setup - chmod 777 /var/lib/goethereum/ee-secret + chmod 777 /var/lib/geth/ee-secret fi -if [[ -O "/var/lib/goethereum/ee-secret/jwtsecret" ]]; then - chmod 666 /var/lib/goethereum/ee-secret/jwtsecret +if [[ -O "/var/lib/geth/ee-secret/jwtsecret" ]]; then + chmod 666 /var/lib/geth/ee-secret/jwtsecret fi __ancient="" @@ -41,26 +41,33 @@ if [[ "${NETWORK}" =~ ^https?:// ]]; then echo "This appears to be the ${repo} repo, branch ${branch} and config directory ${config_dir}." # For want of something more amazing, let's just fail if git fails to pull this set -e - if [ ! -d "/var/lib/goethereum/testnet/${config_dir}" ]; then - mkdir -p /var/lib/goethereum/testnet - cd /var/lib/goethereum/testnet + if [ ! -d "/var/lib/geth/testnet/${config_dir}" ]; then + mkdir -p /var/lib/geth/testnet + cd /var/lib/geth/testnet git init --initial-branch="${branch}" git remote add origin "${repo}" git config core.sparseCheckout true echo "${config_dir}" > .git/info/sparse-checkout git pull origin "${branch}" fi - bootnodes="$(paste -s -d, "/var/lib/goethereum/testnet/${config_dir}/bootnode.txt")" - networkid="$(jq -r '.config.chainId' "/var/lib/goethereum/testnet/${config_dir}/genesis.json")" + bootnodes="$(paste -s -d, "/var/lib/geth/testnet/${config_dir}/bootnode.txt")" + networkid="$(jq -r '.config.chainId' "/var/lib/geth/testnet/${config_dir}/genesis.json")" set +e __network="--bootnodes=${bootnodes} --networkid=${networkid} --http.api=eth,net,web3,debug,admin,txpool" - if [ ! -d "/var/lib/goethereum/geth/chaindata/" ]; then - geth init --state.scheme path --datadir /var/lib/goethereum "/var/lib/goethereum/testnet/${config_dir}/genesis.json" + if [ ! 
-d "/var/lib/geth/geth/chaindata/" ]; then + geth init --datadir /var/lib/geth "/var/lib/geth/testnet/${config_dir}/genesis.json" fi else __network="--${NETWORK}" fi +# New or old datadir +if [ -d /var/lib/goethereum/geth/chaindata ]; then + __datadir="--datadir /var/lib/goethereum" +else + __datadir="--datadir /var/lib/geth" +fi + # Set verbosity shopt -s nocasematch case ${LOG_LEVEL} in @@ -101,4 +108,4 @@ fi # Word splitting is desired for the command line parameters # shellcheck disable=SC2086 -exec "$@" ${__ancient} ${__ipv6} ${__network} ${__prune} ${__verbosity} ${EL_EXTRAS} +exec "$@" ${__datadir} ${__ancient} ${__ipv6} ${__network} ${__prune} ${__verbosity} ${EL_EXTRAS} diff --git a/nethermind.yml b/nethermind.yml index f85d92b7..0a233b2c 100644 --- a/nethermind.yml +++ b/nethermind.yml @@ -29,7 +29,8 @@ services: - AUTOPRUNE_NM=${AUTOPRUNE_NM:-true} - NETWORK=${NETWORK} volumes: - - nm-eth1-data:/var/lib/nethermind + - nethermind-el-data:/var/lib/nethermind + - nm-eth1-data:/var/lib/nethermind-og - /etc/localtime:/etc/localtime:ro - jwtsecret:/var/lib/nethermind/ee-secret ports: @@ -44,8 +45,6 @@ services: entrypoint: - docker-entrypoint.sh - /nethermind/nethermind - - --datadir - - /var/lib/nethermind - --Init.WebSocketsEnabled - "true" - --Network.DiscoveryPort @@ -92,6 +91,7 @@ services: - metrics.network=${NETWORK} volumes: + nethermind-el-data: nm-eth1-data: jwtsecret: diff --git a/nethermind/Dockerfile.binary b/nethermind/Dockerfile.binary index 24bad15d..a2724458 100644 --- a/nethermind/Dockerfile.binary +++ b/nethermind/Dockerfile.binary @@ -36,6 +36,8 @@ RUN adduser \ # This only goes so far. keystore, logs and nethermind_db are volumes and need to be chown'd in the entrypoint RUN chown -R ${USER}:${USER} /nethermind +RUN mkdir -p /var/lib/nethermind-og && chown -R ${USER}:${USER} /var/lib/nethermind-og \ +&& chmod -R 700 /var/lib/nethermind-og RUN mkdir -p /var/lib/nethermind/ee-secret && chown -R ${USER}:${USER} /var/lib/nethermind \ && chmod -R 700 /var/lib/nethermind && chmod 777 /var/lib/nethermind/ee-secret diff --git a/nethermind/Dockerfile.source b/nethermind/Dockerfile.source index d86f92be..c27891c8 100644 --- a/nethermind/Dockerfile.source +++ b/nethermind/Dockerfile.source @@ -49,6 +49,8 @@ WORKDIR /nethermind COPY --from=builder --chown=${USER}:${USER} /nethermind/out . 
RUN chown -R ${USER}:${USER} /nethermind +RUN mkdir -p /var/lib/nethermind-og && chown -R ${USER}:${USER} /var/lib/nethermind-og \ +&& chmod -R 700 /var/lib/nethermind-og RUN mkdir -p /var/lib/nethermind/ee-secret && chown -R ${USER}:${USER} /var/lib/nethermind \ && chmod -R 700 /var/lib/nethermind && chmod 777 /var/lib/nethermind/ee-secret diff --git a/nethermind/docker-entrypoint.sh b/nethermind/docker-entrypoint.sh index a8bf6533..cea716ba 100755 --- a/nethermind/docker-entrypoint.sh +++ b/nethermind/docker-entrypoint.sh @@ -82,6 +82,13 @@ else echo "${__prune}" fi +# New or old datadir +if [ -d /var/lib/nethermind-og/nethermind_db ]; then + __datadir="--datadir /var/lib/nethermind-og" +else + __datadir="--datadir /var/lib/nethermind" +fi + # Word splitting is desired for the command line parameters # shellcheck disable=SC2086 -exec "$@" ${__network} ${__prune} ${EL_EXTRAS} +exec "$@" ${__datadir} ${__network} ${__prune} ${EL_EXTRAS} From 7e9e8fa4b334b81185f5fcdc8ff33117baa6e7f1 Mon Sep 17 00:00:00 2001 From: yorickdowne <71337066+yorickdowne@users.noreply.github.com> Date: Mon, 12 Aug 2024 08:47:48 -0400 Subject: [PATCH 05/48] Besu rely on defaults for sync mode and rpc API (#1904) --- besu/docker-entrypoint.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/besu/docker-entrypoint.sh b/besu/docker-entrypoint.sh index e4a131be..5fa93b13 100755 --- a/besu/docker-entrypoint.sh +++ b/besu/docker-entrypoint.sh @@ -48,14 +48,14 @@ if [[ "${NETWORK}" =~ ^https?:// ]]; then __network="--genesis-file=/var/lib/besu/testnet/${config_dir}/besu.json --bootnodes=${bootnodes} \ --Xfilter-on-enr-fork-id=true --rpc-http-api=ADMIN,CLIQUE,MINER,ETH,NET,DEBUG,TXPOOL,ENGINE,TRACE,WEB3" else - __network="--network ${NETWORK} --rpc-http-api WEB3,ETH,NET" + __network="--network ${NETWORK}" fi if [ "${ARCHIVE_NODE}" = "true" ]; then echo "Besu archive node without pruning" __prune="--data-storage-format=FOREST --sync-mode=FULL" else - __prune="--data-storage-format=BONSAI --sync-mode=SNAP" + __prune="" fi __memtotal=$(awk '/MemTotal/ {printf "%d", int($2/1024/1024)}' /proc/meminfo) From 374df8d954dc863ff7dd342c04fdeeb1a7f035e7 Mon Sep 17 00:00:00 2001 From: yorickdowne <71337066+yorickdowne@users.noreply.github.com> Date: Mon, 12 Aug 2024 09:23:16 -0400 Subject: [PATCH 06/48] Change selection order during config (#1905) --- ethd | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/ethd b/ethd index cb610956..3eb0344f 100755 --- a/ethd +++ b/ethd @@ -2597,8 +2597,8 @@ query_consensus_client() { if [ "${NETWORK}" = "gnosis" ]; then CONSENSUS_CLIENT=$(whiptail --notags --title "Select consensus client" --menu \ "Which consensus client do you want to run?" 
11 65 4 \ - "lighthouse.yml" "Lighthouse (Rust) - consensus and validator client" \ "teku.yml" "Teku (Java) - consensus and validator client" \ + "lighthouse.yml" "Lighthouse (Rust) - consensus and validator client" \ "lodestar.yml" "Lodestar (Javascript) - consensus and validator client" \ "nimbus.yml" "Nimbus (Nim) - consensus and validator client" 3>&1 1>&2 2>&3) elif uname -m | grep -q aarch64 || uname -m | grep -q arm64; then @@ -2607,8 +2607,9 @@ query_consensus_client() { "nimbus.yml" "Nimbus (Nim) - consensus and validator client" \ "grandine-allin1.yml" "Grandine (Rust) - consensus with built-in validator client" \ "lodestar.yml" "Lodestar (Javascript) - consensus and validator client" \ + "teku.yml" "Teku (Java) - consensus and validator client" \ "lighthouse.yml" "Lighthouse (Rust) - consensus and validator client" \ - "teku.yml" "Teku (Java) - consensus and validator client" 3>&1 1>&2 2>&3) + 3>&1 1>&2 2>&3) elif uname -m | grep -q riscv64; then CONSENSUS_CLIENT=$(whiptail --notags --title "Select consensus client" --menu \ "Which consensus client do you want to run?" 11 65 4 \ @@ -2618,10 +2619,11 @@ query_consensus_client() { "Which consensus client do you want to run?" 13 65 6 \ "teku.yml" "Teku (Java) - consensus and validator client" \ "grandine-allin1.yml" "Grandine (Rust) - consensus with built-in validator client" \ - "lighthouse.yml" "Lighthouse (Rust) - consensus and validator client" \ - "nimbus.yml" "Nimbus (Nim) - consensus and validator client" \ "lodestar.yml" "Lodestar (Javascript) - consensus and validator client" \ - "prysm.yml" "Prysm (Go) - consensus and validator client" 3>&1 1>&2 2>&3) + "nimbus.yml" "Nimbus (Nim) - consensus and validator client" \ + "lighthouse.yml" "Lighthouse (Rust) - consensus and validator client" \ + "prysm.yml" "Prysm (Go) - consensus and validator client" \ + 3>&1 1>&2 2>&3) fi echo "Your consensus client file is:" "${CONSENSUS_CLIENT}" @@ -2725,8 +2727,8 @@ query_execution_client() { "reth.yml" "Reth (Rust)" \ "besu.yml" "Besu (Java)" \ "nethermind.yml" "Nethermind (.NET)" \ - "geth.yml" "Geth (Go)" \ "erigon.yml" "Erigon (Go)" \ + "geth.yml" "Geth (Go)" \ "NONE" "Custom - Distributed" 3>&1 1>&2 2>&3) fi From 8da5f91cbc6d1146458277b6e1342c2c0f847f4e Mon Sep 17 00:00:00 2001 From: yorickdowne <71337066+yorickdowne@users.noreply.github.com> Date: Wed, 14 Aug 2024 08:01:12 -0400 Subject: [PATCH 07/48] Erigon default to v2.60.6 (#1906) --- default.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/default.env b/default.env index 58c9ce8f..94082ae2 100644 --- a/default.env +++ b/default.env @@ -257,7 +257,7 @@ BESU_DOCKERFILE=Dockerfile.binary # SRC build target can be a tag, a branch, or a pr as "pr-ID" ERIGON_SRC_BUILD_TARGET='$(git describe --tags $(git rev-list --tags --max-count=1))' ERIGON_SRC_REPO=https://github.com/ledgerwatch/erigon -ERIGON_DOCKER_TAG=v2.60.4 +ERIGON_DOCKER_TAG=v2.60.6 ERIGON_DOCKER_REPO=thorax/erigon ERIGON_DOCKERFILE=Dockerfile.binary From bc0df4794dbc014fed5410c032ed175141dea1bc Mon Sep 17 00:00:00 2001 From: yorickdowne <71337066+yorickdowne@users.noreply.github.com> Date: Wed, 14 Aug 2024 08:28:23 -0400 Subject: [PATCH 08/48] Use Go 1.23 for source builds (#1907) --- default.env | 2 +- erigon/Dockerfile.source | 2 +- flashbots/Dockerfile.source | 2 +- geth/Dockerfile.source | 2 +- prysm/Dockerfile.source | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/default.env b/default.env index 94082ae2..5df787ca 100644 --- a/default.env +++ b/default.env @@ -163,7 +163,7 @@ 
MEV_NODE=http://mev-boost:18550 # You can set specific version targets and choose binary or compiled from source builds below, # via "Dockerfile.binary" or "Dockerfile.source" -# These settings are only migrated when running "./ethd update --keep-targets" +# These settings can be reset to defaults with "./ethd update --refresh-targets" # The default source build targets build from the latest github tag # Eth Docker updates its code to latest by default. diff --git a/erigon/Dockerfile.source b/erigon/Dockerfile.source index 87fbd92b..4abb9b64 100644 --- a/erigon/Dockerfile.source +++ b/erigon/Dockerfile.source @@ -1,5 +1,5 @@ # Build Erigon in a stock Go build container -FROM golang:1.22-alpine AS builder +FROM golang:1.23-alpine AS builder # Unused, this is here to avoid build time complaints ARG DOCKER_TAG diff --git a/flashbots/Dockerfile.source b/flashbots/Dockerfile.source index 5bce47be..653e596c 100644 --- a/flashbots/Dockerfile.source +++ b/flashbots/Dockerfile.source @@ -1,5 +1,5 @@ # Build in a stock Go build container -FROM golang:1.22-alpine AS builder +FROM golang:1.23-alpine AS builder # Unused, this is here to avoid build time complaints ARG DOCKER_TAG diff --git a/geth/Dockerfile.source b/geth/Dockerfile.source index c9d96960..69f55a81 100644 --- a/geth/Dockerfile.source +++ b/geth/Dockerfile.source @@ -1,5 +1,5 @@ # Build Geth in a stock Go build container -FROM golang:1.22-alpine AS builder +FROM golang:1.23-alpine AS builder # Unused, this is here to avoid build time complaints ARG DOCKER_TAG diff --git a/prysm/Dockerfile.source b/prysm/Dockerfile.source index 41c05663..11ce138b 100644 --- a/prysm/Dockerfile.source +++ b/prysm/Dockerfile.source @@ -1,5 +1,5 @@ # Build Prysm in a stock Go build container -FROM golang:1.22-bookworm AS builder +FROM golang:1.23-bookworm AS builder # Here only to avoid build-time errors ARG DOCKER_TAG From fef169fc168bcf1859b325e769a116a8d5089a3c Mon Sep 17 00:00:00 2001 From: yorickdowne <71337066+yorickdowne@users.noreply.github.com> Date: Wed, 14 Aug 2024 11:38:16 -0400 Subject: [PATCH 09/48] Ethereum Metrics Exporter 0.24.0 (#1908) --- central-metrics.yml | 2 +- grafana-cloud.yml | 2 +- grafana-rootless.yml | 2 +- grafana.yml | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/central-metrics.yml b/central-metrics.yml index fee0bf0b..17857f58 100644 --- a/central-metrics.yml +++ b/central-metrics.yml @@ -10,7 +10,7 @@ x-logging: &logging services: ethereum-metrics-exporter: restart: "unless-stopped" - image: samcm/ethereum-metrics-exporter:0.23.0-debian + image: samcm/ethereum-metrics-exporter:0.24.0-debian entrypoint: - /ethereum-metrics-exporter - --consensus-url=${CL_NODE} diff --git a/grafana-cloud.yml b/grafana-cloud.yml index 97f78b45..4925f24b 100644 --- a/grafana-cloud.yml +++ b/grafana-cloud.yml @@ -43,7 +43,7 @@ services: ethereum-metrics-exporter: restart: "unless-stopped" - image: samcm/ethereum-metrics-exporter:0.23.0-debian + image: samcm/ethereum-metrics-exporter:0.24.0-debian entrypoint: - /ethereum-metrics-exporter - --consensus-url=${CL_NODE} diff --git a/grafana-rootless.yml b/grafana-rootless.yml index 4fc2f2c9..18408291 100644 --- a/grafana-rootless.yml +++ b/grafana-rootless.yml @@ -34,7 +34,7 @@ services: ethereum-metrics-exporter: restart: "unless-stopped" - image: samcm/ethereum-metrics-exporter:0.23.0-debian + image: samcm/ethereum-metrics-exporter:0.24.0-debian entrypoint: - /ethereum-metrics-exporter - --consensus-url=${CL_NODE} diff --git a/grafana.yml b/grafana.yml index a7a8bab4..5a1fc9fe 
100644 --- a/grafana.yml +++ b/grafana.yml @@ -35,7 +35,7 @@ services: ethereum-metrics-exporter: restart: "unless-stopped" - image: samcm/ethereum-metrics-exporter:0.23.0-debian + image: samcm/ethereum-metrics-exporter:0.24.0-debian entrypoint: - /ethereum-metrics-exporter - --consensus-url=${CL_NODE} From 186be0410e0c08057f3e765377becac9d24dbcec Mon Sep 17 00:00:00 2001 From: yorickdowne <71337066+yorickdowne@users.noreply.github.com> Date: Wed, 14 Aug 2024 12:04:52 -0400 Subject: [PATCH 10/48] Support IPv6 for traefik (#1909) --- traefik-aws.yml | 4 ++++ traefik-cf.yml | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/traefik-aws.yml b/traefik-aws.yml index 39cf51b5..4154491a 100644 --- a/traefik-aws.yml +++ b/traefik-aws.yml @@ -56,3 +56,7 @@ services: volumes: certs: + +networks: + default: + enable_ipv6: ${IPV6:-false} diff --git a/traefik-cf.yml b/traefik-cf.yml index e4066e56..c3338541 100644 --- a/traefik-cf.yml +++ b/traefik-cf.yml @@ -69,3 +69,7 @@ services: volumes: certs: + +networks: + default: + enable_ipv6: ${IPV6:-false} From c6320f5f572fbe103ef1c3d54e0cbfd8ed87733d Mon Sep 17 00:00:00 2001 From: yorickdowne <71337066+yorickdowne@users.noreply.github.com> Date: Wed, 14 Aug 2024 16:01:59 -0400 Subject: [PATCH 11/48] Clarify the OS nag (#1910) --- ethd | 2 ++ 1 file changed, 2 insertions(+) diff --git a/ethd b/ethd index 3eb0344f..b3f6f8e9 100755 --- a/ethd +++ b/ethd @@ -1163,6 +1163,7 @@ nag_os_version() { echo echo "Ubuntu ${__os_major_version} is older than the recommended 24.04 or 22.04 version." echo + echo "Updating is neither urgent nor required, merely recommended." fi fi @@ -1171,6 +1172,7 @@ nag_os_version() { echo echo "Debian ${__os_major_version} is older than the recommended 12 or 11 version." echo + echo "Updating is neither urgent nor required, merely recommended." fi fi } From 3066819a7d476341d99f299c72738db35a837776 Mon Sep 17 00:00:00 2001 From: yorickdowne <71337066+yorickdowne@users.noreply.github.com> Date: Wed, 14 Aug 2024 16:06:56 -0400 Subject: [PATCH 12/48] Link to OS upgrade guides (#1911) --- ethd | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/ethd b/ethd index b3f6f8e9..0d427d5b 100755 --- a/ethd +++ b/ethd @@ -1164,6 +1164,8 @@ nag_os_version() { echo "Ubuntu ${__os_major_version} is older than the recommended 24.04 or 22.04 version." echo echo "Updating is neither urgent nor required, merely recommended." + echo + echo "Guide to upgrading to 24.04: https://gist.github.com/yorickdowne/94f1e5538007f4c9d3da7b22b0dc28a4" fi fi @@ -1173,6 +1175,8 @@ nag_os_version() { echo "Debian ${__os_major_version} is older than the recommended 12 or 11 version." echo echo "Updating is neither urgent nor required, merely recommended." + echo + echo "Guide to upgrading to 12: https://gist.github.com/yorickdowne/ec9e2c6f4f8a2ee93193469d285cd54c" fi fi } From 498774e0cb46769e0c083a3018c62ef46e72e54c Mon Sep 17 00:00:00 2001 From: Jacob Shufro <116244+jshufro@users.noreply.github.com> Date: Wed, 28 Aug 2024 13:40:22 -0400 Subject: [PATCH 13/48] Fix dkg_output mod update asking for sudo when not needed (#1915) --- ethd | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/ethd b/ethd index 0d427d5b..db8092ab 100755 --- a/ethd +++ b/ethd @@ -203,9 +203,12 @@ prep_conffiles() { ${__as_owner} cp ssv-config/dkg-config-sample.yaml ssv-config/dkg-config.yaml fi # Make sure local user owns the dkg output dir and everything in it - if find .eth/dkg_output \! -user "${OWNER}" -o \! -group "${OWNER_GROUP}" -o \! 
-perm 755 | grep -q .; then + if find .eth/dkg_output \! -user "${OWNER}" -o \! -group "${OWNER_GROUP}" | grep -q .; then ${__auto_sudo} chown -R "${OWNER}:${OWNER_GROUP}" .eth/dkg_output - ${__auto_sudo} chmod -R 755 .eth/dkg_output + fi +# Make sure the dkg output dir and its contents are mod 0755 + if find .eth/dkg_output \! -perm 755 | grep -q .; then + chmod -R 755 .eth/dkg_output fi # Create ext-network.yml if it doesn't exist if [ ! -f "ext-network.yml" ]; then From 3ef43937080dcd10cea7d9c9cf4d4dbbc1a5e6cc Mon Sep 17 00:00:00 2001 From: yorickdowne <71337066+yorickdowne@users.noreply.github.com> Date: Wed, 28 Aug 2024 13:49:19 -0400 Subject: [PATCH 14/48] Updated SSV dashboards (#1914) * Updated SSV dashboards * Detect if sudo is not available --- ethd | 51 ++++++++++++++++++++++++++++++++++++-------- grafana/provision.sh | 31 ++++++++++++++++----------- 2 files changed, 60 insertions(+), 22 deletions(-) diff --git a/ethd b/ethd index db8092ab..f68103f8 100755 --- a/ethd +++ b/ethd @@ -43,15 +43,29 @@ determine_distro() { __distro=$(echo "$__distro" | tr "[:upper:]" "[:lower:]") if [[ "$__distro" = "ubuntu" ]]; then - if ! dpkg-query -W -f='${Status}' lsb-release 2>/dev/null | grep -q "ok installed"; then - ${__auto_sudo} apt-get update && ${__auto_sudo} apt-get -y install lsb-release + if [ "$__cannot_sudo" -eq 0 ]; then + if ! dpkg-query -W -f='${Status}' lsb-release 2>/dev/null | grep -q "ok installed"; then + echo "Installing lsb-release" + ${__auto_sudo} apt-get update && ${__auto_sudo} apt-get -y install lsb-release + fi + fi + if [ -n "$(command -v lsb_release 2>/dev/null)" ]; then + __os_major_version=$(lsb_release -r | cut -d: -f2 | sed s/'^\t'// | cut -d. -f1) + else + __os_major_version=24 # Without sudo and lsb_release let's just skip the check fi - __os_major_version=$(lsb_release -r | cut -d: -f2 | sed s/'^\t'// | cut -d. -f1) elif [[ "$__distro" =~ "debian" ]]; then - if ! dpkg-query -W -f='${Status}' lsb-release 2>/dev/null | grep -q "ok installed"; then - ${__auto_sudo} apt-get update && ${__auto_sudo} apt-get -y install lsb-release + if [ "$__cannot_sudo" -eq 0 ]; then + if ! dpkg-query -W -f='${Status}' lsb-release 2>/dev/null | grep -q "ok installed"; then + echo "Installing lsb-release" + ${__auto_sudo} apt-get update && ${__auto_sudo} apt-get -y install lsb-release + fi + fi + if [ -n "$(command -v lsb_release 2>/dev/null)" ]; then + __os_major_version=$(lsb_release -r | cut -f2) + else + __os_major_version=12 # Without sudo and lsb_release let's just skip the check fi - __os_major_version=$(lsb_release -r | cut -f2) fi } @@ -80,6 +94,10 @@ handle_docker_sudo() { fi __docker_sudo="" if ! docker images >/dev/null 2>&1; then + if [ "$__cannot_sudo" -eq 1 ]; then + echo "Cannot call Docker and cannot use sudo. Please make your user part of the docker group" + exit 1 + fi echo "Will use sudo to access Docker" __docker_sudo="sudo" fi @@ -87,12 +105,18 @@ handle_docker_sudo() { handle_root() { + __cannot_sudo=0 if [ "${EUID}" -eq 0 ]; then __as_owner="sudo -u ${OWNER}" __auto_sudo="" else __as_owner="" - __auto_sudo="sudo" + if groups | grep -q '\bsudo\b' || groups | grep -q '\badmin\b'; then + __auto_sudo="sudo" + else + __auto_sudo="" + __cannot_sudo=1 + fi fi } @@ -204,7 +228,13 @@ prep_conffiles() { fi # Make sure local user owns the dkg output dir and everything in it if find .eth/dkg_output \! -user "${OWNER}" -o \! 
-group "${OWNER_GROUP}" | grep -q .; then - ${__auto_sudo} chown -R "${OWNER}:${OWNER_GROUP}" .eth/dkg_output + if [ "$__cannot_sudo" -eq 0 ]; then + echo "Fixing ownership of .eth/dkg_output" + ${__auto_sudo} chown -R "${OWNER}:${OWNER_GROUP}" .eth/dkg_output + ${__auto_sudo} chmod -R 755 .eth/dkg_output + else + echo "Ownership of .eth/dkg_output should be fixed, but this user can't sudo" + fi fi # Make sure the dkg output dir and its contents are mod 0755 if find .eth/dkg_output \! -perm 755 | grep -q .; then @@ -251,7 +281,10 @@ install-bash-completions() { install() { - + if [ "$__cannot_sudo" -eq 1 ]; then + echo "The install command requires the user to be part of the sudo group, or on macOS the admin group" + exit 1 + fi if [[ "$__distro" = "ubuntu" ]]; then ${__auto_sudo} apt-get update ${__auto_sudo} apt-get install -y ca-certificates curl gnupg whiptail chrony pkg-config diff --git a/grafana/provision.sh b/grafana/provision.sh index 65be2f84..7dc085bc 100755 --- a/grafana/provision.sh +++ b/grafana/provision.sh @@ -27,15 +27,18 @@ case "$CLIENT" in # lighthouse_summary __url='https://raw.githubusercontent.com/sigp/lighthouse-metrics/master/dashboards/Summary.json' __file='/etc/grafana/provisioning/dashboards/lighthouse_summary.json' - wget -t 3 -T 10 -qcO - "${__url}" | jq '.title = "Lighthouse Summary"' | jq 'walk(if . == "${DS_PROMETHEUS}" then "Prometheus" else . end)' >"${__file}" + wget -t 3 -T 10 -qcO - "${__url}" | jq '.title = "Lighthouse Summary"' \ + | jq 'walk(if . == "${DS_PROMETHEUS}" then "Prometheus" else . end)' >"${__file}" # lighthouse_validator_client __url='https://raw.githubusercontent.com/sigp/lighthouse-metrics/master/dashboards/ValidatorClient.json' __file='/etc/grafana/provisioning/dashboards/lighthouse_validator_client.json' - wget -t 3 -T 10 -qcO - "${__url}" | jq '.title = "Lighthouse Validator Client"' | jq 'walk(if . == "${DS_PROMETHEUS}" then "Prometheus" else . end)' >"${__file}" + wget -t 3 -T 10 -qcO - "${__url}" | jq '.title = "Lighthouse Validator Client"' \ + | jq 'walk(if . == "${DS_PROMETHEUS}" then "Prometheus" else . end)' >"${__file}" # lighthouse_validator_monitor __url='https://raw.githubusercontent.com/sigp/lighthouse-metrics/master/dashboards/ValidatorMonitor.json' __file='/etc/grafana/provisioning/dashboards/lighthouse_validator_monitor.json' - wget -t 3 -T 10 -qcO - "${__url}" | jq '.title = "Lighthouse Validator Monitor"' | jq 'walk(if . == "${DS_PROMETHEUS}" then "Prometheus" else . end)' >"${__file}" + wget -t 3 -T 10 -qcO - "${__url}" | jq '.title = "Lighthouse Validator Monitor"' \ + | jq 'walk(if . == "${DS_PROMETHEUS}" then "Prometheus" else . end)' >"${__file}" ;;& *teku* ) # teku_overview @@ -49,13 +52,15 @@ case "$CLIENT" in # nimbus_dashboard __url='https://raw.githubusercontent.com/status-im/nimbus-eth2/master/grafana/beacon_nodes_Grafana_dashboard.json' __file='/etc/grafana/provisioning/dashboards/nimbus_dashboard.json' - wget -t 3 -T 10 -qcO - "${__url}" | jq '.title = "Nimbus Dashboard"' | jq 'walk(if . == "${DS_PROMETHEUS-PROXY}" then "Prometheus" else . end)' >"${__file}" + wget -t 3 -T 10 -qcO - "${__url}" | jq '.title = "Nimbus Dashboard"' \ + | jq 'walk(if . == "${DS_PROMETHEUS-PROXY}" then "Prometheus" else . 
end)' >"${__file}" ;;& *lodestar* ) # lodestar summary __url='https://raw.githubusercontent.com/ChainSafe/lodestar/stable/dashboards/lodestar_summary.json' __file='/etc/grafana/provisioning/dashboards/lodestar_summary.json' - wget -t 3 -T 10 -qcO - "${__url}" | jq '.title = "Lodestar Dashboard"' | jq 'walk(if . == "${DS_PROMETHEUS}" then "Prometheus" else . end)' \ + wget -t 3 -T 10 -qcO - "${__url}" | jq '.title = "Lodestar Dashboard"' \ + | jq 'walk(if . == "${DS_PROMETHEUS}" then "Prometheus" else . end)' \ | jq '.templating.list[3].query |= "consensus" | .templating.list[4].query |= "validator"' \ | jq 'walk(if . == "prometheus_local" then "Prometheus" else . end)' >"${__file}" ;;& @@ -63,7 +68,8 @@ case "$CLIENT" in # geth_dashboard __url='https://gist.githubusercontent.com/karalabe/e7ca79abdec54755ceae09c08bd090cd/raw/3a400ab90f9402f2233280afd086cb9d6aac2111/dashboard.json' __file='/etc/grafana/provisioning/dashboards/geth_dashboard.json' - wget -t 3 -T 10 -qcO - "${__url}" | jq '.title = "Geth Dashboard"' | jq 'walk(if . == "${DS_PROMETHEUS}" then "Prometheus" else . end)' >"${__file}" + wget -t 3 -T 10 -qcO - "${__url}" | jq '.title = "Geth Dashboard"' \ + | jq 'walk(if . == "${DS_PROMETHEUS}" then "Prometheus" else . end)' >"${__file}" ;;& *erigon* ) # erigon_dashboard @@ -77,7 +83,8 @@ case "$CLIENT" in __revision=$(wget -t 3 -T 10 -qO - https://grafana.com/api/dashboards/${__id} | jq .revision) __url="https://grafana.com/api/dashboards/${__id}/revisions/${__revision}/download" __file='/etc/grafana/provisioning/dashboards/besu_dashboard.json' - wget -t 3 -T 10 -qcO - "${__url}" | jq '.title = "Besu Dashboard"' | jq 'walk(if . == "${DS_PROMETHEUS}" then "Prometheus" else . end)' >"${__file}" + wget -t 3 -T 10 -qcO - "${__url}" | jq '.title = "Besu Dashboard"' \ + | jq 'walk(if . == "${DS_PROMETHEUS}" then "Prometheus" else . end)' >"${__file}" ;;& *reth* ) # reth_dashboard @@ -106,16 +113,14 @@ case "$CLIENT" in ;;& *ssv.yml* ) # SSV Operator Dashboard - __url='https://raw.githubusercontent.com/bloxapp/ssv/main/monitoring/grafana/dashboard_ssv_operator_performance.json' + __url='https://raw.githubusercontent.com/ssvlabs/ssv/main/monitoring/grafana/dashboard_ssv_operator_performance.json' __file='/etc/grafana/provisioning/dashboards/ssv_operator_dashboard.json' wget -t 3 -T 10 -qcO - "${__url}" | jq '.title = "SSV Operator Performance Dashboard"' \ - | jq '.templating.list[0].current |= {selected: false, text: "ssv-node", value: "ssv-node"} | .templating.list[0].options = [ { "selected": true, "text": "ssv-node", "value": "ssv-node" } ] | .templating.list[0].query = "ssv-node"' \ - | sed 's/eXfXfqH7z/Prometheus/g' >"${__file}" - __url='https://raw.githubusercontent.com/bloxapp/ssv/main/monitoring/grafana/dashboard_ssv_node.json' + | jq 'walk(if . == "${DS_PROMETHEUS}" then "Prometheus" else . end)' >"${__file}" + __url='https://raw.githubusercontent.com/ssvlabs/ssv/main/monitoring/grafana/dashboard_ssv_node.json' __file='/etc/grafana/provisioning/dashboards/ssv_node_dashboard.json' wget -t 3 -T 10 -qcO - "${__url}" | jq '.title = "SSV Node Dashboard"' \ - | jq '.templating.list[0].current |= {selected: false, text: "ssv-node", value: "ssv-node"} | .templating.list[0].options = [ { "selected": true, "text": "ssv-node", "value": "ssv-node" } ] | .templating.list[0].query = "ssv-node"' \ - | sed 's/eXfXfqH7z/Prometheus/g' >"${__file}" + | jq 'walk(if . == "${DS_PROMETHEUS}" then "Prometheus" else . 
end)' >"${__file}" ;;& !(*grafana-rootless*) ) # cadvisor and node exporter dashboard From 197d42b99864fa61a4e3aff03723510a0352b117 Mon Sep 17 00:00:00 2001 From: yorickdowne <71337066+yorickdowne@users.noreply.github.com> Date: Sat, 31 Aug 2024 10:51:00 -0400 Subject: [PATCH 15/48] MEV source target defaults to stable (#1916) `develop` was used pre-Dencun, and `stable` is a better default --- default.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/default.env b/default.env index 5df787ca..db0fe546 100644 --- a/default.env +++ b/default.env @@ -176,7 +176,7 @@ SSV_DKG_TAG=latest # MEV-Boost # SRC build target can be a tag, a branch, or a pr as "pr-ID" -MEV_SRC_BUILD_TARGET=develop +MEV_SRC_BUILD_TARGET=stable MEV_SRC_REPO=https://github.com/flashbots/mev-boost MEV_DOCKER_TAG=latest MEV_DOCKER_REPO=flashbots/mev-boost From 04a95c0c246fba9a086f2e9a4a5922806da677b9 Mon Sep 17 00:00:00 2001 From: yorickdowne <71337066+yorickdowne@users.noreply.github.com> Date: Sat, 31 Aug 2024 15:36:57 -0400 Subject: [PATCH 16/48] Prysm persist w3s keys loaded via keymanager (#1917) --- README.md | 2 +- prysm/docker-entrypoint-vc.sh | 8 +++++++- vc-utils/keymanager.sh | 12 ++---------- 3 files changed, 10 insertions(+), 12 deletions(-) diff --git a/README.md b/README.md index b28ba0a4..525906f7 100644 --- a/README.md +++ b/README.md @@ -34,4 +34,4 @@ Eth Docker uses a "semver-ish" scheme. large. - Second through fourth digit, [semver](https://semver.org/). -This is Eth Docker v2.12.1.0 +This is Eth Docker v2.12.2.0 diff --git a/prysm/docker-entrypoint-vc.sh b/prysm/docker-entrypoint-vc.sh index 5706c6a5..d3525e98 100755 --- a/prysm/docker-entrypoint-vc.sh +++ b/prysm/docker-entrypoint-vc.sh @@ -47,7 +47,13 @@ fi # Web3signer URL if [ "${WEB3SIGNER}" = "true" ]; then - __w3s_url="--validators-external-signer-url http://web3signer:9000 --validators-external-signer-public-keys http://web3signer:9000/api/v1/eth2/publicKeys" + __w3s_url="--validators-external-signer-url http://web3signer:9000 \ + --validators-external-signer-public-keys http://web3signer:9000/api/v1/eth2/publicKeys \ + --validators-external-signer-key-file=/var/lib/prysm/w3s-keys.txt" + + if [ ! -f /var/lib/prysm/w3s-keys.txt ]; then + touch /var/lib/prysm/w3s-keys.txt + fi else __w3s_url="" fi diff --git a/vc-utils/keymanager.sh b/vc-utils/keymanager.sh index d8a62482..bc903a75 100755 --- a/vc-utils/keymanager.sh +++ b/vc-utils/keymanager.sh @@ -886,11 +886,7 @@ and secrets directories into .eth/validator_keys instead." __api_port=${__vc_api_port} __api_tls=${__vc_api_tls} - if [ -z "${PRYSM:+x}" ]; then - jq --arg pubkey_value "$__pubkey" --arg url_value "http://web3signer:9000" '. | .remote_keys += [{"pubkey": $pubkey_value, "url": $url_value}]' <<< '{}' >/tmp/apidata.txt - else - jq --arg pubkey_value "$__pubkey" --arg url_value "http://web3signer:9000" '. | .remote_keys += [{"pubkey": $pubkey_value}]' <<< '{}' >/tmp/apidata.txt - fi + jq --arg pubkey_value "$__pubkey" --arg url_value "http://web3signer:9000" '. | .remote_keys += [{"pubkey": $pubkey_value, "url": $url_value}]' <<< '{}' >/tmp/apidata.txt get-token __api_data=@/tmp/apidata.txt @@ -991,11 +987,7 @@ validator-register() { __w3s_pubkeys="$(echo "$__result" | jq -r '.data[].validating_pubkey')" while IFS= read -r __pubkey; do - if [ -z "${PRYSM:+x}" ]; then - jq --arg pubkey_value "$__pubkey" --arg url_value "http://web3signer:9000" '. 
| .remote_keys += [{"pubkey": $pubkey_value, "url": $url_value}]' <<< '{}' >/tmp/apidata.txt - else - jq --arg pubkey_value "$__pubkey" --arg url_value "http://web3signer:9000" '. | .remote_keys += [{"pubkey": $pubkey_value}]' <<< '{}' >/tmp/apidata.txt - fi + jq --arg pubkey_value "$__pubkey" --arg url_value "http://web3signer:9000" '. | .remote_keys += [{"pubkey": $pubkey_value, "url": $url_value}]' <<< '{}' >/tmp/apidata.txt __api_data=@/tmp/apidata.txt __api_path=eth/v1/remotekeys From cb17902feb18dbbf29b95de40459784b5350f538 Mon Sep 17 00:00:00 2001 From: yorickdowne <71337066+yorickdowne@users.noreply.github.com> Date: Sun, 1 Sep 2024 07:00:29 -0400 Subject: [PATCH 17/48] Refactor ethd to __ var names (#1921) --- ethd | 1380 +++++++++++++++++++++++++++++----------------------------- 1 file changed, 690 insertions(+), 690 deletions(-) diff --git a/ethd b/ethd index f68103f8..94435286 100755 --- a/ethd +++ b/ethd @@ -9,19 +9,19 @@ __compose_exe="docker compose" __compose_upgraded=0 -dodocker() { +__dodocker() { $__docker_sudo $__docker_exe "$@" } -docompose() { +__docompose() { # I want word splitting here # shellcheck disable=SC2086 $__docker_sudo $__compose_exe "$@" } -determine_distro() { +__determine_distro() { # Determine OS platform __uname=$(uname | tr "[:upper:]" "[:lower:]") # If Linux, try to determine specific distribution @@ -70,12 +70,12 @@ determine_distro() { } -handle_docker_sudo() { +__handle_docker_sudo() { set +e if [[ "$__distro" =~ "debian" || "$__distro" = "ubuntu" ]]; then systemctl status docker >/dev/null - result=$? - if [ ! "${result}" -eq 0 ]; then + __result=$? + if [ ! "${__result}" -eq 0 ]; then echo "The Docker daemon is not running. Please check Docker installation." echo "\"sudo systemctl status docker\" and \"sudo journalctl -fu docker\" will be helpful." echo "Aborting." @@ -104,7 +104,7 @@ handle_docker_sudo() { } -handle_root() { +__handle_root() { __cannot_sudo=0 if [ "${EUID}" -eq 0 ]; then __as_owner="sudo -u ${OWNER}" @@ -121,7 +121,7 @@ handle_root() { } -upgrade_compose() { +__upgrade_compose() { if ! type -P docker-compose >/dev/null 2>&1; then echo "Docker Compose has already been updated to V2" return @@ -177,7 +177,7 @@ upgrade_compose() { } -check_compose_version() { +__check_compose_version() { # Check for Compose V2 (docker compose) vs Compose V1 (docker-compose) if docker compose version >/dev/null 2>&1; then __compose_version=$($__docker_sudo docker compose version | sed -n -E -e "s/.*version [v]?([0-9.-]*).*/\1/ip") @@ -199,10 +199,10 @@ check_compose_version() { echo echo "It is recommended that you replace Compose V1 with Compose V2." while true; do - read -rp "Do you want to update Docker Compose to V2? (yes/no) " yn - case $yn in + read -rp "Do you want to update Docker Compose to V2? (yes/no) " __yn + case $__yn in [Nn]* ) echo "Please be sure to update Docker Compose yourself!"; break;; - * ) upgrade_compose; break;; + * ) __upgrade_compose; break;; esac done fi @@ -210,7 +210,7 @@ check_compose_version() { } -prep_conffiles() { +__prep_conffiles() { # Create custom-prom.yml if it doesn't exist if [ ! -f "./prometheus/custom-prom.yml" ]; then ${__as_owner} touch "./prometheus/custom-prom.yml" @@ -247,7 +247,7 @@ prep_conffiles() { } -check_for_snap() { +__check_for_snap() { if [[ "$__distro" = "ubuntu" && -n "$(command -v snap)" ]] && snap list 2>/dev/null | grep -qw 'docker'; then echo echo "WARNING! Snap Docker package detected. This WILL result in issues." 
@@ -267,7 +267,7 @@ check_for_snap() { } -install-bash-completions() { +__install_bash_completions() { if [[ "$OSTYPE" == "darwin"* ]]; then echo "Skipping installation of tab completions (not supported on macOS)" else @@ -297,8 +297,8 @@ install() { exit 1 fi read -rp "This will attempt to install Docker and make your user part of the docker group. Do you wish to \ -continue? (no/yes) " yn - case $yn in +continue? (no/yes) " __yn + case $__yn in [Yy]* ) ;; * ) echo "Aborting, no changes made"; return 0;; esac @@ -336,8 +336,8 @@ continue? (no/yes) " yn exit 1 fi read -rp "This will attempt to install Docker and make your user part of the docker group. Do you wish to \ -continue? (no/yes) " yn - case $yn in +continue? (no/yes) " __yn + case $__yn in [Yy]* ) ;; * ) echo "Aborting, no changes made"; return 0;; esac @@ -372,11 +372,11 @@ continue? (no/yes) " yn fi # We only get here on Ubuntu or Debian - install-bash-completions + __install_bash_completions __install_base=$(basename "$(dirname "$(realpath "${BASH_SOURCE[0]}")")") if [ "${__install_base}" = "eth-docker" ]; then - read -rp "Do you want to be able to call 'ethd' from anywhere? (yes/no) " yn - case $yn in + read -rp "Do you want to be able to call 'ethd' from anywhere? (yes/no) " __yn + case $__yn in [Nn]* ) return 0;; * ) ;; esac @@ -398,17 +398,17 @@ continue? (no/yes) " yn __get_docker_free_space() { # set __free_space to what's available to Docker if [[ "$OSTYPE" == "darwin"* ]]; then # macOS doesn't expose docker root dir to the OS - __free_space=$(dodocker run --rm -v macos-space-check:/dummy busybox df -P /dummy | awk '/[0-9]%/{print $(NF-2)}') + __free_space=$(__dodocker run --rm -v macos-space-check:/dummy busybox df -P /dummy | awk '/[0-9]%/{print $(NF-2)}') else - __docker_dir=$(dodocker system info --format '{{.DockerRootDir}}') + __docker_dir=$(__dodocker system info --format '{{.DockerRootDir}}') __free_space=$(df -P "${__docker_dir}" | awk '/[0-9]%/{print $(NF-2)}') fi - re='^[0-9]+$' - if ! [[ "${__free_space}" =~ $re ]] ; then + __regex='^[0-9]+$' + if ! [[ "${__free_space}" =~ $__regex ]] ; then echo "Unable to determine free disk space. This is likely a bug." 
if [[ "$OSTYPE" == "darwin"* ]]; then - echo "df reports $(dodocker run --rm -v macos-space-check:/dummy busybox df -P /dummy) and __free_space is ${__free_space}" + echo "df reports $(__dodocker run --rm -v macos-space-check:/dummy busybox df -P /dummy) and __free_space is ${__free_space}" else echo "df reports $(df -P "${__docker_dir}") and __free_space is ${__free_space}" fi @@ -420,7 +420,7 @@ __get_docker_free_space() { # set __free_space to what's available to Docker __display_docker_dir() { if [[ "$OSTYPE" == "darwin"* ]]; then # macOS doesn't expose docker root dir to the OS echo "Here's total and used space on Docker's virtual volume" - dodocker run --rm -v macos-space-check:/dummy busybox df -h /dummy + __dodocker run --rm -v macos-space-check:/dummy busybox df -h /dummy else echo "Here's total and used space on ${__docker_dir}" df -h "${__docker_dir}" @@ -430,12 +430,12 @@ __display_docker_dir() { __display_docker_volumes() { echo - if [ -z "$(dodocker volume ls -q -f "name=^$(basename "$(realpath .)")_[^_]+")" ]; then + if [ -z "$(__dodocker volume ls -q -f "name=^$(basename "$(realpath .)")_[^_]+")" ]; then echo "There are no Docker volumes for this copy of ${__project_name}" echo else echo "Here are the Docker volumes used by this copy of ${__project_name} and their space usage:" - dodocker system df -v | grep -A 50 "VOLUME NAME" | grep "^$(basename "$(dirname "$(realpath "${BASH_SOURCE[0]}")")")" + __dodocker system df -v | grep -A 50 "VOLUME NAME" | grep "^$(basename "$(dirname "$(realpath "${BASH_SOURCE[0]}")")")" echo echo "If your Consensus Layer client takes more than 300 GiB, you can resync it with" echo "\"${__me} resync-consensus\"." @@ -461,15 +461,15 @@ space() { # Warn user if space is low, so they can prune -check_disk_space() { +__check_disk_space() { __get_docker_free_space - var="COMPOSE_FILE" - value=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) - var="AUTOPRUNE_NM" - auto_prune=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) - var="NETWORK" - NETWORK=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) + __var="COMPOSE_FILE" + __value=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) + __var="AUTOPRUNE_NM" + __auto_prune=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) + __var="NETWORK" + NETWORK=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) if [ "${NETWORK}" = "mainnet" ] || [ "${NETWORK}" = "gnosis" ]; then __min_free=314572800 @@ -483,10 +483,10 @@ check_disk_space() { # Literal match intended # shellcheck disable=SC2076 - if [[ "${value}" =~ "nethermind.yml" ]] && [[ "${__free_space}" -lt "${__min_free}" ]]; then + if [[ "${__value}" =~ "nethermind.yml" ]] && [[ "${__free_space}" -lt "${__min_free}" ]]; then echo echo "You are running Nethermind and have less than ${__min_gib} GiB of free disk space." - if [ "${auto_prune}" = true ]; then + if [ "${__auto_prune}" = true ]; then echo "It should currently be auto-pruning, check logs with \"$__me logs -f --tail 500 execution | grep \ Full\". Free space:" else @@ -495,14 +495,14 @@ Full\". Free space:" echo __display_docker_dir __display_docker_volumes - elif [[ "${value}" =~ "geth.yml" ]] && [[ "${__free_space}" -lt 104857600 ]]; then + elif [[ "${__value}" =~ "geth.yml" ]] && [[ "${__free_space}" -lt 104857600 ]]; then echo echo "You are running Geth and have less than 100 GiB of free disk space." echo "You may resync from scratch to use PBSS and slow on-disk DB growth, with \"$__me resync-execution\"." 
echo __display_docker_dir __display_docker_volumes - elif [[ "${value}" =~ "besu.yml" ]] && [[ "${__free_space}" -lt 52428800 ]]; then + elif [[ "${__value}" =~ "besu.yml" ]] && [[ "${__free_space}" -lt 52428800 ]]; then echo echo "You are running Besu and have less than 50 GiB of free disk space." echo @@ -522,158 +522,158 @@ Full\". Free space:" } -source_build() { +__source_build() { # Check whether there's a source-built client and if so, force it with --no-cache - var="COMPOSE_FILE" - value=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) + __var="COMPOSE_FILE" + __value=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) - case "${value}" in + case "${__value}" in *deposit-cli.yml* ) - docompose --profile tools build --pull --no-cache deposit-cli-new + __docompose --profile tools build --pull --no-cache deposit-cli-new ;; esac - case "${value}" in + case "${__value}" in *mev-boost.yml* ) - var="MEV_DOCKERFILE" - build=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) - if [ "${build}" = "Dockerfile.source" ]; then - docompose build --pull --no-cache mev-boost + __var="MEV_DOCKERFILE" + __build=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) + if [ "${__build}" = "Dockerfile.source" ]; then + __docompose build --pull --no-cache mev-boost fi ;; esac - case "${value}" in + case "${__value}" in *reth.yml* ) - var="RETH_DOCKERFILE" - build=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) - if [ "${build}" = "Dockerfile.source" ]; then - docompose build --pull --no-cache execution + __var="RETH_DOCKERFILE" + __build=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) + if [ "${__build}" = "Dockerfile.source" ]; then + __docompose build --pull --no-cache execution fi ;; *geth.yml* ) - var="GETH_DOCKERFILE" - build=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) - if [ "${build}" = "Dockerfile.source" ]; then - docompose build --pull --no-cache execution + __var="GETH_DOCKERFILE" + __build=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) + if [ "${__build}" = "Dockerfile.source" ]; then + __docompose build --pull --no-cache execution fi ;; *besu.yml* ) - var="BESU_DOCKERFILE" - build=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) - if [ "${build}" = "Dockerfile.source" ]; then - docompose build --pull --no-cache execution + __var="BESU_DOCKERFILE" + __build=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) + if [ "${__build}" = "Dockerfile.source" ]; then + __docompose build --pull --no-cache execution fi ;; *nethermind.yml* ) - var="NM_DOCKERFILE" - build=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) - if [ "${build}" = "Dockerfile.source" ]; then - docompose build --pull --no-cache execution + __var="NM_DOCKERFILE" + __build=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) + if [ "${__build}" = "Dockerfile.source" ]; then + __docompose build --pull --no-cache execution fi ;; *erigon.yml* ) - var="ERIGON_DOCKERFILE" - build=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) - if [ "${build}" = "Dockerfile.source" ]; then - docompose build --pull --no-cache execution + __var="ERIGON_DOCKERFILE" + __build=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) + if [ "${__build}" = "Dockerfile.source" ]; then + __docompose build --pull --no-cache execution fi ;; *nimbus-el.yml* ) - var="NIMEL_DOCKERFILE" - build=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) - if [ "${build}" = "Dockerfile.source" ]; then - docompose build 
--pull --no-cache execution + __var="NIMEL_DOCKERFILE" + __build=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) + if [ "${__build}" = "Dockerfile.source" ]; then + __docompose build --pull --no-cache execution fi ;; esac - case "${value}" in + case "${__value}" in *lighthouse.yml* | *lighthouse-cl-only.yml* ) - var="LH_DOCKERFILE" - build=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) - if [ "${build}" = "Dockerfile.source" ]; then - docompose build --pull --no-cache consensus + __var="LH_DOCKERFILE" + __build=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) + if [ "${__build}" = "Dockerfile.source" ]; then + __docompose build --pull --no-cache consensus fi ;; *teku.yml* | *teku-allin1.yml* | *teku-cl-only.yml* ) - var="TEKU_DOCKERFILE" - build=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) - if [ "${build}" = "Dockerfile.source" ]; then - docompose build --pull --no-cache consensus + __var="TEKU_DOCKERFILE" + __build=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) + if [ "${__build}" = "Dockerfile.source" ]; then + __docompose build --pull --no-cache consensus fi ;; *lodestar.yml* | *lodestar-cl-only.yml* ) - var="LS_DOCKERFILE" - build=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) - if [ "${build}" = "Dockerfile.source" ]; then - docompose build --pull --no-cache consensus + __var="LS_DOCKERFILE" + __build=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) + if [ "${__build}" = "Dockerfile.source" ]; then + __docompose build --pull --no-cache consensus fi ;; *nimbus.yml* | *nimbus-allin1.yml* | *nimbus-cl-only.yml* ) - var="NIM_DOCKERFILE" - build=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) - if [ "${build}" = "Dockerfile.source" ]; then - docompose build --pull --no-cache consensus + __var="NIM_DOCKERFILE" + __build=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) + if [ "${__build}" = "Dockerfile.source" ]; then + __docompose build --pull --no-cache consensus fi ;; *prysm.yml* | *prysm-cl-only.yml* ) - var="PRYSM_DOCKERFILE" - build=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) - if [ "${build}" = "Dockerfile.source" ]; then - docompose build --pull --no-cache consensus + __var="PRYSM_DOCKERFILE" + __build=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) + if [ "${__build}" = "Dockerfile.source" ]; then + __docompose build --pull --no-cache consensus fi ;; *grandine.yml* | *grandine-allin1.yml* | *grandine-cl-only.yml* ) - var="GRANDINE_DOCKERFILE" - build=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) - if [ "${build}" = "Dockerfile.source" ]; then - docompose build --pull --no-cache consensus + __var="GRANDINE_DOCKERFILE" + __build=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) + if [ "${__build}" = "Dockerfile.source" ]; then + __docompose build --pull --no-cache consensus fi ;; esac } -migrate_compose_file() { -# When this gets called $var is COMPOSE_FILE and $value is what is set in .env for it +__migrate_compose_file() { +# When this gets called $__var is COMPOSE_FILE and $__value is what is set in .env for it # Some files have been renamed and others removed altogether - FROM_YML=( ) - TO_YML=( ) + __from_yml=( ) + __to_yml=( ) IFS=":" set -o noglob # Globbing is off # shellcheck disable=SC2206 - __ymlarray=($value) # split+glob with glob disabled, and split using : as delimiter + __ymlarray=($__value) # split+glob with glob disabled, and split using : as delimiter set +o noglob # Unset restores default 
unset IFS - value="" - for n in "${!__ymlarray[@]}"; do - __ymlfile="${__ymlarray[n]}" - for index in "${!FROM_YML[@]}"; do - if [ "${FROM_YML[index]}" = "${__ymlfile}" ]; then - __ymlfile=${TO_YML[index]} + __value="" + for __n in "${!__ymlarray[@]}"; do + __ymlfile="${__ymlarray[__n]}" + for __index in "${!__from_yml[@]}"; do + if [ "${__from_yml[__index]}" = "${__ymlfile}" ]; then + __ymlfile=${__to_yml[__index]} break fi done if [ -n "${__ymlfile}" ]; then - if [ -z "${value}" ]; then - value="${__ymlfile}" + if [ -z "${__value}" ]; then + __value="${__ymlfile}" else - value="${value}:${__ymlfile}" + __value="${__value}:${__ymlfile}" fi fi done } -ssv_switch() { +__ssv_switch() { echo "Detected legacy SSV Node. Migrating config to new testnet." echo echo "Stopping SSV Node container" - __node=$(dodocker ps --format '{{.Names}}' | grep 'ssv2-node') - dodocker stop "${__node}" && dodocker rm -f "${__node}" - dodocker volume rm "$(dodocker volume ls -q | grep "$(basename "$(realpath .)")"_ssv2-data)" + __node=$(__dodocker ps --format '{{.Names}}' | grep 'ssv2-node') + __dodocker stop "${__node}" && __dodocker rm -f "${__node}" + __dodocker volume rm "$(__dodocker volume ls -q | grep "$(basename "$(realpath .)")"_ssv2-data)" echo echo "SSV Node stopped and database deleted." echo @@ -682,9 +682,9 @@ ssv_switch() { rm blox-ssv-config.yaml echo "Backup copy blox-ssv-config.yaml.bak created" echo "Making changes to ssv-config/config.yaml" - var="NETWORK" - NETWORK=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) - sed -i'.original' 's/blox-ssv2.yml/ssv.yml/' "${ENV_FILE}".source + __var="NETWORK" + NETWORK=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) + sed -i'.original' 's/blox-ssv2.yml/ssv.yml/' "${__env_file}".source if ! grep -q "LogFilePath:" ssv-config/config.yaml; then # macOS-isms: Newline for sed add sed -i'.original' '/global:/a\ @@ -720,31 +720,31 @@ MetricsAPIPort: 15000 } -delete_reth() { +__delete_reth() { # Check for Reth - var="COMPOSE_FILE" - value=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) + __var="COMPOSE_FILE" + __value=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) # I do mean to match literally # shellcheck disable=SC2076 - if [[ ! "${value}" =~ "reth.yml" ]]; then + if [[ ! "${__value}" =~ "reth.yml" ]]; then return 0 fi # Check Reth version, only continue if not on alpha - var="RETH_DOCKER_TAG" - value=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) + __var="RETH_DOCKER_TAG" + __value=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) # I do mean to match literally # shellcheck disable=SC2076 - if [[ "${value}" =~ "alpha" ]]; then + if [[ "${__value}" =~ "alpha" ]]; then return 0 fi - if [ -z "$(dodocker volume ls -q -f "name=$(basename "$(realpath .)")[_-]reth-el-data")" ]; then # No Reth volume + if [ -z "$(__dodocker volume ls -q -f "name=$(basename "$(realpath .)")[_-]reth-el-data")" ]; then # No Reth volume return 0 fi # Check Reth db version - __db_version="$(dodocker run --rm -v "$(dodocker volume ls -q -f "name=$(basename "$(realpath .)")[_-]reth-el-data")":"/var/lib/reth" \ + __db_version="$(__dodocker run --rm -v "$(__dodocker volume ls -q -f "name=$(basename "$(realpath .)")[_-]reth-el-data")":"/var/lib/reth" \ alpine:3 cat /var/lib/reth/db/database.version)" if [ "${__db_version}" -ne "1" ]; then return 0 @@ -754,8 +754,8 @@ delete_reth() { echo if [ "${__non_interactive:-0}" -eq 0 ]; then while true; do - read -rp "WARNING - About to delete the Reth database. 
Do you wish to continue? (Y/n) " yn - case $yn in + read -rp "WARNING - About to delete the Reth database. Do you wish to continue? (Y/n) " __yn + case $__yn in [Nn]o | [Nn] ) echo "No changes made"; return 0;; * ) break;; esac @@ -763,40 +763,40 @@ delete_reth() { fi echo "Stopping Reth container" - docompose stop execution && docompose rm -f execution - dodocker volume rm "$(dodocker volume ls -q -f "name=$(basename "$(realpath .)")[_-]reth-el-data")" + __docompose stop execution && __docompose rm -f execution + __dodocker volume rm "$(__dodocker volume ls -q -f "name=$(basename "$(realpath .)")[_-]reth-el-data")" echo echo "Reth stopped and database deleted." echo } -delete_erigon() { +__delete_erigon() { # Check for Erigon - var="COMPOSE_FILE" - value=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) + __var="COMPOSE_FILE" + __value=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) # I do mean to match literally # shellcheck disable=SC2076 - if [[ ! "${value}" =~ "erigon.yml" ]]; then + if [[ ! "${__value}" =~ "erigon.yml" ]]; then return 0 fi # Check Erigon version, only continue if v3 - var="ERIGON_DOCKER_TAG" - value=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) + __var="ERIGON_DOCKER_TAG" + __value=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) # I do mean to match literally # shellcheck disable=SC2076 -# if [[ ! ("${value}" =~ "v3" || "${value}" = "latest" || "${value}" = "stable") ]]; then # No stable yet - if [[ ! ("${value}" =~ "v3" || "${value}" = "latest") ]]; then +# if [[ ! ("${__value}" =~ "v3" || "${__value}" = "latest" || "${__value}" = "stable") ]]; then # No stable yet + if [[ ! ("${__value}" =~ "v3" || "${__value}" = "latest") ]]; then return 0 fi - if [ -z "$(dodocker volume ls -q -f "name=$(basename "$(realpath .)")[_-]erigon-el-data")" ]; then # No Erigon volume + if [ -z "$(__dodocker volume ls -q -f "name=$(basename "$(realpath .)")[_-]erigon-el-data")" ]; then # No Erigon volume return 0 fi # Detect Erigon v3 by directory caplin/latest - __erigon_v3=$(dodocker run --rm -v "$(dodocker volume ls -q -f "name=$(basename "$(realpath .)")[_-]erigon-el-data")":"/var/lib/erigon" \ + __erigon_v3=$(__dodocker run --rm -v "$(__dodocker volume ls -q -f "name=$(basename "$(realpath .)")[_-]erigon-el-data")":"/var/lib/erigon" \ alpine:3 sh -c 'if [ -d "/var/lib/erigon/caplin/latest" ]; then echo true; else echo false; fi') if [ "$__erigon_v3" = "true" ]; then return 0 @@ -805,41 +805,41 @@ delete_erigon() { echo "Detected Erigon. For Erigon v3, it will need to be re-synced from scratch." echo while true; do - read -rp "WARNING - About to delete the Erigon database. Do you wish to continue? (Y/n) " yn - case $yn in + read -rp "WARNING - About to delete the Erigon database. Do you wish to continue? (Y/n) " __yn + case $__yn in [Nn]o | [Nn] ) echo "Aborting, no changes made"; exit 130;; * ) break;; esac done echo "Stopping Erigon container" - docompose stop execution && docompose rm -f execution - dodocker volume rm "$(dodocker volume ls -q -f "name=$(basename "$(realpath .)")[_-]erigon-el-data")" + __docompose stop execution && __docompose rm -f execution + __dodocker volume rm "$(__dodocker volume ls -q -f "name=$(basename "$(realpath .)")[_-]erigon-el-data")" echo echo "Erigon stopped and database deleted." 
echo } -upgrade_postgres() { +__upgrade_postgres() { # Check for web3signer - var="COMPOSE_FILE" - value=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) + __var="COMPOSE_FILE" + __value=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) # I do mean to match literally # shellcheck disable=SC2076 - if [[ ! "${value}" =~ "web3signer.yml" ]]; then + if [[ ! "${__value}" =~ "web3signer.yml" ]]; then return 0 fi __source_vol="$(basename "$(pwd)")_web3signer-slashing-data" - if [ -z "$(dodocker volume ls -q -f "name=${__source_vol}")" ]; then + if [ -z "$(__dodocker volume ls -q -f "name=${__source_vol}")" ]; then return 0 fi __target_pg=16 __during_postgres=1 - __source_pg="$(dodocker run --rm -v "${__source_vol}":"/var/lib/postgresql/data" \ + __source_pg="$(__dodocker run --rm -v "${__source_vol}":"/var/lib/postgresql/data" \ alpine:3 cat /var/lib/postgresql/data/PG_VERSION)" if [ "${__source_pg}" -lt "${__target_pg}" ]; then @@ -847,8 +847,8 @@ upgrade_postgres() { echo if [ "${__non_interactive:-0}" -eq 0 ]; then while true; do - read -rp "Would you like to migrate to PostgreSQL ${__target_pg}? (Y/n) " yn - case $yn in + read -rp "Would you like to migrate to PostgreSQL ${__target_pg}? (Y/n) " __yn + case $__yn in [Nn]o | [Nn] ) echo "Keeping PostgreSQL at version ${__source_pg}"; return 0;; * ) break;; esac @@ -858,11 +858,11 @@ upgrade_postgres() { return 0 fi - __source_size="$(dodocker run --rm -v "${__source_vol}":"/var/lib/postgresql/data" \ + __source_size="$(__dodocker run --rm -v "${__source_vol}":"/var/lib/postgresql/data" \ alpine:3 du -s /var/lib/postgresql/data/ | awk '{print $1}')" - re='^[0-9]+$' - if ! [[ "${__source_size}" =~ $re ]] ; then + __regex='^[0-9]+$' + if ! [[ "${__source_size}" =~ $__regex ]] ; then echo "Unable to determine database size. This is likely a bug." echo "__source_size is ${__source_size}" return 70 @@ -884,9 +884,9 @@ upgrade_postgres() { __backup_vol="$(basename "$(pwd)")_web3signer-slashing-data-pg${__source_pg}-backup" echo "Stopping Web3signer" - docompose stop web3signer && docompose rm -f web3signer + __docompose stop web3signer && __docompose rm -f web3signer echo "Stopping PostgreSQL" - docompose stop postgres && docompose rm -f postgres + __docompose stop postgres && __docompose rm -f postgres echo echo "Migrating database from PostgreSQL ${__source_pg} to PostgreSQL ${__target_pg}" @@ -894,48 +894,48 @@ upgrade_postgres() { echo "In failure case, do not start Web3signer again, instead seek help on Ethstaker Discord." echo - dodocker pull "pats22/postgres-upgrade:${__source_pg}-to-${__target_pg}" - dodocker volume create "${__migrated_vol}" - dodocker run --rm -v "${__source_vol}":"/var/lib/postgresql/${__source_pg}/data" \ + __dodocker pull "pats22/postgres-upgrade:${__source_pg}-to-${__target_pg}" + __dodocker volume create "${__migrated_vol}" + __dodocker run --rm -v "${__source_vol}":"/var/lib/postgresql/${__source_pg}/data" \ -v "${__migrated_vol}":"/var/lib/postgresql/${__target_pg}/data" \ "pats22/postgres-upgrade:${__source_pg}-to-${__target_pg}" # Adjust ownership. 
We use 70; postgres-upgrade creates it with 999 - dodocker run --rm -v "${__migrated_vol}":"/var/lib/postgres" \ + __dodocker run --rm -v "${__migrated_vol}":"/var/lib/postgres" \ alpine:3 chown -R 70:70 /var/lib/postgres # Conversion can leave us with a pg_hba.conf that does not allow connections - dodocker run --rm -v "${__migrated_vol}":"/var/lib/postgres" \ + __dodocker run --rm -v "${__migrated_vol}":"/var/lib/postgres" \ alpine:3 sh -c 'grep -qxE "host\s+all\s+all\s+all\s+scram-sha-256" /var/lib/postgres/pg_hba.conf \ || echo "host all all all scram-sha-256" \ >> /var/lib/postgres/pg_hba.conf' echo echo "Migration complete, copying data in web3signer-slashing-data volume to backup" - dodocker volume create "${__backup_vol}" - dodocker run --rm -v "${__source_vol}":"/var/lib/postgresql/data" \ + __dodocker volume create "${__backup_vol}" + __dodocker run --rm -v "${__source_vol}":"/var/lib/postgresql/data" \ -v "${__backup_vol}":"/var/lib/postgresql/${__source_pg}/data" \ alpine:3 cp -a /var/lib/postgresql/data/. "/var/lib/postgresql/${__source_pg}/data/" __during_migrate=1 echo "Moving migrated data to web3signer-slashing-data volume" - dodocker run --rm -v "${__source_vol}":"/var/lib/postgresql/data" \ + __dodocker run --rm -v "${__source_vol}":"/var/lib/postgresql/data" \ alpine:3 rm -rf /var/lib/postgresql/data/* - dodocker run --rm -v "${__source_vol}":"/var/lib/postgresql/data" \ + __dodocker run --rm -v "${__source_vol}":"/var/lib/postgresql/data" \ -v "${__migrated_vol}":"/var/lib/postgresql/${__target_pg}/data" \ alpine:3 cp -a "/var/lib/postgresql/${__target_pg}/data/." /var/lib/postgresql/data/ __migrated=1 - dodocker volume remove "${__migrated_vol}" + __dodocker volume remove "${__migrated_vol}" echo echo "Adjusting PostgreSQL Docker tag" - if [ ! -f "${ENV_FILE}.source" ]; then # update() didn't migrate env, let's make sure .env.source exists - cp "${ENV_FILE}" "${ENV_FILE}.source" + if [ ! -f "${__env_file}.source" ]; then # update() didn't migrate env, let's make sure .env.source exists + cp "${__env_file}" "${__env_file}.source" fi - var="PG_DOCKER_TAG" + __var="PG_DOCKER_TAG" # This gets used, but shellcheck doesn't recognize that # shellcheck disable=SC2034 PG_DOCKER_TAG=${__target_pg}-bookworm # To bookworm to avoid collation errors - also a faster PostgreSQL - set_value_in_env + __set_value_in_env echo "Web3signer has been stopped. You'll need to run \"$__me up\" to start it again." echo echo "A copy of your old slashing protection database is in the Docker volume ${__backup_vol}." @@ -945,12 +945,12 @@ upgrade_postgres() { __lookup_cf_zone() { # Migrates traefik-cf setup to use Zone ID - __compose_ymls=$(sed -n -e "s/^COMPOSE_FILE=\(.*\)/\1/p" "${ENV_FILE}.source" || true) - __dns_token=$(sed -n -e "s/^CF_DNS_API_TOKEN=\(.*\)/\1/p" "${ENV_FILE}.source" || true) - __zone_token=$(sed -n -e "s/^CF_ZONE_API_TOKEN=\(.*\)/\1/p" "${ENV_FILE}.source" || true) - __domain=$(sed -n -e "s/^DOMAIN=\(.*\)/\1/p" "${ENV_FILE}.source" || true) + __compose_ymls=$(sed -n -e "s/^COMPOSE_FILE=\(.*\)/\1/p" "${__env_file}.source" || true) + __dns_token=$(sed -n -e "s/^CF_DNS_API_TOKEN=\(.*\)/\1/p" "${__env_file}.source" || true) + __zone_token=$(sed -n -e "s/^CF_ZONE_API_TOKEN=\(.*\)/\1/p" "${__env_file}.source" || true) + __domain=$(sed -n -e "s/^DOMAIN=\(.*\)/\1/p" "${__env_file}.source" || true) if [[ ! 
$__compose_ymls =~ traefik-cf.yml ]]; then - value="" + __value="" return elif [[ -n $__dns_token ]]; then if [[ -n $__zone_token ]]; then @@ -959,37 +959,37 @@ __lookup_cf_zone() { # Migrates traefik-cf setup to use Zone ID __token=$__dns_token fi set +e - value=$(docompose run --rm curl-jq sh -c \ + __value=$(__docompose run --rm curl-jq sh -c \ "curl -s \"https://api.cloudflare.com/client/v4/zones?name=${__domain}\" -H \"Authorization: Bearer ${__token}\" \ -H \"Content-Type: application/json\" | jq -r '.result[0].id'" | tail -n 1) __code=$? if [[ "$__code" -ne 0 ]]; then - value="" + __value="" return fi - __success=$(docompose run --rm curl-jq sh -c \ + __success=$(__docompose run --rm curl-jq sh -c \ "curl -s \"https://api.cloudflare.com/client/v4/zones?name=${__domain}\" -H \"Authorization: Bearer ${__token}\" \ -H \"Content-Type: application/json\" | jq -r '.success'" | tail -n 1) set -e if [ "${__success}" = "true" ]; then return else - value="" + __value="" return fi else - value="" + __value="" return fi } -envmigrate() { - if [ ! -f "${ENV_FILE}" ]; then +__env_migrate() { + if [ ! -f "${__env_file}" ]; then return 0 fi - ALL_VARS=( COMPOSE_FILE FEE_RECIPIENT EL_NODE GRAFFITI DEFAULT_GRAFFITI NETWORK MEV_BOOST MEV_RELAYS MEV_MIN_BID \ + __all_vars=( COMPOSE_FILE FEE_RECIPIENT EL_NODE GRAFFITI DEFAULT_GRAFFITI NETWORK MEV_BOOST MEV_RELAYS MEV_MIN_BID \ MEV_NODE CL_MAX_PEER_COUNT CL_MIN_PEER_COUNT EL_MAX_PEER_COUNT EL_MIN_PEER_COUNT DOMAIN ACME_EMAIL ANCIENT_DIR \ AUTOPRUNE_NM LOGS_LABEL CF_DNS_API_TOKEN CF_ZONE_API_TOKEN CF_ZONE_ID AWS_PROFILE AWS_HOSTED_ZONE_ID \ GRAFANA_HOST SIREN_HOST DISTRIBUTED BESU_HEAP TEKU_HEAP PROM_HOST HOST_IP SHARE_IP PRYSM_HOST EE_HOST \ @@ -999,7 +999,7 @@ envmigrate() { TRAEFIK_WEB_HTTP_PORT CL_REST_PORT EL_RPC_PORT EL_WS_PORT EE_PORT ERIGON_TORRENT_PORT LOG_LEVEL JWT_SECRET \ EL_EXTRAS CL_EXTRAS VC_EXTRAS ARCHIVE_NODE SSV_P2P_PORT SSV_P2P_PORT_UDP ERIGON_P2P_PORT_2 \ ERIGON_P2P_PORT_3 LODESTAR_HEAP SSV_DKG_PORT SIREN_PASSWORD ) - TARGET_VARS=( ETH_DOCKER_TAG NIM_SRC_BUILD_TARGET NIM_SRC_REPO NIM_DOCKER_TAG NIM_DOCKER_VC_TAG NIM_DOCKER_REPO \ + __target_vars=( ETH_DOCKER_TAG NIM_SRC_BUILD_TARGET NIM_SRC_REPO NIM_DOCKER_TAG NIM_DOCKER_VC_TAG NIM_DOCKER_REPO \ NIM_DOCKER_VC_REPO NIM_DOCKERFILE TEKU_SRC_BUILD_TARGET TEKU_SRC_REPO TEKU_DOCKER_TAG TEKU_DOCKER_REPO \ TEKU_DOCKERFILE LH_SRC_BUILD_TARGET LH_SRC_REPO LH_DOCKER_TAG LH_DOCKER_REPO LH_DOCKERFILE \ PRYSM_SRC_BUILD_TARGET PRYSM_SRC_REPO PRYSM_DOCKER_TAG PRYSM_DOCKER_VC_TAG PRYSM_DOCKER_CTL_TAG \ @@ -1015,23 +1015,23 @@ envmigrate() { GRANDINE_SRC_BUILD_TARGET GRANDINE_SRC_REPO GRANDINE_DOCKER_TAG GRANDINE_DOCKER_REPO GRANDINE_DOCKERFILE \ SIREN_DOCKER_TAG SIREN_DOCKER_REPO SSV_DKG_TAG NODE_EXPORTER_IGNORE_MOUNT_REGEX ) - OLD_VARS=( ) - NEW_VARS=( ) + __old_vars=( ) + __new_vars=( ) # Always make sure we have a SIREN password - var="SIREN_PASSWORD" - SIREN_PASSWORD=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) + __var="SIREN_PASSWORD" + SIREN_PASSWORD=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) if [ -z "${SIREN_PASSWORD}" ]; then SIREN_PASSWORD=$(head -c 8 /dev/urandom | od -A n -t u8 | tr -d '[:space:]' | sha256sum | head -c 32) - set_value_in_env + __set_value_in_env fi - var=ENV_VERSION - __target_ver=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "default.env" || true) - __source_ver=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) + __var=ENV_VERSION + __target_ver=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "default.env" || true) + __source_ver=$(sed -n -e 
"s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) # Aggressive prune to work around Docker grabbing old clients. Here so it doesn't get called during config if [[ "${__source_ver}" -lt "9" ]]; then - dodocker system prune --force -a + __dodocker system prune --force -a fi if [[ "${__keep_targets}" -eq 1 && "${__target_ver}" -le "${__source_ver}" ]]; then # No changes in template, do nothing @@ -1039,111 +1039,111 @@ envmigrate() { fi if [ "${__keep_targets}" -eq 0 ]; then - echo "Refreshing build targets in ${ENV_FILE}" + echo "Refreshing build targets in ${__env_file}" else - echo "Migrating ${ENV_FILE} to version ${__target_ver}" + echo "Migrating ${__env_file} to version ${__target_ver}" fi - ${__as_owner} cp "${ENV_FILE}" "${ENV_FILE}".source + ${__as_owner} cp "${__env_file}" "${__env_file}".source __during_migrate=1 __migrated=1 - ${__as_owner} cp default.env "${ENV_FILE}" + ${__as_owner} cp default.env "${__env_file}" - var="COMPOSE_FILE" - value=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}.source" || true) + __var="COMPOSE_FILE" + __value=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}.source" || true) # Literal match intended # shellcheck disable=SC2076 - if [[ "${value}" =~ "blox-ssv2.yml" ]]; then - ssv_switch + if [[ "${__value}" =~ "blox-ssv2.yml" ]]; then + __ssv_switch fi # Migrate over user settings - for var in "${ALL_VARS[@]}"; do - value=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}.source" || true) - if [ -n "${value}" ] || [ "${var}" = "GRAFFITI" ] || [ "${var}" = "MEV_RELAYS" ] \ - || [ "${var}" = "ETH_DOCKER_TAG" ] || [ "${var}" = "RAPID_SYNC_URL" ]; then - if [ "${var}" = "COMPOSE_FILE" ]; then - migrate_compose_file + for __var in "${__all_vars[@]}"; do + __value=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}.source" || true) + if [ -n "${__value}" ] || [ "${__var}" = "GRAFFITI" ] || [ "${__var}" = "MEV_RELAYS" ] \ + || [ "${__var}" = "ETH_DOCKER_TAG" ] || [ "${__var}" = "RAPID_SYNC_URL" ]; then + if [ "${__var}" = "COMPOSE_FILE" ]; then + __migrate_compose_file fi - if [ "${var}" = "CL_QUIC_PORT" ]; then - __cl_port=$(sed -n -e "s/^CL_P2P_PORT=\(.*\)/\1/p" "${ENV_FILE}.source" || true) - if [ -n "${__cl_port}" ] && [ "${__cl_port}" = "${value}" ]; then - value=$((value + 1)) - echo "Adjusted CL_QUIC_PORT to ${value} so it does not conflict with CL_P2P_PORT" + if [ "${__var}" = "CL_QUIC_PORT" ]; then + __cl_port=$(sed -n -e "s/^CL_P2P_PORT=\(.*\)/\1/p" "${__env_file}.source" || true) + if [ -n "${__cl_port}" ] && [ "${__cl_port}" = "${__value}" ]; then + __value=$((__value + 1)) + echo "Adjusted CL_QUIC_PORT to ${__value} so it does not conflict with CL_P2P_PORT" fi - __prysm_port=$(sed -n -e "s/^PRYSM_UDP_PORT=\(.*\)/\1/p" "${ENV_FILE}.source" || true) - if [ -n "${__prysm_port}" ] && [ "${__prysm_port}" = "${value}" ]; then # just in case this is one ahead - value=$((value + 1)) - echo "Adjusted CL_QUIC_PORT to ${value} so it does not conflict with PRYSM_UDP_PORT" + __prysm_port=$(sed -n -e "s/^PRYSM_UDP_PORT=\(.*\)/\1/p" "${__env_file}.source" || true) + if [ -n "${__prysm_port}" ] && [ "${__prysm_port}" = "${__value}" ]; then # just in case this is one ahead + __value=$((__value + 1)) + echo "Adjusted CL_QUIC_PORT to ${__value} so it does not conflict with PRYSM_UDP_PORT" fi fi # Literal match intended # shellcheck disable=SC2076 - if [[ "${var}" = "RAPID_SYNC_URL" && "${value}" =~ "eth2-beacon-mainnet.infura.io" ]]; then - value="https://beaconstate.info" + if [[ "${__var}" = "RAPID_SYNC_URL" && "${__value}" =~ 
"eth2-beacon-mainnet.infura.io" ]]; then + __value="https://beaconstate.info" fi - if [[ "${var}" = "HOST_IP" && "${value: -1}" = ":" ]]; then - value="${value%:}" # Undo Compose V1 accommodation + if [[ "${__var}" = "HOST_IP" && "${__value: -1}" = ":" ]]; then + __value="${__value%:}" # Undo Compose V1 accommodation fi - if [[ "${var}" = "SHARE_IP" && "${value: -1}" = ":" ]]; then - value="${value%:}" # Undo Compose V1 accommodation + if [[ "${__var}" = "SHARE_IP" && "${__value: -1}" = ":" ]]; then + __value="${__value%:}" # Undo Compose V1 accommodation fi # Handle & in GRAFFITI gracefully - sed -i'.original' -e "s~^\(${var}\s*=\s*\).*\$~\1${value//&/\\&}~" "${ENV_FILE}" - else # empty value - if [ "${var}" = "CF_ZONE_ID" ]; then + sed -i'.original' -e "s~^\(${__var}\s*=\s*\).*\$~\1${__value//&/\\&}~" "${__env_file}" + else # empty __value + if [ "${__var}" = "CF_ZONE_ID" ]; then __lookup_cf_zone - if [ -n "${value}" ]; then - sed -i'.original' -e "s~^\(${var}\s*=\s*\).*\$~\1${value//&/\\&}~" "${ENV_FILE}" + if [ -n "${__value}" ]; then + sed -i'.original' -e "s~^\(${__var}\s*=\s*\).*\$~\1${__value//&/\\&}~" "${__env_file}" fi fi fi done if [ "${__keep_targets}" -eq 1 ]; then # Migrate over build targets - for var in "${TARGET_VARS[@]}"; do - value=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}.source" || true) - if [ -n "${value}" ]; then - if [[ "${var}" = "DDNS_TAG" && "${__source_ver}" -lt "8" ]]; then # Switch to ddns-updater - value="v2" + for __var in "${__target_vars[@]}"; do + __value=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}.source" || true) + if [ -n "${__value}" ]; then + if [[ "${__var}" = "DDNS_TAG" && "${__source_ver}" -lt "8" ]]; then # Switch to ddns-updater + __value="v2" fi - if [[ "${var}" = "LH_DOCKER_TAG" && "${value}" = "latest-modern" ]]; then # LH 5.2 ditched latest-modern - value="latest" + if [[ "${__var}" = "LH_DOCKER_TAG" && "${__value}" = "latest-modern" ]]; then # LH 5.2 ditched latest-modern + __value="latest" fi - if [[ "${var}" = "ERIGON_DOCKER_TAG" && "${value}" = "stable" ]]; then # Erigon ditched stable - value="v2.60.1" + if [[ "${__var}" = "ERIGON_DOCKER_TAG" && "${__value}" = "stable" ]]; then # Erigon ditched stable + __value="v2.60.6" fi - sed -i'.original' -e "s~^\(${var}\s*=\s*\).*$~\1${value}~" "${ENV_FILE}" + sed -i'.original' -e "s~^\(${__var}\s*=\s*\).*$~\1${__value}~" "${__env_file}" fi done fi # Move value from old variable name(s) to new one(s) - for index in "${!OLD_VARS[@]}"; do - var=${OLD_VARS[index]} - value=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}.source" || true) - if [ -n "${value}" ]; then - sed -i'.original' -e "s~^\(${NEW_VARS[index]}\s*=\s*\).*$~\1${value}~" "${ENV_FILE}" + for __index in "${!__old_vars[@]}"; do + __var=${__old_vars[__index]} + __value=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}.source" || true) + if [ -n "${__value}" ]; then + sed -i'.original' -e "s~^\(${__new_vars[__index]}\s*=\s*\).*$~\1${__value}~" "${__env_file}" fi done # Check whether we run a CL or VC, if so nag about FEE_RECIPIENT - var="COMPOSE_FILE" - value=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) + __var="COMPOSE_FILE" + __value=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) # It's CL&VC, CL-only, or VC-only # I do mean to match literally # shellcheck disable=SC2076 - if [[ "${value}" =~ "prysm.yml" || "${value}" =~ "lighthouse.yml" || "${value}" =~ "teku.yml" \ - || "${value}" =~ "nimbus.yml" || "${value}" =~ "lodestar.yml" || "${value}" =~ "-cl-only.yml" \ - || "${value}" =~ 
"-allin1.yml" || "${value}" =~ "-vc-only.yml" ]]; then + if [[ "${__value}" =~ "prysm.yml" || "${__value}" =~ "lighthouse.yml" || "${__value}" =~ "teku.yml" \ + || "${__value}" =~ "nimbus.yml" || "${__value}" =~ "lodestar.yml" || "${__value}" =~ "-cl-only.yml" \ + || "${__value}" =~ "-allin1.yml" || "${__value}" =~ "-vc-only.yml" ]]; then # Check for rewards - var="FEE_RECIPIENT" - value=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) - if [[ -z "${value}" || ${value} != 0x* || ${#value} -ne 42 ]]; then + __var="FEE_RECIPIENT" + __value=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) + if [[ -z "${__value}" || ${__value} != 0x* || ${#__value} -ne 42 ]]; then if [ "${__non_interactive:-0}" -eq 0 ]; then whiptail --msgbox "A fee recipient ETH wallet address is required in order to start the client. This is \ for priority fees and, optionally, MEV. Please enter a valid ETH address in the next screen. Refer to \ Eth Docker docs (https://ethdocker.com/About/Rewards) for more information.\n\nCAUTION: \"$__me up\" will fail if no \ valid address is set" 12 75 - query_coinbase - set_value_in_env + __query_coinbase + __set_value_in_env else echo "A fee recipient ETH wallet address is required in order to start the client. Please set one in \".env\"." echo "CAUTION: \"$__me up\" will fail if no valid address is set." @@ -1152,48 +1152,48 @@ envmigrate() { fi # User signals it's a distributed setup and not to nag - var="DISTRIBUTED" - value=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) - if [[ "${value}" = "true" || "${__non_interactive:-0}" -eq 1 ]]; then - ${__as_owner} rm "${ENV_FILE}".original + __var="DISTRIBUTED" + __value=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) + if [[ "${__value}" = "true" || "${__non_interactive:-0}" -eq 1 ]]; then + ${__as_owner} rm "${__env_file}".original __during_migrate=0 return 0 fi # Check for CL and EL, nag if we have only one without the other - var="COMPOSE_FILE" - value=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) + __var="COMPOSE_FILE" + __value=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) # Case 1 ... CL, do we have an EL? # I do mean to match literally # shellcheck disable=SC2076 - if [[ "${value}" =~ "prysm.yml" || "${value}" =~ "lighthouse.yml" || "${value}" =~ "teku.yml" \ - || "${value}" =~ "nimbus.yml" || "${value}" =~ "lodestar.yml" || "${value}" =~ "-cl-only.yml" \ - || "${value}" =~ "-allin1.yml" ]]; then - if [[ ! "${value}" =~ "geth.yml" && ! "${value}" =~ "besu.yml" && ! "${value}" =~ "erigon.yml" \ - && ! "${value}" =~ "nethermind.yml" && ! "${value}" =~ "nimbus-el.yml" \ - && ! "${value}" =~ "reth.yml" ]]; then + if [[ "${__value}" =~ "prysm.yml" || "${__value}" =~ "lighthouse.yml" || "${__value}" =~ "teku.yml" \ + || "${__value}" =~ "nimbus.yml" || "${__value}" =~ "lodestar.yml" || "${__value}" =~ "-cl-only.yml" \ + || "${__value}" =~ "-allin1.yml" ]]; then + if [[ ! "${__value}" =~ "geth.yml" && ! "${__value}" =~ "besu.yml" && ! "${__value}" =~ "erigon.yml" \ + && ! "${__value}" =~ "nethermind.yml" && ! "${__value}" =~ "nimbus-el.yml" \ + && ! "${__value}" =~ "reth.yml" ]]; then whiptail --msgbox "An Execution Layer client is required alongside your Consensus Layer client since \ Ethereum Merge.\n\nIf you run a distributed setup, you can shut off this nag screen by setting DISTRIBUTED=true in \ -${ENV_FILE}" 12 75 +${__env_file}" 12 75 fi # Case 2 ... EL, do we have a CL? 
- elif [[ "${value}" =~ "geth.yml" || "${value}" =~ "besu.yml" || "${value}" =~ "erigon.yml" \ - || "${value}" =~ "nethermind.yml" || "${value}" =~ "nimbus-el.yml" || "${value}" =~ "reth.yml" ]]; then - if [[ ! "${value}" =~ "prysm.yml" && ! "${value}" =~ "lighthouse.yml" && ! "${value}" =~ "teku.yml" \ - && ! "${value}" =~ "nimbus.yml" && ! "${value}" =~ "lodestar.yml" && ! "${value}" =~ "-cl-only.yml" \ - && ! "${value}" =~ "-allin1.yml" ]]; then + elif [[ "${__value}" =~ "geth.yml" || "${__value}" =~ "besu.yml" || "${__value}" =~ "erigon.yml" \ + || "${__value}" =~ "nethermind.yml" || "${__value}" =~ "nimbus-el.yml" || "${__value}" =~ "reth.yml" ]]; then + if [[ ! "${__value}" =~ "prysm.yml" && ! "${__value}" =~ "lighthouse.yml" && ! "${__value}" =~ "teku.yml" \ + && ! "${__value}" =~ "nimbus.yml" && ! "${__value}" =~ "lodestar.yml" && ! "${__value}" =~ "-cl-only.yml" \ + && ! "${__value}" =~ "-allin1.yml" ]]; then whiptail --msgbox "A Consensus Layer client is required alongside your Execution Layer client since \ Ethereum Merge.\n\nIf you run a distributed setup, you can shut off this nag screen by setting DISTRIBUTED=true in \ -${ENV_FILE}" 12 75 +${__env_file}" 12 75 fi fi - ${__as_owner} rm "${ENV_FILE}".original + ${__as_owner} rm "${__env_file}".original __during_migrate=0 - echo "${ENV_FILE} updated successfully" + echo "${__env_file} updated successfully" } -nag_os_version() { +__nag_os_version() { if [[ "$__distro" = "ubuntu" ]]; then if [ "${__os_major_version}" -lt 22 ]; then echo @@ -1218,11 +1218,11 @@ nag_os_version() { } -pull_and_build() { - dodocker system prune --force - docompose --profile tools pull - source_build - docompose --profile tools build --pull +__pull_and_build() { + __dodocker system prune --force + __docompose --profile tools pull + __source_build + __docompose --profile tools build --pull } @@ -1239,8 +1239,8 @@ update() { __free_space=$(df -P "$(pwd)" | awk '/[0-9]%/{print $(NF-2)}') - re='^[0-9]+$' - if ! [[ "${__free_space}" =~ $re ]] ; then + __regex='^[0-9]+$' + if ! [[ "${__free_space}" =~ $__regex ]] ; then echo "Unable to determine free disk space. This is likely a bug." echo "df reports $(df -P "$(pwd)") and __free_space is ${__free_space}" exit 70 @@ -1260,9 +1260,9 @@ update() { if [ -z "${ETHDSECUNDO-}" ]; then set +e ${__as_owner} git config pull.rebase false - var="ETH_DOCKER_TAG" - value=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) - if [ -z "${value}" ] || [ "${value}" = "latest" ]; then + __var="ETH_DOCKER_TAG" + __value=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) + if [ -z "${__value}" ] || [ "${__value}" = "latest" ]; then export ETHDPINNED="" __branch=$(git rev-parse --abbrev-ref HEAD) if [[ "${__branch}" =~ ^tag-v[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$ ]]; then @@ -1280,9 +1280,9 @@ update() { ${__as_owner} git update-index --assume-unchanged ext-network.yml fi else - export ETHDPINNED="${value}" + export ETHDPINNED="${__value}" ${__as_owner} git fetch --tags - ${__as_owner} git checkout -B "tag-${value}" "tags/${value}" + ${__as_owner} git checkout -B "tag-${__value}" "tags/${__value}" fi export GITEXITCODE=$? 
set -e @@ -1332,33 +1332,33 @@ update() { __non_interactive=1 fi -# envmigrate used to be called w/ arguments and checks for that +# __env_migrate used to be called w/ arguments and checks for that # shellcheck disable=SC2119 - envmigrate - pull_and_build + __env_migrate + __pull_and_build - delete_erigon - delete_reth - upgrade_postgres + __delete_erigon + __delete_reth + __upgrade_postgres echo - if [ "${__migrated}" -eq 1 ] && ! cmp -s "${ENV_FILE}" "${ENV_FILE}".source; then - ${__as_owner} cp "${ENV_FILE}".source "${ENV_FILE}".bak - ${__as_owner} rm "${ENV_FILE}".source - echo "Your ${ENV_FILE} configuration settings have been migrated to a fresh copy. You can \ -find the original contents in ${ENV_FILE}.bak." + if [ "${__migrated}" -eq 1 ] && ! cmp -s "${__env_file}" "${__env_file}".source; then + ${__as_owner} cp "${__env_file}".source "${__env_file}".bak + ${__as_owner} rm "${__env_file}".source + echo "Your ${__env_file} configuration settings have been migrated to a fresh copy. You can \ +find the original contents in ${__env_file}.bak." if [ "${__keep_targets}" -eq 0 ]; then echo "NB: If you made changes to the source or binary build targets, these have been \ reset to defaults." fi echo - echo "List of changes made to ${ENV_FILE} during migration - current on left, original on right:" + echo "List of changes made to ${__env_file} during migration - current on left, original on right:" echo - diff -y --suppress-common-lines "${ENV_FILE}" "${ENV_FILE}".bak || true + diff -y --suppress-common-lines "${__env_file}" "${__env_file}".bak || true else - echo "No changes made to ${ENV_FILE} during update" - if [ -f "${ENV_FILE}".source ]; then - ${__as_owner} rm "${ENV_FILE}".source || true + echo "No changes made to ${__env_file} during update" + if [ -f "${__env_file}".source ]; then + ${__as_owner} rm "${__env_file}".source || true fi fi echo @@ -1374,7 +1374,7 @@ reset to defaults." echo "The current partial update risks startup failure." fi - nag_os_version + __nag_os_version unset ETHDSECUNDO unset GITEXITCODE @@ -1398,10 +1398,10 @@ reset to defaults." resync-execution() { # Check for EL client - var="COMPOSE_FILE" - value=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) + __var="COMPOSE_FILE" + __value=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) - case "${value}" in + case "${__value}" in *erigon.yml* ) __el_volume='erigon-el-data'; __el_client="erigon";; *geth.yml* ) __el_volume='geth-el-data'; __el_client="geth";; *reth.yml* ) __el_volume='reth-el-data'; __el_client="reth";; @@ -1410,35 +1410,35 @@ resync-execution() { * ) echo "You do not appear to be running an execution layer client. Nothing to do."; return 0;; esac - if ! dodocker volume ls -q | grep -q "$(basename "$(realpath .)")[_-]${__el_volume}"; then + if ! __dodocker volume ls -q | grep -q "$(basename "$(realpath .)")[_-]${__el_volume}"; then echo "Did not find Docker volume for ${__el_client}. Nothing to do." return 0 fi echo "This will stop ${__el_client} and delete its database to force a resync." - read -rp "WARNING - resync may take days. Do you wish to continue? (No/yes) " yn - case $yn in + read -rp "WARNING - resync may take days. Do you wish to continue? 
(No/yes) " __yn + case $__yn in [Yy][Ee][Ss] ) ;; * ) echo "Aborting."; exit 130;; esac __el_volume="$(basename "$(realpath .)")_${__el_volume}" echo "Stopping ${__el_client} container" - docompose stop execution && docompose rm -f execution - dodocker volume rm "$(dodocker volume ls -q -f "name=${__el_volume}")" + __docompose stop execution && __docompose rm -f execution + __dodocker volume rm "$(__dodocker volume ls -q -f "name=${__el_volume}")" __volume_id="" if [[ "${__el_volume}" =~ geth-el-data ]]; then __legacy_volume="$(basename "$(realpath .)")_geth-eth1-data" - __volume_id="$(dodocker volume ls -q -f "name=${__legacy_volume}")" + __volume_id="$(__dodocker volume ls -q -f "name=${__legacy_volume}")" elif [[ "${__el_volume}" =~ besu-el-data ]]; then __legacy_volume="$(basename "$(realpath .)")_besu-eth1-data" - __volume_id="$(dodocker volume ls -q -f "name=${__legacy_volume}")" + __volume_id="$(__dodocker volume ls -q -f "name=${__legacy_volume}")" elif [[ "${__el_volume}" =~ nethermind-el-data ]]; then __legacy_volume="$(basename "$(realpath .)")_nm-eth1-data" - __volume_id="$(dodocker volume ls -q -f "name=${__legacy_volume}")" + __volume_id="$(__dodocker volume ls -q -f "name=${__legacy_volume}")" fi if [ -n "${__volume_id}" ]; then - dodocker volume rm "${__volume_id}" + __dodocker volume rm "${__volume_id}" fi echo echo "${__el_client} stopped and database deleted." @@ -1450,10 +1450,10 @@ resync-execution() { resync-consensus() { # Check for CL client - var="COMPOSE_FILE" - value=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) + __var="COMPOSE_FILE" + __value=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) - case "${value}" in + case "${__value}" in *lighthouse.yml* | *lighthouse-cl-only.yml* ) __cl_volume='lhconsensus-data'; __cl_client="lighthouse";; *teku-allin1.yml* ) __cl_volume='wipe-db'; __cl_client="teku";; *teku.yml* | *teku-cl-only.yml* ) __cl_volume='tekuconsensus-data'; __cl_client="teku";; @@ -1466,43 +1466,43 @@ resync-consensus() { * ) echo "You do not appear to be running a consensus layer client. Nothing to do."; return;; esac - if [ ! "${__cl_volume}" = "wipe-db" ] && ! dodocker volume ls -q \ + if [ ! "${__cl_volume}" = "wipe-db" ] && ! __dodocker volume ls -q \ | grep -q "$(basename "$(realpath .)")[_-]${__cl_volume}"; then echo "Did not find Docker volume for ${__cl_client}. Nothing to do." return 0 fi # Can we checkpoint sync? - var="RAPID_SYNC_URL" - value=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) + __var="RAPID_SYNC_URL" + __value=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) echo "This will stop ${__cl_client} and delete its database to force a resync." - if [ -z "${value}" ]; then - read -rp "WARNING - RAPID_SYNC_URL not set, resync may take days. Do you wish to continue? (No/yes) " yn + if [ -z "${__value}" ]; then + read -rp "WARNING - RAPID_SYNC_URL not set, resync may take days. Do you wish to continue? (No/yes) " __yn else - read -rp "RAPID_SYNC_URL set, resync should finish in minutes. Do you wish to continue? (No/yes) " yn + read -rp "RAPID_SYNC_URL set, resync should finish in minutes. Do you wish to continue? 
(No/yes) " __yn fi - case $yn in + case $__yn in [Yy][Ee][Ss] ) ;; * ) echo "Aborting."; exit 130;; esac echo "Stopping ${__cl_client} container" - docompose stop consensus && docompose rm -f consensus + __docompose stop consensus && __docompose rm -f consensus if [ "${__cl_volume}" = "wipe-db" ]; then - docompose run --rm wipe-db + __docompose run --rm wipe-db else __cl_volume="$(basename "$(realpath .)")_${__cl_volume}" - dodocker volume rm "$(dodocker volume ls -q -f "name=${__cl_volume}")" + __dodocker volume rm "$(__dodocker volume ls -q -f "name=${__cl_volume}")" __volume_id="" if [[ "${__cl_volume}" =~ lhconsensus-data ]]; then __legacy_volume="$(basename "$(realpath .)")_lhbeacon-data" - __volume_id="$(dodocker volume ls -q -f "name=${__legacy_volume}")" + __volume_id="$(__dodocker volume ls -q -f "name=${__legacy_volume}")" elif [[ "${__cl_volume}" =~ prysmconsensus-data ]]; then __legacy_volume="$(basename "$(realpath .)")_prysmbeacon-data" - __volume_id="$(dodocker volume ls -q -f "name=${__legacy_volume}")" + __volume_id="$(__dodocker volume ls -q -f "name=${__legacy_volume}")" fi if [ -n "${__volume_id}" ]; then - dodocker volume rm "${__volume_id}" + __dodocker volume rm "${__volume_id}" fi fi echo @@ -1514,23 +1514,23 @@ resync-consensus() { attach-geth() { - if [ ! -f "${ENV_FILE}" ]; then - echo "${ENV_FILE} configuration file not found, aborting." + if [ ! -f "${__env_file}" ]; then + echo "${__env_file} configuration file not found, aborting." exit 1 fi - if ! grep -q '^COMPOSE_FILE=.*geth\.yml' "${ENV_FILE}" 2>/dev/null ; then + if ! grep -q '^COMPOSE_FILE=.*geth\.yml' "${__env_file}" 2>/dev/null ; then echo "You do not appear to be using Geth, aborting." exit 1 fi - __legacy_datadir=$(dodocker run --rm -v "$(dodocker volume ls -q -f \ + __legacy_datadir=$(__dodocker run --rm -v "$(__dodocker volume ls -q -f \ "name=$(basename "$(realpath .)")[_-]geth-eth1-data")":"/var/lib/goethereum" \ alpine:3 sh -c 'if [ -d "/var/lib/goethereum/geth/chaindata" ]; then echo true; else echo false; fi') if [ "${__legacy_datadir}" = "true" ]; then - docompose exec -it execution bash -c "geth attach /var/lib/goethereum/geth.ipc" + __docompose exec -it execution bash -c "geth attach /var/lib/goethereum/geth.ipc" else - docompose exec -it execution bash -c "geth attach /var/lib/geth/geth.ipc" + __docompose exec -it execution bash -c "geth attach /var/lib/geth/geth.ipc" fi } @@ -1557,48 +1557,48 @@ prune-besu() { __non_interactive=1 fi - if [ ! -f "${ENV_FILE}" ]; then - echo "${ENV_FILE} configuration file not found, aborting." + if [ ! -f "${__env_file}" ]; then + echo "${__env_file} configuration file not found, aborting." exit 1 fi - if ! grep -q '^COMPOSE_FILE=.*besu\.yml' "${ENV_FILE}" 2>/dev/null ; then + if ! grep -q '^COMPOSE_FILE=.*besu\.yml' "${__env_file}" 2>/dev/null ; then echo "You do not appear to be using Besu, aborting." exit 1 fi # Check for archive node - var="ARCHIVE_NODE" - value=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) - if [[ "${value}" = "true" ]]; then + __var="ARCHIVE_NODE" + __value=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) + if [[ "${__value}" = "true" ]]; then echo "Besu is an archive node: Aborting." exit 1 fi - rpc_line=$(grep '^EL_RPC_PORT=' "${ENV_FILE}") - regex='^EL_RPC_PORT=([0-9]+)' - if [[ ! "${rpc_line}" =~ ${regex} ]]; then + __rpc_line=$(grep '^EL_RPC_PORT=' "${__env_file}") + __regex='^EL_RPC_PORT=([0-9]+)' + if [[ ! "${__rpc_line}" =~ ${__regex} ]]; then echo "Unable to determine EL_RPC_PORT, aborting." 
exit 1 else - rpc_port="${BASH_REMATCH[1]}" + __rpc_port="${BASH_REMATCH[1]}" fi set +e - sync_status=$(docompose exec -T execution wget -qO- "http://localhost:$rpc_port" \ + __sync_status=$(__docompose exec -T execution wget -qO- "http://localhost:$__rpc_port" \ --header 'Content-Type: application/json' --post-data '{"jsonrpc":"2.0","method":"eth_syncing","params":[],"id":1}') - exitstatus=$? + __exitstatus=$? set -e - if [ $exitstatus -ne 0 ]; then + if [ $__exitstatus -ne 0 ]; then echo "Unable to connect to Besu: Is it running?" - echo "Output: ${sync_status}" + echo "Output: ${__sync_status}" echo "Aborting." exit 1 fi - if [[ ! "${sync_status}" =~ "false" ]]; then + if [[ ! "${__sync_status}" =~ "false" ]]; then echo "Besu is not done syncing yet. Sync status:" - echo "${sync_status}" + echo "${__sync_status}" echo echo "Aborting." exit 1 @@ -1606,8 +1606,8 @@ prune-besu() { if [ $__non_interactive = 0 ]; then while true; do - read -rp "WARNING - this will stop Besu and prune its trie-logs. Do you wish to continue? (No/Yes) " yn - case $yn in + read -rp "WARNING - this will stop Besu and prune its trie-logs. Do you wish to continue? (No/Yes) " __yn + case $__yn in [Yy][Ee][Ss] ) break;; * ) echo "Aborting, no changes made"; exit 130;; esac @@ -1617,8 +1617,8 @@ prune-besu() { echo echo "Starting Besu prune" echo - docompose run --rm set-prune-marker "touch /var/lib/besu/prune-marker" - docompose stop execution && docompose rm -f execution + __docompose run --rm set-prune-marker "touch /var/lib/besu/prune-marker" + __docompose stop execution && __docompose rm -f execution start echo echo "Prune is running, you can observe it with '$__me logs -f execution'" @@ -1650,48 +1650,48 @@ prune-reth() { __non_interactive=1 fi - if [ ! -f "${ENV_FILE}" ]; then - echo "${ENV_FILE} configuration file not found, aborting." + if [ ! -f "${__env_file}" ]; then + echo "${__env_file} configuration file not found, aborting." exit 1 fi - if ! grep -q '^COMPOSE_FILE=.*reth\.yml' "${ENV_FILE}" 2>/dev/null ; then + if ! grep -q '^COMPOSE_FILE=.*reth\.yml' "${__env_file}" 2>/dev/null ; then echo "You do not appear to be using Reth, aborting." exit 1 fi # Check for archive node - var="ARCHIVE_NODE" - value=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) - if [[ "${value}" = "true" ]]; then + __var="ARCHIVE_NODE" + __value=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) + if [[ "${__value}" = "true" ]]; then echo "Reth is an archive node: Aborting." exit 1 fi - rpc_line=$(grep '^EL_RPC_PORT=' "${ENV_FILE}") - regex='^EL_RPC_PORT=([0-9]+)' - if [[ ! "${rpc_line}" =~ ${regex} ]]; then + __rpc_line=$(grep '^EL_RPC_PORT=' "${__env_file}") + __regex='^EL_RPC_PORT=([0-9]+)' + if [[ ! "${__rpc_line}" =~ ${__regex} ]]; then echo "Unable to determine EL_RPC_PORT, aborting." exit 1 else - rpc_port="${BASH_REMATCH[1]}" + __rpc_port="${BASH_REMATCH[1]}" fi set +e - sync_status=$(docompose exec -T execution wget -qO- "http://localhost:$rpc_port" \ + __sync_status=$(__docompose exec -T execution wget -qO- "http://localhost:$__rpc_port" \ --header 'Content-Type: application/json' --post-data '{"jsonrpc":"2.0","method":"eth_syncing","params":[],"id":1}') - exitstatus=$? + __exitstatus=$? set -e - if [ $exitstatus -ne 0 ]; then + if [ $__exitstatus -ne 0 ]; then echo "Unable to connect to Reth: Is it running?" - echo "Output: ${sync_status}" + echo "Output: ${__sync_status}" echo "Aborting." exit 1 fi - if [[ ! "${sync_status}" =~ "false" ]]; then + if [[ ! 
"${__sync_status}" =~ "false" ]]; then echo "Reth is not done syncing yet. Sync status:" - echo "${sync_status}" + echo "${__sync_status}" echo echo "Aborting." exit 1 @@ -1699,8 +1699,8 @@ prune-reth() { if [ $__non_interactive = 0 ]; then while true; do - read -rp "WARNING - this will stop Reth and prune its database. Do you wish to continue? (No/Yes) " yn - case $yn in + read -rp "WARNING - this will stop Reth and prune its database. Do you wish to continue? (No/Yes) " __yn + case $__yn in [Yy][Ee][Ss] ) break;; * ) echo "Aborting, no changes made"; exit 130;; esac @@ -1710,8 +1710,8 @@ prune-reth() { echo echo "Starting Reth prune" echo - docompose run --rm set-prune-marker "touch /var/lib/reth/prune-marker" - docompose stop execution && docompose rm -f execution + __docompose run --rm set-prune-marker "touch /var/lib/reth/prune-marker" + __docompose stop execution && __docompose rm -f execution start echo echo "Prune is running, you can observe it with '$__me logs -f execution'" @@ -1743,28 +1743,28 @@ prune-nethermind() { __non_interactive=1 fi - if [ ! -f "${ENV_FILE}" ]; then - echo "${ENV_FILE} configuration file not found, aborting." + if [ ! -f "${__env_file}" ]; then + echo "${__env_file} configuration file not found, aborting." exit 1 fi - if ! grep -q '^COMPOSE_FILE=.*nethermind\.yml' "${ENV_FILE}" 2>/dev/null ; then + if ! grep -q '^COMPOSE_FILE=.*nethermind\.yml' "${__env_file}" 2>/dev/null ; then echo "You do not appear to be using Nethermind, aborting." exit 1 fi # Check for archive node - var="ARCHIVE_NODE" - value=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) - if [[ "${value}" = "true" ]]; then + __var="ARCHIVE_NODE" + __value=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) + if [[ "${__value}" = "true" ]]; then echo "Nethermind is an archive node: Aborting." exit 1 fi __get_docker_free_space - var="NETWORK" - NETWORK=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) + __var="NETWORK" + NETWORK=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) if [ "${NETWORK}" = "mainnet" ] || [ "${NETWORK}" = "gnosis" ]; then __min_free=262144000 @@ -1783,51 +1783,51 @@ prune-nethermind() { exit 1 fi - rpc_line=$(grep '^EL_RPC_PORT=' "${ENV_FILE}") - regex='^EL_RPC_PORT=([0-9]+)' - if [[ ! "${rpc_line}" =~ ${regex} ]]; then + __rpc_line=$(grep '^EL_RPC_PORT=' "${__env_file}") + __regex='^EL_RPC_PORT=([0-9]+)' + if [[ ! "${__rpc_line}" =~ ${__regex} ]]; then echo "Unable to determine EL_RPC_PORT, aborting." exit 1 else - rpc_port="${BASH_REMATCH[1]}" + __rpc_port="${BASH_REMATCH[1]}" fi set +e - sync_status=$(docompose exec -T execution wget -qO- "http://localhost:$rpc_port" --header \ + __sync_status=$(__docompose exec -T execution wget -qO- "http://localhost:$__rpc_port" --header \ 'Content-Type: application/json' --post-data '{"jsonrpc":"2.0","method":"eth_syncing","params":[],"id":1}') - exitstatus=$? + __exitstatus=$? set -e - if [ $exitstatus -ne 0 ]; then + if [ $__exitstatus -ne 0 ]; then echo "Unable to connect to Nethermind: Is it running?" - echo "Output: ${sync_status}" + echo "Output: ${__sync_status}" echo "Aborting." exit 1 fi - if [[ ! "${sync_status}" =~ "false" ]]; then + if [[ ! "${__sync_status}" =~ "false" ]]; then echo "Nethermind is not done syncing yet. Sync status:" - echo "${sync_status}" + echo "${__sync_status}" echo echo "Aborting." 
exit 1 fi - var="AUTOPRUNE_NM" - auto_prune=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) + __var="AUTOPRUNE_NM" + __auto_prune=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) if [ $__non_interactive = 0 ]; then while true; do - if [ "${auto_prune}" = true ]; then + if [ "${__auto_prune}" = true ]; then if [ "${NETWORK}" = "mainnet" ] || [ "${NETWORK}" = "gnosis" ]; then - threshold="350" + __threshold="350" else - threshold="50" + __threshold="50" fi - echo "Nethermind should auto-prune below ${threshold} GiB free. Check logs with \"$__me logs -f --tail 500 \ + echo "Nethermind should auto-prune below ${__threshold} GiB free. Check logs with \"$__me logs -f --tail 500 \ execution | grep Full\" to see whether it is." fi - read -rp "WARNING - this will prune Nethermind's database in the background. Do you wish to continue? (No/Yes) " yn - case $yn in + read -rp "WARNING - this will prune Nethermind's database in the background. Do you wish to continue? (No/Yes) " __yn + case $__yn in [Yy][Ee][Ss] ) break;; * ) echo "Aborting, no changes made"; exit 130;; esac @@ -1839,21 +1839,21 @@ execution | grep Full\" to see whether it is." echo set +e - prune_result=$(docompose exec -T execution wget -qO- "http://localhost:1337" --header \ + __prune_result=$(__docompose exec -T execution wget -qO- "http://localhost:1337" --header \ 'Content-Type: application/json' --post-data '{"jsonrpc":"2.0","method":"admin_prune","params":[],"id":1}') - exitstatus=$? + __exitstatus=$? set -e - if [ $exitstatus -ne 0 ]; then - echo "Unable to start prune, error code ${exitstatus}. This is likely a bug." - echo "An attempt to run it returned this: ${prune_result}" + if [ $__exitstatus -ne 0 ]; then + echo "Unable to start prune, error code ${__exitstatus}. This is likely a bug." + echo "An attempt to run it returned this: ${__prune_result}" # shellcheck disable=SC2028 echo 'The command attempted was: docker compose run --rm set-prune-marker "curl -s \ --data {\\\"method\\\":\\\"admin_prune\\\",\\\"params\\\":[],\\\"id\\\":1,\\\"jsonrpc\\\":\\\"2.0\\\"} \ -H Content-Type:\ application/json http://execution:8545"' - exit ${exitstatus} + exit ${__exitstatus} fi - echo "Nethermind returns ${prune_result}" - if [[ ! "${prune_result}" =~ [Ss]tarting ]]; then + echo "Nethermind returns ${__prune_result}" + if [[ ! "${__prune_result}" =~ [Ss]tarting ]]; then echo "Unable to start prune. This is likely a bug." exit 70 fi @@ -1889,51 +1889,51 @@ prune-lighthouse() { __non_interactive=1 fi - if [ ! -f "${ENV_FILE}" ]; then - echo "${ENV_FILE} configuration file not found, aborting." + if [ ! -f "${__env_file}" ]; then + echo "${__env_file} configuration file not found, aborting." exit 1 fi - var="COMPOSE_FILE" - value=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) + __var="COMPOSE_FILE" + __value=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) # Literal match intended # shellcheck disable=SC2076 - if [[ ! "${value}" =~ "lighthouse.yml" && ! "${value}" =~ "lighthouse-cl-only.yml" ]]; then + if [[ ! "${__value}" =~ "lighthouse.yml" && ! "${__value}" =~ "lighthouse-cl-only.yml" ]]; then echo "You do not appear to be using Lighthouse, aborting." 
exit 1 fi # Check for archive node - var="ARCHIVE_NODE" - value=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) - if [[ "${value}" = "true" ]]; then + __var="ARCHIVE_NODE" + __value=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) + if [[ "${__value}" = "true" ]]; then echo "Lighthouse is an archive node: Aborting." exit 1 fi - rpc_line=$(grep '^CL_REST_PORT=' "${ENV_FILE}") - regex='^CL_REST_PORT=([0-9]+)' - if [[ ! "${rpc_line}" =~ ${regex} ]]; then + __rpc_line=$(grep '^CL_REST_PORT=' "${__env_file}") + __regex='^CL_REST_PORT=([0-9]+)' + if [[ ! "${__rpc_line}" =~ ${__regex} ]]; then echo "Unable to determine CL_REST_PORT, aborting." exit 1 else - rpc_port="${BASH_REMATCH[1]}" + __rpc_port="${BASH_REMATCH[1]}" fi set +e - sync_status=$(docompose exec -T consensus wget -qO- "http://localhost:$rpc_port/eth/v1/node/syncing") - exitstatus=$? + __sync_status=$(__docompose exec -T consensus wget -qO- "http://localhost:$__rpc_port/eth/v1/node/syncing") + __exitstatus=$? set -e - if [ $exitstatus -ne 0 ]; then + if [ $__exitstatus -ne 0 ]; then echo "Unable to connect to Lighthouse: Is it running?" - echo "Output: ${sync_status}" + echo "Output: ${__sync_status}" echo "Aborting." exit 1 fi - if [[ "${sync_status}" =~ "true" ]]; then # Avoid jq - if el_offline or is_optimistic or is_syncing, don't proceed + if [[ "${__sync_status}" =~ "true" ]]; then # Avoid jq - if el_offline or is_optimistic or is_syncing, don't proceed echo "Lighthouse is not done syncing yet. Sync status:" - echo "${sync_status}" + echo "${__sync_status}" echo echo "Aborting." exit 1 @@ -1941,8 +1941,8 @@ prune-lighthouse() { if [ $__non_interactive = 0 ]; then while true; do - read -rp "WARNING - this will stop Lighthouse and prune its state. Do you wish to continue? (No/Yes) " yn - case $yn in + read -rp "WARNING - this will stop Lighthouse and prune its state. Do you wish to continue? (No/Yes) " __yn + case $__yn in [Yy][Ee][Ss] ) break;; * ) echo "Aborting, no changes made"; exit 130;; esac @@ -1952,8 +1952,8 @@ prune-lighthouse() { echo echo "Starting Lighthouse prune" echo - docompose run --rm set-cl-prune-marker "touch /var/lib/lighthouse/beacon/prune-marker" - docompose stop consensus && docompose rm -f consensus + __docompose run --rm set-cl-prune-marker "touch /var/lib/lighthouse/beacon/prune-marker" + __docompose stop consensus && __docompose rm -f consensus start echo echo "Prune is running, you can observe it with '$__me logs -f consensus'" @@ -1963,19 +1963,19 @@ prune-lighthouse() { } -prep-keyimport() { - if [ ! -f "${ENV_FILE}" ]; then - echo "${ENV_FILE} configuration file not found, aborting." +__prep-keyimport() { + if [ ! -f "${__env_file}" ]; then + echo "${__env_file} configuration file not found, aborting." exit 1 fi - var="COMPOSE_FILE" - value=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) + __var="COMPOSE_FILE" + __value=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) # Literal match intended # shellcheck disable=SC2076 - if [[ ! "${value}" =~ "prysm.yml" ]] && [[ ! "${value}" =~ "lighthouse.yml" ]] && [[ ! "${value}" =~ "teku.yml" ]] \ - && [[ ! "${value}" =~ "nimbus.yml" ]] && [[ ! "${value}" =~ "lodestar.yml" ]] && \ - [[ ! "${value}" =~ "-allin1.yml" ]] && [[ ! "${value}" =~ "vc-only.yml" ]]; then + if [[ ! "${__value}" =~ "prysm.yml" ]] && [[ ! "${__value}" =~ "lighthouse.yml" ]] && [[ ! "${__value}" =~ "teku.yml" ]] \ + && [[ ! "${__value}" =~ "nimbus.yml" ]] && [[ ! "${__value}" =~ "lodestar.yml" ]] && \ + [[ ! 
"${__value}" =~ "-allin1.yml" ]] && [[ ! "${__value}" =~ "vc-only.yml" ]]; then echo "You do not appear to be running a validator client. Aborting." exit 1 fi @@ -2003,18 +2003,18 @@ prep-keyimport() { continue fi IFS=$'\n' - files=$(find "$2" -maxdepth 1 -name '*.json') + __files=$(find "$2" -maxdepth 1 -name '*.json') # Unset restores default unset IFS - if [ -z "$files" ]; then + if [ -z "$__files" ]; then echo "No .json files found in $2, aborting" exit 1 fi IFS=$'\n' - files=$(find ./.eth/validator_keys -maxdepth 1 -name '*.json') + __files=$(find ./.eth/validator_keys -maxdepth 1 -name '*.json') # Unset restores default unset IFS - if [ -n "$files" ]; then + if [ -n "$__files" ]; then ${__as_owner} mkdir -p ./.eth/validator_keys/keybackup ${__as_owner} mv -uf ./.eth/validator_keys/*.json ./.eth/validator_keys/keybackup ${__as_owner} rm -f ./.eth/validator_keys/*.json @@ -2048,70 +2048,70 @@ prep-keyimport() { __i_haz_ethdo() { - if [ ! -f "${ENV_FILE}" ]; then + if [ ! -f "${__env_file}" ]; then echo "${__project_name} has not been configured. Please run $__me config first." exit 0 fi - var="COMPOSE_FILE" - value=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) + __var="COMPOSE_FILE" + __value=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) # Literal match intended # shellcheck disable=SC2076 - if [[ ! "${value}" =~ "ethdo.yml" ]]; then - echo "Please edit the ${ENV_FILE} file and make sure \":ethdo.yml\" is added to the \"COMPOSE_FILE\" line" - echo "For example, \"nano ${ENV_FILE}\" will open the nano text editor with the \"${ENV_FILE}\" file loaded." + if [[ ! "${__value}" =~ "ethdo.yml" ]]; then + echo "Please edit the ${__env_file} file and make sure \":ethdo.yml\" is added to the \"COMPOSE_FILE\" line" + echo "For example, \"nano ${__env_file}\" will open the nano text editor with the \"${__env_file}\" file loaded." echo "Without it, this step cannot be run" echo - read -rp "Do you want me to make this change for you? (n/y)" yn - case $yn in + read -rp "Do you want me to make this change for you? (n/y)" __yn + case $__yn in [Yy] );; * ) exit 130;; esac - if [ -n "${value}" ]; then - COMPOSE_FILE="${value}:ethdo.yml" + if [ -n "${__value}" ]; then + COMPOSE_FILE="${__value}:ethdo.yml" else COMPOSE_FILE="ethdo.yml" - echo "You do not have a CL in ${__project_name}. Please make sure CL_NODE in ${ENV_FILE} points at an available one" + echo "You do not have a CL in ${__project_name}. Please make sure CL_NODE in ${__env_file} points at an available one" fi - set_value_in_env + __set_value_in_env echo "Your COMPOSE_FILE now reads ${COMPOSE_FILE}" fi } __i_haz_web3signer() { - if [ ! -f "${ENV_FILE}" ]; then + if [ ! -f "${__env_file}" ]; then echo "${__project_name} has not been configured. Please run $__me config first." exit 0 fi - var="WEB3SIGNER" - __w3s=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) + __var="WEB3SIGNER" + __w3s=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) if [ ! "${__w3s}" = "true" ]; then return 0 fi - var="COMPOSE_FILE" - value=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) + __var="COMPOSE_FILE" + __value=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) # Literal match intended # shellcheck disable=SC2076 - if [[ ! 
"${value}" =~ "web3signer.yml" ]]; then - echo "WEB3SIGNER=true in ${ENV_FILE}, but web3signer.yml is not in use" - echo "Please edit the ${ENV_FILE} file and make sure \":web3signer.yml\" is added to the \"COMPOSE_FILE\" line" - echo "For example, \"nano ${ENV_FILE}\" will open the nano text editor with the \"${ENV_FILE}\" file loaded." + if [[ ! "${__value}" =~ "web3signer.yml" ]]; then + echo "WEB3SIGNER=true in ${__env_file}, but web3signer.yml is not in use" + echo "Please edit the ${__env_file} file and make sure \":web3signer.yml\" is added to the \"COMPOSE_FILE\" line" + echo "For example, \"nano ${__env_file}\" will open the nano text editor with the \"${__env_file}\" file loaded." echo "Without it, $__me keys cannot be run" echo - read -rp "Do you want me to make this change for you? (n/y)" yn - case $yn in + read -rp "Do you want me to make this change for you? (n/y)" __yn + case $__yn in [Yy] );; * ) exit 130;; esac - if [ -n "${value}" ]; then - COMPOSE_FILE="${value}:web3signer.yml" + if [ -n "${__value}" ]; then + COMPOSE_FILE="${__value}:web3signer.yml" else echo "You do not have a validator client in ${__project_name}. web3signer cannot be used without one." exit 1 fi - set_value_in_env + __set_value_in_env echo "Your COMPOSE_FILE now reads ${COMPOSE_FILE}" fi } @@ -2119,7 +2119,7 @@ __i_haz_web3signer() { __i_haz_keys_service() { # This caused issues and is currently not being called - if ! docompose --profile tools config --services | grep -q validator-keys; then + if ! __docompose --profile tools config --services | grep -q validator-keys; then if [[ "${1:-}" = "silent" ]]; then return 1 fi @@ -2151,7 +2151,7 @@ __keys_usage() { echo echo " get-recipient 0xPUBKEY" echo " List fee recipient set for the validator with public key 0xPUBKEY" - echo " Validators will use FEE_RECIPIENT in ${ENV_FILE} by default, if not set individually" + echo " Validators will use FEE_RECIPIENT in ${__env_file} by default, if not set individually" echo " set-recipient 0xPUBKEY 0xADDRESS" echo " Set individual fee recipient for the validator with public key 0xPUBKEY" echo " delete-recipient 0xPUBKEY" @@ -2214,52 +2214,52 @@ keys() { if [ "${1:-}" = "import" ]; then #__i_haz_keys_service shift - prep-keyimport "$@" - docompose run --rm -e OWNER_UID="${__owner_uid}" validator-keys import "${__args}" + __prep-keyimport "$@" + __docompose run --rm -e OWNER_UID="${__owner_uid}" validator-keys import "${__args}" elif [ "${1:-}" = "create-prysm-wallet" ]; then - var="COMPOSE_FILE" - value=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) + __var="COMPOSE_FILE" + __value=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) # Literal match intended # shellcheck disable=SC2076 - if [[ ! "${value}" =~ "prysm.yml" ]] && [[ ! "${value}" =~ "prysm-vc-only.yml" ]]; then + if [[ ! "${__value}" =~ "prysm.yml" ]] && [[ ! "${__value}" =~ "prysm-vc-only.yml" ]]; then echo "You do not appear to be using a Prysm validator. Aborting." exit 1 fi - if docompose run --rm create-wallet; then - docompose stop validator - docompose rm --force validator + if __docompose run --rm create-wallet; then + __docompose stop validator + __docompose rm --force validator up fi elif [ "${1:-}" = "prepare-address-change" ]; then __i_haz_ethdo echo "Generating offline prep file" set +e - docompose run --rm ethdo validator credentials set --prepare-offline - exitstatus=$? + __docompose run --rm ethdo validator credentials set --prepare-offline + __exitstatus=$? 
set -e - if [ "${exitstatus}" -ne 0 ]; then + if [ "${__exitstatus}" -ne 0 ]; then echo "Running ethdo failed, unfortunately. Is the CL running and synced?" echo "Please try again after fixing root cause. Aborting." exit 1 fi echo echo "Downloading ethdo" - REPO="wealdtech/ethdo"; \ - wget -q -O- https://api.github.com/repos/${REPO}/releases/latest | grep "browser_download_url.*linux-amd64.tar.gz" \ + __repo="wealdtech/ethdo"; \ + wget -q -O- https://api.github.com/repos/${__repo}/releases/latest | grep "browser_download_url.*linux-amd64.tar.gz" \ | head -1 \ | cut -d : -f 2,3 \ | tr -d \" \ | wget -qi- -O- \ | ${__as_owner} tar zxf - -C ./.eth/ethdo/ \ - || echo "-> Could not download the latest version of '${REPO}' for amd64." + || echo "-> Could not download the latest version of '${__repo}' for amd64." ${__as_owner} mkdir -p ./.eth/ethdo/arm64 - wget -q -O- https://api.github.com/repos/${REPO}/releases/latest | grep "browser_download_url.*linux-arm64.tar.gz" \ + wget -q -O- https://api.github.com/repos/${__repo}/releases/latest | grep "browser_download_url.*linux-arm64.tar.gz" \ | head -1 \ | cut -d : -f 2,3 \ | tr -d \" \ | wget -qi- -O- \ | ${__as_owner} tar zxf - -C ./.eth/ethdo/arm64 \ - || echo "-> Could not download the latest version of '${REPO}' for arm64." + || echo "-> Could not download the latest version of '${__repo}' for arm64." ${__as_owner} mv ./.eth/ethdo/arm64/ethdo ./.eth/ethdo/ethdo-arm64 ${__as_owner} rm -rf ./.eth/ethdo/arm64 echo @@ -2267,7 +2267,7 @@ keys() { echo "Please see https://ethdocker.com/Support/ChangingWithdrawalCredentials for details" elif [ "${1:-}" = "send-address-change" ]; then __i_haz_ethdo - docompose run --rm ethdo validator credentials set + __docompose run --rm ethdo validator credentials set elif [ "${1:-}" = "sign-exit" ] && [ "${2:-}" = "from-keystore" ]; then __i_haz_ethdo @@ -2298,8 +2298,8 @@ keys() { if [ "$__num_files" -gt 1 ]; then while true; do - read -rp "Do all validator keys have the same password? (y/n) " yn - case $yn in + read -rp "Do all validator keys have the same password? (y/n) " __yn + case $__yn in [Yy]* ) __justone=1; break;; [Nn]* ) __justone=0; break;; * ) echo "Please answer yes or no.";; @@ -2325,8 +2325,8 @@ keys() { fi fi - created=0 - failed=0 + __created=0 + __failed=0 for __keyfile in .eth/validator_keys/keystore-*.json; do [ -f "${__keyfile}" ] || continue # Should always evaluate true - just in case if [ "${__justone}" -eq 0 ]; then @@ -2355,40 +2355,40 @@ keys() { set +e # __offline may be empty, don't quote it # shellcheck disable=SC2086 - __json=$(docompose run --rm ethdo validator exit --validator "${__keyfile}" --json --timeout 2m \ + __json=$(__docompose run --rm ethdo validator exit --validator "${__keyfile}" --json --timeout 2m \ --passphrase "${__password}" ${__offline}) - exitstatus=$? - if [ "${exitstatus}" -eq 0 ]; then + __exitstatus=$? + if [ "${__exitstatus}" -eq 0 ]; then echo "${__json}" >".eth/exit_messages/${__pubkey::10}--${__pubkey:90}-exit.json" # shellcheck disable=SC2320 - exitstatus=$? - if [ "${exitstatus}" -eq 0 ]; then + __exitstatus=$? 
+ if [ "${__exitstatus}" -eq 0 ]; then echo "Creating an exit message for validator ${__pubkey} into file \ ./.eth/exit_messages/${__pubkey::10}--${__pubkey:90}-exit.json succeeded" - (( created++ )) + (( __created++ )) else echo "Error writing exit json to file ./.eth/exit_messages/${__pubkey::10}--${__pubkey:90}-exit.json" - (( failed++ )) + (( __failed++ )) fi else echo "Creating an exit message for validator ${__pubkey} from file ${__keyfile} failed" - (( failed++ )) + (( __failed++ )) fi set -e done echo - echo "Created pre-signed exit messages for ${created} validators" - if [ "${created}" -gt 0 ]; then + echo "Created pre-signed exit messages for ${__created} validators" + if [ "${__created}" -gt 0 ]; then echo "You can find them in ./.eth/exit_messages" fi - if [ "${failed}" -gt 0 ]; then - echo "Failed for ${failed} validators" + if [ "${__failed}" -gt 0 ]; then + echo "Failed for ${__failed} validators" fi #elif [ "${1:-}" = "send-exit" ] && ! __i_haz_keys_service silent; then elif [ "${1:-}" = "send-exit" ]; then - var="CL_NODE" - CL_NODE=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) - network_name="$(docompose config | awk ' + __var="CL_NODE" + CL_NODE=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) + __network_name="$(__docompose config | awk ' BEGIN { found_networks=0; found_default=0; @@ -2406,19 +2406,19 @@ keys() { exit; } ')" - if ! dodocker image ls --format "{{.Repository}}:{{.Tag}}" | grep -q "vc-utils:local"; then + if ! __dodocker image ls --format "{{.Repository}}:{{.Tag}}" | grep -q "vc-utils:local"; then if ! dpkg-query -W -f='${Status}' docker-ce 2>/dev/null | grep -q "ok installed"; then - dodocker build -t vc-utils:local ./vc-utils + __dodocker build -t vc-utils:local ./vc-utils else if ! dpkg-query -W -f='${Status}' docker-buildx-plugin 2>/dev/null | grep -q "ok installed"; then ${__auto_sudo} apt-get update && ${__auto_sudo} apt-get install -y docker-buildx-plugin fi - dodocker buildx build -t vc-utils:local ./vc-utils + __dodocker buildx build -t vc-utils:local ./vc-utils fi fi - dodocker run --rm \ + __dodocker run --rm \ -u 1000:1000 \ - --network "${network_name}" \ + --network "${__network_name}" \ --name send-exit \ -v "$(pwd)/.eth/exit_messages:/exit_messages" \ -v "/etc/localtime:/etc/localtime:ro" \ @@ -2427,10 +2427,10 @@ keys() { vc-utils:local /var/lib/lighthouse/nonesuch.txt eth2 send-exit else #__i_haz_keys_service - docompose run --rm -e OWNER_UID="${__owner_uid}" validator-keys "$@" + __docompose run --rm -e OWNER_UID="${__owner_uid}" validator-keys "$@" fi - var="COMPOSE_FILE" - value=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) + __var="COMPOSE_FILE" + __value=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) } @@ -2440,7 +2440,7 @@ upgrade() { start() { - docompose up -d --remove-orphans "$@" + __docompose up -d --remove-orphans "$@" } # Passed by user @@ -2456,7 +2456,7 @@ run() { stop() { - docompose down --remove-orphans "$@" + __docompose down --remove-orphans "$@" } @@ -2472,17 +2472,17 @@ restart() { logs() { - docompose logs "$@" + __docompose logs "$@" } cmd() { - docompose "$@" + __docompose "$@" } terminate() { - if [ -z "$(dodocker volume ls -q -f "name=^$(basename "$(realpath .)")_[^_]+")" ]; then + if [ -z "$(__dodocker volume ls -q -f "name=^$(basename "$(realpath .)")_[^_]+")" ]; then echo "There are no data stores - Docker volumes - left to remove for this Ethereum node." 
stop return 0 @@ -2490,8 +2490,8 @@ terminate() { while true; do read -rp "WARNING - this action will destroy all data stores for this Ethereum node. Do you wish to continue? \ -(No/Yes) " yn - case $yn in +(No/Yes) " __yn + case $__yn in [Yy][Ee][Ss] ) break;; * ) echo "Aborting, no changes made"; exit 130;; esac @@ -2500,16 +2500,16 @@ terminate() { stop # In this case I want the word splitting, so rm can remove all volumes # shellcheck disable=SC2046 - dodocker volume rm $(dodocker volume ls -q -f "name=^$(basename "$(realpath .)")_[^_]+") + __dodocker volume rm $(__dodocker volume ls -q -f "name=^$(basename "$(realpath .)")_[^_]+") echo echo "All containers stopped and all volumes deleted" echo } -query_network() { - var="NETWORK" - __prev_network=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) +__query_network() { + __var="NETWORK" + __prev_network=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) NETWORK=$(whiptail --notags --title "Select Network" --menu \ "Which network do you want to run on?" 13 65 6 \ "holesky" "Holešovice Testnet" \ @@ -2550,7 +2550,7 @@ screen.\n\nCustom testnets only work with a URL to fetch their configuration fro } -query_deployment() { +__query_deployment() { if [ "${NETWORK}" = "gnosis" ]; then if uname -m | grep -q riscv64; then echo "Gnosis network has no available client combos on RISC-V. Aborting." @@ -2590,7 +2590,7 @@ query_deployment() { } -query_validator_client() { +__query_validator_client() { if [ "${NETWORK}" = "gnosis" ]; then CONSENSUS_CLIENT=$(whiptail --notags --title "Select validator client" --menu \ "Which validator client do you want to run?" 11 65 4 \ @@ -2635,7 +2635,7 @@ query_validator_client() { } -query_consensus_client() { +__query_consensus_client() { if [ "${NETWORK}" = "gnosis" ]; then CONSENSUS_CLIENT=$(whiptail --notags --title "Select consensus client" --menu \ "Which consensus client do you want to run?" 11 65 4 \ @@ -2672,7 +2672,7 @@ query_consensus_client() { } -query_consensus_only_client() { +__query_consensus_only_client() { if [ "${NETWORK}" = "gnosis" ]; then CONSENSUS_CLIENT=$(whiptail --notags --title "Select consensus client" --menu \ "Which consensus client do you want to run?" 11 65 4 \ @@ -2707,15 +2707,15 @@ query_consensus_only_client() { } -query_custom_execution_client() { +__query_custom_execution_client() { if [ "${__minty_fresh}" -eq 1 ]; then EL_CUSTOM_NODE="" JWT_SECRET="" else - var="EL_NODE" - EL_CUSTOM_NODE=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) - var="JWT_SECRET" - JWT_SECRET=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) + __var="EL_NODE" + EL_CUSTOM_NODE=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) + __var="JWT_SECRET" + JWT_SECRET=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) fi EL_CUSTOM_NODE=$(whiptail --title "Configure custom execution client" --inputbox "What is the URL for your custom \ execution client? (right-click to paste)" 10 65 "${EL_CUSTOM_NODE}" 3>&1 1>&2 2>&3) @@ -2737,7 +2737,7 @@ again or Cancel on the next screen." 
10 65 } -query_execution_client() { +__query_execution_client() { if [ "${NETWORK}" = "gnosis" ]; then if uname -m | grep -q aarch64 || uname -m | grep -q arm64; then EXECUTION_CLIENT=$(whiptail --notags --title "Select execution client" --menu \ @@ -2776,7 +2776,7 @@ query_execution_client() { if [ "${EXECUTION_CLIENT}" == "NONE" ]; then unset EXECUTION_CLIENT - query_custom_execution_client + __query_custom_execution_client EL_NODE="${EL_CUSTOM_NODE}" else echo "Your execution client file is:" "${EXECUTION_CLIENT}" @@ -2790,7 +2790,7 @@ query_execution_client() { } -query_grafana() { +__query_grafana() { if (whiptail --title "Grafana" --yesno "Do you want to use Grafana dashboards?" 10 65) then if [[ "$OSTYPE" == "darwin"* ]]; then # macOS doesn't do well with / bind mount - leave node-exporter, cadvisor and loki/promtail off by default @@ -2804,7 +2804,7 @@ query_grafana() { } -query_remote_beacon() { +__query_remote_beacon() { if [ "${__minty_fresh}" -eq 1 ]; then if [ "${__deployment}" = "rocket" ]; then REMOTE_BEACON="http://eth2:5052" @@ -2812,8 +2812,8 @@ query_remote_beacon() { REMOTE_BEACON="" fi else - var="CL_NODE" - REMOTE_BEACON=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) + __var="CL_NODE" + REMOTE_BEACON=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) fi REMOTE_BEACON=$(whiptail --title "Configure remote consensus client" --inputbox "What is the URL for your remote \ consensus client? (right-click to paste)" 10 60 "${REMOTE_BEACON}" 3>&1 1>&2 2>&3) @@ -2822,12 +2822,12 @@ consensus client? (right-click to paste)" 10 60 "${REMOTE_BEACON}" 3>&1 1>&2 2>& } -query_checkpoint_beacon() { +__query_checkpoint_beacon() { if [ "${__minty_fresh}" -eq 1 ] || [ "${__network_change}" -eq 1 ]; then RAPID_SYNC_URL="" else - var="RAPID_SYNC_URL" - RAPID_SYNC_URL=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) + __var="RAPID_SYNC_URL" + RAPID_SYNC_URL=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) fi if [ -z "${RAPID_SYNC_URL}" ]; then case "${NETWORK}" in @@ -2856,9 +2856,9 @@ checkpoint sync provider? (right-click to paste)" 10 65 "${RAPID_SYNC_URL}" 3>&1 } -query_graffiti() { - var="GRAFFITI" - GRAFFITI=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) +__query_graffiti() { + __var="GRAFFITI" + GRAFFITI=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) while true; do GRAFFITI=$(whiptail --title "Configure Graffiti" --inputbox "What Graffiti do you want to send with your blocks? \ @@ -2875,18 +2875,18 @@ query_graffiti() { } -query_rapid_sync() { +__query_rapid_sync() { if [[ "${NETWORK}" =~ ^https?:// ]]; then RAPID_SYNC_URL="" return fi - query_checkpoint_beacon + __query_checkpoint_beacon } -query_coinbase() { - var="FEE_RECIPIENT" - FEE_RECIPIENT=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) +__query_coinbase() { + __var="FEE_RECIPIENT" + FEE_RECIPIENT=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) while true; do set +e # Can't rely on the error handler here because of the special-casing below for update() @@ -2900,9 +2900,9 @@ address? Yes even on an RPC node. Can be any address at all. (right-click to pas "${FEE_RECIPIENT}" 3>&1 1>&2 2>&3) fi - exitstatus=$? + __exitstatus=$? 
set -e - if [ $exitstatus -eq 0 ]; then + if [ $__exitstatus -eq 0 ]; then if [[ ${FEE_RECIPIENT} == 0x* && ${#FEE_RECIPIENT} -eq 42 ]]; then echo "Your rewards address is: ${FEE_RECIPIENT}" break @@ -2916,7 +2916,7 @@ screen.\n\nThe client will not start successfully until a valid ETH rewards addr echo "Please make requested changes manually or run \"$__me update\" again" echo "before running \"$__me up\"." echo - echo "Without a FEE_RECIPIENT set in \"${ENV_FILE}\", containers will not" + echo "Without a FEE_RECIPIENT set in \"${__env_file}\", containers will not" echo "start successfully. Already running containers will keep running with the" echo "old configuration until you are ready to restart them." else @@ -2929,7 +2929,7 @@ screen.\n\nThe client will not start successfully until a valid ETH rewards addr } -query_mev() { +__query_mev() { if [ "${NETWORK}" = "gnosis" ]; then return 0 fi @@ -2954,8 +2954,8 @@ https://0x98650451ba02064f7b000f5768cf0cf4d4e492317d82871bdc87ef841a0743f69f0f1e esac return 0 fi - var="MEV_BOOST" - value=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) + __var="MEV_BOOST" + __value=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) # I do mean to match literally # shellcheck disable=SC2076 if [[ "${CONSENSUS_CLIENT}" =~ "-vc-only.yml" ]]; then @@ -2968,9 +2968,9 @@ want to use MEV Boost?" 10 65); then fi if (whiptail --title "MEV Boost" --yesno "Do you want to use MEV Boost?" 10 65) then MEV_BOOST="true" - if [ "${value}" = "true" ]; then - var="MEV_RELAYS" - MEV_RELAYS=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) + if [ "${__value}" = "true" ]; then + __var="MEV_RELAYS" + MEV_RELAYS=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) else case "${NETWORK}" in "sepolia") @@ -3008,11 +3008,11 @@ https://0x8c7d33605ecef85403f8b7289c8058f440cbb6bf72b055dfe2f3e2c6695b6a1ea5a9cd fi } -query_dkg() { +__query_dkg() { __ssv_operator_id=-1 if (whiptail --title "DKG ceremony" --yesno "Do you want to participate in DKG ceremonies as an operator?" 10 60); then __key_file_content=$(${__auto_sudo} cat ./ssv-config/encrypted_private_key.json) - __public_key=$(docompose -f ./ssv-dkg.yml run --rm curl-jq sh -c \ + __public_key=$(__docompose -f ./ssv-dkg.yml run --rm curl-jq sh -c \ "echo '${__key_file_content}' | jq -r '.pubKey'" | tail -n 1) echo "Your SSV node public key is: ${__public_key}" __ssv_operator_id=$(whiptail --title "Register SSV operator" --inputbox "\n1. Your SSV node public key:\n\n${__public_key}\n\n2. Register your operator in the SSV network with the public key\n\n3. Input your Operator ID \ @@ -3028,43 +3028,43 @@ query_dkg() { rm -f ssv-config/dkg-config.yaml.original } -set_value_in_env() { -# Assumes that "var" has been set to the name of the variable to be changed - if [ "${!var+x}" ]; then - if ! grep -qF "${var}" "${ENV_FILE}" 2>/dev/null ; then - echo "${var}=${!var}" >> "${ENV_FILE}" +__set_value_in_env() { +# Assumes that "__var" has been set to the name of the variable to be changed + if [ "${!__var+x}" ]; then + if ! grep -qF "${__var}" "${__env_file}" 2>/dev/null ; then + echo "${__var}=${!__var}" >> "${__env_file}" else # Handle & in GRAFFITI gracefully - sed -i'.original' -e "s~^\(${var}\s*=\s*\).*\$~\1${!var//&/\\&}~" "${ENV_FILE}" + sed -i'.original' -e "s~^\(${__var}\s*=\s*\).*\$~\1${!__var//&/\\&}~" "${__env_file}" fi fi } -handle_error() { +__handle_error() { if [[ ! 
$- =~ e ]]; then # set +e, do nothing return 0 fi - local exit_code=$1 + local __exit_code=$1 echo - if [ "$exit_code" -eq 130 ]; then + if [ "$__exit_code" -eq 130 ]; then echo "$__me terminated by user" - elif [ "$__during_config" -eq 1 ] && [ "$exit_code" -eq 1 ]; then + elif [ "$__during_config" -eq 1 ] && [ "$__exit_code" -eq 1 ]; then echo "Canceled config wizard." else - echo "$__me terminated with exit code $exit_code on line $2" + echo "$__me terminated with exit code $__exit_code on line $2" if [ -n "${__command}" ]; then echo "This happened during $__me ${__command} ${__params}" fi fi if [ "$__during_update" -eq 1 ] && [ "$__during_migrate" -eq 1 ]; then - cp "${ENV_FILE}" "${ENV_FILE}".partial - cp "${ENV_FILE}".source "${ENV_FILE}" + cp "${__env_file}" "${__env_file}".partial + cp "${__env_file}".source "${__env_file}" echo - echo "Restored your ${ENV_FILE} file, to undo partial migration. Please verify it looks correct." - echo "The partially migrated file is in ${ENV_FILE}.partial for troubleshooting." + echo "Restored your ${__env_file} file, to undo partial migration. Please verify it looks correct." + echo "The partially migrated file is in ${__env_file}.partial for troubleshooting." fi if [ "$__during_postgres" -eq 1 ]; then echo @@ -3075,7 +3075,7 @@ handle_error() { echo "Starting the node again could get you slashed." echo echo "Marking Web3signer as unsafe to start." - dodocker run --rm -v "$(dodocker volume ls -q -f "name=web3signer-keys")":/var/lib/web3signer \ + __dodocker run --rm -v "$(__dodocker volume ls -q -f "name=web3signer-keys")":/var/lib/web3signer \ alpine:3 touch /var/lib/web3signer/.migration_fatal_error elif [ "$__migrated" -eq 1 ]; then echo "Web3signer slashing protection database migration failed, after switching to the migrated data." @@ -3092,18 +3092,18 @@ handle_error() { } -check_legacy() { - var="COMPOSE_FILE" - value=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) +__check_legacy() { + __var="COMPOSE_FILE" + __value=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) # Literal match intended # shellcheck disable=SC2076 - if [[ "${value}" =~ "-allin1.yml" && ! "${value}" =~ "grandine-allin1.yml" ]]; then # Warn re Grandine once VC - if [[ "${value}" =~ "teku-allin1.yml" ]]; then + if [[ "${__value}" =~ "-allin1.yml" && ! "${__value}" =~ "grandine-allin1.yml" ]]; then # Warn re Grandine once VC + if [[ "${__value}" =~ "teku-allin1.yml" ]]; then __client="Teku" - elif [[ "${value}" =~ "nimbus-allin1.yml" ]]; then + elif [[ "${__value}" =~ "nimbus-allin1.yml" ]]; then __client="Nimbus" - elif [[ "${value}" =~ "grandine-allin1.yml" ]]; then + elif [[ "${__value}" =~ "grandine-allin1.yml" ]]; then __client="Grandine" else __client="Mystery" @@ -3119,8 +3119,8 @@ config() { # Do not track changes to ext-network.yml ${__as_owner} git update-index --assume-unchanged ext-network.yml # Create ENV file if needed - if ! [[ -f "${ENV_FILE}" ]]; then - ${__as_owner} cp default.env "${ENV_FILE}" + if ! [[ -f "${__env_file}" ]]; then + ${__as_owner} cp default.env "${__env_file}" __minty_fresh=1 else __minty_fresh=0 @@ -3128,15 +3128,15 @@ config() { __during_config=1 - check_legacy - query_network - query_deployment + __check_legacy + __query_network + __query_deployment case "${__deployment}" in "node") - query_consensus_client + __query_consensus_client ;; "validator" | "rocket") - query_validator_client + __query_validator_client ;; "ssv") if [ "${NETWORK}" = "holesky" ]; then @@ -3158,17 +3158,17 @@ config() { fi if [ ! 
-f "./ssv-config/encrypted_private_key.json" ]; then echo "Creating encrypted operator private key" - dodocker run --name ssv-node-key-generation -v "$(pwd)/ssv-config/password.pass":/password.pass \ + __dodocker run --name ssv-node-key-generation -v "$(pwd)/ssv-config/password.pass":/password.pass \ -it bloxstaking/ssv-node:latest /go/bin/ssvnode generate-operator-keys \ - --password-file=/password.pass && dodocker cp ssv-node-key-generation:/encrypted_private_key.json \ - ./ssv-config/encrypted_private_key.json && dodocker rm ssv-node-key-generation + --password-file=/password.pass && __dodocker cp ssv-node-key-generation:/encrypted_private_key.json \ + ./ssv-config/encrypted_private_key.json && __dodocker rm ssv-node-key-generation ${__auto_sudo} chown 12000:12000 ./ssv-config/encrypted_private_key.json fi - query_dkg - query_consensus_only_client + __query_dkg + __query_consensus_only_client ;; "rpc") - query_consensus_only_client + __query_consensus_only_client ;; *) echo "Unknown deployment ${__deployment}, this is a bug." @@ -3182,25 +3182,25 @@ config() { if [[ ! "${CONSENSUS_CLIENT}" =~ "-vc-only.yml" ]]; then CL_NODE="http://consensus:5052" - query_execution_client - query_rapid_sync - query_mev - query_grafana - query_coinbase + __query_execution_client + __query_rapid_sync + __query_mev + __query_grafana + __query_coinbase if [ "${__deployment}" = "node" ]; then - query_graffiti + __query_graffiti fi else unset EXECUTION_CLIENT unset GRAFANA_CLIENT - query_remote_beacon + __query_remote_beacon # This gets used, but shellcheck doesn't recognize that # shellcheck disable=SC2034 CL_NODE="${REMOTE_BEACON}" - query_mev - query_coinbase - query_graffiti + __query_mev + __query_coinbase + __query_graffiti fi __during_config=0 @@ -3234,59 +3234,59 @@ config() { echo "Your COMPOSE_FILE is:" "${COMPOSE_FILE}" - var=FEE_RECIPIENT - set_value_in_env - var=GRAFFITI - set_value_in_env - var=CL_NODE - set_value_in_env - var=RAPID_SYNC_URL - set_value_in_env - var=COMPOSE_FILE - set_value_in_env - var=EL_NODE - set_value_in_env - var=JWT_SECRET - set_value_in_env - var=NETWORK - set_value_in_env - var=MEV_BOOST - set_value_in_env - var=MEV_RELAYS - set_value_in_env + __var=FEE_RECIPIENT + __set_value_in_env + __var=GRAFFITI + __set_value_in_env + __var=CL_NODE + __set_value_in_env + __var=RAPID_SYNC_URL + __set_value_in_env + __var=COMPOSE_FILE + __set_value_in_env + __var=EL_NODE + __set_value_in_env + __var=JWT_SECRET + __set_value_in_env + __var=NETWORK + __set_value_in_env + __var=MEV_BOOST + __set_value_in_env + __var=MEV_RELAYS + __set_value_in_env if [[ "${NETWORK}" = "gnosis" ]] && [[ "${CONSENSUS_CLIENT}" =~ "nimbus" ]] ; then # We are using the variable # shellcheck disable=SC2034 NIM_DOCKERFILE=Dockerfile.sourcegnosis - var=NIM_DOCKERFILE - set_value_in_env + __var=NIM_DOCKERFILE + __set_value_in_env fi if uname -m | grep -q riscv64; then # We are using the variable # shellcheck disable=SC2034 NIM_DOCKERFILE=Dockerfile.source - var=NIM_DOCKERFILE - set_value_in_env + __var=NIM_DOCKERFILE + __set_value_in_env # We are using the variable # shellcheck disable=SC2034 GETH_DOCKERFILE=Dockerfile.source - var=GETH_DOCKERFILE - set_value_in_env + __var=GETH_DOCKERFILE + __set_value_in_env fi - var="SIREN_PASSWORD" - SIREN_PASSWORD=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) + __var="SIREN_PASSWORD" + SIREN_PASSWORD=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) if [ -z "${SIREN_PASSWORD}" ]; then SIREN_PASSWORD=$(head -c 8 /dev/urandom | od -A n -t u8 | tr -d 
'[:space:]' | sha256sum | head -c 32) - set_value_in_env + __set_value_in_env fi ${__as_owner} rm .env.original - pull_and_build - nag_os_version + __pull_and_build + __nag_os_version echo - echo "Your configuration file is: $(dirname "$(realpath "${BASH_SOURCE[0]}")")/${ENV_FILE}" + echo "Your configuration file is: $(dirname "$(realpath "${BASH_SOURCE[0]}")")/${__env_file}" echo "You can change advanced config items with \"nano .env\" when in the $(dirname "$(realpath "${BASH_SOURCE[0]}")") directory." echo } @@ -3295,108 +3295,108 @@ config() { version() { grep "^This is" README.md echo - var="COMPOSE_FILE" - value=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) + __var="COMPOSE_FILE" + __value=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) # Client versions - case "${value}" in + case "${__value}" in *ssv.yml* ) - docompose exec ssv-node /go/bin/ssvnode --version + __docompose exec ssv-node /go/bin/ssvnode --version echo ;;& *lighthouse.yml* | *lighthouse-cl-only* ) - docompose exec consensus lighthouse --version + __docompose exec consensus lighthouse --version echo ;;& *lighthouse-vc-only* ) - docompose exec validator lighthouse --version + __docompose exec validator lighthouse --version echo ;;& *lodestar.yml* | *lodestar-cl-only* ) - docompose exec consensus node /usr/app/node_modules/.bin/lodestar --version + __docompose exec consensus node /usr/app/node_modules/.bin/lodestar --version echo ;;& *lodestar-vc-only* ) - docompose exec validator node /usr/app/node_modules/.bin/lodestar --version + __docompose exec validator node /usr/app/node_modules/.bin/lodestar --version echo ;;& *prysm.yml* ) - docompose exec consensus beacon-chain --version + __docompose exec consensus beacon-chain --version echo - docompose exec validator validator --version + __docompose exec validator validator --version echo ;;& *prysm-cl-only* ) - docompose exec consensus beacon-chain --version + __docompose exec consensus beacon-chain --version echo ;;& *prysm-vc-only* ) - docompose exec validator validator --version + __docompose exec validator validator --version echo ;;& *nimbus.yml* | *nimbus-allin1.yml* | *nimbus-cl-only* ) - docompose exec consensus nimbus_beacon_node --version + __docompose exec consensus nimbus_beacon_node --version echo ;;& *nimbus-vc-only* ) - docompose exec validator nimbus_validator_client --version + __docompose exec validator nimbus_validator_client --version echo ;;& *teku.yml* | *teku-allin1.yml* | *teku-cl-only* ) - docompose exec consensus /opt/teku/bin/teku --version + __docompose exec consensus /opt/teku/bin/teku --version echo ;;& *teku-vc-only* ) - docompose exec validator /opt/teku/bin/teku --version + __docompose exec validator /opt/teku/bin/teku --version echo ;;& *grandine.yml* | *grandine-allin1.yml* | *grandine-cl-only* ) - docompose exec consensus grandine --version + __docompose exec consensus grandine --version echo ;;& *grandine-vc-only* ) - docompose exec validator grandine --version + __docompose exec validator grandine --version echo ;;& *geth.yml* ) - docompose exec execution geth version + __docompose exec execution geth version echo ;;& *reth.yml* ) - docompose exec execution reth --version + __docompose exec execution reth --version echo ;;& *besu.yml* ) - docompose exec execution /opt/besu/bin/besu --version + __docompose exec execution /opt/besu/bin/besu --version echo ;;& *nethermind.yml* ) - docompose exec execution /nethermind/nethermind --version + __docompose exec execution /nethermind/nethermind --version echo ;;& 
*erigon.yml* ) - docompose exec execution erigon --version + __docompose exec execution erigon --version echo ;;& *web3signer.yml* ) - docompose exec web3signer /opt/web3signer/bin/web3signer --version + __docompose exec web3signer /opt/web3signer/bin/web3signer --version echo - docompose exec postgres pg_config --version + __docompose exec postgres pg_config --version echo ;;& *mev-boost.yml* ) - docompose exec mev-boost /app/mev-boost -version + __docompose exec mev-boost /app/mev-boost -version echo ;;& *grafana.yml* ) - docompose exec prometheus /bin/prometheus --version + __docompose exec prometheus /bin/prometheus --version echo echo -n "Grafana " - docompose exec grafana /run.sh -v + __docompose exec grafana /run.sh -v echo ;;& *traefik-*.yml* ) echo "Traefik" - docompose exec traefik traefik version + __docompose exec traefik traefik version echo ;;& esac @@ -3432,7 +3432,7 @@ __full_help() { echo " Run without ACTION to get help text" echo " update [--refresh-targets] [--non-interactive]" echo " updates all client versions and ${__project_name} itself" - echo " --refresh-targets will reset your custom build targets in ${ENV_FILE} to defaults" + echo " --refresh-targets will reset your custom build targets in ${__env_file} to defaults" echo " up|start [service-name]" echo " starts the Ethereum node, or restarts containers that had their image or" echo " configuration changed. Can also start a specific service by name" @@ -3489,7 +3489,7 @@ help() { } # Main body from here -ENV_FILE=.env +__env_file=.env __during_config=0 __during_update=0 __during_postgres=0 @@ -3501,7 +3501,7 @@ if [ ! -f ~/.profile ] || ! grep -q "alias ethd" ~/.profile; then __me="./$__me" fi -trap 'handle_error $? $LINENO' ERR +trap '__handle_error $? $LINENO' ERR if [[ "$#" -eq 0 || "$*" =~ "-h" ]]; then # Lazy match for -h and --help but also --histogram, so careful here help "$@" @@ -3523,11 +3523,11 @@ __command="$1" shift __params=$* -handle_root -determine_distro -prep_conffiles +__handle_root +__determine_distro +__prep_conffiles -check_for_snap +__check_for_snap # Don't check for Docker before it's installed if [ "$__command" = "install" ]; then @@ -3535,8 +3535,8 @@ if [ "$__command" = "install" ]; then exit "$?" fi -handle_docker_sudo -check_compose_version +__handle_docker_sudo +__check_compose_version if [ "${__old_compose}" -eq 1 ]; then echo @@ -3564,12 +3564,12 @@ if ! type -P whiptail >/dev/null 2>&1; then exit 0 fi -if ! dodocker images >/dev/null 2>&1; then +if ! __dodocker images >/dev/null 2>&1; then echo "Please ensure you can call $__docker_exe before running ${__project_name}." exit 0 fi -if ! docompose --help >/dev/null 2>&1; then +if ! __docompose --help >/dev/null 2>&1; then echo "Please ensure you can call $__compose_exe before running ${__project_name}." 
exit 0 fi @@ -3584,7 +3584,7 @@ case "$__command" in ;; esac -check_disk_space +__check_disk_space if [ "${__compose_upgraded}" -eq 1 ]; then echo From 67ca15058f1198945b76e8cb2ad002b1688107c6 Mon Sep 17 00:00:00 2001 From: yorickdowne <71337066+yorickdowne@users.noreply.github.com> Date: Sun, 1 Sep 2024 07:00:57 -0400 Subject: [PATCH 18/48] Remove Prysm deprecated --enable-debug-rpc-endpoints (#1920) --- prysm-cl-only.yml | 1 - prysm.yml | 1 - 2 files changed, 2 deletions(-) diff --git a/prysm-cl-only.yml b/prysm-cl-only.yml index 1d12996a..0fc75795 100644 --- a/prysm-cl-only.yml +++ b/prysm-cl-only.yml @@ -87,7 +87,6 @@ services: - "8008" - --suggested-fee-recipient - ${FEE_RECIPIENT} - - --enable-debug-rpc-endpoints labels: - metrics.scrape=true - metrics.path=/metrics diff --git a/prysm.yml b/prysm.yml index 314a55b5..c704c95c 100644 --- a/prysm.yml +++ b/prysm.yml @@ -89,7 +89,6 @@ services: - "8008" - --suggested-fee-recipient - ${FEE_RECIPIENT} - - --enable-debug-rpc-endpoints labels: - metrics.scrape=true - metrics.path=/metrics From c626eced47eda53601c560a15d40de934b8463f0 Mon Sep 17 00:00:00 2001 From: yorickdowne <71337066+yorickdowne@users.noreply.github.com> Date: Sun, 1 Sep 2024 12:29:44 -0400 Subject: [PATCH 19/48] Do not overide Nethermind's Pruning.CacheMb (#1918) --- nethermind/docker-entrypoint.sh | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/nethermind/docker-entrypoint.sh b/nethermind/docker-entrypoint.sh index cea716ba..04d3eef9 100755 --- a/nethermind/docker-entrypoint.sh +++ b/nethermind/docker-entrypoint.sh @@ -75,8 +75,7 @@ else fi fi if [ "${__memtotal}" -ge 30 ]; then -# Investigating instability - __prune="${__prune} --Pruning.CacheMb=4096 --Pruning.FullPruningMemoryBudgetMb=16384 --Init.StateDbKeyScheme=HalfPath" + __prune="${__prune} --Pruning.FullPruningMemoryBudgetMb=16384 --Init.StateDbKeyScheme=HalfPath" fi echo "Using pruning parameters:" echo "${__prune}" From 41ddcd35c7d89dcb52b73d12821edf469e985e4e Mon Sep 17 00:00:00 2001 From: Drew <65291057+Hydepwns@users.noreply.github.com> Date: Sun, 1 Sep 2024 18:50:45 +0200 Subject: [PATCH 20/48] add keys count (#1913) * add keys count * Update vc-utils/keymanager.sh * Update vc-utils/keymanager.sh * Update vc-utils/keymanager.sh * Update vc-utils/keymanager.sh * Update vc-utils/keymanager.sh * Update vc-utils/keymanager.sh * Update vc-utils/keymanager.sh --------- Co-authored-by: yorickdowne <71337066+yorickdowne@users.noreply.github.com> --- ethd | 4 +++- vc-utils/keymanager.sh | 39 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 42 insertions(+), 1 deletion(-) diff --git a/ethd b/ethd index 94435286..cf9ffb5c 100755 --- a/ethd +++ b/ethd @@ -2139,6 +2139,8 @@ __keys_usage() { echo "Call keymanager with an ACTION, one of:" echo " list" echo " Lists the public keys of all validators currently loaded into your validator client" + echo " count" + echo " Counts the keys currently loaded into your validator client" echo " import" echo " Import all keystore*.json in .eth/validator_keys while loading slashing protection data" echo " in slashing_protection*.json files that match the public key(s) of the imported validator(s)" @@ -3428,7 +3430,7 @@ __full_help() { echo " config" echo " configures ${__project_name} with your choice of Ethereum clients" echo " keys ACTION [--non-interactive]" - echo " list, delete, import keys; their fee recipients; and gas fees" + echo " list, count, delete, import keys; their fee recipients; and gas fees" echo " Run without ACTION to get help text" echo 
" update [--refresh-targets] [--non-interactive]" echo " updates all client versions and ${__project_name} itself" diff --git a/vc-utils/keymanager.sh b/vc-utils/keymanager.sh index bc903a75..503e3e80 100755 --- a/vc-utils/keymanager.sh +++ b/vc-utils/keymanager.sh @@ -485,6 +485,40 @@ validator-list() { fi } +validator-count() { + __api_path=eth/v1/keystores + if [ "${WEB3SIGNER}" = "true" ]; then + __token=NIL + __vc_api_container=${__api_container} + __api_container=web3signer + __vc_api_port=${__api_port} + __api_port=9000 + __vc_api_tls=${__api_tls} + __api_tls=false + else + get-token + fi + __validator-list-call + key_count=$(echo "$__result" | jq -r '.data | length') + echo "Validator keys loaded into ${__service}: $key_count" + + if [ "${WEB3SIGNER}" = "true" ]; then + get-token + __api_path=eth/v1/remotekeys + __api_container=${__vc_api_container} + __service=${__vc_service} + __api_port=${__vc_api_port} + __api_tls=${__vc_api_tls} + __validator-list-call + remote_key_count=$(echo "$__result" | jq -r '.data | length') + echo "Remote Validator keys registered with ${__service}: $remote_key_count" + if [ "${key_count}" -ne "${remote_key_count}" ]; then + echo "WARNING: The number of keys loaded into Web3signer and registered with the validator client differ." + echo "Please run \"./ethd keys register\"" + fi + fi +} + validator-delete() { if [ -z "${__pubkey}" ]; then echo "Please specify a validator public key to delete, or \"all\"" @@ -1056,6 +1090,8 @@ usage() { echo "Call keymanager with an ACTION, one of:" echo " list" echo " Lists the public keys of all validators currently loaded into your validator client" + echo " count" + echo " Counts the number of keys currently loaded into your validator client" echo " import" echo " Import all keystore*.json in .eth/validator_keys while loading slashing protection data" echo " in slashing_protection*.json files that match the public key(s) of the imported validator(s)" @@ -1206,6 +1242,9 @@ case "$3" in register) validator-register ;; + count) + validator-count + ;; get-recipient) __pubkey=$4 recipient-get From a0c13b8cf1143bd9ab9c8060a7c72da631b91e9c Mon Sep 17 00:00:00 2001 From: yorickdowne <71337066+yorickdowne@users.noreply.github.com> Date: Mon, 2 Sep 2024 07:52:00 -0400 Subject: [PATCH 21/48] Prysm resolve w3s warnings (#1919) --- prysm-vc-only.yml | 3 --- prysm.yml | 3 --- prysm/docker-entrypoint-vc.sh | 2 +- 3 files changed, 1 insertion(+), 7 deletions(-) diff --git a/prysm-vc-only.yml b/prysm-vc-only.yml index e17fbcec..f0e2e486 100644 --- a/prysm-vc-only.yml +++ b/prysm-vc-only.yml @@ -60,7 +60,6 @@ services: - 0.0.0.0 - --monitoring-port - "8009" - - --web - --grpc-gateway-host - 0.0.0.0 - --grpc-gateway-port @@ -70,8 +69,6 @@ services: - consensus:5052 - --suggested-fee-recipient - ${FEE_RECIPIENT} - - --wallet-password-file - - /var/lib/prysm/password.txt labels: - traefik.enable=true - traefik.http.routers.prysm.entrypoints=web,websecure diff --git a/prysm.yml b/prysm.yml index c704c95c..c40bcf20 100644 --- a/prysm.yml +++ b/prysm.yml @@ -138,7 +138,6 @@ services: - 0.0.0.0 - --monitoring-port - "8009" - - --web - --grpc-gateway-host - 0.0.0.0 - --grpc-gateway-port @@ -148,8 +147,6 @@ services: - consensus:5052 - --suggested-fee-recipient - ${FEE_RECIPIENT} - - --wallet-password-file - - /var/lib/prysm/password.txt depends_on: - consensus labels: diff --git a/prysm/docker-entrypoint-vc.sh b/prysm/docker-entrypoint-vc.sh index d3525e98..2153f4f0 100755 --- a/prysm/docker-entrypoint-vc.sh +++ 
b/prysm/docker-entrypoint-vc.sh @@ -55,7 +55,7 @@ if [ "${WEB3SIGNER}" = "true" ]; then touch /var/lib/prysm/w3s-keys.txt fi else - __w3s_url="" + __w3s_url="--web --wallet-password-file /var/lib/prysm/password.txt" fi if [ "${DEFAULT_GRAFFITI}" = "true" ]; then From fb6c0838eeb3b5f989b5d8e83c0c050b16eb7457 Mon Sep 17 00:00:00 2001 From: yorickdowne <71337066+yorickdowne@users.noreply.github.com> Date: Sat, 14 Sep 2024 07:23:11 +0100 Subject: [PATCH 22/48] Offer Prysm on ARM64 (#1922) --- ethd | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/ethd b/ethd index cf9ffb5c..779bab26 100755 --- a/ethd +++ b/ethd @@ -2622,7 +2622,8 @@ __query_validator_client() { "lighthouse-vc-only.yml" "Lighthouse validator client" \ "teku-vc-only.yml" "Teku validator client" \ "lodestar-vc-only.yml" "Lodestar validator client" \ - "nimbus-vc-only.yml" "Nimbus validator client" 3>&1 1>&2 2>&3) + "nimbus-vc-only.yml" "Nimbus validator client" \ + "prysm-vc-only.yml" "Prysm validator client" 3>&1 1>&2 2>&3) else CONSENSUS_CLIENT=$(whiptail --notags --title "Select validator client" --menu \ "Which validator client do you want to run?" 12 65 5 \ @@ -2653,6 +2654,7 @@ __query_consensus_client() { "lodestar.yml" "Lodestar (Javascript) - consensus and validator client" \ "teku.yml" "Teku (Java) - consensus and validator client" \ "lighthouse.yml" "Lighthouse (Rust) - consensus and validator client" \ + "prysm.yml" "Prysm (Go) - consensus and validator client" \ 3>&1 1>&2 2>&3) elif uname -m | grep -q riscv64; then CONSENSUS_CLIENT=$(whiptail --notags --title "Select consensus client" --menu \ @@ -2689,7 +2691,9 @@ __query_consensus_only_client() { "grandine-cl-only.yml" "Grandine (Rust) - consensus client" \ "lodestar-cl-only.yml" "Lodestar (Javascript) - consensus client" \ "lighthouse-cl-only.yml" "Lighthouse (Rust) - consensus client" \ - "teku-cl-only.yml" "Teku (Java) - consensus client" 3>&1 1>&2 2>&3) + "teku-cl-only.yml" "Teku (Java) - consensus client" \ + "prysm-cl-only.yml" "Prysm (Go) - consensus client" \ + 3>&1 1>&2 2>&3) elif uname -m | grep -q riscv64; then CONSENSUS_CLIENT=$(whiptail --notags --title "Select consensus client" --menu \ "Which consensus client do you want to run?" 11 65 4 \ From cc7e62139c40d104e81e76c988359d4772cfaefb Mon Sep 17 00:00:00 2001 From: yorickdowne <71337066+yorickdowne@users.noreply.github.com> Date: Sat, 14 Sep 2024 07:29:55 +0100 Subject: [PATCH 23/48] Reth include SSV RocketPool StakeWise contract in full (#1924) --- ethd | 7 ++++ reth/docker-entrypoint.sh | 68 ++++++++++++++++++++++++++++++++++++--- 2 files changed, 70 insertions(+), 5 deletions(-) diff --git a/ethd b/ethd index 779bab26..5f21ad9e 100755 --- a/ethd +++ b/ethd @@ -743,6 +743,13 @@ __delete_reth() { return 0 fi +# Has db been initialized? 
+ __db_exists=$(dodocker run --rm -v "$(dodocker volume ls -q -f "name=$(basename "$(realpath .)")[_-]reth-el-data")":"/var/lib/reth" \ + alpine:3 sh -c 'if [ -f "/var/lib/reth/db/database.version" ]; then echo true; else echo false; fi') + if [ "$__db_exists" = "false" ]; then + return 0 + fi + # Check Reth db version __db_version="$(__dodocker run --rm -v "$(__dodocker volume ls -q -f "name=$(basename "$(realpath .)")[_-]reth-el-data")":"/var/lib/reth" \ alpine:3 cat /var/lib/reth/db/database.version)" diff --git a/reth/docker-entrypoint.sh b/reth/docker-entrypoint.sh index a1cea933..45d09f96 100755 --- a/reth/docker-entrypoint.sh +++ b/reth/docker-entrypoint.sh @@ -74,17 +74,75 @@ case ${LOG_LEVEL} in ;; esac +__static="" +if [ -n "${STATIC_DIR}" ] && [ ! "${STATIC_DIR}" = ".nada" ]; then + echo "Using separate static files directory at ${STATIC_DIR}." + __static="--datadir.static-files /var/lib/static" +fi + if [ "${ARCHIVE_NODE}" = "true" ]; then echo "Reth archive node without pruning" __prune="" else __prune="--full" -fi + if [ ! -f "/var/lib/reth/reth.toml" ]; then # Configure ssv, rocketpool, stakewise contracts +# Word splitting is desired for the command line parameters +# shellcheck disable=SC2086 + reth init ${__network} --datadir /var/lib/reth ${__static} + cat <> /var/lib/reth/reth.toml -__static="" -if [ -n "${STATIC_DIR}" ] && [ ! "${STATIC_DIR}" = ".nada" ]; then - echo "Using separate static files directory at ${STATIC_DIR}." - __static="--datadir.static-files /var/lib/static" +[prune] +block_interval = 5 + +[prune.segments] +sender_recovery = "full" + +[prune.segments.receipts] +before = 0 + +[prune.segments.account_history] +distance = 10064 + +[prune.segments.storage_history] +distance = 10064 +EOF + case "${NETWORK}" in + mainnet) + echo "Configuring Reth pruning to include RocketPool, SSV and StakeWise contracts" + cat <> /var/lib/reth/reth.toml + +[prune.segments.receipts_log_filter.0x00000000219ab540356cBB839Cbe05303d7705Fa] +before = 0 + +[prune.segments.receipts_log_filter.0xDD9BC35aE942eF0cFa76930954a156B3fF30a4E1] +before = 0 + +[prune.segments.receipts_log_filter.0xEE4d2A71cF479e0D3d0c3c2C923dbfEB57E73111] +before = 0 + +[prune.segments.receipts_log_filter.0x6B5815467da09DaA7DC83Db21c9239d98Bb487b5] +before = 0 +EOF + ;; + holesky) + echo "Configuring Reth pruning to include RocketPool, SSV and StakeWise contracts" + cat <> /var/lib/reth/reth.toml + +[prune.segments.receipts_log_filter.0x4242424242424242424242424242424242424242] +before = 0 + +[prune.segments.receipts_log_filter.0x38A4794cCEd47d3baf7370CcC43B560D3a1beEFA] +before = 0 + +[prune.segments.receipts_log_filter.0x9D210F9169bc6Cf49152F21A57A446bCcaA87b33] +before = 0 + +[prune.segments.receipts_log_filter.0xB580799Bf7d62721D1a523f0FDF2f5Ed7BA4e259] +before = 0 +EOF + ;; + esac + fi fi if [ -f /var/lib/reth/prune-marker ]; then From a58fe0d2da1b29db1d8db87fe08c6908231e99c9 Mon Sep 17 00:00:00 2001 From: yorickdowne <71337066+yorickdowne@users.noreply.github.com> Date: Sat, 14 Sep 2024 07:38:40 +0100 Subject: [PATCH 24/48] New Erigon repo and tag (#1923) --- default.env | 4 ++-- erigon.yml | 6 +++--- erigon/Dockerfile.binary | 4 ++-- ethd | 12 ++++++++---- 4 files changed, 15 insertions(+), 11 deletions(-) diff --git a/default.env b/default.env index db0fe546..b89f1e75 100644 --- a/default.env +++ b/default.env @@ -257,8 +257,8 @@ BESU_DOCKERFILE=Dockerfile.binary # SRC build target can be a tag, a branch, or a pr as "pr-ID" ERIGON_SRC_BUILD_TARGET='$(git describe --tags $(git rev-list --tags 
--max-count=1))' ERIGON_SRC_REPO=https://github.com/ledgerwatch/erigon -ERIGON_DOCKER_TAG=v2.60.6 -ERIGON_DOCKER_REPO=thorax/erigon +ERIGON_DOCKER_TAG=latest +ERIGON_DOCKER_REPO=erigontech/erigon ERIGON_DOCKERFILE=Dockerfile.binary # Nethermind diff --git a/erigon.yml b/erigon.yml index 4fe00f8c..4e3ae7ce 100644 --- a/erigon.yml +++ b/erigon.yml @@ -15,8 +15,8 @@ services: args: - BUILD_TARGET=${ERIGON_SRC_BUILD_TARGET:-'$(git describe --tags $(git rev-list --tags --max-count=1))'} - SRC_REPO=${ERIGON_SRC_REPO:-https://github.com/ledgerwatch/erigon} - - DOCKER_TAG=${ERIGON_DOCKER_TAG:-stable} - - DOCKER_REPO=${ERIGON_DOCKER_REPO:-thorax/erigon} + - DOCKER_TAG=${ERIGON_DOCKER_TAG:-latest} + - DOCKER_REPO=${ERIGON_DOCKER_REPO:-erigontech/erigon} stop_grace_period: 5m image: erigon:local pull_policy: never @@ -27,7 +27,7 @@ services: - ARCHIVE_NODE=${ARCHIVE_NODE:-} - NETWORK=${NETWORK} - IPV6=${IPV6:-false} - - DOCKER_TAG=${ERIGON_DOCKER_TAG:-stable} + - DOCKER_TAG=${ERIGON_DOCKER_TAG:-latest} - COMPOSE_FILE=${COMPOSE_FILE} - CL_P2P_PORT=${CL_P2P_PORT:-9000} - CL_REST_PORT=${CL_REST_PORT:-5052} diff --git a/erigon/Dockerfile.binary b/erigon/Dockerfile.binary index 13d7706f..26da6dff 100644 --- a/erigon/Dockerfile.binary +++ b/erigon/Dockerfile.binary @@ -1,5 +1,5 @@ -ARG DOCKER_TAG=stable -ARG DOCKER_REPO=thorax/erigon +ARG DOCKER_TAG=latest +ARG DOCKER_REPO=erigontech/erigon FROM ${DOCKER_REPO}:${DOCKER_TAG} diff --git a/ethd b/ethd index 5f21ad9e..6c0206fa 100755 --- a/ethd +++ b/ethd @@ -793,8 +793,7 @@ __delete_erigon() { __value=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) # I do mean to match literally # shellcheck disable=SC2076 -# if [[ ! ("${__value}" =~ "v3" || "${__value}" = "latest" || "${__value}" = "stable") ]]; then # No stable yet - if [[ ! ("${__value}" =~ "v3" || "${__value}" = "latest") ]]; then + if [[ ! ("${__value}" =~ "v3" || "${__value}" = "latest" || "${__value}" = "main-latest") ]]; then return 0 fi @@ -1116,8 +1115,11 @@ __env_migrate() { if [[ "${__var}" = "LH_DOCKER_TAG" && "${__value}" = "latest-modern" ]]; then # LH 5.2 ditched latest-modern __value="latest" fi - if [[ "${__var}" = "ERIGON_DOCKER_TAG" && "${__value}" = "stable" ]]; then # Erigon ditched stable - __value="v2.60.6" + if [[ "${__var}" = "ERIGON_DOCKER_TAG" && "${__value}" = "stable" ]]; then # Erigon switched to latest + __value="latest" + fi + if [[ "${__var}" = "ERIGON_DOCKER_REPO" && "${__value}" = "thorax/erigon" ]]; then # Erigon new repo + __value="erigontech/erigon" fi sed -i'.original' -e "s~^\(${__var}\s*=\s*\).*$~\1${__value}~" "${__env_file}" fi @@ -2756,6 +2758,7 @@ __query_execution_client() { EXECUTION_CLIENT=$(whiptail --notags --title "Select execution client" --menu \ "Which execution client do you want to run?" 9 65 2 \ "nethermind.yml" "Nethermind (.NET)" \ + "erigon.yml" "Erigon (Go)" \ "NONE" "Custom - Distributed" 3>&1 1>&2 2>&3) else EXECUTION_CLIENT=$(whiptail --notags --title "Select execution client" --menu \ @@ -2769,6 +2772,7 @@ __query_execution_client() { "Which execution client do you want to run?" 
11 65 4 \ "besu.yml" "Besu (Java)" \ "nethermind.yml" "Nethermind (.NET)" \ + "erigon.yml" "Erigon (Go)" \ "geth.yml" "Geth (Go)" \ "NONE" "Custom - Distributed" 3>&1 1>&2 2>&3) elif uname -m | grep -q riscv64; then From b24fb1db933a3e09a6aff746e3a839474e43631d Mon Sep 17 00:00:00 2001 From: yorickdowne <71337066+yorickdowne@users.noreply.github.com> Date: Sat, 14 Sep 2024 07:48:28 +0100 Subject: [PATCH 25/48] Adjust Erigon entrypoint to new tags (#1925) --- erigon/docker-entrypoint.sh | 65 +++++++++++++++++++------------------ 1 file changed, 33 insertions(+), 32 deletions(-) diff --git a/erigon/docker-entrypoint.sh b/erigon/docker-entrypoint.sh index 35cc44ff..c560bcc8 100755 --- a/erigon/docker-entrypoint.sh +++ b/erigon/docker-entrypoint.sh @@ -56,8 +56,39 @@ fi __caplin="" __db_params="" -#if [[ "${DOCKER_TAG}" =~ "v3" || "${DOCKER_TAG}" = "latest" || "${DOCKER_TAG}" = "stable" ]]; then # No stable yet -if [[ "${DOCKER_TAG}" =~ "v3" || "${DOCKER_TAG}" = "latest" ]]; then +# Literal match intended +# shellcheck disable=SC2076 +if [[ "${DOCKER_TAG}" =~ "2." || "${DOCKER_TAG}" = "latest" ]]; then +# Check for network, and set prune accordingly + if [ "${ARCHIVE_NODE}" = "true" ]; then + echo "Erigon archive node without pruning" + __prune="" + else + if [[ "${NETWORK}" = "mainnet" ]]; then + echo "mainnet: Running with prune.r.before=11052984 for eth deposit contract" + __prune="--prune=htc --prune.r.before=11052984" + elif [[ "${NETWORK}" = "goerli" ]]; then + echo "goerli: Running with prune.r.before=4367322 for eth deposit contract" + __prune="--prune=htc --prune.r.before=4367322" + elif [[ "${NETWORK}" = "sepolia" ]]; then + echo "sepolia: Running with prune.r.before=1273020 for eth deposit contract" + __prune="--prune=htc --prune.r.before=1273020" + elif [[ "${NETWORK}" = "gnosis" ]]; then + echo "gnosis: Running with prune.r.before=19469077 for gno deposit contract" + __prune="--prune=htc --prune.r.before=19469077" + elif [[ "${NETWORK}" = "holesky" ]]; then + echo "holesky: Running without prune.r for eth deposit contract" + __prune="--prune=htc" + elif [[ "${NETWORK}" =~ ^https?:// ]]; then + echo "Custom testnet: Running without prune.r for eth deposit contract" + __prune="--prune=htc" + else + echo "Unable to determine eth deposit contract, running without prune.r" + __prune="--prune=htc" + fi + fi + __db_params="--db.pagesize 16K --db.size.limit 8TB" +else # Erigon v3 if [ "${ARCHIVE_NODE}" = "true" ]; then echo "Erigon archive node without pruning" __prune="--prune.mode=archive" @@ -90,36 +121,6 @@ if [[ "${DOCKER_TAG}" =~ "v3" || "${DOCKER_TAG}" = "latest" ]]; then fi echo "Caplin parameters: ${__caplin}" fi -else -# Check for network, and set prune accordingly - if [ "${ARCHIVE_NODE}" = "true" ]; then - echo "Erigon archive node without pruning" - __prune="" - else - if [[ "${NETWORK}" = "mainnet" ]]; then - echo "mainnet: Running with prune.r.before=11052984 for eth deposit contract" - __prune="--prune=htc --prune.r.before=11052984" - elif [[ "${NETWORK}" = "goerli" ]]; then - echo "goerli: Running with prune.r.before=4367322 for eth deposit contract" - __prune="--prune=htc --prune.r.before=4367322" - elif [[ "${NETWORK}" = "sepolia" ]]; then - echo "sepolia: Running with prune.r.before=1273020 for eth deposit contract" - __prune="--prune=htc --prune.r.before=1273020" - elif [[ "${NETWORK}" = "gnosis" ]]; then - echo "gnosis: Running with prune.r.before=19469077 for gno deposit contract" - __prune="--prune=htc --prune.r.before=19469077" - elif [[ "${NETWORK}" = "holesky" 
]]; then - echo "holesky: Running without prune.r for eth deposit contract" - __prune="--prune=htc" - elif [[ "${NETWORK}" =~ ^https?:// ]]; then - echo "Custom testnet: Running without prune.r for eth deposit contract" - __prune="--prune=htc" - else - echo "Unable to determine eth deposit contract, running without prune.r" - __prune="--prune=htc" - fi - fi - __db_params="--db.pagesize 16K --db.size.limit 8TB" fi if [ "${IPV6}" = "true" ]; then From 848ea5a10ad9fde4020ea6ba7d0b727f71bb0d40 Mon Sep 17 00:00:00 2001 From: Jake Tagnepis Date: Tue, 17 Sep 2024 14:45:42 +0900 Subject: [PATCH 26/48] Fix wrong method name (#1927) (#1928) --- ethd | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ethd b/ethd index 6c0206fa..5ad89365 100755 --- a/ethd +++ b/ethd @@ -744,7 +744,7 @@ __delete_reth() { fi # Has db been initialized? - __db_exists=$(dodocker run --rm -v "$(dodocker volume ls -q -f "name=$(basename "$(realpath .)")[_-]reth-el-data")":"/var/lib/reth" \ + __db_exists=$(__dodocker run --rm -v "$(__dodocker volume ls -q -f "name=$(basename "$(realpath .)")[_-]reth-el-data")":"/var/lib/reth" \ alpine:3 sh -c 'if [ -f "/var/lib/reth/db/database.version" ]; then echo true; else echo false; fi') if [ "$__db_exists" = "false" ]; then return 0 From 83feff33eb80d64af1d965d8cddb809b89eaab8e Mon Sep 17 00:00:00 2001 From: Vladimir Gorkavenko <32727352+vgorkavenko@users.noreply.github.com> Date: Tue, 17 Sep 2024 13:22:20 +0200 Subject: [PATCH 27/48] Lido Community Staking integration (#1754) * Lido integration (#4) * feat: lido relays (#5) * fix: change mech of relay selection for Lido (#6) * fix: Obol integration * fix: linter * feat: add obol dashboards * feat: add obol loki dashboards * fix: obol metrics * fix: use `.eth` instead of `.charon` * feat: backup ENR public key to the file * fix: `OWNER` for macOS * fix: exitstatus check for charon-run-dkg * feat: add ssv-dkg for Lido integration * fix: linter * fix: sed * feat: backup .eth when exists * fix: linter * fix: charon dir * fix: lido_ssv flow * fix: permissions * fix: jq for obol * feat: add `--distributed` for prysm and lodestar * fix: lint * fix: obol flow * fix: linter * fix: Relays list window size * fix: obol flow * fix: backup folder with keys * fix: linter * fix: prysm rest api * fix: add mkdir for obol * fix: add `--publish` flag to dkg * feat: add LIDO_DV_EXIT_EXIT_EPOCH * fix: update image for lido-dv-exit * fix: lido-dv-exit and exit epoch * fix: ssv-dkg config path * fix: ssv dashboard * fix: lido ssv flow * fix: add curl-jq * chore: remove goerli * fix: compose files * fix: permissions * feat: holesky relays, user flow adjustment * feat: update VE_ORACLE_ADDRESSES_ALLOWLIST for holesky * fix: extra chown for .eth * fix: add `-u` for enr and dkg services * fix: linter * Fix: Argument list too long error when verifying Oblol cluster definition * feat: automatic import N keystore files with different passwords * Fix: "Argument list too long" when verifying existing cluster definition file * Fix: pre-commit errors "SC2046 (warning): Quote this to prevent word splitting" * chore: fix typo * chore: update `VE_ORACLE_ADDRESSES_ALLOWLIST` for holesky * feat: `distributed` arg for lh, nim, teku VC clients * fix: `OBOL_NODE_TAG` -> `CHARON_VERSION` * fix: charon version * feat: csm testnet (#9) * fix: link to csm widget * Support Nethermind 1.27 (#1848) (cherry picked from commit d951ecab8be0550d2fe2c3d10bca8416c43ddb6c) * fix: after sync * Fix link to the repo in the README.md * fix: lighthouse dashboards * fix: export 
vars for deposit-cli * fix: save vars for deposit-cli * fix: linter * fix: sed for macOS in Obol config * feat: changes for upstream (#19) * fix: remove env vars * fix: remove unused operator-sample * feat: reuse deposit-cli * fix: remove unused file from gitignore * fix: lidofinance -> eth-educators * fix: add `--uid` flag for keys creation * fix: message * fix: MEV_BOOST should be enabled for Lido configs * fix: reuse attention message * fix: add `OBOL_CL_NODE` and `OBOL_EL_NODE` env vars * fix: misleading msg * fix: add env vars to `ALL_VARS` * chore: bump `ENV_VERSION` * feat: follow to refactoring * fix: docompose -> __docompose --------- Co-authored-by: cnupy Co-authored-by: cnupy <38653063+cnupy@users.noreply.github.com> Co-authored-by: yorickdowne <71337066+yorickdowne@users.noreply.github.com> Co-authored-by: Dmitry Gusakov --- .gitignore | 3 + default.env | 20 +- ethd | 428 ++++++++++++++++++++++- grafana/provision.sh | 15 + lido-obol.yml | 104 ++++++ lighthouse-vc-only.yml | 1 + lighthouse.yml | 1 + lighthouse/docker-entrypoint-vc.sh | 11 +- lodestar-vc-only.yml | 1 + lodestar.yml | 1 + lodestar/docker-entrypoint-vc.sh | 11 +- nimbus-allin1.yml | 1 + nimbus-vc-only.yml | 1 + nimbus.yml | 1 + nimbus/docker-entrypoint-vc.sh | 11 +- prometheus/docker-entrypoint.sh | 4 + prometheus/obol-prom.yml | 8 + prometheus/rootless/lido-obol-prom.yml | 11 + prysm-vc-only.yml | 8 +- prysm.yml | 8 +- prysm/docker-entrypoint-vc.sh | 11 +- staking-deposit-cli/docker-entrypoint.sh | 15 + teku-allin1.yml | 1 + teku-vc-only.yml | 1 + teku.yml | 1 + teku/docker-entrypoint-vc.sh | 11 +- vc-utils/keymanager.sh | 8 + 27 files changed, 670 insertions(+), 27 deletions(-) create mode 100644 lido-obol.yml create mode 100644 prometheus/obol-prom.yml create mode 100644 prometheus/rootless/lido-obol-prom.yml diff --git a/.gitignore b/.gitignore index c418707a..3bfced4c 100644 --- a/.gitignore +++ b/.gitignore @@ -4,6 +4,9 @@ ext-network.yml ext-network.yml.original ext-network.yml.bak .eth/* +.eth_backup* +.charon/* +validator-ejector/* !.eth/README.md !.eth/validator_keys/.empty !.eth/exit_messages/.empty diff --git a/default.env b/default.env index b89f1e75..2635c9e7 100644 --- a/default.env +++ b/default.env @@ -79,6 +79,9 @@ HOST_IP= # IP address to use when host-mapping a port through *-shared.yml. Set this to 127.0.0.1 to restrict the share to localhost SHARE_IP= +# Relays to connect charon node +OBOL_P2P_RELAYS= + # P2P ports you will forward to your staking node. Adjust here if you are # going to use something other than defaults. EL_P2P_PORT=30303 @@ -115,6 +118,8 @@ SSV_P2P_PORT=13001 SSV_P2P_PORT_UDP=12001 # SSV DKG port SSV_DKG_PORT=3030 +# OBOL Node ports +OBOL_P2P_PORT=3610 # Engine port. Only for distributed setups, this should otherwise be left alone EE_PORT=8551 # Consensus layer REST port. Only for distributed setups, this should otherwise be left alone @@ -160,6 +165,10 @@ EL_NODE=http://execution:8551 CL_NODE=http://consensus:5052 # MEV-boost address. 
This would only be changed for Vouch setups MEV_NODE=http://mev-boost:18550 +# Consensus client address for Charon and Lido Validator Ejector in Obol setup +OBOL_CL_NODE=http://consensus:5052 +# Execution client address (RPC) for Lido Validator Ejector in Obol setup +OBOL_EL_NODE=http://execution:8545 # You can set specific version targets and choose binary or compiled from source builds below, # via "Dockerfile.binary" or "Dockerfile.source" @@ -174,6 +183,15 @@ ETH_DOCKER_TAG= SSV_NODE_TAG=latest SSV_DKG_TAG=latest +# Lido OBOL +CHARON_VERSION=latest +VE_OPERATOR_ID= +VE_STAKING_MODULE_ID= +VE_LOCATOR_ADDRESS= +VE_ORACLE_ADDRESSES_ALLOWLIST= +ENABLE_DIST_ATTESTATION_AGGR= +LIDO_DV_EXIT_EXIT_EPOCH= + # MEV-Boost # SRC build target can be a tag, a branch, or a pr as "pr-ID" MEV_SRC_BUILD_TARGET=stable @@ -308,4 +326,4 @@ DDNS_TAG=v2 NODE_EXPORTER_IGNORE_MOUNT_REGEX='^/(dev|proc|sys|run|var/lib/docker/.+)($|/)' # Used by ethd update - please do not adjust -ENV_VERSION=13 +ENV_VERSION=14 diff --git a/ethd b/ethd index 5ad89365..497513a0 100755 --- a/ethd +++ b/ethd @@ -995,15 +995,15 @@ __env_migrate() { return 0 fi - __all_vars=( COMPOSE_FILE FEE_RECIPIENT EL_NODE GRAFFITI DEFAULT_GRAFFITI NETWORK MEV_BOOST MEV_RELAYS MEV_MIN_BID \ + __all_vars=( COMPOSE_FILE FEE_RECIPIENT EL_NODE EL_OBOL_NODE GRAFFITI DEFAULT_GRAFFITI NETWORK MEV_BOOST MEV_RELAYS MEV_MIN_BID \ MEV_NODE CL_MAX_PEER_COUNT CL_MIN_PEER_COUNT EL_MAX_PEER_COUNT EL_MIN_PEER_COUNT DOMAIN ACME_EMAIL ANCIENT_DIR \ AUTOPRUNE_NM LOGS_LABEL CF_DNS_API_TOKEN CF_ZONE_API_TOKEN CF_ZONE_ID AWS_PROFILE AWS_HOSTED_ZONE_ID \ GRAFANA_HOST SIREN_HOST DISTRIBUTED BESU_HEAP TEKU_HEAP PROM_HOST HOST_IP SHARE_IP PRYSM_HOST EE_HOST \ EL_HOST EL_LB EL_WS_HOST EL_WS_LB CL_HOST CL_LB VC_HOST DDNS_SUBDOMAIN IPV6 DDNS_PROXY RAPID_SYNC_URL \ - CL_NODE BEACON_STATS_API BEACON_STATS_MACHINE EL_P2P_PORT CL_P2P_PORT WEB3SIGNER PRYSM_PORT DOPPELGANGER \ + CL_NODE CL_OBOL_NODE BEACON_STATS_API BEACON_STATS_MACHINE EL_P2P_PORT CL_P2P_PORT WEB3SIGNER PRYSM_PORT DOPPELGANGER \ PRYSM_UDP_PORT CL_QUIC_PORT GRAFANA_PORT SIREN_PORT PROMETHEUS_PORT KEY_API_PORT TRAEFIK_WEB_PORT \ TRAEFIK_WEB_HTTP_PORT CL_REST_PORT EL_RPC_PORT EL_WS_PORT EE_PORT ERIGON_TORRENT_PORT LOG_LEVEL JWT_SECRET \ - EL_EXTRAS CL_EXTRAS VC_EXTRAS ARCHIVE_NODE SSV_P2P_PORT SSV_P2P_PORT_UDP ERIGON_P2P_PORT_2 \ + EL_EXTRAS CL_EXTRAS VC_EXTRAS ARCHIVE_NODE SSV_P2P_PORT SSV_P2P_PORT_UDP OBOL_P2P_PORT ERIGON_P2P_PORT_2 \ ERIGON_P2P_PORT_3 LODESTAR_HEAP SSV_DKG_PORT SIREN_PASSWORD ) __target_vars=( ETH_DOCKER_TAG NIM_SRC_BUILD_TARGET NIM_SRC_REPO NIM_DOCKER_TAG NIM_DOCKER_VC_TAG NIM_DOCKER_REPO \ NIM_DOCKER_VC_REPO NIM_DOCKERFILE TEKU_SRC_BUILD_TARGET TEKU_SRC_REPO TEKU_DOCKER_TAG TEKU_DOCKER_REPO \ @@ -1015,7 +1015,7 @@ __env_migrate() { NIMEL_DOCKER_REPO NIMEL_DOCKERFILE LS_SRC_BUILD_TARGET LS_SRC_REPO LS_DOCKER_TAG LS_DOCKER_REPO LS_DOCKERFILE \ GETH_SRC_BUILD_TARGET GETH_SRC_REPO GETH_DOCKER_TAG GETH_DOCKER_REPO TRAEFIK_TAG DDNS_TAG \ GETH_DOCKERFILE NM_SRC_BUILD_TARGET NM_SRC_REPO NM_DOCKER_TAG NM_DOCKER_REPO NM_DOCKERFILE \ - BESU_SRC_BUILD_TARGET BESU_SRC_REPO BESU_DOCKER_TAG BESU_DOCKER_REPO BESU_DOCKERFILE SSV_NODE_TAG \ + BESU_SRC_BUILD_TARGET BESU_SRC_REPO BESU_DOCKER_TAG BESU_DOCKER_REPO BESU_DOCKERFILE SSV_NODE_TAG CHARON_VERSION \ DEPCLI_SRC_BUILD_TARGET DEPCLI_SRC_REPO DEPCLI_DOCKER_TAG W3S_DOCKER_TAG W3S_DOCKER_REPO \ PG_DOCKER_TAG RETH_SRC_BUILD_TARGET RETH_SRC_REPO RETH_DOCKER_TAG RETH_DOCKER_REPO RETH_DOCKERFILE \ GRANDINE_SRC_BUILD_TARGET GRANDINE_SRC_REPO GRANDINE_DOCKER_TAG 
GRANDINE_DOCKER_REPO GRANDINE_DOCKERFILE \ @@ -2146,10 +2146,12 @@ __i_haz_keys_service() { __keys_usage() { echo "Call keymanager with an ACTION, one of:" + echo " create-for-csm" + echo " Create keys for Lido CSM" echo " list" - echo " Lists the public keys of all validators currently loaded into your validator client" + echo " Lists the public keys of all validators currently loaded into your validator client" echo " count" - echo " Counts the keys currently loaded into your validator client" + echo " Counts the keys currently loaded into your validator client" echo " import" echo " Import all keystore*.json in .eth/validator_keys while loading slashing protection data" echo " in slashing_protection*.json files that match the public key(s) of the imported validator(s)" @@ -2241,6 +2243,10 @@ keys() { __docompose rm --force validator up fi + elif [ "${1:-}" = "create-for-csm" ]; then + var="NETWORK" + NETWORK=$(sed -n -e "s/^${var}=\(.*\)/\1/p" "${ENV_FILE}" || true) + __query_lido_keys_generation elif [ "${1:-}" = "prepare-address-change" ]; then __i_haz_ethdo echo "Generating offline prep file" @@ -2574,9 +2580,10 @@ __query_deployment() { "validator" "Validator client only" 3>&1 1>&2 2>&3) elif uname -m | grep -q aarch64 || uname -m | grep -q arm64; then __deployment=$(whiptail --notags --title "Select deployment type" --menu \ - "What kind of deployment do you want to run?" 10 65 3 \ + "What kind of deployment do you want to run?" 10 65 4 \ "node" "Ethereum node - consensus, execution and validator client" \ "rpc" "Ethereum RPC node - consensus and execution client" \ + "lido_comp" "Lido-compatible node (Community Staking / Simple DVT)" \ "rocket" "Validator client only - integrate with RocketPool" 3>&1 1>&2 2>&3) elif uname -m | grep -q riscv64; then __deployment=$(whiptail --notags --title "Select deployment type" --menu \ @@ -2586,9 +2593,10 @@ __query_deployment() { "rocket" "Validator client only - integrate with RocketPool" 3>&1 1>&2 2>&3) elif uname -m | grep -q x86_64; then __deployment=$(whiptail --notags --title "Select deployment type" --menu \ - "What kind of deployment do you want to run?" 11 65 4 \ + "What kind of deployment do you want to run?" 11 65 5 \ "node" "Ethereum node - consensus, execution and validator client" \ "rpc" "Ethereum RPC node - consensus and execution client" \ + "lido_comp" "Lido-compatible node (Community Staking / Simple DVT)" \ "rocket" "Validator client only - integrate with RocketPool" \ "ssv" "SSV node - consensus, execution and ssv-node" 3>&1 1>&2 2>&3) else @@ -2597,6 +2605,15 @@ __query_deployment() { uname -m exit 1 fi + + if [ "${__deployment}" = "lido_comp" ]; then + __deployment=$(whiptail --notags --title "Select deployment type for Lido" --menu \ + "What kind of deployment to participate in Lido protocol do you want to run?" 13 90 3 \ + "lido_csm" "[Community Staking] CSM node - Consensus, execution and validator client" \ + "lido_ssv" "[Simple DVT] SSV node - Consensus, execution and ssv-node" \ + "lido_obol" "[Simple DVT] Obol node - Nodes, validator client and charon node (obol middleware)" 3>&1 1>&2 2>&3) + fi + echo "Your deployment choice is: ${__deployment}" } @@ -2907,7 +2924,19 @@ __query_coinbase() { while true; do set +e # Can't rely on the error handler here because of the special-casing below for update() - if [ "${__during_update}" -eq 1 ] || [ ! 
"${__deployment}" = rpc ]; then + if [[ "${__deployment}" =~ "lido_" ]]; then + case "${NETWORK}" in + "mainnet") + FEE_RECIPIENT="0x388c818ca8b9251b393131c08a736a67ccb19297" + ;; + "holesky") + FEE_RECIPIENT="0xE73a3602b99f1f913e72F8bdcBC235e206794Ac8" + ;; + *) + FEE_RECIPIENT="0x0000000000000000000000000000000000000000" + ;; + esac + elif [ "${__during_update}" -eq 1 ] || [ ! "${__deployment}" = rpc ]; then FEE_RECIPIENT=$(whiptail --title "Configure rewards address" --inputbox "What is the address you want \ transaction rewards to be sent to by default? (right-click to paste, CANNOT be an ENS)" 10 65 "${FEE_RECIPIENT}" \ 3>&1 1>&2 2>&3) @@ -2983,6 +3012,89 @@ want to use MEV Boost?" 10 65); then fi return 0 fi + if [[ "${__deployment}" =~ "lido_" ]]; then + MEV_BOOST="true" + while true; do + MEV_RELAYS="" + __selected="" + declare -A relays=() + declare -A optional_relays=() + case "${NETWORK}" in + "mainnet") + relays=( + ['Agnostic']="https://0xa7ab7a996c8584251c8f925da3170bdfd6ebc75d50f5ddc4050a6fdc77f2a3b5fce2cc750d0865e05d7228af97d69561@agnostic-relay.net" + ['bloXroute']="https://0xb0b07cd0abef743db4260b0ed50619cf6ad4d82064cb4fbec9d3ec530f7c5e6793d9f286c4e082c0244ffb9f2658fe88@bloxroute.regulated.blxrbdn.com" + ['Aestus']="https://0xa15b52576bcbf1072f4a011c0f99f9fb6c66f3e1ff321f11f461d15e31b1cb359caa092c71bbded0bae5b5ea401aab7e@aestus.live" + ['bloXroute Max-Profit']="https://0x8b5d2e73e2a3a55c6c87b8b6eb92e0149a125c852751db1422fa951e42a09b82c142c3ea98d0d9930b056a3bc9896b8f@bloxroute.max-profit.blxrbdn.com" + ['Flashbots']="https://0xac6e77dfe25ecd6110b8e780608cce0dab71fdd5ebea22a16c0205200f2f8e2e3ad3b71d3499c54ad14d6c21b41a37ae@boost-relay.flashbots.net" + ['Eden Network']="https://0xb3ee7afcf27f1f1259ac1787876318c6584ee353097a50ed84f51a1f21a323b3736f271a895c7ce918c038e4265918be@relay.edennetwork.io" + ['Ultra Sound']="https://0xa1559ace749633b997cb3fdacffb890aeebdb0f5a3b6aaa7eeeaf1a38af0a8fe88b9e4b1f61f236d2e64d95733327a62@relay.ultrasound.money" + ) + optional_relays=( + ['Manifold Finance']="https://0x98650451ba02064f7b000f5768cf0cf4d4e492317d82871bdc87ef841a0743f69f0f1eea11168503240ac35d101c9135@mainnet-relay.securerpc.com/" + ) + __selected=$(whiptail --title "Relays list" --checklist \ + "Choose relays" 15 50 9 \ + "Agnostic" "" ON \ + "bloXroute" "" ON \ + "Aestus" "" ON \ + "bloXroute Max-Profit" "" ON \ + "Flashbots" "" ON \ + "Eden Network" "" ON \ + "Manifold Finance" "(optional)" ON \ + "Ultra Sound" "" ON 3>&1 1>&2 2>&3) + ;; + "holesky") + relays=( + ['Aestus']="https://0xab78bf8c781c58078c3beb5710c57940874dd96aef2835e7742c866b4c7c0406754376c2c8285a36c630346aa5c5f833@holesky.aestus.live" + ['Titan']="https://0xaa58208899c6105603b74396734a6263cc7d947f444f396a90f7b7d3e65d102aec7e5e5291b27e08d02c50a050825c2f@holesky.titanrelay.xyz" + ['Flashbots Boost']="https://0xafa4c6985aa049fb79dd37010438cfebeb0f2bd42b115b89dd678dab0670c1de38da0c4e9138c9290a398ecd9a0b3110@boost-relay-holesky.flashbots.net" + ['Ultrasound']="https://0xb1559beef7b5ba3127485bbbb090362d9f497ba64e177ee2c8e7db74746306efad687f2cf8574e38d70067d40ef136dc@relay-stag.ultrasound.money" + ) + __selected=$(whiptail --title "Relays list" --checklist \ + "Choose relays" 12 30 5 \ + "Aestus" "" ON \ + "Titan" "" ON \ + "Flashbots Boost" "" ON \ + "Ultrasound" "" ON 3>&1 1>&2 2>&3) + ;; + *) + echo "No MEV RELAYS configured for ${NETWORK}" + return + ;; + esac + for i in "${!relays[@]}"; do + if [[ ${__selected} =~ ${i} ]]; then + if [ -z "${MEV_RELAYS}" ]; then + MEV_RELAYS="${relays[$i]}" + else + 
MEV_RELAYS="${MEV_RELAYS},${relays[$i]}" + fi + fi + done + exitstatus=$? + if [ $exitstatus -eq 0 ]; then + if [ -z "${MEV_RELAYS}" ]; then + whiptail --msgbox "At least one mandatory relay should be chosen" 10 75 + continue + fi + else + echo "You chose Cancel." + exit 1 + fi + for i in "${!optional_relays[@]}"; do + if [[ ${__selected} =~ ${i} ]]; then + if [ -z "${MEV_RELAYS}" ]; then + MEV_RELAYS="${optional_relays[$i]}" + else + MEV_RELAYS="${MEV_RELAYS},${optional_relays[$i]}" + fi + fi + done + break + done + return 0 + fi if (whiptail --title "MEV Boost" --yesno "Do you want to use MEV Boost?" 10 65) then MEV_BOOST="true" if [ "${__value}" = "true" ]; then @@ -3017,7 +3129,6 @@ https://0x8c7d33605ecef85403f8b7289c8058f440cbb6bf72b055dfe2f3e2c6695b6a1ea5a9cd fi MEV_RELAYS=$(whiptail --title "Configure MEV relays" --inputbox "What MEV relay(s) do you want to use? \ (right-click to paste)" 10 65 "${MEV_RELAYS}" 3>&1 1>&2 2>&3) - echo "Your MEV relay(s): ${MEV_RELAYS}" else MEV_BOOST="false" @@ -3025,6 +3136,149 @@ https://0x8c7d33605ecef85403f8b7289c8058f440cbb6bf72b055dfe2f3e2c6695b6a1ea5a9cd fi } +__lido_withdrawal_credentials_address() { + __lido_address="" + case "${NETWORK}" in + "mainnet") + __lido_address="0xB9D7934878B5FB9610B3fE8A5e441e8fad7E293f" + ;; + "holesky") + __lido_address="0xF0179dEC45a37423EAD4FaD5fCb136197872EAd9" + ;; + *) + __lido_address="0x0000000000000000000000000000000000000000" + ;; + esac + echo "${__lido_address}" +} + +__lido_keys_attention_message() { + whiptail --title "Attention" --msgbox "Please, make sure that you set 32 ETH when generated deposit data\nAnd right execution address for your validator keys: $(__lido_withdrawal_credentials_address)\nOtherwise, your keys will not be valid!" 10 80 +} + +__query_lido_keys_generation() { + if [ "${NETWORK}" = "mainnet" ]; then + if (whiptail --title "Security warning" --yesno "Key generation is not recommended on MAINNET for security reasons.\n\nIt is recommended to Select 'No' to skip the step and generate keys in a more secure way later (ex. on an airgapped live USB)\n\nOtherwise, Select 'Yes' to proceed with key generation on this machine" 13 85) then + echo "Proceeding with key generation on MAINNET." + else + __lido_keys_attention_message + return 0 + fi + fi + + __num_validators="1" + __keystore_password="" + __keystore_password_confirm="" + __num_validators=$(whiptail --title "Validators count" --inputbox "Enter the number of validators" 8 60 "${__num_validators}" 3>&1 1>&2 2>&3) + while true; do + __keystore_password=$(whiptail --title "Keystore password" --passwordbox "Enter validators keystore password (at least 8 chars)" 8 60 "${__keystore_password}" 3>&1 1>&2 2>&3) + + exitstatus=$? + if [ $exitstatus -eq 0 ]; then + if [[ ${#__keystore_password} -ge 8 ]]; then + __keystore_password_confirm=$(whiptail --title "Keystore password" --passwordbox "Confirm validators keystore password" 8 60 "${__keystore_password_confirm}" 3>&1 1>&2 2>&3) + if [ "${__keystore_password}" = "${__keystore_password_confirm}" ]; then + echo "Keystore password set." + break + else + whiptail --msgbox "Passwords do not match. Please try again." 10 60 + fi + else + whiptail --msgbox "The keystore password secret needs to be at least 8 characters long. You can try \ +again or Cancel on the next screen." 10 75 + fi + else + echo "You chose Cancel." + exit 1 + fi + done + + exitstatus=$? 
+ if [ $exitstatus -eq 0 ]; then + echo "Your number of validators is:" "${__num_validators}" + __mnemonic="existing" + if (whiptail --title "Mnemonic" --yesno "Do you want to generate new mnemonic?" 8 60) then + __mnemonic="new" + fi + export NETWORK=${NETWORK} && __docompose --profile tools run --rm deposit-cli-${__mnemonic} \ + --uid "$(id -u)" \ + --execution_address "$(__lido_withdrawal_credentials_address)" \ + --num_validators "${__num_validators}" \ + --keystore_password "${__keystore_password}" \ + --non_interactive + else + echo "You chose Cancel." + exit 1 + fi +} + + +__query_lido_obol_enr() { + ${__as_owner} mkdir -p ./.eth + __outcome__=$(__docompose -f ./lido-obol.yml run -u "$(id -u)":"$(id -g)" --rm charon-create-enr) + if [[ "${__outcome__}" =~ "Created ENR private key:" ]]; then + __lido_obol_operator_enr=$(echo "${__outcome__}" | grep -e 'enr:') + else + echo "Something went wrong. Please, try again." + exit 1 + fi + + echo "Your created ENR is:" "${__lido_obol_operator_enr}" + echo "${__lido_obol_operator_enr}" >> "./.eth/charon-enr-public-key" + whiptail --title "Lido Obol operator ENR creation outcome" --msgbox "Your ENR is created!\n\n1. Backup your private key (path: .eth/charon-enr-private-key)!\n2. Copy your public ENR for the futher steps\n\nYour public ENR is:\n\n${__lido_obol_operator_enr}" 16 80 +} + +__query_lido_obol_cluster_definition() { + __cluster_definition_url=$(whiptail --title "Lido Obol cluster creation" --inputbox "\nPut your cluster definition link below:" 10 80 "https://api.obol.tech/dv/example_link_to_your_definition" 3>&1 1>&2 2>&3) + if [ "${__cluster_definition_url}" = "" ]; then + echo "Cluster definition URL can't be empty" + exit 1 + fi + exitstatus=$? + if [ $exitstatus -eq 0 ]; then + ${__as_owner} curl -o ./.eth/cluster_definition.tmp -s "${__cluster_definition_url}" -H "Accept: application/json" +# shellcheck disable=SC2086 + __cluster_definition_is_valid=$(__docompose -f ./lido-obol.yml run --rm -v "$(pwd)"/.eth/cluster_definition.tmp:/cluster_definition.json:ro curl-jq sh -c \ + "cat /cluster_definition.json | jq -r 'all(.validators[]; .fee_recipient_address == \"'${FEE_RECIPIENT}'\" and .withdrawal_address == \"'$(__lido_withdrawal_credentials_address)'\")'" | tail -n 1) + set -e + if [ "${__cluster_definition_is_valid}" = "true" ]; then + echo "Your cluster definition url is:" "${__cluster_definition_url}" + ${__as_owner} mv ./.eth/cluster_definition.tmp ./.eth/cluster-definition.json + else + whiptail --title "Lido Obol cluster creation" --msgbox "Your cluster definition is not valid.\n\nCheck that every validator has \`fee_recipient_address\` and \`withdrawal_address\` equal to Lido contracts and try again.\n\nLido fee recipient: '${FEE_RECIPIENT}'\nLido withdrawal credentials: '$(__lido_withdrawal_credentials_address)'" 14 90 + echo "Your cluster definition is NOT valid." + ${__as_owner} rm ./.eth/cluster_definition.tmp + exit 1 + fi + else + echo "You chose Cancel." + exit 1 + fi +} + +__query_lido_obol_cluster_dkg() { + if [ -d ./.eth/validator_keys ]; then + __folder_postfix=${EPOCHSECONDS} + ${__as_owner} mkdir ./.eth_backup_"$__folder_postfix" + ${__as_owner} cp -vr ./.eth/validator_keys ./.eth_backup_"$__folder_postfix"/validator_keys + ${__as_owner} rm -rf ./.eth/validator_keys + fi + if (whiptail --title "DKG ceremony" --yesno "Do you want to start DKG ceremony?\n\nMake sure all participants are ready!" 
10 60) then + __outcome__=$(__docompose -f ./lido-obol.yml run -u "$(id -u)":"$(id -g)" --rm charon-run-dkg) + exitstatus=$? + if [ $exitstatus -ne 0 ]; then + echo "Something went wrong. Please, try again." + exit 1 + fi + echo "DKG ceremony finished successfully" + whiptail --title "Finish" --msgbox "\nThe DKG is finished!" 10 40 + else + whiptail --title "DKG ceremony" --msgbox "You should start DKG ceremony before proceeding further" 8 60 + echo "DKG ceremony starting is canceled" + exit 1 + fi +} + __query_dkg() { __ssv_operator_id=-1 if (whiptail --title "DKG ceremony" --yesno "Do you want to participate in DKG ceremonies as an operator?" 10 60); then @@ -3149,13 +3403,16 @@ config() { __query_network __query_deployment case "${__deployment}" in - "node") + "node" | "lido_csm") + __query_consensus_client + ;; + "lido_obol") __query_consensus_client ;; "validator" | "rocket") __query_validator_client ;; - "ssv") + "ssv" | "lido_ssv") if [ "${NETWORK}" = "holesky" ]; then sed -i'.original' 's/ Network: .*/ Network: holesky/' ssv-config/config.yaml elif [ "${NETWORK}" = "mainnet" ]; then @@ -3204,9 +3461,22 @@ config() { __query_mev __query_grafana __query_coinbase - if [ "${__deployment}" = "node" ]; then + if [[ "${__deployment}" = "node" || "${__deployment}" = "lido_csm" ]]; then __query_graffiti fi + if [ "${__deployment}" = "lido_csm" ]; then + if (whiptail --title "Keys generation" --yesno "Do you want to generate validator keys?" 10 60) then + __query_lido_keys_generation + else + __lido_keys_attention_message + fi + if [ "${NETWORK}" = "holesky" ]; then + __link="https://csm.testnet.fi" + else + __link="https://csm.lido.fi" + fi + whiptail --title "Finish" --msgbox "Final steps!\n\n1. Run your node './ethd start'\n\n2. Wait until your node is fully synchronized\n\n4. Open ${__link} to submit your keys with '.eth/validator_keys/deposit-data-*.json' file content\n\n5. Wait for keys validation\n\n6. 
Import your keys by './ethd keys import'" 19 85 + fi else unset EXECUTION_CLIENT unset GRAFANA_CLIENT @@ -3222,11 +3492,114 @@ config() { __during_config=0 + if [ "${__deployment}" = "lido_obol" ]; then + CL_NODE="http://charon:3600" + case "${NETWORK}" in + "mainnet") +# We are using the variable +# shellcheck disable=SC2034 + VE_LOCATOR_ADDRESS="0xC1d0b3DE6792Bf6b4b37EccdcC24e45978Cfd2Eb" +# We are using the variable +# shellcheck disable=SC2034 + VE_ORACLE_ADDRESSES_ALLOWLIST='["0x140Bd8FbDc884f48dA7cb1c09bE8A2fAdfea776E","0xA7410857ABbf75043d61ea54e07D57A6EB6EF186","0x404335BcE530400a5814375E7Ec1FB55fAff3eA2","0x946D3b081ed19173dC83Cd974fC69e1e760B7d78","0x007DE4a5F7bc37E2F26c0cb2E8A95006EE9B89b5","0xEC4BfbAF681eb505B94E4a7849877DC6c600Ca3A","0x61c91ECd902EB56e314bB2D5c5C07785444Ea1c8","0x1Ca0fEC59b86F549e1F1184d97cb47794C8Af58d","0xc79F702202E3A6B0B6310B537E786B9ACAA19BAf"]' +# We are using the variable +# shellcheck disable=SC2034 + VE_STAKING_MODULE_ID="2" +# We are using the variable +# shellcheck disable=SC2034 + LIDO_DV_EXIT_EXIT_EPOCH="194048" # capella + ;; + "holesky") +# We are using the variable +# shellcheck disable=SC2034 + VE_LOCATOR_ADDRESS="0x28FAB2059C713A7F9D8c86Db49f9bb0e96Af1ef8" +# We are using the variable +# shellcheck disable=SC2034 + VE_ORACLE_ADDRESSES_ALLOWLIST='["0x12A1D74F8697b9f4F1eEBb0a9d0FB6a751366399","0xD892c09b556b547c80B7d8c8cB8d75bf541B2284","0xf7aE520e99ed3C41180B5E12681d31Aa7302E4e5","0x31fa51343297FFce0CC1E67a50B2D3428057D1b1","0x81E411f1BFDa43493D7994F82fb61A415F6b8Fd4","0x4c75FA734a39f3a21C57e583c1c29942F021C6B7","0xD3b1e36A372Ca250eefF61f90E833Ca070559970","0xF0F23944EfC5A63c53632C571E7377b85d5E6B6f","0xb29dD2f6672C0DFF2d2f173087739A42877A5172","0x3799bDA7B884D33F79CEC926af21160dc47fbe05"]' +# We are using the variable +# shellcheck disable=SC2034 + VE_STAKING_MODULE_ID="2" +# We are using the variable +# shellcheck disable=SC2034 + LIDO_DV_EXIT_EXIT_EPOCH="256" # capella + ;; + *) + ;; + esac + + if [ -f "./.eth/cluster-lock.json" ]; then + if (whiptail --title "Lido Obol cluster exists" --yesno "Your cluster has already been created. Continue with it?" 10 60); then +# shellcheck disable=SC2086 + __cluster_lock_is_valid=$(__docompose -f ./lido-obol.yml run --rm -v "$(pwd)"/.eth/cluster-lock.json:/cluster-lock.json:ro curl-jq sh -c \ + "cat /cluster-lock.json | jq -r 'all(.cluster_definition.validators[]; .fee_recipient_address == \"'${FEE_RECIPIENT}'\" and .withdrawal_address == \"'$(__lido_withdrawal_credentials_address)'\")'" | tail -n 1) + if [[ "${__cluster_lock_is_valid}" =~ "true" ]]; then + echo "Your cluster lock is valid." + else + whiptail --title "Lido Obol cluster definition" --msgbox "Your cluster lock file './.eth/cluster-lock.json' is not valid.\n\nCheck that every validator has \`fee_recipient_address\` and \`withdrawal_address\` equal to Lido contracts and try again.\n\nLido fee recipient: '${FEE_RECIPIENT}'\nLido withdrawal credentials: '$(__lido_withdrawal_credentials_address)'" 14 90 + echo "Your cluster lock is NOT valid." + exit 1 + fi + elif (whiptail --title "Lido Obol cluster creation" --yesno "Backup a previously created cluster to create a new one?" 
10 80); then + ${__as_owner} cp -vr ./.eth ./.eth_backup_"$EPOCHSECONDS" + ${__as_owner} rm -rf ./.eth + __query_lido_obol_enr + __query_lido_obol_cluster_definition + __query_lido_obol_cluster_dkg + else + whiptail --title "Lido Obol cluster creation" --msgbox "The \`.eth\` folder must be empty or non-existent to continue" 10 80 + echo "The \`.eth\` folder must be empty to create a new cluster" + exit 1 + fi + else + if [ -f "./.eth/charon-enr-private-key" ] && [ -f "./.eth/charon-enr-public-key" ]; then + if (whiptail --title "Lido Obol operator ENR creation" --yesno "You already have ENR. Use it?" 8 50); then + echo "Use existing ENR" + else + ${__as_owner} cp -vr ./.eth ./.eth_backup_"$EPOCHSECONDS" + ${__as_owner} rm -rf ./.eth + __query_lido_obol_enr + fi + else + __query_lido_obol_enr + fi + + if [ -f "./.eth/cluster-definition.json" ]; then + if (whiptail --title "Lido Obol cluster creation in process" --yesno "You already have cluster definition. Use it?" 10 60); then +# shellcheck disable=SC2086 + __cluster_definition_is_valid=$(__docompose -f ./lido-obol.yml run --rm -v "$(pwd)"/.eth/cluster-definition.json:/cluster-definition.json:ro curl-jq sh -c \ + "cat /cluster-definition.json | jq -r 'all(.validators[]; .fee_recipient_address == \"'${FEE_RECIPIENT}'\" and .withdrawal_address == \"'$(__lido_withdrawal_credentials_address)'\")'" | tail -n 1) + if [ "${__cluster_definition_is_valid}" = "true" ]; then + echo "Your cluster definition is valid." + else + whiptail --title "Lido Obol cluster creation" --msgbox "Your cluster definition is not valid.\n\nCheck that every validator has \`fee_recipient_address\` and \`withdrawal_address\` equal to Lido contracts and try again.\n\nLido fee recipient: '${FEE_RECIPIENT}'\nLido withdrawal credentials: '$(__lido_withdrawal_credentials_address)'" 14 90 + echo "Your cluster definition is NOT valid." + exit 1 + fi + else + __query_lido_obol_cluster_definition + fi + else + __query_lido_obol_cluster_definition + fi + __query_lido_obol_cluster_dkg + fi + +# We are using the variable +# shellcheck disable=SC2034 + VE_OPERATOR_ID=$(whiptail --title "Lido Operator ID" --inputbox "Put your Operator ID from Lido Operators dashboard \ +(right-click to paste)" 10 60 3>&1 1>&2 2>&3) + __obol_prom_remote_token=$(whiptail --title "Obol prometheus" --inputbox "Put Obol Prometheus remote write token \ +(right-click to paste)" 10 60 3>&1 1>&2 2>&3) + cat ./prometheus/obol-prom.yml > ./prometheus/custom-prom.yml + sed -i'.original' "s| credentials: OBOL_PROM_REMOTE_WRITE_TOKEN| credentials: ${__obol_prom_remote_token}|" ./prometheus/custom-prom.yml + rm -f ./prometheus/custom-prom.yml.original + fi + COMPOSE_FILE="${CONSENSUS_CLIENT}" if [ -n "${EXECUTION_CLIENT+x}" ]; then COMPOSE_FILE="${COMPOSE_FILE}:${EXECUTION_CLIENT}" fi - if [ "${__deployment}" = "ssv" ]; then + if [[ "${__deployment}" = "ssv" || "${__deployment}" = "lido_ssv" ]]; then COMPOSE_FILE="${COMPOSE_FILE}:ssv.yml" if [[ -n "${__ssv_operator_id}" && ! "${__ssv_operator_id}" = "-1" ]]; then COMPOSE_FILE="${COMPOSE_FILE}:ssv-dkg.yml" @@ -3238,10 +3611,16 @@ config() { if [ "${MEV_BOOST}" = "true" ] && [ ! 
"${__deployment}" = "rocket" ]; then COMPOSE_FILE="${COMPOSE_FILE}:mev-boost.yml" fi + if [ "${__deployment}" = "lido_obol" ]; then + COMPOSE_FILE="${COMPOSE_FILE}:lido-obol.yml" + fi if { [ "${__deployment}" = "node" ] || [ "${__deployment}" = "rocket" ]; } \ && [ "${NETWORK}" = "holesky" ]; then COMPOSE_FILE="${COMPOSE_FILE}:deposit-cli.yml" fi + if [ "${__deployment}" = "lido_csm" ]; then + COMPOSE_FILE="${COMPOSE_FILE}:deposit-cli.yml" + fi # Not multi-arch, this would break on ARM64 # COMPOSE_FILE="${COMPOSE_FILE}:ethdo.yml" if [ "${__deployment}" = "rocket" ]; then @@ -3271,6 +3650,23 @@ config() { __set_value_in_env __var=MEV_RELAYS __set_value_in_env + if [ "${__deployment}" = "lido_obol" ]; then + var=LIDO_DV_EXIT_EXIT_EPOCH + __set_value_in_env + var=VE_OPERATOR_ID + __set_value_in_env + var=VE_LOCATOR_ADDRESS + __set_value_in_env + var=VE_ORACLE_ADDRESSES_ALLOWLIST + __set_value_in_env + var=VE_STAKING_MODULE_ID + __set_value_in_env +# We are using the variable +# shellcheck disable=SC2034 + ENABLE_DIST_ATTESTATION_AGGR="true" + var=ENABLE_DIST_ATTESTATION_AGGR + __set_value_in_env + fi if [[ "${NETWORK}" = "gnosis" ]] && [[ "${CONSENSUS_CLIENT}" =~ "nimbus" ]] ; then # We are using the variable # shellcheck disable=SC2034 @@ -3316,6 +3712,10 @@ version() { __value=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) # Client versions case "${__value}" in + *lido-obol.yml* ) + __docompose exec charon charon version + echo + ;;& *ssv.yml* ) __docompose exec ssv-node /go/bin/ssvnode --version echo diff --git a/grafana/provision.sh b/grafana/provision.sh index 7dc085bc..50f14d4b 100755 --- a/grafana/provision.sh +++ b/grafana/provision.sh @@ -122,6 +122,21 @@ case "$CLIENT" in wget -t 3 -T 10 -qcO - "${__url}" | jq '.title = "SSV Node Dashboard"' \ | jq 'walk(if . == "${DS_PROMETHEUS}" then "Prometheus" else . 
end)' >"${__file}" ;;& + *lido-obol.yml* ) + # Lido Obol Dashboard + __url_charon='https://raw.githubusercontent.com/ObolNetwork/lido-charon-distributed-validator-node/main/grafana/dashboards/dash_charon_overview.json' + __file_charon='/etc/grafana/provisioning/dashboards/charon.json' + wget -t 3 -T 10 -qcO - "${__url_charon}" | sed 's/"uid": "prometheus"/"uid": "PBFA97CFB590B2093"/g' >"${__file_charon}" + __url_single_node='https://raw.githubusercontent.com/ObolNetwork/lido-charon-distributed-validator-node/main/grafana/dashboards/single_node_dashboard.json' + __file_single_node='/etc/grafana/provisioning/dashboards/single_node_dashboard.json' + wget -t 3 -T 10 -qcO - "${__url_single_node}" | sed 's/"uid": "prometheus"/"uid": "PBFA97CFB590B2093"/g' >"${__file_single_node}" + __url_validator_ejector='https://raw.githubusercontent.com/ObolNetwork/lido-charon-distributed-validator-node/main/grafana/dashboards/validator_ejector_overview.json' + __file_validator_ejector='/etc/grafana/provisioning/dashboards/validator_ejector_overview.json' + wget -t 3 -T 10 -qcO - "${__url_validator_ejector}" | sed 's/"uid": "prometheus"/"uid": "PBFA97CFB590B2093"/g' >"${__file_validator_ejector}" + __url_logs='https://raw.githubusercontent.com/ObolNetwork/lido-charon-distributed-validator-node/main/grafana/dashboards/logs_dashboard.json' + __file_logs='/etc/grafana/provisioning/dashboards/logs_dashboard.json' + wget -t 3 -T 10 -qcO - "${__url_logs}" | sed 's/"uid": "loki"/"uid": "P8E80F9AEF21F6940"/g' >"${__file_logs}" + ;;& !(*grafana-rootless*) ) # cadvisor and node exporter dashboard __id=10619 diff --git a/lido-obol.yml b/lido-obol.yml new file mode 100644 index 00000000..978f5c0e --- /dev/null +++ b/lido-obol.yml @@ -0,0 +1,104 @@ +x-logging: &logging + logging: + driver: json-file + options: + max-size: 100m + max-file: "3" + tag: '{{.ImageName}}|{{.Name}}|{{.ImageFullID}}|{{.FullID}}' + +services: + charon: + restart: "unless-stopped" + image: obolnetwork/charon:${CHARON_VERSION:-latest} + volumes: + - .eth:/opt/charon/.charon + <<: *logging + environment: + - CHARON_BEACON_NODE_ENDPOINTS=${OBOL_CL_NODE:-http://consensus:5052} + - CHARON_LOG_LEVEL=${OBOL_LOG_LEVEL:-debug} + - CHARON_LOG_FORMAT=${OBOL_LOG_FORMAT:-console} + - CHARON_P2P_RELAYS=${OBOL_P2P_RELAYS:-https://0.relay.obol.tech,https://1.relay.obol.tech/} + - CHARON_P2P_EXTERNAL_HOSTNAME=${OBOL_P2P_EXTERNAL_HOSTNAME:-} # Empty default required to avoid warnings. 
+ - CHARON_P2P_TCP_ADDRESS=0.0.0.0:${OBOL_P2P_PORT:-3610} + - CHARON_VALIDATOR_API_ADDRESS=0.0.0.0:3600 + - CHARON_MONITORING_ADDRESS=0.0.0.0:3620 + - CHARON_BUILDER_API=${BUILDER_API_ENABLED:-true} + - CHARON_FEATURE_SET_ENABLE=eager_double_linear,consensus_participate + - CHARON_LOKI_ADDRESSES=${CHARON_LOKI_ADDRESSES:-http://loki:3100/loki/api/v1/push} + - CHARON_LOKI_SERVICE=charon + ports: + - ${OBOL_P2P_PORT:-3610}:${OBOL_P2P_PORT:-3610}/tcp # P2P TCP libp2p + healthcheck: + test: wget -qO- http://localhost:3620/readyz + labels: + - metrics.scrape=true + - metrics.path=/metrics + - metrics.port=3620 + - metrics.instance=charon + + charon-create-enr: + profiles: ["tools"] + restart: "no" + image: obolnetwork/charon:${CHARON_VERSION:-latest} + command: create enr + volumes: + - .eth:/opt/charon/.charon + charon-run-dkg: + profiles: ["tools"] + restart: "no" + image: obolnetwork/charon:${CHARON_VERSION:-latest} + volumes: + - .eth:/opt/charon/.charon + command: dkg --publish + curl-jq: + image: curl-jq:local + pull_policy: build + build: + context: ./traefik + dockerfile: Dockerfile.jq + restart: "no" + profiles: [ "tools" ] + + validator-ejector: + platform: linux/amd64 + image: lidofinance/validator-ejector:${VALIDATOR_EJECTOR_VERSION:-1.5.0} + user: ":" + volumes: + - ./validator-ejector:/exitmessages + restart: unless-stopped + environment: + - EXECUTION_NODE=${OBOL_EL_NODE:-http://execution:8545} + - CONSENSUS_NODE=${OBOL_CL_NODE:-http://consensus:5052} + - LOCATOR_ADDRESS=${VE_LOCATOR_ADDRESS:-0x28FAB2059C713A7F9D8c86Db49f9bb0e96Af1ef8} + - STAKING_MODULE_ID=${VE_STAKING_MODULE_ID:-2} + - OPERATOR_ID=${VE_OPERATOR_ID} + - ORACLE_ADDRESSES_ALLOWLIST=${VE_ORACLE_ADDRESSES_ALLOWLIST:-["0x140Bd8FbDc884f48dA7cb1c09bE8A2fAdfea776E","0xA7410857ABbf75043d61ea54e07D57A6EB6EF186","0x404335BcE530400a5814375E7Ec1FB55fAff3eA2","0x946D3b081ed19173dC83Cd974fC69e1e760B7d78","0x007DE4a5F7bc37E2F26c0cb2E8A95006EE9B89b5","0xEC4BfbAF681eb505B94E4a7849877DC6c600Ca3A","0x61c91ECd902EB56e314bB2D5c5C07785444Ea1c8","0x1Ca0fEC59b86F549e1F1184d97cb47794C8Af58d","0xc79F702202E3A6B0B6310B537E786B9ACAA19BAf"]} + - MESSAGES_LOCATION=/exitmessages + - RUN_METRICS=true + - HTTP_PORT=8989 + - DISABLE_SECURITY_DONT_USE_IN_PRODUCTION=${VE_DISABLE_SECURITY:-false} + - FORCE_DENCUN_FORK_MODE=true + labels: + - metrics.scrape=true + - metrics.path=/metrics + - metrics.port=8989 + - metrics.instance=validator-ejector + + lido-dv-exit: + image: obolnetwork/lido-dv-exit:${LIDO_DV_EXIT_VERSION:-e8bee1f} + user: ":" + volumes: + - ./validator-ejector:/exitmessages + - .eth:/charon + environment: + - LIDODVEXIT_BEACON_NODE_URL=${OBOL_CL_NODE:-http://consensus:5052} + - LIDODVEXIT_CHARON_RUNTIME_DIR=/charon + - LIDODVEXIT_EJECTOR_EXIT_PATH=/exitmessages + - LIDODVEXIT_EXIT_EPOCH=${LIDO_DV_EXIT_EXIT_EPOCH:-194048} + - LIDODVEXIT_LOG_LEVEL=${LIDO_DV_EXIT_LOG_LEVEL:-info} + - LIDODVEXIT_VALIDATOR_QUERY_CHUNK_SIZE=${LIDO_DV_EXIT_VALIDATOR_QUERY_CHUNK_SIZE:-5} + restart: on-failure + +networks: + default: + enable_ipv6: ${IPV6:-false} diff --git a/lighthouse-vc-only.yml b/lighthouse-vc-only.yml index 172c0292..499f3327 100644 --- a/lighthouse-vc-only.yml +++ b/lighthouse-vc-only.yml @@ -32,6 +32,7 @@ services: - GRAFFITI=${GRAFFITI:-} - DEFAULT_GRAFFITI=${DEFAULT_GRAFFITI:-false} - NETWORK=${NETWORK} + - ENABLE_DIST_ATTESTATION_AGGR=${ENABLE_DIST_ATTESTATION_AGGR:-false} volumes: - lhvalidator-data:/var/lib/lighthouse - /etc/localtime:/etc/localtime:ro diff --git a/lighthouse.yml b/lighthouse.yml index 1eacac90..504224cb 100644 --- 
a/lighthouse.yml +++ b/lighthouse.yml @@ -109,6 +109,7 @@ services: - GRAFFITI=${GRAFFITI:-} - DEFAULT_GRAFFITI=${DEFAULT_GRAFFITI:-false} - NETWORK=${NETWORK} + - ENABLE_DIST_ATTESTATION_AGGR=${ENABLE_DIST_ATTESTATION_AGGR:-false} volumes: - lhvalidator-data:/var/lib/lighthouse - /etc/localtime:/etc/localtime:ro diff --git a/lighthouse/docker-entrypoint-vc.sh b/lighthouse/docker-entrypoint-vc.sh index e97ae5ca..6531e35e 100755 --- a/lighthouse/docker-entrypoint-vc.sh +++ b/lighthouse/docker-entrypoint-vc.sh @@ -52,12 +52,19 @@ else __doppel="" fi +# Distributed attestation aggregation +if [ "${ENABLE_DIST_ATTESTATION_AGGR}" = "true" ]; then + __att_aggr="--distributed" +else + __att_aggr="" +fi + if [ "${DEFAULT_GRAFFITI}" = "true" ]; then # Word splitting is desired for the command line parameters # shellcheck disable=SC2086 - exec "$@" ${__network} ${__mev_boost} ${__beacon_stats} ${__doppel} ${VC_EXTRAS} + exec "$@" ${__network} ${__mev_boost} ${__beacon_stats} ${__doppel} ${__att_aggr} ${VC_EXTRAS} else # Word splitting is desired for the command line parameters # shellcheck disable=SC2086 - exec "$@" ${__network} "--graffiti" "${GRAFFITI}" ${__mev_boost} ${__beacon_stats} ${__doppel} ${VC_EXTRAS} + exec "$@" ${__network} "--graffiti" "${GRAFFITI}" ${__mev_boost} ${__beacon_stats} ${__doppel} ${__att_aggr} ${VC_EXTRAS} fi diff --git a/lodestar-vc-only.yml b/lodestar-vc-only.yml index 53adb2f4..70d5e9a5 100644 --- a/lodestar-vc-only.yml +++ b/lodestar-vc-only.yml @@ -33,6 +33,7 @@ services: - DEFAULT_GRAFFITI=${DEFAULT_GRAFFITI:-false} - WEB3SIGNER=${WEB3SIGNER} - NETWORK=${NETWORK} + - ENABLE_DIST_ATTESTATION_AGGR=${ENABLE_DIST_ATTESTATION_AGGR:-false} volumes: - lsvalidator-data:/var/lib/lodestar/validators - /etc/localtime:/etc/localtime:ro diff --git a/lodestar.yml b/lodestar.yml index e24aa32f..91991958 100644 --- a/lodestar.yml +++ b/lodestar.yml @@ -104,6 +104,7 @@ services: - DEFAULT_GRAFFITI=${DEFAULT_GRAFFITI:-false} - WEB3SIGNER=${WEB3SIGNER} - NETWORK=${NETWORK} + - ENABLE_DIST_ATTESTATION_AGGR=${ENABLE_DIST_ATTESTATION_AGGR:-false} volumes: - lsvalidator-data:/var/lib/lodestar/validators - /etc/localtime:/etc/localtime:ro diff --git a/lodestar/docker-entrypoint-vc.sh b/lodestar/docker-entrypoint-vc.sh index 4814e71b..3ebf5e17 100755 --- a/lodestar/docker-entrypoint-vc.sh +++ b/lodestar/docker-entrypoint-vc.sh @@ -60,12 +60,19 @@ else __w3s_url="" fi +# Distributed attestation aggregation +if [ "${ENABLE_DIST_ATTESTATION_AGGR}" = "true" ]; then + __att_aggr="--distributed" +else + __att_aggr="" +fi + if [ "${DEFAULT_GRAFFITI}" = "true" ]; then # Word splitting is desired for the command line parameters # shellcheck disable=SC2086 - exec "$@" ${__network} ${__mev_boost} ${__beacon_stats} ${__doppel} ${__w3s_url} ${VC_EXTRAS} + exec "$@" ${__network} ${__mev_boost} ${__beacon_stats} ${__doppel} ${__w3s_url} ${__att_aggr} ${VC_EXTRAS} else # Word splitting is desired for the command line parameters # shellcheck disable=SC2086 - exec "$@" ${__network} "--graffiti" "${GRAFFITI}" ${__mev_boost} ${__beacon_stats} ${__doppel} ${__w3s_url} ${VC_EXTRAS} + exec "$@" ${__network} "--graffiti" "${GRAFFITI}" ${__mev_boost} ${__beacon_stats} ${__doppel} ${__w3s_url} ${__att_aggr} ${VC_EXTRAS} fi diff --git a/nimbus-allin1.yml b/nimbus-allin1.yml index 65ce944d..0d670be4 100644 --- a/nimbus-allin1.yml +++ b/nimbus-allin1.yml @@ -47,6 +47,7 @@ services: - DEFAULT_GRAFFITI=${DEFAULT_GRAFFITI:-false} - WEB3SIGNER=${WEB3SIGNER:-false} - EMBEDDED_VC=true + - 
ENABLE_DIST_ATTESTATION_AGGR=${ENABLE_DIST_ATTESTATION_AGGR:-false} ports: - ${HOST_IP:-}:${CL_P2P_PORT:-9000}:${CL_P2P_PORT:-9000}/tcp - ${HOST_IP:-}:${CL_P2P_PORT:-9000}:${CL_P2P_PORT:-9000}/udp diff --git a/nimbus-vc-only.yml b/nimbus-vc-only.yml index b5e12ca8..89440d5e 100644 --- a/nimbus-vc-only.yml +++ b/nimbus-vc-only.yml @@ -38,6 +38,7 @@ services: - GRAFFITI=${GRAFFITI:-} - DEFAULT_GRAFFITI=${DEFAULT_GRAFFITI:-false} - WEB3SIGNER=${WEB3SIGNER:-false} + - ENABLE_DIST_ATTESTATION_AGGR=${ENABLE_DIST_ATTESTATION_AGGR:-false} networks: default: aliases: diff --git a/nimbus.yml b/nimbus.yml index dabbf36f..951a9159 100644 --- a/nimbus.yml +++ b/nimbus.yml @@ -102,6 +102,7 @@ services: - GRAFFITI=${GRAFFITI:-} - DEFAULT_GRAFFITI=${DEFAULT_GRAFFITI:-false} - WEB3SIGNER=${WEB3SIGNER:-false} + - ENABLE_DIST_ATTESTATION_AGGR=${ENABLE_DIST_ATTESTATION_AGGR:-false} networks: default: aliases: diff --git a/nimbus/docker-entrypoint-vc.sh b/nimbus/docker-entrypoint-vc.sh index d9f6ae5a..5be960b4 100755 --- a/nimbus/docker-entrypoint-vc.sh +++ b/nimbus/docker-entrypoint-vc.sh @@ -50,12 +50,19 @@ else __w3s_url="" fi +# Distributed attestation aggregation +if [ "${ENABLE_DIST_ATTESTATION_AGGR}" = "true" ]; then + __att_aggr="--distributed" +else + __att_aggr="" +fi + if [ "${DEFAULT_GRAFFITI}" = "true" ]; then # Word splitting is desired for the command line parameters # shellcheck disable=SC2086 - exec "$@" ${__w3s_url} ${__log_level} ${__doppel} ${__mev_boost} ${VC_EXTRAS} + exec "$@" ${__w3s_url} ${__log_level} ${__doppel} ${__mev_boost} ${__att_aggr} ${VC_EXTRAS} else # Word splitting is desired for the command line parameters # shellcheck disable=SC2086 - exec "$@" ${__w3s_url} "--graffiti=${GRAFFITI}" ${__log_level} ${__doppel} ${__mev_boost} ${VC_EXTRAS} + exec "$@" ${__w3s_url} "--graffiti=${GRAFFITI}" ${__log_level} ${__doppel} ${__mev_boost} ${__att_aggr} ${VC_EXTRAS} fi diff --git a/prometheus/docker-entrypoint.sh b/prometheus/docker-entrypoint.sh index 6dee85a0..80529bbc 100755 --- a/prometheus/docker-entrypoint.sh +++ b/prometheus/docker-entrypoint.sh @@ -39,6 +39,10 @@ select_clients() { *ssv.yml* ) cp ./rootless/ssv-prom.yml ./rootless.d ;; esac + case "$CLIENT" in + *lido-obol.yml* ) cp ./rootless/lido-obol-prom.yml ./rootless.d ;; + esac + case "$CLIENT" in *traefik-* ) cp ./rootless/traefik-prom.yml ./rootless.d ;; esac diff --git a/prometheus/obol-prom.yml b/prometheus/obol-prom.yml new file mode 100644 index 00000000..a129ec02 --- /dev/null +++ b/prometheus/obol-prom.yml @@ -0,0 +1,8 @@ +remote_write: + - url: https://vm.monitoring.gcp.obol.tech/write + authorization: + credentials: OBOL_PROM_REMOTE_WRITE_TOKEN + write_relabel_configs: + - source_labels: [job] + regex: "charon" + action: keep # Keeps charon metrics and drop metrics from other containers. 
\ No newline at end of file diff --git a/prometheus/rootless/lido-obol-prom.yml b/prometheus/rootless/lido-obol-prom.yml new file mode 100644 index 00000000..6af549bb --- /dev/null +++ b/prometheus/rootless/lido-obol-prom.yml @@ -0,0 +1,11 @@ +scrape_configs: + - job_name: charon + metrics_path: /metrics + static_configs: + - targets: + - charon:3620 + - job_name: validator-ejector + metrics_path: /metrics + static_configs: + - targets: + - validator-ejector:8989 \ No newline at end of file diff --git a/prysm-vc-only.yml b/prysm-vc-only.yml index f0e2e486..8c0b4785 100644 --- a/prysm-vc-only.yml +++ b/prysm-vc-only.yml @@ -37,6 +37,7 @@ services: - DEFAULT_GRAFFITI=${DEFAULT_GRAFFITI:-false} - WEB3SIGNER=${WEB3SIGNER:-false} - NETWORK=${NETWORK} + - ENABLE_DIST_ATTESTATION_AGGR=${ENABLE_DIST_ATTESTATION_AGGR:-false} networks: default: aliases: @@ -69,6 +70,9 @@ services: - consensus:5052 - --suggested-fee-recipient - ${FEE_RECIPIENT} + - --enable-beacon-rest-api + - --beacon-rest-api-provider + - ${CL_NODE:-http://consensus:5052} labels: - traefik.enable=true - traefik.http.routers.prysm.entrypoints=web,websecure @@ -110,7 +114,9 @@ services: - validator - exit - --wallet-dir=/var/lib/prysm/ - - --beacon-rpc-provider=${CL_NODE:-http://consensus:4000} + - --enable-beacon-rest-api + - --beacon-rest-api-provider + - ${CL_NODE:-http://consensus:5052} - --wallet-password-file=/var/lib/prysm/password.txt - --${NETWORK} diff --git a/prysm.yml b/prysm.yml index c40bcf20..cde37e1c 100644 --- a/prysm.yml +++ b/prysm.yml @@ -115,6 +115,7 @@ services: - DEFAULT_GRAFFITI=${DEFAULT_GRAFFITI:-false} - WEB3SIGNER=${WEB3SIGNER:-false} - NETWORK=${NETWORK} + - ENABLE_DIST_ATTESTATION_AGGR=${ENABLE_DIST_ATTESTATION_AGGR:-false} networks: default: aliases: @@ -147,6 +148,9 @@ services: - consensus:5052 - --suggested-fee-recipient - ${FEE_RECIPIENT} + - --enable-beacon-rest-api + - --beacon-rest-api-provider + - ${CL_NODE:-http://consensus:5052} depends_on: - consensus labels: @@ -190,7 +194,9 @@ services: - validator - exit - --wallet-dir=/var/lib/prysm/ - - --beacon-rpc-provider=consensus:4000 + - --enable-beacon-rest-api + - --beacon-rest-api-provider + - ${CL_NODE:-http://consensus:5052} - --wallet-password-file=/var/lib/prysm/password.txt - --${NETWORK} depends_on: diff --git a/prysm/docker-entrypoint-vc.sh b/prysm/docker-entrypoint-vc.sh index 2153f4f0..bdf93e6f 100755 --- a/prysm/docker-entrypoint-vc.sh +++ b/prysm/docker-entrypoint-vc.sh @@ -58,12 +58,19 @@ else __w3s_url="--web --wallet-password-file /var/lib/prysm/password.txt" fi +# Distributed attestation aggregation +if [ "${ENABLE_DIST_ATTESTATION_AGGR}" = "true" ]; then + __att_aggr="--distributed" +else + __att_aggr="" +fi + if [ "${DEFAULT_GRAFFITI}" = "true" ]; then # Word splitting is desired for the command line parameters # shellcheck disable=SC2086 - exec "$@" ${__network} ${__w3s_url} ${__mev_boost} ${__doppel} ${VC_EXTRAS} + exec "$@" ${__network} ${__w3s_url} ${__mev_boost} ${__doppel} ${__att_aggr} ${VC_EXTRAS} else # Word splitting is desired for the command line parameters # shellcheck disable=SC2086 - exec "$@" ${__network} "--graffiti" "${GRAFFITI}" ${__w3s_url} ${__mev_boost} ${__doppel} ${VC_EXTRAS} + exec "$@" ${__network} "--graffiti" "${GRAFFITI}" ${__w3s_url} ${__mev_boost} ${__doppel} ${__att_aggr} ${VC_EXTRAS} fi diff --git a/staking-deposit-cli/docker-entrypoint.sh b/staking-deposit-cli/docker-entrypoint.sh index 8ce9fb34..731e1984 100755 --- a/staking-deposit-cli/docker-entrypoint.sh +++ 
b/staking-deposit-cli/docker-entrypoint.sh @@ -8,6 +8,7 @@ set -Eeuo pipefail ARGS=() foundu=0 foundf=0 +foundnonint=0 uid=1000 folder="validator_keys" for var in "$@"; do @@ -19,6 +20,10 @@ for var in "$@"; do foundf=1 continue fi + if [ "$var" = '--non_interactive' ]; then + foundnonint=1 + continue + fi if [ "$foundu" = '1' ]; then foundu=0 if ! [[ $var =~ ^[0-9]+$ ]] ; then @@ -36,6 +41,16 @@ for var in "$@"; do ARGS+=("$var") done +for i in "${!ARGS[@]}"; do + if [ "${ARGS[$i]}" = '/app/staking_deposit/deposit.py' ]; then + if [ "$foundnonint" = '1' ]; then + # the flag should be before the command + ARGS=("${ARGS[@]:0:$i+1}" "--non_interactive" "${ARGS[@]:$i+1}") + fi + break + fi +done + su-exec depcli "${ARGS[@]}" if [[ "$*" =~ "generate-bls-to-execution-change" ]]; then diff --git a/teku-allin1.yml b/teku-allin1.yml index 3db9fd66..e8f32679 100644 --- a/teku-allin1.yml +++ b/teku-allin1.yml @@ -46,6 +46,7 @@ services: - WEB3SIGNER=${WEB3SIGNER:-false} - EMBEDDED_VC=true - NETWORK=${NETWORK} + - ENABLE_DIST_ATTESTATION_AGGR=${ENABLE_DIST_ATTESTATION_AGGR:-false} ports: - ${HOST_IP:-}:${CL_P2P_PORT:-9000}:${CL_P2P_PORT:-9000}/tcp - ${HOST_IP:-}:${CL_P2P_PORT:-9000}:${CL_P2P_PORT:-9000}/udp diff --git a/teku-vc-only.yml b/teku-vc-only.yml index 50f19310..ef8c3c3b 100644 --- a/teku-vc-only.yml +++ b/teku-vc-only.yml @@ -35,6 +35,7 @@ services: - DEFAULT_GRAFFITI=${DEFAULT_GRAFFITI:-false} - WEB3SIGNER=${WEB3SIGNER:-false} - NETWORK=${NETWORK} + - ENABLE_DIST_ATTESTATION_AGGR=${ENABLE_DIST_ATTESTATION_AGGR:-false} networks: default: aliases: diff --git a/teku.yml b/teku.yml index 3682f501..8cd11c92 100644 --- a/teku.yml +++ b/teku.yml @@ -105,6 +105,7 @@ services: - DEFAULT_GRAFFITI=${DEFAULT_GRAFFITI:-false} - WEB3SIGNER=${WEB3SIGNER:-false} - NETWORK=${NETWORK} + - ENABLE_DIST_ATTESTATION_AGGR=${ENABLE_DIST_ATTESTATION_AGGR:-false} networks: default: aliases: diff --git a/teku/docker-entrypoint-vc.sh b/teku/docker-entrypoint-vc.sh index 79b4c8d3..347ca797 100755 --- a/teku/docker-entrypoint-vc.sh +++ b/teku/docker-entrypoint-vc.sh @@ -79,12 +79,19 @@ else __w3s_url="" fi +# Distributed attestation aggregation +if [ "${ENABLE_DIST_ATTESTATION_AGGR}" = "true" ]; then + __att_aggr="--Xobol-dvt-integration-enabled=true" +else + __att_aggr="" +fi + if [ "${DEFAULT_GRAFFITI}" = "true" ]; then # Word splitting is desired for the command line parameters # shellcheck disable=SC2086 - exec "$@" ${__network} ${__w3s_url} ${__mev_boost} ${__doppel} ${VC_EXTRAS} + exec "$@" ${__network} ${__w3s_url} ${__mev_boost} ${__doppel} ${__att_aggr} ${VC_EXTRAS} else # Word splitting is desired for the command line parameters # shellcheck disable=SC2086 - exec "$@" ${__network} "--validators-graffiti=${GRAFFITI}" ${__w3s_url} ${__mev_boost} ${__doppel} ${VC_EXTRAS} + exec "$@" ${__network} "--validators-graffiti=${GRAFFITI}" ${__w3s_url} ${__mev_boost} ${__doppel} ${__att_aggr} ${VC_EXTRAS} fi diff --git a/vc-utils/keymanager.sh b/vc-utils/keymanager.sh index 503e3e80..3243c2f8 100755 --- a/vc-utils/keymanager.sh +++ b/vc-utils/keymanager.sh @@ -798,6 +798,14 @@ and secrets directories into .eth/validator_keys instead." fi if [ "$__eth2_val_tools" -eq 0 ] && [ "$__justone" -eq 0 ]; then while true; do + __passfile=${__keyfile/.json/.txt} + if [ -f "$__passfile" ]; then + echo "Password file is found: $__passfile" + __password=$(< "$__passfile") + break + else + echo "Password file $__passfile not found." 
+ fi read -srp "Please enter the password for your validator key stored in $__keyfile with public key $__pubkey: " __password echo read -srp "Please re-enter the password: " __password2 From 440eba0e5dae2c495c081db252ee279d8a725b47 Mon Sep 17 00:00:00 2001 From: yorickdowne <71337066+yorickdowne@users.noreply.github.com> Date: Tue, 17 Sep 2024 12:31:53 +0100 Subject: [PATCH 28/48] No automatic switch of Erigon repo yet (#1930) --- ethd | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/ethd b/ethd index 497513a0..ae717c64 100755 --- a/ethd +++ b/ethd @@ -1115,12 +1115,12 @@ __env_migrate() { if [[ "${__var}" = "LH_DOCKER_TAG" && "${__value}" = "latest-modern" ]]; then # LH 5.2 ditched latest-modern __value="latest" fi - if [[ "${__var}" = "ERIGON_DOCKER_TAG" && "${__value}" = "stable" ]]; then # Erigon switched to latest - __value="latest" - fi - if [[ "${__var}" = "ERIGON_DOCKER_REPO" && "${__value}" = "thorax/erigon" ]]; then # Erigon new repo - __value="erigontech/erigon" - fi +# if [[ "${__var}" = "ERIGON_DOCKER_TAG" && "${__value}" = "stable" ]]; then # Erigon switched to latest +# __value="latest" +# fi +# if [[ "${__var}" = "ERIGON_DOCKER_REPO" && "${__value}" = "thorax/erigon" ]]; then # Erigon new repo +# __value="erigontech/erigon" +# fi sed -i'.original' -e "s~^\(${__var}\s*=\s*\).*$~\1${__value}~" "${__env_file}" fi done From d889d667fc1d457c4eeea63d31d3cfafb8d4d132 Mon Sep 17 00:00:00 2001 From: yorickdowne <71337066+yorickdowne@users.noreply.github.com> Date: Tue, 17 Sep 2024 13:25:37 +0100 Subject: [PATCH 29/48] Move ejector messages to .eth (#1931) --- .eth/charon/.empty | 0 .eth/lido-ejector/.empty | 0 .gitignore | 4 ++-- ethd | 2 +- lido-obol.yml | 4 ++-- 5 files changed, 5 insertions(+), 5 deletions(-) create mode 100755 .eth/charon/.empty create mode 100755 .eth/lido-ejector/.empty diff --git a/.eth/charon/.empty b/.eth/charon/.empty new file mode 100755 index 00000000..e69de29b diff --git a/.eth/lido-ejector/.empty b/.eth/lido-ejector/.empty new file mode 100755 index 00000000..e69de29b diff --git a/.gitignore b/.gitignore index 3bfced4c..32cddf71 100644 --- a/.gitignore +++ b/.gitignore @@ -5,12 +5,12 @@ ext-network.yml.original ext-network.yml.bak .eth/* .eth_backup* -.charon/* -validator-ejector/* !.eth/README.md !.eth/validator_keys/.empty !.eth/exit_messages/.empty !.eth/dkg_output/.empty +!.eth/charon/.empty +!.eth/lido-ejector/.empty !.eth/ethdo/README.md !.eth/ethdo/create-withdrawal-change.sh *.swp diff --git a/ethd b/ethd index ae717c64..57ea01bd 100755 --- a/ethd +++ b/ethd @@ -3214,7 +3214,7 @@ again or Cancel on the next screen." 
10 75 __query_lido_obol_enr() { - ${__as_owner} mkdir -p ./.eth + ${__as_owner} mkdir -p ./.eth/charon __outcome__=$(__docompose -f ./lido-obol.yml run -u "$(id -u)":"$(id -g)" --rm charon-create-enr) if [[ "${__outcome__}" =~ "Created ENR private key:" ]]; then __lido_obol_operator_enr=$(echo "${__outcome__}" | grep -e 'enr:') diff --git a/lido-obol.yml b/lido-obol.yml index 978f5c0e..21cac711 100644 --- a/lido-obol.yml +++ b/lido-obol.yml @@ -64,7 +64,7 @@ services: image: lidofinance/validator-ejector:${VALIDATOR_EJECTOR_VERSION:-1.5.0} user: ":" volumes: - - ./validator-ejector:/exitmessages + - .eth/lido-ejector:/exitmessages restart: unless-stopped environment: - EXECUTION_NODE=${OBOL_EL_NODE:-http://execution:8545} @@ -88,7 +88,7 @@ services: image: obolnetwork/lido-dv-exit:${LIDO_DV_EXIT_VERSION:-e8bee1f} user: ":" volumes: - - ./validator-ejector:/exitmessages + - .eth/lido-ejector:/exitmessages - .eth:/charon environment: - LIDODVEXIT_BEACON_NODE_URL=${OBOL_CL_NODE:-http://consensus:5052} From 8012a17369e3525e946fc3eae01d3839e8a61e6a Mon Sep 17 00:00:00 2001 From: yorickdowne <71337066+yorickdowne@users.noreply.github.com> Date: Tue, 17 Sep 2024 17:10:07 +0100 Subject: [PATCH 30/48] Remove Eden relay (#1926) --- ethd | 4 ---- 1 file changed, 4 deletions(-) diff --git a/ethd b/ethd index 57ea01bd..e3ff4c63 100755 --- a/ethd +++ b/ethd @@ -2985,14 +2985,12 @@ __query_mev() { "holesky") MEV_RELAYS="https://0xafa4c6985aa049fb79dd37010438cfebeb0f2bd42b115b89dd678dab0670c1de38da0c4e9138c9290a398ecd9a0b3110@boost-relay-holesky.flashbots.net,\ https://0xaa58208899c6105603b74396734a6263cc7d947f444f396a90f7b7d3e65d102aec7e5e5291b27e08d02c50a050825c2f@holesky.titanrelay.xyz,\ -https://0xb1d229d9c21298a87846c7022ebeef277dfc321fe674fa45312e20b5b6c400bfde9383f801848d7837ed5fc449083a12@relay-holesky.edennetwork.io,\ https://0x821f2a65afb70e7f2e820a925a9b4c80a159620582c1766b1b09729fec178b11ea22abb3a51f07b288be815a1a2ff516@bloxroute.holesky.blxrbdn.com,\ https://0xb1559beef7b5ba3127485bbbb090362d9f497ba64e177ee2c8e7db74746306efad687f2cf8574e38d70067d40ef136dc@relay-stag.ultrasound.money" ;; "mainnet") MEV_RELAYS="https://0xac6e77dfe25ecd6110b8e780608cce0dab71fdd5ebea22a16c0205200f2f8e2e3ad3b71d3499c54ad14d6c21b41a37ae@boost-relay.flashbots.net,\ https://0x8b5d2e73e2a3a55c6c87b8b6eb92e0149a125c852751db1422fa951e42a09b82c142c3ea98d0d9930b056a3bc9896b8f@bloxroute.max-profit.blxrbdn.com,\ -https://0xb3ee7afcf27f1f1259ac1787876318c6584ee353097a50ed84f51a1f21a323b3736f271a895c7ce918c038e4265918be@relay.edennetwork.io,\ https://0xa1559ace749633b997cb3fdacffb890aeebdb0f5a3b6aaa7eeeaf1a38af0a8fe88b9e4b1f61f236d2e64d95733327a62@relay.ultrasound.money,\ https://0xa15b52576bcbf1072f4a011c0f99f9fb6c66f3e1ff321f11f461d15e31b1cb359caa092c71bbded0bae5b5ea401aab7e@aestus.live,\ https://0x98650451ba02064f7b000f5768cf0cf4d4e492317d82871bdc87ef841a0743f69f0f1eea11168503240ac35d101c9135@mainnet-relay.securerpc.com" @@ -3108,14 +3106,12 @@ want to use MEV Boost?" 
10 65); then "holesky") MEV_RELAYS="https://0xafa4c6985aa049fb79dd37010438cfebeb0f2bd42b115b89dd678dab0670c1de38da0c4e9138c9290a398ecd9a0b3110@boost-relay-holesky.flashbots.net,\ https://0xaa58208899c6105603b74396734a6263cc7d947f444f396a90f7b7d3e65d102aec7e5e5291b27e08d02c50a050825c2f@holesky.titanrelay.xyz,\ -https://0xb1d229d9c21298a87846c7022ebeef277dfc321fe674fa45312e20b5b6c400bfde9383f801848d7837ed5fc449083a12@relay-holesky.edennetwork.io,\ https://0x821f2a65afb70e7f2e820a925a9b4c80a159620582c1766b1b09729fec178b11ea22abb3a51f07b288be815a1a2ff516@bloxroute.holesky.blxrbdn.com,\ https://0xb1559beef7b5ba3127485bbbb090362d9f497ba64e177ee2c8e7db74746306efad687f2cf8574e38d70067d40ef136dc@relay-stag.ultrasound.money" ;; "mainnet") MEV_RELAYS=https://0xac6e77dfe25ecd6110b8e780608cce0dab71fdd5ebea22a16c0205200f2f8e2e3ad3b71d3499c54ad14d6c21b41a37ae@boost-relay.flashbots.net,\ https://0x8b5d2e73e2a3a55c6c87b8b6eb92e0149a125c852751db1422fa951e42a09b82c142c3ea98d0d9930b056a3bc9896b8f@bloxroute.max-profit.blxrbdn.com,\ -https://0xb3ee7afcf27f1f1259ac1787876318c6584ee353097a50ed84f51a1f21a323b3736f271a895c7ce918c038e4265918be@relay.edennetwork.io,\ https://0xa1559ace749633b997cb3fdacffb890aeebdb0f5a3b6aaa7eeeaf1a38af0a8fe88b9e4b1f61f236d2e64d95733327a62@relay.ultrasound.money,\ https://0xa15b52576bcbf1072f4a011c0f99f9fb6c66f3e1ff321f11f461d15e31b1cb359caa092c71bbded0bae5b5ea401aab7e@aestus.live,\ https://0xa7ab7a996c8584251c8f925da3170bdfd6ebc75d50f5ddc4050a6fdc77f2a3b5fce2cc750d0865e05d7228af97d69561@agnostic-relay.net,\ From 7c793e8d66873cc5a1343e136c77308a360af13a Mon Sep 17 00:00:00 2001 From: yorickdowne <71337066+yorickdowne@users.noreply.github.com> Date: Wed, 18 Sep 2024 06:31:23 +0100 Subject: [PATCH 31/48] Add non-censoring Titan relay (#1932) --- ethd | 1 + 1 file changed, 1 insertion(+) diff --git a/ethd b/ethd index e3ff4c63..7d2824d0 100755 --- a/ethd +++ b/ethd @@ -3112,6 +3112,7 @@ https://0xb1559beef7b5ba3127485bbbb090362d9f497ba64e177ee2c8e7db74746306efad687f "mainnet") MEV_RELAYS=https://0xac6e77dfe25ecd6110b8e780608cce0dab71fdd5ebea22a16c0205200f2f8e2e3ad3b71d3499c54ad14d6c21b41a37ae@boost-relay.flashbots.net,\ https://0x8b5d2e73e2a3a55c6c87b8b6eb92e0149a125c852751db1422fa951e42a09b82c142c3ea98d0d9930b056a3bc9896b8f@bloxroute.max-profit.blxrbdn.com,\ +https://0x8c4ed5e24fe5c6ae21018437bde147693f68cda427cd1122cf20819c30eda7ed74f72dece09bb313f2a1855595ab677d@global.titanrelay.xyz,\ https://0xa1559ace749633b997cb3fdacffb890aeebdb0f5a3b6aaa7eeeaf1a38af0a8fe88b9e4b1f61f236d2e64d95733327a62@relay.ultrasound.money,\ https://0xa15b52576bcbf1072f4a011c0f99f9fb6c66f3e1ff321f11f461d15e31b1cb359caa092c71bbded0bae5b5ea401aab7e@aestus.live,\ https://0xa7ab7a996c8584251c8f925da3170bdfd6ebc75d50f5ddc4050a6fdc77f2a3b5fce2cc750d0865e05d7228af97d69561@agnostic-relay.net,\ From 9e2c00399e23e5ec6aa5081e635fcb135254cc7f Mon Sep 17 00:00:00 2001 From: yorickdowne <71337066+yorickdowne@users.noreply.github.com> Date: Fri, 20 Sep 2024 03:01:24 +0100 Subject: [PATCH 32/48] Fix bind mounts with Docker 27.3.0 (#1934) --- grafana-cloud.yml | 8 ++++---- grafana.yml | 8 ++++---- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/grafana-cloud.yml b/grafana-cloud.yml index 4925f24b..27e558ca 100644 --- a/grafana-cloud.yml +++ b/grafana-cloud.yml @@ -67,7 +67,7 @@ services: pid: host restart: unless-stopped volumes: - - '/:/host:ro,rslave' + - /:/host:ro,rslave - /etc/hostname:/etc/nodename:ro - /proc:/host/proc:ro - /sys:/host/sys:ro @@ -112,10 +112,10 @@ services: image: 
gcr.io/cadvisor/cadvisor:v0.49.1 volumes: - /var/run/docker.sock:/var/run/docker.sock:ro - - /:/rootfs:ro + - /:/rootfs:ro,rslave - /var/run:/var/run - /sys:/sys:ro - - /var/lib/docker:/var/lib/docker:ro + - /var/lib/docker:/var/lib/docker:ro,rslave command: - --docker_only - --housekeeping_interval=30s @@ -134,7 +134,7 @@ services: - /etc/machine-id:/etc/machine-id:ro - ./promtail:/etc/promtail - promtail-data:/tmp - - /var/lib/docker/containers:/var/lib/docker/containers:ro + - /var/lib/docker/containers:/var/lib/docker/containers:ro,rslave entrypoint: ./etc/promtail/entrypoint.sh command: ["/usr/bin/promtail"] environment: diff --git a/grafana.yml b/grafana.yml index 5a1fc9fe..8693df81 100644 --- a/grafana.yml +++ b/grafana.yml @@ -60,7 +60,7 @@ services: pid: host restart: unless-stopped volumes: - - '/:/host:ro,rslave' + - /:/host:ro,rslave - /etc/hostname:/etc/nodename:ro - /proc:/host/proc:ro - /sys:/host/sys:ro @@ -105,10 +105,10 @@ services: image: gcr.io/cadvisor/cadvisor:v0.49.1 volumes: - /var/run/docker.sock:/var/run/docker.sock:ro - - /:/rootfs:ro + - /:/rootfs:ro,rslave - /var/run:/var/run - /sys:/sys:ro - - /var/lib/docker:/var/lib/docker:ro + - /var/lib/docker:/var/lib/docker:ro,rslave command: - --docker_only - --housekeeping_interval=30s @@ -127,7 +127,7 @@ services: - /etc/machine-id:/etc/machine-id:ro - ./promtail:/etc/promtail - promtail-data:/tmp - - /var/lib/docker/containers:/var/lib/docker/containers:ro + - /var/lib/docker/containers:/var/lib/docker/containers:ro,rslave entrypoint: ./etc/promtail/entrypoint.sh command: ["/usr/bin/promtail"] environment: From 3c80c5d12368e16546c3869eb1770ecd080f3042 Mon Sep 17 00:00:00 2001 From: yorickdowne <71337066+yorickdowne@users.noreply.github.com> Date: Fri, 20 Sep 2024 09:02:09 +0100 Subject: [PATCH 33/48] discv5 Geth is now enabled by default (#1933) --- geth/docker-entrypoint.sh | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/geth/docker-entrypoint.sh b/geth/docker-entrypoint.sh index d7410a4b..a16a09b6 100755 --- a/geth/docker-entrypoint.sh +++ b/geth/docker-entrypoint.sh @@ -99,13 +99,6 @@ else __prune="" fi -if [ "${IPV6}" = "true" ]; then - echo "Configuring Geth for discv5 for IPv6 advertisements" - __ipv6="--discv5" -else - __ipv6="" -fi - # Word splitting is desired for the command line parameters # shellcheck disable=SC2086 -exec "$@" ${__datadir} ${__ancient} ${__ipv6} ${__network} ${__prune} ${__verbosity} ${EL_EXTRAS} +exec "$@" ${__datadir} ${__ancient} ${__network} ${__prune} ${__verbosity} ${EL_EXTRAS} From 4656f884e4245d3d14f7db14c667a4c19e968de4 Mon Sep 17 00:00:00 2001 From: yorickdowne <71337066+yorickdowne@users.noreply.github.com> Date: Fri, 20 Sep 2024 09:05:38 +0100 Subject: [PATCH 34/48] Version 2.12.3.0 (#1935) --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 525906f7..21e41802 100644 --- a/README.md +++ b/README.md @@ -34,4 +34,4 @@ Eth Docker uses a "semver-ish" scheme. large. - Second through fourth digit, [semver](https://semver.org/). 
-This is Eth Docker v2.12.2.0 +This is Eth Docker v2.12.3.0 From e22afad2f56d354ea116e7188f29b873550c3160 Mon Sep 17 00:00:00 2001 From: yorickdowne <71337066+yorickdowne@users.noreply.github.com> Date: Wed, 25 Sep 2024 09:43:27 -0400 Subject: [PATCH 35/48] Configurable SSV repo (#1938) --- default.env | 4 +++- ethd | 4 ++-- ssv-dkg.yml | 2 +- ssv.yml | 2 +- 4 files changed, 7 insertions(+), 5 deletions(-) diff --git a/default.env b/default.env index 2635c9e7..6540c21c 100644 --- a/default.env +++ b/default.env @@ -181,7 +181,9 @@ ETH_DOCKER_TAG= # SSV SSV_NODE_TAG=latest +SSV_NODE_REPO=bloxstaking/ssv-node SSV_DKG_TAG=latest +SSV_DKG_REPO=bloxstaking/ssv-dkg # Lido OBOL CHARON_VERSION=latest @@ -326,4 +328,4 @@ DDNS_TAG=v2 NODE_EXPORTER_IGNORE_MOUNT_REGEX='^/(dev|proc|sys|run|var/lib/docker/.+)($|/)' # Used by ethd update - please do not adjust -ENV_VERSION=14 +ENV_VERSION=15 diff --git a/ethd b/ethd index 7d2824d0..deb67b93 100755 --- a/ethd +++ b/ethd @@ -1007,8 +1007,8 @@ __env_migrate() { ERIGON_P2P_PORT_3 LODESTAR_HEAP SSV_DKG_PORT SIREN_PASSWORD ) __target_vars=( ETH_DOCKER_TAG NIM_SRC_BUILD_TARGET NIM_SRC_REPO NIM_DOCKER_TAG NIM_DOCKER_VC_TAG NIM_DOCKER_REPO \ NIM_DOCKER_VC_REPO NIM_DOCKERFILE TEKU_SRC_BUILD_TARGET TEKU_SRC_REPO TEKU_DOCKER_TAG TEKU_DOCKER_REPO \ - TEKU_DOCKERFILE LH_SRC_BUILD_TARGET LH_SRC_REPO LH_DOCKER_TAG LH_DOCKER_REPO LH_DOCKERFILE \ - PRYSM_SRC_BUILD_TARGET PRYSM_SRC_REPO PRYSM_DOCKER_TAG PRYSM_DOCKER_VC_TAG PRYSM_DOCKER_CTL_TAG \ + TEKU_DOCKERFILE LH_SRC_BUILD_TARGET LH_SRC_REPO LH_DOCKER_TAG LH_DOCKER_REPO LH_DOCKERFILE SSV_NODE_REPO \ + PRYSM_SRC_BUILD_TARGET PRYSM_SRC_REPO PRYSM_DOCKER_TAG PRYSM_DOCKER_VC_TAG PRYSM_DOCKER_CTL_TAG SSV_DKG_REPO \ PRYSM_DOCKER_REPO PRYSM_DOCKER_VC_REPO PRYSM_DOCKER_CTL_REPO PRYSM_DOCKERFILE ERIGON_SRC_BUILD_TARGET \ ERIGON_SRC_REPO ERIGON_DOCKER_TAG ERIGON_DOCKER_REPO ERIGON_DOCKERFILE MEV_SRC_BUILD_TARGET MEV_SRC_REPO \ MEV_DOCKERFILE MEV_DOCKER_TAG MEV_DOCKER_REPO NIMEL_SRC_BUILD_TARGET NIMEL_SRC_REPO NIMEL_DOCKER_TAG \ diff --git a/ssv-dkg.yml b/ssv-dkg.yml index bf3d5c5b..19b745f8 100644 --- a/ssv-dkg.yml +++ b/ssv-dkg.yml @@ -18,7 +18,7 @@ services: ssv-dkg: restart: "unless-stopped" - image: bloxstaking/ssv-dkg:${SSV_DKG_TAG:-latest} + image: ${SSV_DKG_REPO:-ssvlabs/ssv-dkg}:${SSV_DKG_TAG:-latest} volumes: - ./ssv-config:/config - ssv-dkg-tls:/ssl diff --git a/ssv.yml b/ssv.yml index 0df77f1c..cbf79e6a 100644 --- a/ssv.yml +++ b/ssv.yml @@ -9,7 +9,7 @@ x-logging: &logging services: ssv-node: restart: "unless-stopped" - image: bloxstaking/ssv-node:${SSV_NODE_TAG:-latest} + image: ${SSV_NODE_REPO:-ssvlabs/ssv-node}:${SSV_NODE_TAG:-latest} user: 12000:12000 volumes: - ./ssv-config:/config From 8bc549c164d1b3a367fc0fe591c7a539b6fda811 Mon Sep 17 00:00:00 2001 From: yorickdowne <71337066+yorickdowne@users.noreply.github.com> Date: Thu, 26 Sep 2024 08:21:07 -0400 Subject: [PATCH 36/48] Sys and proc are rslave as well (#1936) --- grafana-cloud.yml | 6 +++--- grafana.yml | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/grafana-cloud.yml b/grafana-cloud.yml index 27e558ca..a0591e3f 100644 --- a/grafana-cloud.yml +++ b/grafana-cloud.yml @@ -69,8 +69,8 @@ services: volumes: - /:/host:ro,rslave - /etc/hostname:/etc/nodename:ro - - /proc:/host/proc:ro - - /sys:/host/sys:ro + - /proc:/host/proc:ro,rslave + - /sys:/host/sys:ro,rslave - /etc/localtime:/etc/localtime:ro <<: *logging labels: @@ -114,7 +114,7 @@ services: - /var/run/docker.sock:/var/run/docker.sock:ro - /:/rootfs:ro,rslave - /var/run:/var/run - - 
/sys:/sys:ro + - /sys:/sys:ro,rslave - /var/lib/docker:/var/lib/docker:ro,rslave command: - --docker_only diff --git a/grafana.yml b/grafana.yml index 8693df81..30dc9d03 100644 --- a/grafana.yml +++ b/grafana.yml @@ -62,8 +62,8 @@ services: volumes: - /:/host:ro,rslave - /etc/hostname:/etc/nodename:ro - - /proc:/host/proc:ro - - /sys:/host/sys:ro + - /proc:/host/proc:ro,rslave + - /sys:/host/sys:ro,rslave - /etc/localtime:/etc/localtime:ro <<: *logging labels: @@ -107,7 +107,7 @@ services: - /var/run/docker.sock:/var/run/docker.sock:ro - /:/rootfs:ro,rslave - /var/run:/var/run - - /sys:/sys:ro + - /sys:/sys:ro,rslave - /var/lib/docker:/var/lib/docker:ro,rslave command: - --docker_only From 75fb3d70d5f55228dd7f60af69515ded2d26d812 Mon Sep 17 00:00:00 2001 From: yorickdowne <71337066+yorickdowne@users.noreply.github.com> Date: Thu, 26 Sep 2024 08:21:21 -0400 Subject: [PATCH 37/48] Add support for commit-boost (#1939) --- .gitignore | 1 + commit-boost-pbs.yml | 22 ++++++++++++++++++++++ commit-boost/cb-config.toml.sample | 17 +++++++++++++++++ default.env | 7 +++++-- ethd | 8 ++++++-- 5 files changed, 51 insertions(+), 4 deletions(-) create mode 100644 commit-boost-pbs.yml create mode 100644 commit-boost/cb-config.toml.sample diff --git a/.gitignore b/.gitignore index 32cddf71..eac3f087 100644 --- a/.gitignore +++ b/.gitignore @@ -26,4 +26,5 @@ ssv-config/config.yaml ssv-config/config.yaml.original ssv-config/dkg-config.yaml ssv-config/dkg-config.yaml.original +commit-boost/cb-config.toml .nada diff --git a/commit-boost-pbs.yml b/commit-boost-pbs.yml new file mode 100644 index 00000000..e9d11024 --- /dev/null +++ b/commit-boost-pbs.yml @@ -0,0 +1,22 @@ +x-logging: &logging + logging: + driver: json-file + options: + max-size: 100m + max-file: "3" + tag: '{{.ImageName}}|{{.Name}}|{{.ImageFullID}}|{{.FullID}}' + +services: + cb-pbs: + image: ${CB_PBS_DOCKER_REPO:-ghcr.io/commit-boost/pbs}:${CB_PBS_DOCKER_TAG:-latest} + environment: + CB_CONFIG: /cb-config.toml + CB_METRICS_PORT: 10000 + volumes: + - ./commit-boost/cb-config.toml:/cb-config.toml:ro + labels: + - metrics.scrape=true + - metrics.path=/metrics + - metrics.port=10000 + - metrics.instance=cb-pbs + - metrics.network=${NETWORK} diff --git a/commit-boost/cb-config.toml.sample b/commit-boost/cb-config.toml.sample new file mode 100644 index 00000000..934ad9ee --- /dev/null +++ b/commit-boost/cb-config.toml.sample @@ -0,0 +1,17 @@ +chain = "Holesky" + +[pbs] +port = 18550 + +[[relays]] +id = "bloxroute" +url = "https://0x821f2a65afb70e7f2e820a925a9b4c80a159620582c1766b1b09729fec178b11ea22abb3a51f07b288be815a1a2ff516@bloxroute.holesky.blxrbdn.com" +[[relays]] +id = "aestus" +url = "https://0xab78bf8c781c58078c3beb5710c57940874dd96aef2835e7742c866b4c7c0406754376c2c8285a36c630346aa5c5f833@holesky.aestus.live" +[[relays]] +id = "titan" +url = "https://0xaa58208899c6105603b74396734a6263cc7d947f444f396a90f7b7d3e65d102aec7e5e5291b27e08d02c50a050825c2f@holesky.titanrelay.xyz" +[[relays]] +id = "flashbots" +url = "https://0xafa4c6985aa049fb79dd37010438cfebeb0f2bd42b115b89dd678dab0670c1de38da0c4e9138c9290a398ecd9a0b3110@boost-relay-holesky.flashbots.net" diff --git a/default.env b/default.env index 6540c21c..ae0e17c1 100644 --- a/default.env +++ b/default.env @@ -8,7 +8,7 @@ FEE_RECIPIENT= # If "true" and used with a CL, it also requires :mev-boost.yml in COMPOSE_FILE MEV_BOOST=false # For relay information, please see https://ethstaker.cc/mev-relay-list/ 
-MEV_RELAYS=https://0xafa4c6985aa049fb79dd37010438cfebeb0f2bd42b115b89dd678dab0670c1de38da0c4e9138c9290a398ecd9a0b3110@boost-relay-holesky.flashbots.net,https://0xaa58208899c6105603b74396734a6263cc7d947f444f396a90f7b7d3e65d102aec7e5e5291b27e08d02c50a050825c2f@holesky.titanrelay.xyz,https://0xb1d229d9c21298a87846c7022ebeef277dfc321fe674fa45312e20b5b6c400bfde9383f801848d7837ed5fc449083a12@relay-holesky.edennetwork.io,https://0x821f2a65afb70e7f2e820a925a9b4c80a159620582c1766b1b09729fec178b11ea22abb3a51f07b288be815a1a2ff516@bloxroute.holesky.blxrbdn.com +MEV_RELAYS=https://0xafa4c6985aa049fb79dd37010438cfebeb0f2bd42b115b89dd678dab0670c1de38da0c4e9138c9290a398ecd9a0b3110@boost-relay-holesky.flashbots.net,https://0xaa58208899c6105603b74396734a6263cc7d947f444f396a90f7b7d3e65d102aec7e5e5291b27e08d02c50a050825c2f@holesky.titanrelay.xyz,https://0x821f2a65afb70e7f2e820a925a9b4c80a159620582c1766b1b09729fec178b11ea22abb3a51f07b288be815a1a2ff516@bloxroute.holesky.blxrbdn.com # Set a minimum MEV bid (e.g. 0.05), used by mev-boost.yml. If empty, no minimum is used. MEV_MIN_BID= # Graffiti to use for validator @@ -194,6 +194,9 @@ VE_ORACLE_ADDRESSES_ALLOWLIST= ENABLE_DIST_ATTESTATION_AGGR= LIDO_DV_EXIT_EXIT_EPOCH= +# Commit-Boost +CB_PBS_DOCKER_TAG=latest +CB_PBS_DOCKER_REPO=ghcr.io/commit-boost/pbs # MEV-Boost # SRC build target can be a tag, a branch, or a pr as "pr-ID" MEV_SRC_BUILD_TARGET=stable @@ -328,4 +331,4 @@ DDNS_TAG=v2 NODE_EXPORTER_IGNORE_MOUNT_REGEX='^/(dev|proc|sys|run|var/lib/docker/.+)($|/)' # Used by ethd update - please do not adjust -ENV_VERSION=15 +ENV_VERSION=16 diff --git a/ethd b/ethd index deb67b93..32718fc9 100755 --- a/ethd +++ b/ethd @@ -244,6 +244,10 @@ __prep_conffiles() { if [ ! -f "ext-network.yml" ]; then ${__as_owner} cp ext-network.yml.sample ext-network.yml fi +# Create cb-config.toml if it doesn't exist + if [ ! 
-f "commit-boost/cb-config.toml" ]; then + ${__as_owner} cp commit-boost/cb-config.toml.sample commit-boost/cb-config.toml + fi } @@ -1013,8 +1017,8 @@ __env_migrate() { ERIGON_SRC_REPO ERIGON_DOCKER_TAG ERIGON_DOCKER_REPO ERIGON_DOCKERFILE MEV_SRC_BUILD_TARGET MEV_SRC_REPO \ MEV_DOCKERFILE MEV_DOCKER_TAG MEV_DOCKER_REPO NIMEL_SRC_BUILD_TARGET NIMEL_SRC_REPO NIMEL_DOCKER_TAG \ NIMEL_DOCKER_REPO NIMEL_DOCKERFILE LS_SRC_BUILD_TARGET LS_SRC_REPO LS_DOCKER_TAG LS_DOCKER_REPO LS_DOCKERFILE \ - GETH_SRC_BUILD_TARGET GETH_SRC_REPO GETH_DOCKER_TAG GETH_DOCKER_REPO TRAEFIK_TAG DDNS_TAG \ - GETH_DOCKERFILE NM_SRC_BUILD_TARGET NM_SRC_REPO NM_DOCKER_TAG NM_DOCKER_REPO NM_DOCKERFILE \ + GETH_SRC_BUILD_TARGET GETH_SRC_REPO GETH_DOCKER_TAG GETH_DOCKER_REPO TRAEFIK_TAG DDNS_TAG CB_PBS_DOCKER_TAG \ + GETH_DOCKERFILE NM_SRC_BUILD_TARGET NM_SRC_REPO NM_DOCKER_TAG NM_DOCKER_REPO NM_DOCKERFILE CB_PBS_DOCKER_REPO \ BESU_SRC_BUILD_TARGET BESU_SRC_REPO BESU_DOCKER_TAG BESU_DOCKER_REPO BESU_DOCKERFILE SSV_NODE_TAG CHARON_VERSION \ DEPCLI_SRC_BUILD_TARGET DEPCLI_SRC_REPO DEPCLI_DOCKER_TAG W3S_DOCKER_TAG W3S_DOCKER_REPO \ PG_DOCKER_TAG RETH_SRC_BUILD_TARGET RETH_SRC_REPO RETH_DOCKER_TAG RETH_DOCKER_REPO RETH_DOCKERFILE \ From 5dd679d1a29382f994f29f7cdbe3982daa111377 Mon Sep 17 00:00:00 2001 From: yorickdowne <71337066+yorickdowne@users.noreply.github.com> Date: Mon, 30 Sep 2024 11:48:59 -0400 Subject: [PATCH 38/48] Update ddns-updater config for 2.8 (#1940) --- traefik-cf.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/traefik-cf.yml b/traefik-cf.yml index c3338541..29914e27 100644 --- a/traefik-cf.yml +++ b/traefik-cf.yml @@ -53,7 +53,7 @@ services: restart: "unless-stopped" environment: - LOG_LEVEL=${LOG_LEVEL:-info} - - 'CONFIG={"settings": [{"provider": "cloudflare", "zone_identifier": "${CF_ZONE_ID}", "domain": "${DOMAIN}", "host": "${DDNS_SUBDOMAIN}", "ttl": 1, "token": "${CF_DNS_API_TOKEN}", "proxied": ${DDNS_PROXY}, "ip_version": "ipv4"}]}' + - 'CONFIG={"settings": [{"provider": "cloudflare", "zone_identifier": "${CF_ZONE_ID}", "domain": "${DDNS_SUBDOMAIN}.${DOMAIN}", "ttl": 1, "token": "${CF_DNS_API_TOKEN}", "proxied": ${DDNS_PROXY}, "ip_version": "ipv4"}]}' volumes: - /etc/localtime:/etc/localtime:ro <<: *logging From a82462b9ef37bc0e9828d05c2834b6a1643553ec Mon Sep 17 00:00:00 2001 From: yorickdowne <71337066+yorickdowne@users.noreply.github.com> Date: Mon, 30 Sep 2024 11:49:13 -0400 Subject: [PATCH 39/48] Manual v6 enr (#1941) * V6 manual ENRs * discv5 --- besu.yml | 1 + besu/docker-entrypoint.sh | 10 +++++++++- default.env | 4 +++- grandine/docker-entrypoint.sh | 8 ++++++++ lighthouse/docker-entrypoint.sh | 8 ++++++++ lodestar-cl-only.yml | 4 +++- lodestar.yml | 4 +++- lodestar/Dockerfile.binary | 3 ++- lodestar/Dockerfile.source | 3 ++- lodestar/docker-entrypoint.sh | 10 +++++++++- nethermind.yml | 2 ++ teku-allin1.yml | 4 ++++ teku-cl-only.yml | 4 +++- teku.yml | 4 +++- teku/docker-entrypoint.sh | 20 +++++++++++++++++++- 15 files changed, 79 insertions(+), 10 deletions(-) diff --git a/besu.yml b/besu.yml index 6cb6db41..740685e6 100644 --- a/besu.yml +++ b/besu.yml @@ -27,6 +27,7 @@ services: - EL_EXTRAS=${EL_EXTRAS:-} - ARCHIVE_NODE=${ARCHIVE_NODE:-} - NETWORK=${NETWORK} + - IPV6=${IPV6:-false} volumes: - besu-el-data:/var/lib/besu - besu-eth1-data:/var/lib/besu-og diff --git a/besu/docker-entrypoint.sh b/besu/docker-entrypoint.sh index 5fa93b13..2aee7d17 100755 --- a/besu/docker-entrypoint.sh +++ b/besu/docker-entrypoint.sh @@ -72,6 +72,14 @@ else __datadir="--data-path 
/var/lib/besu" fi +# DiscV5 for IPV6 +if [ "${IPV6:-false}" = "true" ]; then + echo "Configuring Besu for discv5 for IPv6 advertisements" + __ipv6="--Xv5-discovery-enabled" +else + __ipv6="" +fi + if [ -f /var/lib/besu/prune-marker ]; then rm -f /var/lib/besu/prune-marker if [ "${ARCHIVE_NODE}" = "true" ]; then @@ -84,5 +92,5 @@ if [ -f /var/lib/besu/prune-marker ]; then else # Word splitting is desired for the command line parameters # shellcheck disable=SC2086 - exec "$@" ${__datadir} ${__network} ${__prune} ${__spec} ${EL_EXTRAS} + exec "$@" ${__datadir} ${__network} ${__ipv6} ${__prune} ${__spec} ${EL_EXTRAS} fi diff --git a/default.env b/default.env index ae0e17c1..ef926943 100644 --- a/default.env +++ b/default.env @@ -89,6 +89,8 @@ CL_P2P_PORT=9000 PRYSM_PORT=9000 PRYSM_UDP_PORT=9000 CL_QUIC_PORT=9001 +# Some clients need a separate port for IPv6 +CL_IPV6_P2P_PORT=9090 # Local grafana dashboard port. Do not expose to Internet, it is insecure http GRAFANA_PORT=3000 # Local Siren UI port @@ -331,4 +333,4 @@ DDNS_TAG=v2 NODE_EXPORTER_IGNORE_MOUNT_REGEX='^/(dev|proc|sys|run|var/lib/docker/.+)($|/)' # Used by ethd update - please do not adjust -ENV_VERSION=16 +ENV_VERSION=17 diff --git a/grandine/docker-entrypoint.sh b/grandine/docker-entrypoint.sh index 8aafac91..5a0f62ee 100755 --- a/grandine/docker-entrypoint.sh +++ b/grandine/docker-entrypoint.sh @@ -83,6 +83,14 @@ if [ "${IPV6}" = "true" ]; then echo "Configuring Grandine to listen on IPv6 ports" __ipv6="--listen-address-ipv6 :: --libp2p-port-ipv6 ${CL_P2P_PORT:-9000} --discovery-port-ipv6 ${CL_P2P_PORT:-9000} \ --quic-port-ipv6 ${CL_QUIC_PORT:-9001}" +# ENR discovery on v6 is not yet working, likely too few peers. Manual for now + __ipv6_pattern="^[0-9A-Fa-f]{1,4}:" # Sufficient to check the start + set +e + __public_v6=$(curl -s -6 ifconfig.me) + set -e + if [[ "$__public_v6" =~ $__ipv6_pattern ]]; then + __ipv6+=" --enr-address-ipv6 ${__public_v6} --enr-tcp-port-ipv6 ${CL_P2P_PORT:-9000} --enr-udp-port-ipv6 ${CL_P2P_PORT:-9000}" + fi else __ipv6="" fi diff --git a/lighthouse/docker-entrypoint.sh b/lighthouse/docker-entrypoint.sh index 324f21ef..af7c9262 100755 --- a/lighthouse/docker-entrypoint.sh +++ b/lighthouse/docker-entrypoint.sh @@ -88,6 +88,14 @@ fi if [ "${IPV6}" = "true" ]; then echo "Configuring Lighthouse to listen on IPv6 ports" __ipv6="--listen-address :: --port6 ${CL_P2P_PORT:-9000} --quic-port6 ${CL_QUIC_PORT:-9001}" +# ENR discovery on v6 is not yet working, likely too few peers. 
Manual for now + __ipv6_pattern="^[0-9A-Fa-f]{1,4}:" # Sufficient to check the start + set +e + __public_v6=$(wget -6 -q -O- ifconfig.me) + set -e + if [[ "$__public_v6" =~ $__ipv6_pattern ]]; then + __ipv6+=" --enr-address ${__public_v6}" + fi else __ipv6="" fi diff --git a/lodestar-cl-only.yml b/lodestar-cl-only.yml index 9509fdeb..06e69b4a 100644 --- a/lodestar-cl-only.yml +++ b/lodestar-cl-only.yml @@ -39,11 +39,13 @@ services: - ARCHIVE_NODE=${ARCHIVE_NODE:-} - NETWORK=${NETWORK} - IPV6=${IPV6:-false} - - CL_P2P_PORT=${CL_P2P_PORT:-9000} + - CL_IPV6_P2P_PORT=${CL_IPV6_P2P_PORT:-9090} - NODE_OPTIONS=${LODESTAR_HEAP:---max-old-space-size=8192} ports: - ${HOST_IP:-}:${CL_P2P_PORT:-9000}:${CL_P2P_PORT:-9000}/tcp - ${HOST_IP:-}:${CL_P2P_PORT:-9000}:${CL_P2P_PORT:-9000}/udp + - ${HOST_IP:-}:${CL_IPV6_P2P_PORT:-9090}:${CL_IPV6_P2P_PORT:-9090}/tcp + - ${HOST_IP:-}:${CL_IPV6_P2P_PORT:-9090}:${CL_IPV6_P2P_PORT:-9090}/udp networks: default: aliases: diff --git a/lodestar.yml b/lodestar.yml index 91991958..4ec4204b 100644 --- a/lodestar.yml +++ b/lodestar.yml @@ -39,11 +39,13 @@ services: - ARCHIVE_NODE=${ARCHIVE_NODE:-} - NETWORK=${NETWORK} - IPV6=${IPV6:-false} - - CL_P2P_PORT=${CL_P2P_PORT:-9000} + - CL_IPV6_P2P_PORT=${CL_IPV6_P2P_PORT:-9090} - NODE_OPTIONS=${LODESTAR_HEAP:---max-old-space-size=8192} ports: - ${HOST_IP:-}:${CL_P2P_PORT:-9000}:${CL_P2P_PORT:-9000}/tcp - ${HOST_IP:-}:${CL_P2P_PORT:-9000}:${CL_P2P_PORT:-9000}/udp + - ${HOST_IP:-}:${CL_IPV6_P2P_PORT:-9090}:${CL_IPV6_P2P_PORT:-9090}/tcp + - ${HOST_IP:-}:${CL_IPV6_P2P_PORT:-9090}:${CL_IPV6_P2P_PORT:-9090}/udp networks: default: aliases: diff --git a/lodestar/Dockerfile.binary b/lodestar/Dockerfile.binary index 2323f00a..e5cf2a03 100644 --- a/lodestar/Dockerfile.binary +++ b/lodestar/Dockerfile.binary @@ -7,7 +7,8 @@ FROM ${DOCKER_REPO}:${DOCKER_TAG} ARG BUILD_TARGET ARG SRC_REPO -RUN apt-get update && apt-get install -y --no-install-recommends ca-certificates tzdata bash gosu git && apt-get clean && rm -rf /var/lib/apt/lists/* +RUN apt-get update && apt-get install -y --no-install-recommends ca-certificates tzdata bash gosu git wget \ + && apt-get clean && rm -rf /var/lib/apt/lists/* ARG USER=lsconsensus ARG UID=10002 diff --git a/lodestar/Dockerfile.source b/lodestar/Dockerfile.source index dc5a2f3f..0a9fcf2b 100644 --- a/lodestar/Dockerfile.source +++ b/lodestar/Dockerfile.source @@ -18,7 +18,8 @@ RUN bash -c "cd .. && rm -rf app && git clone ${SRC_REPO} app && cd app && git c FROM node:22.4-slim -RUN apt-get update && apt-get install -y --no-install-recommends ca-certificates tzdata bash gosu git && apt-get clean && rm -rf /var/lib/apt/lists/* +RUN apt-get update && apt-get install -y --no-install-recommends ca-certificates tzdata bash gosu git wget \ + && apt-get clean && rm -rf /var/lib/apt/lists/* ARG USER=lsconsensus ARG UID=10002 diff --git a/lodestar/docker-entrypoint.sh b/lodestar/docker-entrypoint.sh index 2b2b915f..7660cc99 100755 --- a/lodestar/docker-entrypoint.sh +++ b/lodestar/docker-entrypoint.sh @@ -85,7 +85,15 @@ fi if [ "${IPV6}" = "true" ]; then echo "Configuring Lodestar to listen on IPv6 ports" - __ipv6="--listenAddress6 :: --port6 ${CL_P2P_PORT:-9000}" + __ipv6="--listenAddress 0.0.0.0 --listenAddress6 :: --port6 ${CL_IPV6_P2P_PORT:-9090}" +# ENR discovery on v6 is not yet working, likely too few peers. 
Manual for now + __ipv6_pattern="^[0-9A-Fa-f]{1,4}:" # Sufficient to check the start + set +e + __public_v6=$(wget -6 -q -O- ifconfig.me) + set -e + if [[ "$__public_v6" =~ $__ipv6_pattern ]]; then + __ipv6+=" --enr.ip6 ${__public_v6}" + fi else __ipv6="" fi diff --git a/nethermind.yml b/nethermind.yml index 0a233b2c..d20630a6 100644 --- a/nethermind.yml +++ b/nethermind.yml @@ -81,6 +81,8 @@ services: - "6060" - --Pruning.FullPruningCompletionBehavior - AlwaysShutdown + - --Discovery.Discv5Enabled + - "true" - --log - ${LOG_LEVEL} labels: diff --git a/teku-allin1.yml b/teku-allin1.yml index e8f32679..fa59e2c9 100644 --- a/teku-allin1.yml +++ b/teku-allin1.yml @@ -47,9 +47,13 @@ services: - EMBEDDED_VC=true - NETWORK=${NETWORK} - ENABLE_DIST_ATTESTATION_AGGR=${ENABLE_DIST_ATTESTATION_AGGR:-false} + - IPV6=${IPV6:-false} + - CL_IPV6_P2P_PORT=${CL_IPV6_P2P_PORT:-9090} ports: - ${HOST_IP:-}:${CL_P2P_PORT:-9000}:${CL_P2P_PORT:-9000}/tcp - ${HOST_IP:-}:${CL_P2P_PORT:-9000}:${CL_P2P_PORT:-9000}/udp + - ${HOST_IP:-}:${CL_IPV6_P2P_PORT:-9090}:${CL_IPV6_P2P_PORT:-9090}/tcp + - ${HOST_IP:-}:${CL_IPV6_P2P_PORT:-9090}:${CL_IPV6_P2P_PORT:-9090}/udp networks: default: aliases: diff --git a/teku-cl-only.yml b/teku-cl-only.yml index f62386d9..2c907aa4 100644 --- a/teku-cl-only.yml +++ b/teku-cl-only.yml @@ -46,10 +46,12 @@ services: - WEB3SIGNER=false - NETWORK=${NETWORK} - IPV6=${IPV6:-false} - - CL_P2P_PORT=${CL_P2P_PORT:-9000} + - CL_IPV6_P2P_PORT=${CL_IPV6_P2P_PORT:-9090} ports: - ${HOST_IP:-}:${CL_P2P_PORT:-9000}:${CL_P2P_PORT:-9000}/tcp - ${HOST_IP:-}:${CL_P2P_PORT:-9000}:${CL_P2P_PORT:-9000}/udp + - ${HOST_IP:-}:${CL_IPV6_P2P_PORT:-9090}:${CL_IPV6_P2P_PORT:-9090}/tcp + - ${HOST_IP:-}:${CL_IPV6_P2P_PORT:-9090}:${CL_IPV6_P2P_PORT:-9090}/udp networks: default: aliases: diff --git a/teku.yml b/teku.yml index 8cd11c92..01dee369 100644 --- a/teku.yml +++ b/teku.yml @@ -46,10 +46,12 @@ services: - EMBEDDED_VC=false - NETWORK=${NETWORK} - IPV6=${IPV6:-false} - - CL_P2P_PORT=${CL_P2P_PORT:-9000} + - CL_IPV6_P2P_PORT=${CL_IPV6_P2P_PORT:-9090} ports: - ${HOST_IP:-}:${CL_P2P_PORT:-9000}:${CL_P2P_PORT:-9000}/tcp - ${HOST_IP:-}:${CL_P2P_PORT:-9000}:${CL_P2P_PORT:-9000}/udp + - ${HOST_IP:-}:${CL_IPV6_P2P_PORT:-9090}:${CL_IPV6_P2P_PORT:-9090}/tcp + - ${HOST_IP:-}:${CL_IPV6_P2P_PORT:-9090}:${CL_IPV6_P2P_PORT:-9090}/udp networks: default: aliases: diff --git a/teku/docker-entrypoint.sh b/teku/docker-entrypoint.sh index 3c0488bf..68570c63 100755 --- a/teku/docker-entrypoint.sh +++ b/teku/docker-entrypoint.sh @@ -133,7 +133,25 @@ fi if [ "${IPV6}" = "true" ]; then echo "Configuring Teku to listen on IPv6 ports" - __ipv6="--p2p-interface 0.0.0.0,:: --p2p-port-ipv6 ${CL_P2P_PORT:-9000}" + __ipv6="--p2p-interface 0.0.0.0,:: --p2p-port-ipv6 ${CL_IPV6_P2P_PORT:-9090}" +# ENR discovery on v6 is not yet working, likely too few peers. 
Manual for now + __ipv4_pattern="^([0-9]{1,3}\.){3}[0-9]{1,3}$" + __ipv6_pattern="^[0-9A-Fa-f]{1,4}:" # Sufficient to check the start + set +e + __public_v4=$(curl -s -4 ifconfig.me) + __public_v6=$(curl -s -6 ifconfig.me) + set -e + __valid_v4=0 + if [[ "$__public_v4" =~ $__ipv4_pattern ]]; then + __valid_v4=1 + fi + if [[ "$__public_v6" =~ $__ipv6_pattern ]]; then + if [ "${__valid_v4}" -eq 1 ]; then + __ipv6+=" --p2p-advertised-ips ${__public_v4},${__public_v6}" + else + __ipv6+=" --p2p-advertised-ip ${__public_v6}" + fi + fi else __ipv6="" fi From 07aca7d04c7e4f39628692a7444ba1b074b60b54 Mon Sep 17 00:00:00 2001 From: yorickdowne <71337066+yorickdowne@users.noreply.github.com> Date: Mon, 30 Sep 2024 12:21:11 -0400 Subject: [PATCH 40/48] Fix Lighthouse v6 (#1942) --- lighthouse-cl-only.yml | 4 +++- lighthouse.yml | 4 +++- lighthouse/docker-entrypoint.sh | 2 +- 3 files changed, 7 insertions(+), 3 deletions(-) diff --git a/lighthouse-cl-only.yml b/lighthouse-cl-only.yml index 11dcd912..31e0fdd0 100644 --- a/lighthouse-cl-only.yml +++ b/lighthouse-cl-only.yml @@ -38,13 +38,15 @@ services: - CL_EXTRAS=${CL_EXTRAS:-} - ARCHIVE_NODE=${ARCHIVE_NODE:-false} - IPV6=${IPV6:-false} - - CL_P2P_PORT=${CL_P2P_PORT:-9000} + - CL_IPV6_P2P_PORT=${CL_IPV6_P2P_PORT:-9090} - CL_QUIC_PORT=${CL_QUIC_PORT:-9001} - NETWORK=${NETWORK} ports: - ${HOST_IP:-}:${CL_P2P_PORT:-9000}:${CL_P2P_PORT:-9000}/tcp - ${HOST_IP:-}:${CL_P2P_PORT:-9000}:${CL_P2P_PORT:-9000}/udp - ${HOST_IP:-}:${CL_QUIC_PORT:-9001}:${CL_QUIC_PORT:-9001}/udp + - ${HOST_IP:-}:${CL_IPV6_P2P_PORT:-9090}:${CL_IPV6_P2P_PORT:-9090}/tcp + - ${HOST_IP:-}:${CL_IPV6_P2P_PORT:-9090}:${CL_IPV6_P2P_PORT:-9090}/udp networks: default: aliases: diff --git a/lighthouse.yml b/lighthouse.yml index 504224cb..5fbb5fc3 100644 --- a/lighthouse.yml +++ b/lighthouse.yml @@ -40,13 +40,15 @@ services: - CL_EXTRAS=${CL_EXTRAS:-} - ARCHIVE_NODE=${ARCHIVE_NODE:-false} - IPV6=${IPV6:-false} - - CL_P2P_PORT=${CL_P2P_PORT:-9000} + - CL_IPV6_P2P_PORT=${CL_IPV6_P2P_PORT:-9090} - CL_QUIC_PORT=${CL_QUIC_PORT:-9001} - NETWORK=${NETWORK} ports: - ${HOST_IP:-}:${CL_P2P_PORT:-9000}:${CL_P2P_PORT:-9000}/tcp - ${HOST_IP:-}:${CL_P2P_PORT:-9000}:${CL_P2P_PORT:-9000}/udp - ${HOST_IP:-}:${CL_QUIC_PORT:-9001}:${CL_QUIC_PORT:-9001}/udp + - ${HOST_IP:-}:${CL_IPV6_P2P_PORT:-9090}:${CL_IPV6_P2P_PORT:-9090}/tcp + - ${HOST_IP:-}:${CL_IPV6_P2P_PORT:-9090}:${CL_IPV6_P2P_PORT:-9090}/udp networks: default: aliases: diff --git a/lighthouse/docker-entrypoint.sh b/lighthouse/docker-entrypoint.sh index af7c9262..a6160e32 100755 --- a/lighthouse/docker-entrypoint.sh +++ b/lighthouse/docker-entrypoint.sh @@ -87,7 +87,7 @@ fi if [ "${IPV6}" = "true" ]; then echo "Configuring Lighthouse to listen on IPv6 ports" - __ipv6="--listen-address :: --port6 ${CL_P2P_PORT:-9000} --quic-port6 ${CL_QUIC_PORT:-9001}" + __ipv6="--listen-address :: --port6 ${CL_IPV6_P2P_PORT:-9090}" # ENR discovery on v6 is not yet working, likely too few peers. Manual for now __ipv6_pattern="^[0-9A-Fa-f]{1,4}:" # Sufficient to check the start set +e From ece6f33593d76c6a14ad7e1d9b950160efe5169f Mon Sep 17 00:00:00 2001 From: yorickdowne <71337066+yorickdowne@users.noreply.github.com> Date: Mon, 30 Sep 2024 13:40:56 -0400 Subject: [PATCH 41/48] Lh v6 fix (#1943) * Fix Lighthouse v6 * Revert "Fix Lighthouse v6" This reverts commit a76cc9986c546f0840680d83b05ce77aebb8f908. 
* LH cl only v6 --- lighthouse-cl-only.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/lighthouse-cl-only.yml b/lighthouse-cl-only.yml index 31e0fdd0..2bedea1a 100644 --- a/lighthouse-cl-only.yml +++ b/lighthouse-cl-only.yml @@ -65,6 +65,9 @@ services: - 0.0.0.0 - --http-port - ${CL_REST_PORT:-5052} + - --http-allow-origin=* + - --listen-address + - 0.0.0.0 - --port - ${CL_P2P_PORT:-9000} - --quic-port From 1c7920a5474bf31f22ad9d7e2fb287efbacf9331 Mon Sep 17 00:00:00 2001 From: yorickdowne <71337066+yorickdowne@users.noreply.github.com> Date: Mon, 30 Sep 2024 13:45:26 -0400 Subject: [PATCH 42/48] Revert "Fix Lighthouse v6 (#1942)" (#1944) This reverts commit 07aca7d04c7e4f39628692a7444ba1b074b60b54. --- lighthouse-cl-only.yml | 4 +--- lighthouse.yml | 4 +--- lighthouse/docker-entrypoint.sh | 2 +- 3 files changed, 3 insertions(+), 7 deletions(-) diff --git a/lighthouse-cl-only.yml b/lighthouse-cl-only.yml index 2bedea1a..56918b66 100644 --- a/lighthouse-cl-only.yml +++ b/lighthouse-cl-only.yml @@ -38,15 +38,13 @@ services: - CL_EXTRAS=${CL_EXTRAS:-} - ARCHIVE_NODE=${ARCHIVE_NODE:-false} - IPV6=${IPV6:-false} - - CL_IPV6_P2P_PORT=${CL_IPV6_P2P_PORT:-9090} + - CL_P2P_PORT=${CL_P2P_PORT:-9000} - CL_QUIC_PORT=${CL_QUIC_PORT:-9001} - NETWORK=${NETWORK} ports: - ${HOST_IP:-}:${CL_P2P_PORT:-9000}:${CL_P2P_PORT:-9000}/tcp - ${HOST_IP:-}:${CL_P2P_PORT:-9000}:${CL_P2P_PORT:-9000}/udp - ${HOST_IP:-}:${CL_QUIC_PORT:-9001}:${CL_QUIC_PORT:-9001}/udp - - ${HOST_IP:-}:${CL_IPV6_P2P_PORT:-9090}:${CL_IPV6_P2P_PORT:-9090}/tcp - - ${HOST_IP:-}:${CL_IPV6_P2P_PORT:-9090}:${CL_IPV6_P2P_PORT:-9090}/udp networks: default: aliases: diff --git a/lighthouse.yml b/lighthouse.yml index 5fbb5fc3..504224cb 100644 --- a/lighthouse.yml +++ b/lighthouse.yml @@ -40,15 +40,13 @@ services: - CL_EXTRAS=${CL_EXTRAS:-} - ARCHIVE_NODE=${ARCHIVE_NODE:-false} - IPV6=${IPV6:-false} - - CL_IPV6_P2P_PORT=${CL_IPV6_P2P_PORT:-9090} + - CL_P2P_PORT=${CL_P2P_PORT:-9000} - CL_QUIC_PORT=${CL_QUIC_PORT:-9001} - NETWORK=${NETWORK} ports: - ${HOST_IP:-}:${CL_P2P_PORT:-9000}:${CL_P2P_PORT:-9000}/tcp - ${HOST_IP:-}:${CL_P2P_PORT:-9000}:${CL_P2P_PORT:-9000}/udp - ${HOST_IP:-}:${CL_QUIC_PORT:-9001}:${CL_QUIC_PORT:-9001}/udp - - ${HOST_IP:-}:${CL_IPV6_P2P_PORT:-9090}:${CL_IPV6_P2P_PORT:-9090}/tcp - - ${HOST_IP:-}:${CL_IPV6_P2P_PORT:-9090}:${CL_IPV6_P2P_PORT:-9090}/udp networks: default: aliases: diff --git a/lighthouse/docker-entrypoint.sh b/lighthouse/docker-entrypoint.sh index a6160e32..af7c9262 100755 --- a/lighthouse/docker-entrypoint.sh +++ b/lighthouse/docker-entrypoint.sh @@ -87,7 +87,7 @@ fi if [ "${IPV6}" = "true" ]; then echo "Configuring Lighthouse to listen on IPv6 ports" - __ipv6="--listen-address :: --port6 ${CL_IPV6_P2P_PORT:-9090}" + __ipv6="--listen-address :: --port6 ${CL_P2P_PORT:-9000} --quic-port6 ${CL_QUIC_PORT:-9001}" # ENR discovery on v6 is not yet working, likely too few peers. 
Manual for now __ipv6_pattern="^[0-9A-Fa-f]{1,4}:" # Sufficient to check the start set +e From 33d53748847061ed3a4c8e7eca1ee303774313f7 Mon Sep 17 00:00:00 2001 From: yorickdowne <71337066+yorickdowne@users.noreply.github.com> Date: Mon, 30 Sep 2024 16:26:03 -0400 Subject: [PATCH 43/48] Remove Nethermind DiscV5 (#1945) --- nethermind.yml | 2 -- 1 file changed, 2 deletions(-) diff --git a/nethermind.yml b/nethermind.yml index d20630a6..0a233b2c 100644 --- a/nethermind.yml +++ b/nethermind.yml @@ -81,8 +81,6 @@ services: - "6060" - --Pruning.FullPruningCompletionBehavior - AlwaysShutdown - - --Discovery.Discv5Enabled - - "true" - --log - ${LOG_LEVEL} labels: From dedfef999ccd7dd368c2e9933947b25e88bb93fe Mon Sep 17 00:00:00 2001 From: yorickdowne <71337066+yorickdowne@users.noreply.github.com> Date: Wed, 2 Oct 2024 21:48:01 -0400 Subject: [PATCH 44/48] Add --enr-udp6-port to Lighthouse (#1947) --- lighthouse/docker-entrypoint.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lighthouse/docker-entrypoint.sh b/lighthouse/docker-entrypoint.sh index af7c9262..0dd9137a 100755 --- a/lighthouse/docker-entrypoint.sh +++ b/lighthouse/docker-entrypoint.sh @@ -87,7 +87,7 @@ fi if [ "${IPV6}" = "true" ]; then echo "Configuring Lighthouse to listen on IPv6 ports" - __ipv6="--listen-address :: --port6 ${CL_P2P_PORT:-9000} --quic-port6 ${CL_QUIC_PORT:-9001}" + __ipv6="--listen-address :: --port6 ${CL_P2P_PORT:-9000} --enr-udp6-port ${CL_P2P_PORT:-9000} --quic-port6 ${CL_QUIC_PORT:-9001}" # ENR discovery on v6 is not yet working, likely too few peers. Manual for now __ipv6_pattern="^[0-9A-Fa-f]{1,4}:" # Sufficient to check the start set +e From 4f6d1f296625b326f04b24f64f06387f91086fcb Mon Sep 17 00:00:00 2001 From: yorickdowne <71337066+yorickdowne@users.noreply.github.com> Date: Thu, 3 Oct 2024 03:11:07 -0400 Subject: [PATCH 45/48] Better Erigon v3 detection (#1949) --- ethd | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/ethd b/ethd index 32718fc9..cd5a496d 100755 --- a/ethd +++ b/ethd @@ -795,9 +795,11 @@ __delete_erigon() { # Check Erigon version, only continue if v3 __var="ERIGON_DOCKER_TAG" __value=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) + __var="ERIGON_DOCKER_REPO" + __repo=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) # I do mean to match literally # shellcheck disable=SC2076 - if [[ ! ("${__value}" =~ "v3" || "${__value}" = "latest" || "${__value}" = "main-latest") ]]; then + if [[ ! 
("${__value}" =~ "v3" || ( "${__value}" = "latest" && "${__repo}" =~ "thorax" ) || "${__value}" = "main-latest") ]]; then return 0 fi From 61e9f6dad67ed0566aa80ee28d35441a21f39758 Mon Sep 17 00:00:00 2001 From: yorickdowne <71337066+yorickdowne@users.noreply.github.com> Date: Sat, 5 Oct 2024 00:47:24 -0400 Subject: [PATCH 46/48] Fresh Reth prune keeps all receipts (#1946) --- reth/docker-entrypoint.sh | 36 ------------------------------------ 1 file changed, 36 deletions(-) diff --git a/reth/docker-entrypoint.sh b/reth/docker-entrypoint.sh index 45d09f96..f45c640c 100755 --- a/reth/docker-entrypoint.sh +++ b/reth/docker-entrypoint.sh @@ -106,42 +106,6 @@ distance = 10064 [prune.segments.storage_history] distance = 10064 EOF - case "${NETWORK}" in - mainnet) - echo "Configuring Reth pruning to include RocketPool, SSV and StakeWise contracts" - cat <> /var/lib/reth/reth.toml - -[prune.segments.receipts_log_filter.0x00000000219ab540356cBB839Cbe05303d7705Fa] -before = 0 - -[prune.segments.receipts_log_filter.0xDD9BC35aE942eF0cFa76930954a156B3fF30a4E1] -before = 0 - -[prune.segments.receipts_log_filter.0xEE4d2A71cF479e0D3d0c3c2C923dbfEB57E73111] -before = 0 - -[prune.segments.receipts_log_filter.0x6B5815467da09DaA7DC83Db21c9239d98Bb487b5] -before = 0 -EOF - ;; - holesky) - echo "Configuring Reth pruning to include RocketPool, SSV and StakeWise contracts" - cat <> /var/lib/reth/reth.toml - -[prune.segments.receipts_log_filter.0x4242424242424242424242424242424242424242] -before = 0 - -[prune.segments.receipts_log_filter.0x38A4794cCEd47d3baf7370CcC43B560D3a1beEFA] -before = 0 - -[prune.segments.receipts_log_filter.0x9D210F9169bc6Cf49152F21A57A446bCcaA87b33] -before = 0 - -[prune.segments.receipts_log_filter.0xB580799Bf7d62721D1a523f0FDF2f5Ed7BA4e259] -before = 0 -EOF - ;; - esac fi fi From 71fc63409f11000dac5db7842485acf5ec239a5e Mon Sep 17 00:00:00 2001 From: yorickdowne <71337066+yorickdowne@users.noreply.github.com> Date: Sat, 5 Oct 2024 00:47:46 -0400 Subject: [PATCH 47/48] Lodestar source build with 22-slim (#1952) --- lodestar/Dockerfile.source | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lodestar/Dockerfile.source b/lodestar/Dockerfile.source index 0a9fcf2b..401eeadd 100644 --- a/lodestar/Dockerfile.source +++ b/lodestar/Dockerfile.source @@ -1,4 +1,4 @@ -FROM node:22.4-slim AS builder +FROM node:22-slim AS builder # Here only to avoid build-time errors ARG DOCKER_TAG @@ -16,7 +16,7 @@ RUN bash -c "cd .. 
&& rm -rf app && git clone ${SRC_REPO} app && cd app && git c && if [[ ${BUILD_TARGET} =~ pr-.+ ]]; then git fetch origin pull/$(echo ${BUILD_TARGET} | cut -d '-' -f 2)/head:ls-pr; git checkout ls-pr; else git checkout ${BUILD_TARGET}; fi \ && yarn install --non-interactive --frozen-lockfile && yarn build" -FROM node:22.4-slim +FROM node:22-slim RUN apt-get update && apt-get install -y --no-install-recommends ca-certificates tzdata bash gosu git wget \ && apt-get clean && rm -rf /var/lib/apt/lists/* From d1f4fa2132f5d0c34dd8d163a9538d049cf832d0 Mon Sep 17 00:00:00 2001 From: yorickdowne <71337066+yorickdowne@users.noreply.github.com> Date: Sat, 5 Oct 2024 00:49:13 -0400 Subject: [PATCH 48/48] Check for v6 and enable if it works (#1948) --- ethd | 48 +++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 47 insertions(+), 1 deletion(-) diff --git a/ethd b/ethd index cd5a496d..9905a7ce 100755 --- a/ethd +++ b/ethd @@ -996,6 +996,37 @@ __lookup_cf_zone() { # Migrates traefik-cf setup to use Zone ID } +__enable_v6() { + if [ "${__docker_major_version}" -lt 27 ]; then + return + fi + + __var="IPV6" + IPV6=$(sed -n -e "s/^${__var}=\(.*\)/\1/p" "${__env_file}" || true) + if [ "${IPV6}" = "true" ]; then + return + fi + + echo "Testing IPv6 host connectivity" + if ! ping -c1 2001:4860:4860::8888 >/dev/null; then + return + fi + + echo "Testing IPv6 Docker connectivity" + __dodocker network create --ipv6 ip6net_ethd_test + __v6_works=$(__dodocker run --rm --network ip6net_ethd_test busybox sh -c \ + "if ping -c1 -6 2001:4860:4860::8888 >/dev/null; then echo true; else echo false; fi") + __dodocker network rm ip6net_ethd_test + + if [ "${__v6_works}" = "true" ]; then + echo "Enabling IPv4/6 dual-stack for your Eth Docker setup" + IPV6="true" + __set_value_in_env + __enabled_v6=1 + fi +} + + __env_migrate() { if [ ! -f "${__env_file}" ]; then return 0 @@ -1055,6 +1086,7 @@ __env_migrate() { else echo "Migrating ${__env_file} to version ${__target_ver}" fi + ${__as_owner} cp "${__env_file}" "${__env_file}".source __during_migrate=1 __migrated=1 @@ -1076,6 +1108,12 @@ __env_migrate() { if [ "${__var}" = "COMPOSE_FILE" ]; then __migrate_compose_file fi + if [[ "${__source_ver}" -lt "17" && "${__var}" = "IPV6" ]]; then # One-time attempt; remove after Pectra + __enable_v6 + if [ "${__enabled_v6}" -eq 1 ]; then + __value="true" + fi + fi if [ "${__var}" = "CL_QUIC_PORT" ]; then __cl_port=$(sed -n -e "s/^CL_P2P_PORT=\(.*\)/\1/p" "${__env_file}.source" || true) if [ -n "${__cl_port}" ] && [ "${__cl_port}" = "${__value}" ]; then @@ -1245,6 +1283,7 @@ __pull_and_build() { # shellcheck disable=SC2120 update() { __during_update=1 + __enabled_v6=0 # Remove after Pectra if [[ $(${__as_owner} git status --porcelain) ]]; then __dirty=1 @@ -1378,7 +1417,12 @@ reset to defaults." fi echo if [ -z "${GITEXITCODE+x}" ] || [ "${GITEXITCODE}" -eq 0 ]; then - echo "An \"$__me up\" command will start using the new images and configuration." + if [ "${__enabled_v6}" -eq 0 ]; then # Remove after Pectra + echo "An \"$__me up\" command will start using the new images and configuration." + else + echo "IPv4/6 dual-stack support has been enabled." + echo "An \"$__me restart\" command will start using the new images and configuration." + fi else echo "WARNING" echo @@ -3696,6 +3740,8 @@ config() { __set_value_in_env fi + __enable_v6 + ${__as_owner} rm .env.original __pull_and_build
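The IPv6 auto-detection added in patch 48 (#1948) only runs on Docker 27 or newer and amounts to two connectivity probes: one from the host and one from a throwaway IPv6-enabled Docker network. As a rough manual check of the same conditions — a sketch assuming Docker 27+, where a user-defined --ipv6 network gets an address pool without extra configuration — the equivalent commands are:

    # Host-level IPv6 reachability (the same probe __enable_v6 uses)
    ping -c1 2001:4860:4860::8888

    # Container-level IPv6 reachability via a temporary network
    docker network create --ipv6 ip6net_ethd_test
    docker run --rm --network ip6net_ethd_test busybox ping -c1 -6 2001:4860:4860::8888
    docker network rm ip6net_ethd_test

Only when both probes succeed does the script set IPV6=true in the env file, after which an "ethd restart" picks up the dual-stack configuration.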