diff --git a/.env b/.env index 9f6393cee7..5a5fa4bd3e 100644 --- a/.env +++ b/.env @@ -43,6 +43,10 @@ PG_APPLICATION_NAME=stacks-blockchain-api # Limit to how many concurrent connections can be created, defaults to 10 # PG_CONNECTION_POOL_MAX=10 +# Insert concurrency when processing new blocks +# If your PostgreSQL database runs on SSD storage and has multiple CPU cores, consider raising this value to 8 or 16. +# STACKS_BLOCK_DATA_INSERT_CONCURRENCY=4 + # If specified, controls the Stacks Blockchain API mode. The possible values are: # * `readonly`: Runs the API endpoints without an Event Server that listens to events from a node and # writes them to the local database. The API will only read data from the PG database diff --git a/.nvmrc b/.nvmrc index 3f430af82b..9a2a0e219c 100644 --- a/.nvmrc +++ b/.nvmrc @@ -1 +1 @@ -v18 +v20 diff --git a/CHANGELOG.md b/CHANGELOG.md index 4c587dd712..95483cd914 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,36 @@ +## [7.8.0-beta.4](https://github.com/hirosystems/stacks-blockchain-api/compare/v7.8.0-beta.3...v7.8.0-beta.4) (2024-01-16) + + +### Features + +* upgrade to node 20, use bookworm-slim image ([#1832](https://github.com/hirosystems/stacks-blockchain-api/issues/1832)) ([0a42109](https://github.com/hirosystems/stacks-blockchain-api/commit/0a42109242ab5804004e01338f236f61ef07651b)) + +## [7.8.0-beta.3](https://github.com/hirosystems/stacks-blockchain-api/compare/v7.8.0-beta.2...v7.8.0-beta.3) (2024-01-12) + + +### Bug Fixes + +* change all HASH indexes to BTREE to optimize writes ([#1825](https://github.com/hirosystems/stacks-blockchain-api/issues/1825)) ([234936b](https://github.com/hirosystems/stacks-blockchain-api/commit/234936b430640fb7108e6cb57bdb21d1085a65b2)) + +## [7.8.0-beta.2](https://github.com/hirosystems/stacks-blockchain-api/compare/v7.8.0-beta.1...v7.8.0-beta.2) (2024-01-12) + + +### Bug Fixes + +* optimize re-org queries and indexes ([#1821](https://github.com/hirosystems/stacks-blockchain-api/issues/1821)) ([5505d35](https://github.com/hirosystems/stacks-blockchain-api/commit/5505d354ecae6e52c751b3b634752fd56d24642f)) + +## [7.8.0-beta.1](https://github.com/hirosystems/stacks-blockchain-api/compare/v7.7.1...v7.8.0-beta.1) (2024-01-11) + + +### Features + +* run inserts in batch and in parallel when processing new block ([#1818](https://github.com/hirosystems/stacks-blockchain-api/issues/1818)) ([86dfdb5](https://github.com/hirosystems/stacks-blockchain-api/commit/86dfdb5d536fee8d7490ca5213f7005a8800f9fa)) + +### Bug Fixes + +* log block event counts after processing ([#1820](https://github.com/hirosystems/stacks-blockchain-api/issues/1820)) ([9c39743](https://github.com/hirosystems/stacks-blockchain-api/commit/9c397439e6eb2830186cda90a213b3ab3d5a4301)), closes [#1819](https://github.com/hirosystems/stacks-blockchain-api/issues/1819) + + ## [7.7.2](https://github.com/hirosystems/stacks-blockchain-api/compare/v7.7.1...v7.7.2) (2024-01-16) diff --git a/Dockerfile b/Dockerfile index c9ec7724ac..672eae2fa2 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,11 +1,11 @@ -FROM node:18-bullseye +FROM node:20-bookworm-slim WORKDIR /app COPY . . 
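+# Copy the prebuilt duckdb CLI from the qldrsc/duckdb image into /bin/duckdb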
COPY --from=qldrsc/duckdb /usr/local/bin/duckdb /bin/duckdb RUN apt-get update && \ - apt-get install -y git openjdk-11-jre && \ + apt-get install -y git openjdk-17-jre && \ apt-get clean && \ rm -rf /var/lib/apt/lists/* RUN echo "GIT_TAG=$(git tag --points-at HEAD)" >> .env diff --git a/docker/rosetta.Dockerfile b/docker/rosetta.Dockerfile index 24fa3db5f9..09636a41ff 100644 --- a/docker/rosetta.Dockerfile +++ b/docker/rosetta.Dockerfile @@ -12,7 +12,7 @@ ARG ARCHIVE_VERSION=latest ####################################################################### ## Build the stacks-blockchain-api -FROM node:18-buster as stacks-blockchain-api-build +FROM node:20-bookworm-slim as stacks-blockchain-api-build ARG STACKS_API_VERSION ENV STACKS_API_REPO=hirosystems/stacks-blockchain-api ENV STACKS_API_VERSION=${STACKS_API_VERSION} @@ -20,9 +20,10 @@ ENV DEBIAN_FRONTEND noninteractive WORKDIR /app RUN apt-get update -y \ && apt-get install -y \ + git \ curl \ jq \ - openjdk-11-jre-headless \ + openjdk-17-jre-headless \ cmake \ && git clone -b ${STACKS_API_VERSION} https://github.com/${STACKS_API_REPO} . \ && echo "GIT_TAG=$(git tag --points-at HEAD)" >> .env \ @@ -102,7 +103,7 @@ RUN sed -i '/en_US.UTF-8/s/^# //g' /etc/locale.gen && locale-gen RUN curl -sL https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add - \ && echo "deb http://apt.postgresql.org/pub/repos/apt/ `lsb_release -cs`-pgdg main" > /etc/apt/sources.list.d/pgsql.list \ - && curl -sL https://deb.nodesource.com/setup_16.x | bash - + && curl -sL https://deb.nodesource.com/setup_20.x | bash - RUN apt-get update \ && apt-get install -y \ postgresql-${PG_VERSION} \ diff --git a/docker/standalone-regtest.Dockerfile b/docker/standalone-regtest.Dockerfile index 33532c5d4e..17c7c6ae94 100644 --- a/docker/standalone-regtest.Dockerfile +++ b/docker/standalone-regtest.Dockerfile @@ -1,6 +1,6 @@ # syntax=docker/dockerfile:1 -FROM node:18-bullseye as api-builder +FROM node:20-bookworm-slim as api-builder ARG API_GIT_COMMIT ARG STACKS_API_VERSION diff --git a/docs/api/smart-contracts/get-smart-contracts-status.example.json b/docs/api/smart-contracts/get-smart-contracts-status.example.json new file mode 100644 index 0000000000..15f66ee27c --- /dev/null +++ b/docs/api/smart-contracts/get-smart-contracts-status.example.json @@ -0,0 +1,31 @@ +{ + "SP3K8BC0PPEVCV7NZ6QSRWPQ2JE9E5B6N3PA0KBR9.swap-helper-bridged-v1-1": { + "found": true, + "result": { + "contract_id": "SP3K8BC0PPEVCV7NZ6QSRWPQ2JE9E5B6N3PA0KBR9.swap-helper-bridged-v1-1", + "status": "success", + "tx_id": "0x8542d28e427256ea3c29dcd8793222891999ceff4ef1bb062e2f21cb6def6884", + "block_height": 111021 + } + }, + "SP1JTCR202ECC6333N7ZXD7MK7E3ZTEEE1MJ73C60.name-registrar": { + "found": true, + "result": { + "contract_id": "SP1JTCR202ECC6333N7ZXD7MK7E3ZTEEE1MJ73C60.name-registrar", + "status": "success", + "tx_id": "0x6e1114cce8c6f2e9c8130f9acd75d67bb667ae584f882acdd2db6dd74e6cbe5e", + "block_height": 113010 + } + }, + "SP4SZE494VC2YC5JYG7AYFQ44F5Q4PYV7DVMDPBG.stacking-dao-core-v1": { + "found": true, + "result": { + "contract_id": "SP4SZE494VC2YC5JYG7AYFQ44F5Q4PYV7DVMDPBG.stacking-dao-core-v1", + "status": "pending", + "tx_id": "0x10bdcf10ffee72994f493ac36760f4e95a76c8471370182fd4705c2153dc173d" + } + }, + "SP4SZE494VC2YC5JYG7AYFQ44F5Q4PYV7DVMDPBG.stacking-dao-core": { + "found": false + } +} diff --git a/docs/api/smart-contracts/get-smart-contracts-status.schema.json b/docs/api/smart-contracts/get-smart-contracts-status.schema.json new file mode 100644 index 0000000000..2385c6afa0 --- 
/dev/null +++ b/docs/api/smart-contracts/get-smart-contracts-status.schema.json @@ -0,0 +1,15 @@ +{ + "description": "GET request that returns the deployment status of multiple smart contracts", + "title": "SmartContractsStatusResponse", + "type": "object", + "additionalProperties": { + "anyOf": [ + { + "$ref": "./smart-contract-found.schema.json" + }, + { + "$ref": "./smart-contract-not-found.schema.json" + } + ] + } +} diff --git a/docs/api/smart-contracts/smart-contract-found.schema.json b/docs/api/smart-contracts/smart-contract-found.schema.json new file mode 100644 index 0000000000..6d781711df --- /dev/null +++ b/docs/api/smart-contracts/smart-contract-found.schema.json @@ -0,0 +1,15 @@ +{ + "type": "object", + "title": "SmartContractFound", + "additionalProperties": false, + "required": ["found", "result"], + "properties": { + "found": { + "type": "boolean", + "enum": [true] + }, + "result": { + "$ref": "../../entities/smart-contracts/smart-contract-status.schema.json" + } + } +} diff --git a/docs/api/smart-contracts/smart-contract-not-found.schema.json b/docs/api/smart-contracts/smart-contract-not-found.schema.json new file mode 100644 index 0000000000..4a4d63514c --- /dev/null +++ b/docs/api/smart-contracts/smart-contract-not-found.schema.json @@ -0,0 +1,12 @@ +{ + "type": "object", + "title": "SmartContractNotFound", + "additionalProperties": false, + "properties": { + "found": { + "type": "boolean", + "enum": [false] + } + }, + "required": ["found"] +} diff --git a/docs/entities/smart-contracts/smart-contract-status.example.json b/docs/entities/smart-contracts/smart-contract-status.example.json new file mode 100644 index 0000000000..ac5cb17bbb --- /dev/null +++ b/docs/entities/smart-contracts/smart-contract-status.example.json @@ -0,0 +1,6 @@ +{ + "contract_id": "SP3K8BC0PPEVCV7NZ6QSRWPQ2JE9E5B6N3PA0KBR9.swap-helper-bridged-v1-1", + "status": "success", + "tx_id": "0x8542d28e427256ea3c29dcd8793222891999ceff4ef1bb062e2f21cb6def6884", + "block_height": 111021 +} diff --git a/docs/entities/smart-contracts/smart-contract-status.schema.json b/docs/entities/smart-contracts/smart-contract-status.schema.json new file mode 100644 index 0000000000..116cf8c65b --- /dev/null +++ b/docs/entities/smart-contracts/smart-contract-status.schema.json @@ -0,0 +1,29 @@ +{ + "title": "SmartContractStatus", + "description": "Deployment status of a smart contract", + "type": "object", + "additionalProperties": false, + "required": [ + "status", + "tx_id", + "contract_id" + ], + "properties": { + "status": { + "type": "string", + "description": "Smart contract deployment transaction status" + }, + "tx_id": { + "type": "string", + "description": "Deployment transaction ID" + }, + "contract_id": { + "type": "string", + "description": "Smart contract ID" + }, + "block_height": { + "type": "integer", + "description": "Height of the transaction confirmation block" + } + } +} diff --git a/docs/generated.d.ts b/docs/generated.d.ts index f9952d5cfa..095800439f 100644 --- a/docs/generated.d.ts +++ b/docs/generated.d.ts @@ -93,6 +93,9 @@ export type SchemaMergeRootStub = | SearchSuccessResult | TxSearchResult | SearchResult + | SmartContractsStatusResponse + | SmartContractFound + | SmartContractNotFound | PoolDelegationsResponse | { [k: string]: unknown | undefined; @@ -177,6 +180,7 @@ export type SchemaMergeRootStub = | RosettaSyncStatus | TransactionIdentifier | RosettaTransaction + | SmartContractStatus | PoolDelegation | NonFungibleTokenHistoryEventWithTxId | NonFungibleTokenHistoryEventWithTxMetadata @@ 
-3059,6 +3063,40 @@ export interface TxSearchResult { metadata?: Transaction; }; } +/** + * GET request that returns the deployment status of multiple smart contracts + */ +export interface SmartContractsStatusResponse { + [k: string]: (SmartContractFound | SmartContractNotFound) | undefined; +} +export interface SmartContractFound { + found: true; + result: SmartContractStatus; +} +/** + * Deployment status of a smart contract + */ +export interface SmartContractStatus { + /** + * Smart contract deployment transaction status + */ + status: string; + /** + * Deployment transaction ID + */ + tx_id: string; + /** + * Smart contract ID + */ + contract_id: string; + /** + * Height of the transaction confirmation block + */ + block_height?: number; +} +export interface SmartContractNotFound { + found: false; +} /** * GET request that returns stacking pool member details for a given pool (delegator) principal */ diff --git a/docs/openapi.yaml b/docs/openapi.yaml index afdb5ad6a8..67c6917c63 100644 --- a/docs/openapi.yaml +++ b/docs/openapi.yaml @@ -834,6 +834,36 @@ paths: example: $ref: ./api/transaction/get-transactions.example.json + /extended/v2/smart-contracts/status: + get: + summary: Get smart contracts status + description: | + Retrieves the deployment status of multiple smart contracts. + tags: + - Smart Contracts + operationId: get_smart_contracts_status + parameters: + - name: contract_id + in: query + description: Contract IDs to fetch status for + required: true + style: form + explode: true + schema: + type: array + example: "SPQZF23W7SEYBFG5JQ496NMY0G7379SRYEDREMSV.Candy" + items: + type: string + responses: + 200: + description: Deployment status for each requested smart contract + content: + application/json: + schema: + $ref: ./api/smart-contracts/get-smart-contracts-status.schema.json + example: + $ref: ./api/smart-contracts/get-smart-contracts-status.example.json + /extended/v1/block: get: summary: Get recent blocks diff --git a/migrations/1705013096459_update-re-org-indexes.js b/migrations/1705013096459_update-re-org-indexes.js new file mode 100644 index 0000000000..64d8ec649d --- /dev/null +++ b/migrations/1705013096459_update-re-org-indexes.js @@ -0,0 +1,91 @@ +/* eslint-disable camelcase */ + +exports.shorthands = undefined; + +exports.up = pgm => { + pgm.dropIndex('txs', 'index_block_hash'); + pgm.createIndex('txs', ['index_block_hash', 'canonical']); + + pgm.dropIndex('miner_rewards', 'index_block_hash'); + pgm.createIndex('miner_rewards', ['index_block_hash', 'canonical']); + + pgm.dropIndex('stx_lock_events', 'index_block_hash'); + pgm.createIndex('stx_lock_events', ['index_block_hash', 'canonical']); + + pgm.dropIndex('stx_events', 'index_block_hash'); + pgm.createIndex('stx_events', ['index_block_hash', 'canonical']); + + pgm.dropIndex('ft_events', 'index_block_hash'); + pgm.createIndex('ft_events', ['index_block_hash', 'canonical']); + + pgm.dropIndex('nft_events', 'index_block_hash'); + pgm.createIndex('nft_events', ['index_block_hash', 'canonical']); + + pgm.dropIndex('pox2_events', 'index_block_hash'); + pgm.createIndex('pox2_events', ['index_block_hash', 'canonical']); + + pgm.dropIndex('pox3_events', 'index_block_hash'); + pgm.createIndex('pox3_events', ['index_block_hash', 'canonical']); + + pgm.dropIndex('pox4_events', 'index_block_hash'); + pgm.createIndex('pox4_events', ['index_block_hash', 'canonical']); + + pgm.dropIndex('contract_logs', 'index_block_hash'); + pgm.createIndex('contract_logs', ['index_block_hash', 'canonical']); + + pgm.dropIndex('smart_contracts', 
'index_block_hash'); + pgm.createIndex('smart_contracts', ['index_block_hash', 'canonical']); + + pgm.dropIndex('names', 'index_block_hash'); + pgm.createIndex('names', ['index_block_hash', 'canonical']); + + pgm.dropIndex('namespaces', 'index_block_hash'); + pgm.createIndex('namespaces', ['index_block_hash', 'canonical']); + + pgm.dropIndex('subdomains', 'index_block_hash'); + pgm.createIndex('subdomains', ['index_block_hash', 'canonical']); +}; + +exports.down = pgm => { + pgm.dropIndex('txs', ['index_block_hash', 'canonical']); + pgm.createIndex('txs', 'index_block_hash', { method: 'hash' }); + + pgm.dropIndex('miner_rewards', ['index_block_hash', 'canonical']); + pgm.createIndex('miner_rewards', 'index_block_hash', { method: 'hash' }); + + pgm.dropIndex('stx_lock_events', ['index_block_hash', 'canonical']); + pgm.createIndex('stx_lock_events', 'index_block_hash', { method: 'hash' }); + + pgm.dropIndex('stx_events', ['index_block_hash', 'canonical']); + pgm.createIndex('stx_events', 'index_block_hash', { method: 'hash' }); + + pgm.dropIndex('ft_events', ['index_block_hash', 'canonical']); + pgm.createIndex('ft_events', 'index_block_hash', { method: 'hash' }); + + pgm.dropIndex('nft_events', ['index_block_hash', 'canonical']); + pgm.createIndex('nft_events', 'index_block_hash', { method: 'hash' }); + + pgm.dropIndex('pox2_events', ['index_block_hash', 'canonical']); + pgm.createIndex('pox2_events', 'index_block_hash', { method: 'hash' }); + + pgm.dropIndex('pox3_events', ['index_block_hash', 'canonical']); + pgm.createIndex('pox3_events', 'index_block_hash', { method: 'hash' }); + + pgm.dropIndex('pox4_events', ['index_block_hash', 'canonical']); + pgm.createIndex('pox4_events', 'index_block_hash', { method: 'hash' }); + + pgm.dropIndex('contract_logs', ['index_block_hash', 'canonical']); + pgm.createIndex('contract_logs', 'index_block_hash', { method: 'hash' }); + + pgm.dropIndex('smart_contracts', ['index_block_hash', 'canonical']); + pgm.createIndex('smart_contracts', 'index_block_hash', { method: 'hash' }); + + pgm.dropIndex('names', ['index_block_hash', 'canonical']); + pgm.createIndex('names', 'index_block_hash', { method: 'hash' }); + + pgm.dropIndex('namespaces', ['index_block_hash', 'canonical']); + pgm.createIndex('namespaces', 'index_block_hash', { method: 'hash' }); + + pgm.dropIndex('subdomains', ['index_block_hash', 'canonical']); + pgm.createIndex('subdomains', 'index_block_hash', { method: 'hash' }); +}; diff --git a/migrations/1705077567281_remove-hash-indexes.js b/migrations/1705077567281_remove-hash-indexes.js new file mode 100644 index 0000000000..2b07d6954c --- /dev/null +++ b/migrations/1705077567281_remove-hash-indexes.js @@ -0,0 +1,108 @@ +/* eslint-disable camelcase */ + +exports.shorthands = undefined; + +function replaceIndex(pgm, table, column, method = 'btree') { + pgm.dropIndex(table, column); + pgm.createIndex(table, column, { method: method }); +} + +exports.up = pgm => { + pgm.dropIndex('txs', [{ name: 'tx_index', sort: 'DESC' }], { ifExists: true }); + pgm.dropIndex('txs', 'tx_id', { ifExists: true }); + replaceIndex(pgm, 'txs', 'token_transfer_recipient_address'); + replaceIndex(pgm, 'txs', 'sponsor_address'); + replaceIndex(pgm, 'txs', 'smart_contract_contract_id'); + replaceIndex(pgm, 'txs', 'sender_address'); + replaceIndex(pgm, 'txs', 'microblock_hash'); + replaceIndex(pgm, 'txs', 'contract_call_contract_id'); + + replaceIndex(pgm, 'stx_events', 'tx_id'); + replaceIndex(pgm, 'stx_events', 'sender'); + replaceIndex(pgm, 'stx_events', 'recipient'); + 
replaceIndex(pgm, 'stx_events', 'microblock_hash'); + + replaceIndex(pgm, 'miner_rewards', 'recipient'); + + pgm.dropIndex('stx_lock_events', 'block_height', { ifExists: true }); + replaceIndex(pgm, 'stx_lock_events', 'tx_id'); + replaceIndex(pgm, 'stx_lock_events', 'microblock_hash'); + replaceIndex(pgm, 'stx_lock_events', 'locked_address'); + + replaceIndex(pgm, 'ft_events', 'tx_id'); + replaceIndex(pgm, 'ft_events', 'sender'); + replaceIndex(pgm, 'ft_events', 'recipient'); + replaceIndex(pgm, 'ft_events', 'microblock_hash'); + + replaceIndex(pgm, 'nft_events', 'tx_id'); + replaceIndex(pgm, 'nft_events', 'sender'); + replaceIndex(pgm, 'nft_events', 'recipient'); + replaceIndex(pgm, 'nft_events', 'microblock_hash'); + replaceIndex(pgm, 'nft_events', 'asset_identifier'); + + replaceIndex(pgm, 'contract_logs', 'tx_id'); + replaceIndex(pgm, 'contract_logs', 'microblock_hash'); + + replaceIndex(pgm, 'smart_contracts', 'contract_id'); + replaceIndex(pgm, 'smart_contracts', 'microblock_hash'); + + pgm.dropIndex('principal_stx_txs', 'principal', { ifExists: true }); + replaceIndex(pgm, 'principal_stx_txs', 'tx_id'); + + pgm.dropIndex('mempool_txs', 'tx_id', { ifExists: true }); + replaceIndex(pgm, 'mempool_txs', 'token_transfer_recipient_address'); + replaceIndex(pgm, 'mempool_txs', 'sponsor_address'); + replaceIndex(pgm, 'mempool_txs', 'smart_contract_contract_id'); + replaceIndex(pgm, 'mempool_txs', 'sender_address'); + replaceIndex(pgm, 'mempool_txs', 'contract_call_contract_id'); +}; + +exports.down = pgm => { + pgm.createIndex('txs', [{ name: 'tx_index', sort: 'DESC' }]); + pgm.createIndex('txs', 'tx_id', { method: 'hash' }); + replaceIndex(pgm, 'txs', 'token_transfer_recipient_address', 'hash'); + replaceIndex(pgm, 'txs', 'sponsor_address', 'hash'); + replaceIndex(pgm, 'txs', 'smart_contract_contract_id', 'hash'); + replaceIndex(pgm, 'txs', 'sender_address', 'hash'); + replaceIndex(pgm, 'txs', 'microblock_hash', 'hash'); + replaceIndex(pgm, 'txs', 'contract_call_contract_id', 'hash'); + + replaceIndex(pgm, 'stx_events', 'tx_id', 'hash'); + replaceIndex(pgm, 'stx_events', 'sender', 'hash'); + replaceIndex(pgm, 'stx_events', 'recipient', 'hash'); + replaceIndex(pgm, 'stx_events', 'microblock_hash', 'hash'); + + replaceIndex(pgm, 'miner_rewards', 'recipient', 'hash'); + + pgm.createIndex('stx_lock_events', [{ name: 'block_height', sort: 'DESC' }]); + replaceIndex(pgm, 'stx_lock_events', 'tx_id', 'hash'); + replaceIndex(pgm, 'stx_lock_events', 'microblock_hash', 'hash'); + replaceIndex(pgm, 'stx_lock_events', 'locked_address', 'hash'); + + replaceIndex(pgm, 'ft_events', 'tx_id', 'hash'); + replaceIndex(pgm, 'ft_events', 'sender', 'hash'); + replaceIndex(pgm, 'ft_events', 'recipient', 'hash'); + replaceIndex(pgm, 'ft_events', 'microblock_hash', 'hash'); + + replaceIndex(pgm, 'nft_events', 'tx_id', 'hash'); + replaceIndex(pgm, 'nft_events', 'sender', 'hash'); + replaceIndex(pgm, 'nft_events', 'recipient', 'hash'); + replaceIndex(pgm, 'nft_events', 'microblock_hash', 'hash'); + replaceIndex(pgm, 'nft_events', 'asset_identifier', 'hash'); + + replaceIndex(pgm, 'contract_logs', 'tx_id', 'hash'); + replaceIndex(pgm, 'contract_logs', 'microblock_hash', 'hash'); + + replaceIndex(pgm, 'smart_contracts', 'contract_id', 'hash'); + replaceIndex(pgm, 'smart_contracts', 'microblock_hash', 'hash'); + + pgm.createIndex('principal_stx_txs', 'principal', { method: 'hash' }); + replaceIndex(pgm, 'principal_stx_txs', 'tx_id', 'hash'); + + pgm.createIndex('mempool_txs', 'tx_id', { method: 'hash' }); + 
replaceIndex(pgm, 'mempool_txs', 'token_transfer_recipient_address', 'hash'); + replaceIndex(pgm, 'mempool_txs', 'sponsor_address', 'hash'); + replaceIndex(pgm, 'mempool_txs', 'smart_contract_contract_id', 'hash'); + replaceIndex(pgm, 'mempool_txs', 'sender_address', 'hash'); + replaceIndex(pgm, 'mempool_txs', 'contract_call_contract_id', 'hash'); +}; diff --git a/package-lock.json b/package-lock.json index 94e15a732c..db14e9e91d 100644 --- a/package-lock.json +++ b/package-lock.json @@ -87,7 +87,7 @@ "@types/express": "4.17.13", "@types/is-ci": "3.0.0", "@types/jest": "29.5.6", - "@types/node": "18.13.0", + "@types/node": "20.11.4", "@types/node-fetch": "2.5.12", "@types/pg": "7.14.11", "@types/pg-copy-streams": "1.2.1", @@ -121,7 +121,7 @@ "why-is-node-running": "2.2.0" }, "engines": { - "node": ">=18" + "node": ">=20" }, "optionalDependencies": { "bufferutil": "4.0.5", @@ -2550,6 +2550,14 @@ "@types/node": "^18.0.4" } }, + "node_modules/@stacks/common/node_modules/@types/node": { + "version": "18.19.7", + "resolved": "https://registry.npmjs.org/@types/node/-/node-18.19.7.tgz", + "integrity": "sha512-IGRJfoNX10N/PfrReRZ1br/7SQ+2vF/tK3KXNwzXz82D32z5dMQEoOlFew18nLSN+vMNcLY4GrKfzwi/yWI8/w==", + "dependencies": { + "undici-types": "~5.26.4" + } + }, "node_modules/@stacks/encryption": { "version": "6.9.0", "resolved": "https://registry.npmjs.org/@stacks/encryption/-/encryption-6.9.0.tgz", @@ -2566,6 +2574,14 @@ "varuint-bitcoin": "^1.1.2" } }, + "node_modules/@stacks/encryption/node_modules/@types/node": { + "version": "18.19.7", + "resolved": "https://registry.npmjs.org/@types/node/-/node-18.19.7.tgz", + "integrity": "sha512-IGRJfoNX10N/PfrReRZ1br/7SQ+2vF/tK3KXNwzXz82D32z5dMQEoOlFew18nLSN+vMNcLY4GrKfzwi/yWI8/w==", + "dependencies": { + "undici-types": "~5.26.4" + } + }, "node_modules/@stacks/eslint-config": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/@stacks/eslint-config/-/eslint-config-1.2.0.tgz", @@ -3221,9 +3237,12 @@ "dev": true }, "node_modules/@types/node": { - "version": "18.13.0", - "resolved": "https://registry.npmjs.org/@types/node/-/node-18.13.0.tgz", - "integrity": "sha512-gC3TazRzGoOnoKAhUx+Q0t8S9Tzs74z7m0ipwGpSqQrleP14hKxP4/JUeEQcD3W1/aIpnWl8pHowI7WokuZpXg==" + "version": "20.11.4", + "resolved": "https://registry.npmjs.org/@types/node/-/node-20.11.4.tgz", + "integrity": "sha512-6I0fMH8Aoy2lOejL3s4LhyIYX34DPwY8bl5xlNjBvUEk8OHrcuzsFt+Ied4LvJihbtXPM+8zUqdydfIti86v9g==", + "dependencies": { + "undici-types": "~5.26.4" + } }, "node_modules/@types/node-fetch": { "version": "2.5.12", @@ -13669,6 +13688,11 @@ "node": ">=14.0" } }, + "node_modules/undici-types": { + "version": "5.26.5", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-5.26.5.tgz", + "integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==" + }, "node_modules/unique-filename": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/unique-filename/-/unique-filename-3.0.0.tgz", diff --git a/package.json b/package.json index 3a3e0cf6a9..2a31983303 100644 --- a/package.json +++ b/package.json @@ -70,7 +70,7 @@ "homepage": "https://github.com/hirosystems/stacks-blockchain-api#readme", "prettier": "@stacks/prettier-config", "engines": { - "node": ">=18" + "node": ">=20" }, "engineStrict": true, "commitlint": { @@ -162,7 +162,7 @@ "@types/express": "4.17.13", "@types/is-ci": "3.0.0", "@types/jest": "29.5.6", - "@types/node": "18.13.0", + "@types/node": "20.11.4", "@types/node-fetch": "2.5.12", "@types/pg": "7.14.11", 
"@types/pg-copy-streams": "1.2.1", diff --git a/src/api/controllers/db-controller.ts b/src/api/controllers/db-controller.ts index fe9399b09d..b2132286f7 100644 --- a/src/api/controllers/db-controller.ts +++ b/src/api/controllers/db-controller.ts @@ -149,7 +149,9 @@ export function getTxTypeId(typeString: Transaction['tx_type']): DbTxTypeId[] { } } -function getTxStatusString(txStatus: DbTxStatus): TransactionStatus | MempoolTransactionStatus { +export function getTxStatusString( + txStatus: DbTxStatus +): TransactionStatus | MempoolTransactionStatus { switch (txStatus) { case DbTxStatus.Pending: return 'pending'; diff --git a/src/api/init.ts b/src/api/init.ts index 2376235500..c4515c0ae0 100644 --- a/src/api/init.ts +++ b/src/api/init.ts @@ -53,6 +53,7 @@ import { createV2BlocksRouter } from './routes/v2/blocks'; import { getReqQuery } from './query-helpers'; import { createV2BurnBlocksRouter } from './routes/v2/burn-blocks'; import { createMempoolRouter } from './routes/v2/mempool'; +import { createV2SmartContractsRouter } from './routes/v2/smart-contracts'; export interface ApiServer { expressApp: express.Express; @@ -234,6 +235,7 @@ export async function startApiServer(opts: { const v2 = express.Router(); v2.use('/blocks', createV2BlocksRouter(datastore)); v2.use('/burn-blocks', createV2BurnBlocksRouter(datastore)); + v2.use('/smart-contracts', createV2SmartContractsRouter(datastore)); v2.use('/mempool', createMempoolRouter(datastore)); return v2; })() diff --git a/src/api/routes/v2/helpers.ts b/src/api/routes/v2/helpers.ts index 41d0ce2a2e..23ff0d18d3 100644 --- a/src/api/routes/v2/helpers.ts +++ b/src/api/routes/v2/helpers.ts @@ -1,6 +1,8 @@ -import { BurnBlock, NakamotoBlock } from 'docs/generated'; -import { DbBlock, DbBurnBlock } from '../../../datastore/common'; +import { BurnBlock, NakamotoBlock, SmartContractsStatusResponse } from 'docs/generated'; +import { DbBlock, DbBurnBlock, DbSmartContractStatus } from '../../../datastore/common'; import { unixEpochToIso } from '../../../helpers'; +import { SmartContractStatusParams } from './schemas'; +import { getTxStatusString } from '../../../api/controllers/db-controller'; export function parseDbNakamotoBlock(block: DbBlock): NakamotoBlock { const apiBlock: NakamotoBlock = { @@ -35,3 +37,27 @@ export function parseDbBurnBlock(block: DbBurnBlock): BurnBlock { }; return burnBlock; } + +export function parseDbSmartContractStatusArray( + params: SmartContractStatusParams, + status: DbSmartContractStatus[] +): SmartContractsStatusResponse { + const ids = new Set( + Array.isArray(params.contract_id) ? 
params.contract_id : [params.contract_id] + ); + const response: SmartContractsStatusResponse = {}; + for (const s of status) { + ids.delete(s.smart_contract_contract_id); + response[s.smart_contract_contract_id] = { + found: true, + result: { + contract_id: s.smart_contract_contract_id, + block_height: s.block_height, + status: getTxStatusString(s.status), + tx_id: s.tx_id, + }, + }; + } + for (const missingId of ids) response[missingId] = { found: false }; + return response; +} diff --git a/src/api/routes/v2/schemas.ts b/src/api/routes/v2/schemas.ts index 5be6a3b7e5..689c9bf7e7 100644 --- a/src/api/routes/v2/schemas.ts +++ b/src/api/routes/v2/schemas.ts @@ -125,3 +125,15 @@ const BlockParamsSchema = Type.Object( ); export type BlockParams = Static<typeof BlockParamsSchema>; export const CompiledBlockParams = ajv.compile(BlockParamsSchema); + +const SmartContractPrincipal = Type.RegExp( + /^[0123456789ABCDEFGHJKMNPQRSTVWXYZ]{28,41}\.[a-zA-Z]([a-zA-Z0-9]|[-_]){0,39}$/ +); +const SmartContractStatusParamsSchema = Type.Object( + { + contract_id: Type.Union([Type.Array(SmartContractPrincipal), SmartContractPrincipal]), + }, + { additionalProperties: false } +); +export type SmartContractStatusParams = Static<typeof SmartContractStatusParamsSchema>; +export const CompiledSmartContractStatusParams = ajv.compile(SmartContractStatusParamsSchema); diff --git a/src/api/routes/v2/smart-contracts.ts b/src/api/routes/v2/smart-contracts.ts new file mode 100644 index 0000000000..81a57e5285 --- /dev/null +++ b/src/api/routes/v2/smart-contracts.ts @@ -0,0 +1,30 @@ +import * as express from 'express'; +import { PgStore } from '../../../datastore/pg-store'; +import { getETagCacheHandler, setETagCacheHeaders } from '../../controllers/cache-controller'; +import { asyncHandler } from '../../async-handler'; +import { + validRequestQuery, + CompiledSmartContractStatusParams, + SmartContractStatusParams, +} from './schemas'; +import { parseDbSmartContractStatusArray } from './helpers'; + +export function createV2SmartContractsRouter(db: PgStore): express.Router { + const router = express.Router(); + const cacheHandler = getETagCacheHandler(db); + + router.get( + '/status', + cacheHandler, + asyncHandler(async (req, res) => { + if (!validRequestQuery(req, res, CompiledSmartContractStatusParams)) return; + const query = req.query as SmartContractStatusParams; + + const result = await db.v2.getSmartContractStatus(query); + setETagCacheHeaders(res); + res.json(parseDbSmartContractStatusArray(query, result)); + }) + ); + + return router; +} diff --git a/src/datastore/common.ts b/src/datastore/common.ts index 452e6005fa..7e6bb5abad 100644 --- a/src/datastore/common.ts +++ b/src/datastore/common.ts @@ -1555,3 +1555,10 @@ export enum IndexesState { Off = 0, On = 1, } + +export interface DbSmartContractStatus { + smart_contract_contract_id: string; + tx_id: string; + status: DbTxStatus; + block_height?: number; +} diff --git a/src/datastore/helpers.ts b/src/datastore/helpers.ts index a5aabbaa1e..fba14fdaa8 100644 --- a/src/datastore/helpers.ts +++ b/src/datastore/helpers.ts @@ -1,4 +1,4 @@ -import { parseEnum, unwrapOptionalProp } from '../helpers'; +import { getUintEnvOrDefault, parseEnum, unwrapOptionalProp } from '../helpers'; import { BlockQueryResult, ContractTxQueryResult, @@ -66,6 +66,7 @@ import { PgStoreEventEmitter } from './pg-store-event-emitter'; import { SyntheticPoxEventName } from '../pox-helpers'; import { logger } from '../logger'; import { PgSqlClient } from '@hirosystems/api-toolkit'; +import PQueue from 'p-queue'; export const TX_COLUMNS = [ 'tx_id', @@ -1335,3 
+1336,21 @@ export function newReOrgUpdatedEntities(): ReOrgUpdatedEntities { restoredMempoolTxs: 0, }; } + +/** + * Priority queue for parallel Postgres write query execution. This helps performance because it + * parallelizes the work postgres.js has to do when serializing JS types to PG types. + */ +export class PgWriteQueue { + readonly queue: PQueue; + constructor() { + const concurrency = Math.max(1, getUintEnvOrDefault('STACKS_BLOCK_DATA_INSERT_CONCURRENCY', 4)); + this.queue = new PQueue({ concurrency, autoStart: true }); + } + enqueue(task: Parameters<PQueue['add']>[0]): void { + void this.queue.add(task); + } + done(): Promise<void> { + return this.queue.onIdle(); + } +} diff --git a/src/datastore/pg-store-v2.ts b/src/datastore/pg-store-v2.ts index 53b63f4aca..1e7554594b 100644 --- a/src/datastore/pg-store-v2.ts +++ b/src/datastore/pg-store-v2.ts @@ -6,6 +6,7 @@ import { TransactionLimitParamSchema, BlockParams, BlockPaginationQueryParams, + SmartContractStatusParams, } from '../api/routes/v2/schemas'; import { InvalidRequestError, InvalidRequestErrorType } from '../errors'; import { normalizeHashString } from '../helpers'; @@ -16,6 +17,9 @@ import { DbTx, TxQueryResult, DbBurnBlock, + DbTxTypeId, + DbSmartContractStatus, + DbTxStatus, } from './common'; import { BLOCK_COLUMNS, parseBlockQueryResult, TX_COLUMNS, parseTxQueryResult } from './helpers'; @@ -230,4 +234,39 @@ export class PgStoreV2 extends BasePgStoreModule { if (blockQuery.count > 0) return blockQuery[0]; }); } + + async getSmartContractStatus(args: SmartContractStatusParams): Promise<DbSmartContractStatus[]> { + return await this.sqlTransaction(async sql => { + const statusArray: DbSmartContractStatus[] = []; + const contractArray = Array.isArray(args.contract_id) ? args.contract_id : [args.contract_id]; + + // Search confirmed txs. + const confirmed = await sql<DbSmartContractStatus[]>` + SELECT DISTINCT ON (smart_contract_contract_id) smart_contract_contract_id, tx_id, block_height, status + FROM txs + WHERE type_id IN ${sql([DbTxTypeId.SmartContract, DbTxTypeId.VersionedSmartContract])} + AND smart_contract_contract_id IN ${sql(contractArray)} + AND canonical = TRUE + AND microblock_canonical = TRUE + ORDER BY smart_contract_contract_id, block_height DESC, microblock_sequence DESC, tx_index DESC, status + `; + statusArray.push(...confirmed); + if (confirmed.count < contractArray.length) { + // Search mempool txs. 
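+ // Contracts with no confirmed deployment may still have a pending deployment in the mempool; the DISTINCT ON / ORDER BY nonce query below keeps the earliest pending tx per contract.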
+ const confirmedIds = confirmed.map(c => c.smart_contract_contract_id); + const remainingIds = contractArray.filter(c => !confirmedIds.includes(c)); + const mempool = await sql<DbSmartContractStatus[]>` + SELECT DISTINCT ON (smart_contract_contract_id) smart_contract_contract_id, tx_id, status + FROM mempool_txs + WHERE pruned = FALSE + AND type_id IN ${sql([DbTxTypeId.SmartContract, DbTxTypeId.VersionedSmartContract])} + AND smart_contract_contract_id IN ${sql(remainingIds)} + ORDER BY smart_contract_contract_id, nonce + `; + statusArray.push(...mempool); + } + + return statusArray; + }); + } } diff --git a/src/datastore/pg-write-store.ts b/src/datastore/pg-write-store.ts index 6694aea04b..91002f332a 100644 --- a/src/datastore/pg-write-store.ts +++ b/src/datastore/pg-write-store.ts @@ -1,3 +1,4 @@ +import * as assert from 'assert'; import { getOrAdd, I32_MAX, getIbdBlockHeight, getUintEnvOrDefault } from '../helpers'; import { DbBlock, @@ -73,6 +74,7 @@ import { TX_METADATA_TABLES, validateZonefileHash, newReOrgUpdatedEntities, + PgWriteQueue, } from './helpers'; import { PgNotifier } from './pg-notifier'; import { MIGRATIONS_DIR, PgStore } from './pg-store'; @@ -188,11 +190,7 @@ export class PgWriteStore extends PgStore { markBlockUpdateDataAsNonCanonical(data); } else { const txIds = data.txs.map(d => d.tx.tx_id); - const pruneRes = await this.pruneMempoolTxs(sql, txIds); - if (pruneRes.removedTxs.length > 0) - logger.debug( - `Removed ${pruneRes.removedTxs.length} txs from mempool table during new block ingestion` - ); + await this.pruneMempoolTxs(sql, txIds); } setTotalBlockUpdateDataExecutionCost(data); @@ -203,8 +201,8 @@ export class PgWriteStore extends PgStore { return !insertedMicroblockHashes.has(entry.tx.microblock_hash); }); - // When processing an immediately-non-canonical block, do not orphan and possible existing microblocks - // which may be still considered canonical by the canonical block at this height. + // When processing an immediately-non-canonical block, do not orphan any possibly-existing + // microblocks, which may still be considered canonical by the canonical block at this height. if (isCanonical) { const { acceptedMicroblockTxs, orphanedMicroblockTxs } = await this.updateMicroCanonical( sql, @@ -220,7 +218,8 @@ export class PgWriteStore extends PgStore { } ); - // Identify any micro-orphaned txs that also didn't make it into this anchor block, and restore them into the mempool + // Identify any micro-orphaned txs that also didn't make it into this anchor block, and + // restore them into the mempool const orphanedAndMissingTxs = orphanedMicroblockTxs.filter( tx => !data.txs.find(r => tx.tx_id === r.tx.tx_id) ); @@ -232,7 +231,8 @@ export class PgWriteStore extends PgStore { logger.info(`Restored micro-orphaned tx to mempool ${txId}`); }); - // Clear accepted microblock txs from the anchor-block update data to avoid duplicate inserts. + // Clear accepted microblock txs from the anchor-block update data to avoid duplicate + // inserts. 
batchedTxData = batchedTxData.filter(entry => { const matchingTx = acceptedMicroblockTxs.find(tx => tx.tx_id === entry.tx.tx_id); return !matchingTx; @@ -251,27 +251,35 @@ export class PgWriteStore extends PgStore { } } if ((await this.updateBlock(sql, data.block)) !== 0) { - await this.updateMinerRewards(sql, data.minerRewards); - for (const entry of batchedTxData) { - await this.updateTx(sql, entry.tx); - await this.updateStxEvents(sql, entry.tx, entry.stxEvents); - await this.updatePrincipalStxTxs(sql, entry.tx, entry.stxEvents); - await this.updateSmartContractEvents(sql, entry.tx, entry.contractLogEvents); - await this.updatePoxSyntheticEvents(sql, entry.tx, 'pox2_events', entry.pox2Events); - await this.updatePoxSyntheticEvents(sql, entry.tx, 'pox3_events', entry.pox3Events); - await this.updatePoxSyntheticEvents(sql, entry.tx, 'pox4_events', entry.pox4Events); - await this.updateStxLockEvents(sql, entry.tx, entry.stxLockEvents); - await this.updateFtEvents(sql, entry.tx, entry.ftEvents); - await this.updateNftEvents(sql, entry.tx, entry.nftEvents); - await this.updateSmartContracts(sql, entry.tx, entry.smartContracts); - await this.updateNamespaces(sql, entry.tx, entry.namespaces); - await this.updateNames(sql, entry.tx, entry.names); - } - const mempoolGarbageResults = await this.deleteGarbageCollectedMempoolTxs(sql); - if (mempoolGarbageResults.deletedTxs.length > 0) { - logger.debug(`Garbage collected ${mempoolGarbageResults.deletedTxs.length} mempool txs`); + const q = new PgWriteQueue(); + q.enqueue(() => this.updateMinerRewards(sql, data.minerRewards)); + if (batchedTxData.length > 0) { + q.enqueue(() => + this.updateTx( + sql, + batchedTxData.map(b => b.tx) + ) + ); + q.enqueue(() => this.updateStxEvents(sql, batchedTxData)); + q.enqueue(() => this.updatePrincipalStxTxs(sql, batchedTxData)); + q.enqueue(() => this.updateSmartContractEvents(sql, batchedTxData)); + q.enqueue(() => this.updatePoxSyntheticEvents(sql, 'pox2_events', batchedTxData)); + q.enqueue(() => this.updatePoxSyntheticEvents(sql, 'pox3_events', batchedTxData)); + q.enqueue(() => this.updatePoxSyntheticEvents(sql, 'pox4_events', batchedTxData)); + q.enqueue(() => this.updateStxLockEvents(sql, batchedTxData)); + q.enqueue(() => this.updateFtEvents(sql, batchedTxData)); + for (const entry of batchedTxData) { + q.enqueue(() => this.updateNftEvents(sql, entry.tx, entry.nftEvents)); + q.enqueue(() => this.updateSmartContracts(sql, entry.tx, entry.smartContracts)); + q.enqueue(() => this.updateNamespaces(sql, entry.tx, entry.namespaces)); + q.enqueue(() => this.updateNames(sql, entry.tx, entry.names)); + } } - garbageCollectedMempoolTxs = mempoolGarbageResults.deletedTxs; + q.enqueue(async () => { + const mempoolGarbageResults = await this.deleteGarbageCollectedMempoolTxs(sql); + garbageCollectedMempoolTxs = mempoolGarbageResults.deletedTxs; + }); + await q.done(); } if (!this.isEventReplay) { @@ -742,14 +750,37 @@ export class PgWriteStore extends PgStore { logger.info('Updated block zero boot data', tablesUpdates); } - async updatePoxSyntheticEvents( - sql: PgSqlClient, - tx: DbTx, - poxTable: PoxSyntheticEventTable, - events: DbPoxSyntheticEvent[] - ) { - for (const batch of batchIterate(events, INSERT_BATCH_SIZE)) { - const values = batch.map(event => { + async updatePoxSyntheticEvents< + T extends PoxSyntheticEventTable, + Entry extends { tx: DbTx } & ('pox2_events' extends T + ? { pox2Events: DbPoxSyntheticEvent[] } + : 'pox3_events' extends T + ? 
{ pox3Events: DbPoxSyntheticEvent[] } + : 'pox4_events' extends T + ? { pox4Events: DbPoxSyntheticEvent[] } + : never) + >(sql: PgSqlClient, poxTable: T, entries: Entry[]) { + const values: PoxSyntheticEventInsertValues[] = []; + for (const entry of entries) { + let events: DbPoxSyntheticEvent[] | null = null; + switch (poxTable) { + case 'pox2_events': + assert('pox2Events' in entry); + events = entry.pox2Events; + break; + case 'pox3_events': + assert('pox3Events' in entry); + events = entry.pox3Events; + break; + case 'pox4_events': + assert('pox4Events' in entry); + events = entry.pox4Events; + break; + default: + throw new Error(`unknown pox table: ${poxTable}`); + } + const tx = entry.tx; + for (const event of events ?? []) { const value: PoxSyntheticEventInsertValues = { event_index: event.event_index, tx_id: event.tx_id, @@ -782,6 +813,7 @@ export class PgWriteStore extends PgStore { reward_cycle: null, amount_ustx: null, }; + // Set event-specific columns switch (event.name) { case SyntheticPoxEventName.HandleUnlock: { @@ -858,63 +890,78 @@ export class PgWriteStore extends PgStore { ); } } - return value; - }); - await sql` - INSERT INTO ${sql(poxTable)} ${sql(values)} + values.push(value); + } + } + for (const batch of batchIterate(values, INSERT_BATCH_SIZE)) { + const res = await sql` + INSERT INTO ${sql(String(poxTable))} ${sql(batch)} `; + assert(res.count === batch.length, `Expecting ${batch.length} inserts, got ${res.count}`); } } - async updateStxLockEvents(sql: PgSqlClient, tx: DbTx, events: DbStxLockEvent[]) { - for (const batch of batchIterate(events, INSERT_BATCH_SIZE)) { - const values: StxLockEventInsertValues[] = batch.map(event => ({ - event_index: event.event_index, - tx_id: event.tx_id, - tx_index: event.tx_index, - block_height: event.block_height, - index_block_hash: tx.index_block_hash, - parent_index_block_hash: tx.parent_index_block_hash, - microblock_hash: tx.microblock_hash, - microblock_sequence: tx.microblock_sequence, - microblock_canonical: tx.microblock_canonical, - canonical: event.canonical, - locked_amount: event.locked_amount.toString(), - unlock_height: event.unlock_height, - locked_address: event.locked_address, - contract_name: event.contract_name, - })); - await sql` - INSERT INTO stx_lock_events ${sql(values)} + async updateStxLockEvents( + sql: PgSqlClient, + entries: { tx: DbTx; stxLockEvents: DbStxLockEvent[] }[] + ) { + const values: StxLockEventInsertValues[] = []; + for (const { tx, stxLockEvents } of entries) { + for (const event of stxLockEvents) { + values.push({ + event_index: event.event_index, + tx_id: event.tx_id, + tx_index: event.tx_index, + block_height: event.block_height, + index_block_hash: tx.index_block_hash, + parent_index_block_hash: tx.parent_index_block_hash, + microblock_hash: tx.microblock_hash, + microblock_sequence: tx.microblock_sequence, + microblock_canonical: tx.microblock_canonical, + canonical: event.canonical, + locked_amount: event.locked_amount.toString(), + unlock_height: event.unlock_height, + locked_address: event.locked_address, + contract_name: event.contract_name, + }); + } + } + for (const batch of batchIterate(values, INSERT_BATCH_SIZE)) { + const res = await sql` + INSERT INTO stx_lock_events ${sql(batch)} `; + assert(res.count === batch.length, `Expecting ${batch.length} inserts, got ${res.count}`); } } - async updateStxEvents(sql: PgSqlClient, tx: DbTx, events: DbStxEvent[]) { - for (const eventBatch of batchIterate(events, INSERT_BATCH_SIZE)) { - const values: StxEventInsertValues[] = 
eventBatch.map(event => ({ - event_index: event.event_index, - tx_id: event.tx_id, - tx_index: event.tx_index, - block_height: event.block_height, - index_block_hash: tx.index_block_hash, - parent_index_block_hash: tx.parent_index_block_hash, - microblock_hash: tx.microblock_hash, - microblock_sequence: tx.microblock_sequence, - microblock_canonical: tx.microblock_canonical, - canonical: event.canonical, - asset_event_type_id: event.asset_event_type_id, - sender: event.sender ?? null, - recipient: event.recipient ?? null, - amount: event.amount, - memo: event.memo ?? null, - })); + async updateStxEvents(sql: PgSqlClient, entries: { tx: DbTx; stxEvents: DbStxEvent[] }[]) { + const values: StxEventInsertValues[] = []; + for (const { tx, stxEvents } of entries) { + for (const event of stxEvents) { + values.push({ + event_index: event.event_index, + tx_id: event.tx_id, + tx_index: event.tx_index, + block_height: event.block_height, + index_block_hash: tx.index_block_hash, + parent_index_block_hash: tx.parent_index_block_hash, + microblock_hash: tx.microblock_hash, + microblock_sequence: tx.microblock_sequence, + microblock_canonical: tx.microblock_canonical, + canonical: event.canonical, + asset_event_type_id: event.asset_event_type_id, + sender: event.sender ?? null, + recipient: event.recipient ?? null, + amount: event.amount, + memo: event.memo ?? null, + }); + } + } + for (const batch of batchIterate(values, INSERT_BATCH_SIZE)) { const res = await sql` - INSERT INTO stx_events ${sql(values)} + INSERT INTO stx_events ${sql(batch)} `; - if (res.count !== eventBatch.length) { - throw new Error(`Expected ${eventBatch.length} inserts, got ${res.count}`); - } + assert(res.count === batch.length, `Expecting ${batch.length} inserts, got ${res.count}`); } } @@ -922,45 +969,43 @@ export class PgWriteStore extends PgStore { * Update the `principal_stx_tx` table with the latest `tx_id`s that resulted in a STX * transfer relevant to a principal (stx address or contract id). 
* @param sql - DB client - * @param tx - Transaction - * @param events - Transaction STX events + * @param entries - list of txs with their associated STX events */ - async updatePrincipalStxTxs(sql: PgSqlClient, tx: DbTx, events: DbStxEvent[]) { - const insertPrincipalStxTxs = async (principals: string[]) => { - principals = [...new Set(principals)]; // Remove duplicates - const values: PrincipalStxTxsInsertValues[] = principals.map(principal => ({ - principal: principal, - tx_id: tx.tx_id, - block_height: tx.block_height, - index_block_hash: tx.index_block_hash, - microblock_hash: tx.microblock_hash, - microblock_sequence: tx.microblock_sequence, - tx_index: tx.tx_index, - canonical: tx.canonical, - microblock_canonical: tx.microblock_canonical, - })); + async updatePrincipalStxTxs(sql: PgSqlClient, entries: { tx: DbTx; stxEvents: DbStxEvent[] }[]) { + const values: PrincipalStxTxsInsertValues[] = []; + for (const { tx, stxEvents } of entries) { + const principals = new Set( + [ + tx.sender_address, + tx.token_transfer_recipient_address, + tx.contract_call_contract_id, + tx.smart_contract_contract_id, + ].filter((p): p is string => !!p) + ); + for (const event of stxEvents) { + if (event.sender) principals.add(event.sender); + if (event.recipient) principals.add(event.recipient); + } + for (const principal of principals) { + values.push({ + principal: principal, + tx_id: tx.tx_id, + block_height: tx.block_height, + index_block_hash: tx.index_block_hash, + microblock_hash: tx.microblock_hash, + microblock_sequence: tx.microblock_sequence, + tx_index: tx.tx_index, + canonical: tx.canonical, + microblock_canonical: tx.microblock_canonical, + }); + } + } + + for (const eventBatch of batchIterate(values, INSERT_BATCH_SIZE)) { await sql` - INSERT INTO principal_stx_txs ${sql(values)} + INSERT INTO principal_stx_txs ${sql(eventBatch)} ON CONFLICT ON CONSTRAINT unique_principal_tx_id_index_block_hash_microblock_hash DO NOTHING `; - }; - // Insert tx data - await insertPrincipalStxTxs( - [ - tx.sender_address, - tx.token_transfer_recipient_address, - tx.contract_call_contract_id, - tx.smart_contract_contract_id, - ].filter((p): p is string => !!p) // Remove undefined - ); - // Insert stx_event data - for (const eventBatch of batchIterate(events, INSERT_BATCH_SIZE)) { - const principals: string[] = []; - for (const event of eventBatch) { - if (event.sender) principals.push(event.sender); - if (event.recipient) principals.push(event.recipient); - } - await insertPrincipalStxTxs(principals); } } @@ -999,9 +1044,10 @@ export class PgWriteStore extends PgStore { ON CONFLICT ON CONSTRAINT unique_name_zonefile_hash_tx_id_index_block_hash DO UPDATE SET zonefile = EXCLUDED.zonefile `; - if (result.count !== zonefileValues.length) { - throw new Error(`Expected ${result.count} zonefile inserts, got ${zonefileValues.length}`); - } + assert( + result.count === zonefileValues.length, + `Expecting ${zonefileValues.length} zonefile inserts, got ${result.count}` + ); } async updateBatchSubdomains( @@ -1057,9 +1103,10 @@ export class PgWriteStore extends PgStore { microblock_sequence = EXCLUDED.microblock_sequence, microblock_canonical = EXCLUDED.microblock_canonical `; - if (result.count !== subdomainValues.length) { - throw new Error(`Expected ${subdomainValues.length} subdomain inserts, got ${result.count}`); - } + assert( + result.count === subdomainValues.length, + `Expecting ${subdomainValues.length} subdomain inserts, got ${result.count}` + ); } async resolveBnsSubdomains( @@ -1079,51 +1126,34 @@ export class PgWriteStore 
extends PgStore { }); } - async updateStxEvent(sql: PgSqlClient, tx: DbTx, event: DbStxEvent) { - const values: StxEventInsertValues = { - event_index: event.event_index, - tx_id: event.tx_id, - tx_index: event.tx_index, - block_height: event.block_height, - index_block_hash: tx.index_block_hash, - parent_index_block_hash: tx.parent_index_block_hash, - microblock_hash: tx.microblock_hash, - microblock_sequence: tx.microblock_sequence, - microblock_canonical: tx.microblock_canonical, - canonical: event.canonical, - asset_event_type_id: event.asset_event_type_id, - sender: event.sender ?? null, - recipient: event.recipient ?? null, - amount: event.amount, - memo: event.memo ?? null, - }; - await sql` - INSERT INTO stx_events ${sql(values)} - `; - } - - async updateFtEvents(sql: PgSqlClient, tx: DbTx, events: DbFtEvent[]) { - for (const batch of batchIterate(events, INSERT_BATCH_SIZE)) { - const values: FtEventInsertValues[] = batch.map(event => ({ - event_index: event.event_index, - tx_id: event.tx_id, - tx_index: event.tx_index, - block_height: event.block_height, - index_block_hash: tx.index_block_hash, - parent_index_block_hash: tx.parent_index_block_hash, - microblock_hash: tx.microblock_hash, - microblock_sequence: tx.microblock_sequence, - microblock_canonical: tx.microblock_canonical, - canonical: event.canonical, - asset_event_type_id: event.asset_event_type_id, - sender: event.sender ?? null, - recipient: event.recipient ?? null, - asset_identifier: event.asset_identifier, - amount: event.amount.toString(), - })); - await sql` - INSERT INTO ft_events ${sql(values)} + async updateFtEvents(sql: PgSqlClient, entries: { tx: DbTx; ftEvents: DbFtEvent[] }[]) { + const values: FtEventInsertValues[] = []; + for (const { tx, ftEvents } of entries) { + for (const event of ftEvents) { + values.push({ + event_index: event.event_index, + tx_id: event.tx_id, + tx_index: event.tx_index, + block_height: event.block_height, + index_block_hash: tx.index_block_hash, + parent_index_block_hash: tx.parent_index_block_hash, + microblock_hash: tx.microblock_hash, + microblock_sequence: tx.microblock_sequence, + microblock_canonical: tx.microblock_canonical, + canonical: event.canonical, + asset_event_type_id: event.asset_event_type_id, + sender: event.sender ?? null, + recipient: event.recipient ?? 
null, + asset_identifier: event.asset_identifier, + amount: event.amount.toString(), + }); + } + } + for (const batch of batchIterate(values, INSERT_BATCH_SIZE)) { + const res = await sql` + INSERT INTO ft_events ${sql(batch)} `; + assert(res.count === batch.length, `Expecting ${batch.length} inserts, got ${res.count}`); } } @@ -1222,29 +1252,35 @@ export class PgWriteStore extends PgStore { } } - async updateSmartContractEvents(sql: PgSqlClient, tx: DbTx, events: DbSmartContractEvent[]) { - for (const eventBatch of batchIterate(events, INSERT_BATCH_SIZE)) { - const values: SmartContractEventInsertValues[] = eventBatch.map(event => ({ - event_index: event.event_index, - tx_id: event.tx_id, - tx_index: event.tx_index, - block_height: event.block_height, - index_block_hash: tx.index_block_hash, - parent_index_block_hash: tx.parent_index_block_hash, - microblock_hash: tx.microblock_hash, - microblock_sequence: tx.microblock_sequence, - microblock_canonical: tx.microblock_canonical, - canonical: event.canonical, - contract_identifier: event.contract_identifier, - topic: event.topic, - value: event.value, - })); + async updateSmartContractEvents( + sql: PgSqlClient, + entries: { tx: DbTx; contractLogEvents: DbSmartContractEvent[] }[] + ) { + const values: SmartContractEventInsertValues[] = []; + for (const { tx, contractLogEvents } of entries) { + for (const event of contractLogEvents) { + values.push({ + event_index: event.event_index, + tx_id: event.tx_id, + tx_index: event.tx_index, + block_height: event.block_height, + index_block_hash: tx.index_block_hash, + parent_index_block_hash: tx.parent_index_block_hash, + microblock_hash: tx.microblock_hash, + microblock_sequence: tx.microblock_sequence, + microblock_canonical: tx.microblock_canonical, + canonical: event.canonical, + contract_identifier: event.contract_identifier, + topic: event.topic, + value: event.value, + }); + } + } + for (const batch of batchIterate(values, INSERT_BATCH_SIZE)) { const res = await sql` - INSERT INTO contract_logs ${sql(values)} + INSERT INTO contract_logs ${sql(batch)} `; - if (res.count !== eventBatch.length) { - throw new Error(`Expected ${eventBatch.length} inserts, got ${res.count}`); - } + assert(res.count === batch.length, `Expecting ${batch.length} inserts, got ${res.count}`); } } @@ -1371,8 +1407,10 @@ export class PgWriteStore extends PgStore { acceptedMicroblocks: string[]; orphanedMicroblocks: string[]; }> { - // Find the parent microblock if this anchor block points to one. If not, perform a sanity check for expected block headers in this case: - // > Anchored blocks that do not have parent microblock streams will have their parent microblock header hashes set to all 0's, and the parent microblock sequence number set to 0. + // Find the parent microblock if this anchor block points to one. If not, perform a sanity check + // for expected block headers in this case: Anchored blocks that do not have parent microblock + // streams will have their parent microblock header hashes set to all 0's, and the parent + // microblock sequence number set to 0. 
let acceptedMicroblockTip: DbMicroblock | undefined; if (BigInt(blockData.parentMicroblockHash) === 0n) { if (blockData.parentMicroblockSequence !== 0) { @@ -1530,8 +1568,9 @@ export class PgWriteStore extends PgStore { } } - async updateTx(sql: PgSqlClient, tx: DbTxRaw): Promise<number> { - const values: TxInsertValues = { + async updateTx(sql: PgSqlClient, txs: DbTxRaw | DbTxRaw[]): Promise<number> { + if (!Array.isArray(txs)) txs = [txs]; + const values: TxInsertValues[] = txs.map(tx => ({ tx_id: tx.tx_id, raw_tx: tx.raw_tx, tx_index: tx.tx_index, @@ -1587,12 +1626,17 @@ export class PgWriteStore extends PgStore { execution_cost_runtime: tx.execution_cost_runtime, execution_cost_write_count: tx.execution_cost_write_count, execution_cost_write_length: tx.execution_cost_write_length, - }; - const result = await sql` - INSERT INTO txs ${sql(values)} - ON CONFLICT ON CONSTRAINT unique_tx_id_index_block_hash_microblock_hash DO NOTHING - `; - return result.count; + })); + + let count = 0; + for (const eventBatch of batchIterate(values, INSERT_BATCH_SIZE)) { + const res = await sql` + INSERT INTO txs ${sql(eventBatch)} + ON CONFLICT ON CONSTRAINT unique_tx_id_index_block_hash_microblock_hash DO NOTHING + `; + count += res.count; + } + return count; } async insertDbMempoolTxs( @@ -1978,9 +2022,10 @@ export class PgWriteStore extends PgStore { const res = await sql` INSERT INTO token_offering_locked ${sql(lockedInfos, 'address', 'value', 'block')} `; - if (res.count !== lockedInfos.length) { - throw new Error(`Expected ${lockedInfos.length} inserts, got ${res.count}`); - } + assert( + res.count === lockedInfos.length, + `Expecting ${lockedInfos.length} inserts, got ${res.count}` + ); } catch (e: any) { logger.error(e, `Locked Info errors ${e.message}`); throw e; } @@ -2107,26 +2152,33 @@ export class PgWriteStore extends PgStore { } } - for (const entry of txs) { - const rowsUpdated = await this.updateTx(sql, entry.tx); - if (rowsUpdated !== 1) { - throw new Error( - `Unexpected amount of rows updated for microblock tx insert: ${rowsUpdated}` + if (txs.length > 0) { + const q = new PgWriteQueue(); + q.enqueue(async () => { + const rowsUpdated = await this.updateTx( + sql, + txs.map(t => t.tx) ); + if (rowsUpdated !== txs.length) + throw new Error( + `Unexpected number of rows updated for microblock tx insert: ${rowsUpdated}, expecting ${txs.length}` + ); + }); + q.enqueue(() => this.updateStxEvents(sql, txs)); + q.enqueue(() => this.updatePrincipalStxTxs(sql, txs)); + q.enqueue(() => this.updateSmartContractEvents(sql, txs)); + q.enqueue(() => this.updatePoxSyntheticEvents(sql, 'pox2_events', txs)); + q.enqueue(() => this.updatePoxSyntheticEvents(sql, 'pox3_events', txs)); + q.enqueue(() => this.updatePoxSyntheticEvents(sql, 'pox4_events', txs)); + q.enqueue(() => this.updateStxLockEvents(sql, txs)); + q.enqueue(() => this.updateFtEvents(sql, txs)); + for (const entry of txs) { + q.enqueue(() => this.updateNftEvents(sql, entry.tx, entry.nftEvents, true)); + q.enqueue(() => this.updateSmartContracts(sql, entry.tx, entry.smartContracts)); + q.enqueue(() => this.updateNamespaces(sql, entry.tx, entry.namespaces)); + q.enqueue(() => this.updateNames(sql, entry.tx, entry.names)); } - - await this.updateStxEvents(sql, entry.tx, entry.stxEvents); - await this.updatePrincipalStxTxs(sql, entry.tx, entry.stxEvents); - await this.updateSmartContractEvents(sql, entry.tx, entry.contractLogEvents); - await this.updatePoxSyntheticEvents(sql, 
@@ -2454,6 +2506,8 @@ export class PgWriteStore extends PgStore {
       )
       SELECT tx_id FROM pruned
     `;
+    const txIds = deletedTxResults.map(r => r.tx_id);
+    if (txIds.length > 0) logger.debug(`Garbage collected ${txIds.length} mempool txs`);
     return { deletedTxs: deletedTxResults.map(r => r.tx_id) };
   }
 
@@ -2463,181 +2517,199 @@
     canonical: boolean,
     updatedEntities: ReOrgUpdatedEntities
   ): Promise<{ txsMarkedCanonical: string[]; txsMarkedNonCanonical: string[] }> {
-    const txResult = await sql`
-      UPDATE txs
-      SET canonical = ${canonical}
-      WHERE index_block_hash = ${indexBlockHash} AND canonical != ${canonical}
-      RETURNING ${sql(TX_COLUMNS)}
-    `;
-    const txIds = txResult.map(row => parseTxQueryResult(row));
-    if (canonical) {
-      updatedEntities.markedCanonical.txs += txResult.length;
-    } else {
-      updatedEntities.markedNonCanonical.txs += txResult.length;
-    }
-    for (const txId of txIds) {
-      logger.debug(`Marked tx as ${canonical ? 'canonical' : 'non-canonical'}: ${txId.tx_id}`);
-    }
-    if (txIds.length) {
-      await sql`
-        UPDATE principal_stx_txs
+    const result: { txsMarkedCanonical: string[]; txsMarkedNonCanonical: string[] } = {
+      txsMarkedCanonical: [],
+      txsMarkedNonCanonical: [],
+    };
+
+    const q = new PgWriteQueue();
+    q.enqueue(async () => {
+      const txResult = await sql<{ tx_id: string }[]>`
+        UPDATE txs
         SET canonical = ${canonical}
-        WHERE tx_id IN ${sql(txIds.map(tx => tx.tx_id))}
-        AND index_block_hash = ${indexBlockHash} AND canonical != ${canonical}
+        WHERE index_block_hash = ${indexBlockHash} AND canonical != ${canonical}
+        RETURNING tx_id
       `;
-    }
-
-    const minerRewardResults = await sql`
-      UPDATE miner_rewards
-      SET canonical = ${canonical}
-      WHERE index_block_hash = ${indexBlockHash} AND canonical != ${canonical}
-    `;
-    if (canonical) {
-      updatedEntities.markedCanonical.minerRewards += minerRewardResults.count;
-    } else {
-      updatedEntities.markedNonCanonical.minerRewards += minerRewardResults.count;
-    }
-
-    const stxLockResults = await sql`
-      UPDATE stx_lock_events
-      SET canonical = ${canonical}
-      WHERE index_block_hash = ${indexBlockHash} AND canonical != ${canonical}
-    `;
-    if (canonical) {
-      updatedEntities.markedCanonical.stxLockEvents += stxLockResults.count;
-    } else {
-      updatedEntities.markedNonCanonical.stxLockEvents += stxLockResults.count;
-    }
-
-    const stxResults = await sql`
-      UPDATE stx_events
-      SET canonical = ${canonical}
-      WHERE index_block_hash = ${indexBlockHash} AND canonical != ${canonical}
-    `;
-    if (canonical) {
-      updatedEntities.markedCanonical.stxEvents += stxResults.count;
-    } else {
-      updatedEntities.markedNonCanonical.stxEvents += stxResults.count;
-    }
-
-    const ftResult = await sql`
-      UPDATE ft_events
-      SET canonical = ${canonical}
-      WHERE index_block_hash = ${indexBlockHash} AND canonical != ${canonical}
-    `;
-    if (canonical) {
-      updatedEntities.markedCanonical.ftEvents += ftResult.count;
-    } else {
-      updatedEntities.markedNonCanonical.ftEvents += ftResult.count;
-    }
-
-    const nftResult = await sql`
-      UPDATE nft_events
-      SET canonical = ${canonical}
-      WHERE index_block_hash = ${indexBlockHash} AND canonical != ${canonical}
-    `;
-    if (canonical) {
-      updatedEntities.markedCanonical.nftEvents += nftResult.count;
-    } else {
-      updatedEntities.markedNonCanonical.nftEvents += nftResult.count;
-    }
-    await this.updateNftCustodyFromReOrg(sql, {
-      index_block_hash: indexBlockHash,
-      microblocks: [],
+      const txIds = txResult.map(row => row.tx_id);
+      if (canonical) {
+        updatedEntities.markedCanonical.txs += txResult.count;
+        result.txsMarkedCanonical = txIds;
+      } else {
+        updatedEntities.markedNonCanonical.txs += txResult.count;
+        result.txsMarkedNonCanonical = txIds;
+      }
+      if (txResult.count)
+        await sql`
+          UPDATE principal_stx_txs
+          SET canonical = ${canonical}
+          WHERE tx_id IN ${sql(txIds)}
+          AND index_block_hash = ${indexBlockHash} AND canonical != ${canonical}
+        `;
     });
+    q.enqueue(async () => {
+      const minerRewardResults = await sql`
+        UPDATE miner_rewards
+        SET canonical = ${canonical}
+        WHERE index_block_hash = ${indexBlockHash} AND canonical != ${canonical}
+      `;
+      if (canonical) {
+        updatedEntities.markedCanonical.minerRewards += minerRewardResults.count;
+      } else {
+        updatedEntities.markedNonCanonical.minerRewards += minerRewardResults.count;
+      }
+    });
+    q.enqueue(async () => {
+      const stxLockResults = await sql`
+        UPDATE stx_lock_events
+        SET canonical = ${canonical}
+        WHERE index_block_hash = ${indexBlockHash} AND canonical != ${canonical}
+      `;
+      if (canonical) {
+        updatedEntities.markedCanonical.stxLockEvents += stxLockResults.count;
+      } else {
+        updatedEntities.markedNonCanonical.stxLockEvents += stxLockResults.count;
+      }
+    });
+    q.enqueue(async () => {
+      const stxResults = await sql`
+        UPDATE stx_events
+        SET canonical = ${canonical}
+        WHERE index_block_hash = ${indexBlockHash} AND canonical != ${canonical}
+      `;
+      if (canonical) {
+        updatedEntities.markedCanonical.stxEvents += stxResults.count;
+      } else {
+        updatedEntities.markedNonCanonical.stxEvents += stxResults.count;
+      }
+    });
+    q.enqueue(async () => {
+      const ftResult = await sql`
+        UPDATE ft_events
+        SET canonical = ${canonical}
+        WHERE index_block_hash = ${indexBlockHash} AND canonical != ${canonical}
+      `;
+      if (canonical) {
+        updatedEntities.markedCanonical.ftEvents += ftResult.count;
+      } else {
+        updatedEntities.markedNonCanonical.ftEvents += ftResult.count;
+      }
+    });
+    q.enqueue(async () => {
+      const nftResult = await sql`
+        UPDATE nft_events
+        SET canonical = ${canonical}
+        WHERE index_block_hash = ${indexBlockHash} AND canonical != ${canonical}
+      `;
+      if (canonical) {
+        updatedEntities.markedCanonical.nftEvents += nftResult.count;
+      } else {
+        updatedEntities.markedNonCanonical.nftEvents += nftResult.count;
+      }
+      if (nftResult.count)
+        await this.updateNftCustodyFromReOrg(sql, {
+          index_block_hash: indexBlockHash,
+          microblocks: [],
+        });
+    });
+    q.enqueue(async () => {
+      const pox2Result = await sql`
+        UPDATE pox2_events
+        SET canonical = ${canonical}
+        WHERE index_block_hash = ${indexBlockHash} AND canonical != ${canonical}
+      `;
+      if (canonical) {
+        updatedEntities.markedCanonical.pox2Events += pox2Result.count;
+      } else {
+        updatedEntities.markedNonCanonical.pox2Events += pox2Result.count;
+      }
+    });
+    q.enqueue(async () => {
+      const pox3Result = await sql`
+        UPDATE pox3_events
+        SET canonical = ${canonical}
+        WHERE index_block_hash = ${indexBlockHash} AND canonical != ${canonical}
+      `;
+      if (canonical) {
+        updatedEntities.markedCanonical.pox3Events += pox3Result.count;
+      } else {
+        updatedEntities.markedNonCanonical.pox3Events += pox3Result.count;
+      }
+    });
+    q.enqueue(async () => {
+      const pox4Result = await sql`
+        UPDATE pox4_events
+        SET canonical = ${canonical}
+        WHERE index_block_hash = ${indexBlockHash} AND canonical != ${canonical}
+      `;
+      if (canonical) {
+        updatedEntities.markedCanonical.pox4Events += pox4Result.count;
+      } else {
+        updatedEntities.markedNonCanonical.pox4Events += pox4Result.count;
+      }
+    });
+    q.enqueue(async () => {
+      const contractLogResult = await sql`
+        UPDATE contract_logs
+        SET canonical = ${canonical}
+        WHERE index_block_hash = ${indexBlockHash} AND canonical != ${canonical}
+      `;
+      if (canonical) {
+        updatedEntities.markedCanonical.contractLogs += contractLogResult.count;
+      } else {
+        updatedEntities.markedNonCanonical.contractLogs += contractLogResult.count;
+      }
+    });
+    q.enqueue(async () => {
+      const smartContractResult = await sql`
+        UPDATE smart_contracts
+        SET canonical = ${canonical}
+        WHERE index_block_hash = ${indexBlockHash} AND canonical != ${canonical}
+      `;
+      if (canonical) {
+        updatedEntities.markedCanonical.smartContracts += smartContractResult.count;
+      } else {
+        updatedEntities.markedNonCanonical.smartContracts += smartContractResult.count;
+      }
+    });
+    q.enqueue(async () => {
+      const nameResult = await sql`
+        UPDATE names
+        SET canonical = ${canonical}
+        WHERE index_block_hash = ${indexBlockHash} AND canonical != ${canonical}
+      `;
+      if (canonical) {
+        updatedEntities.markedCanonical.names += nameResult.count;
+      } else {
+        updatedEntities.markedNonCanonical.names += nameResult.count;
+      }
+    });
+    q.enqueue(async () => {
+      const namespaceResult = await sql`
+        UPDATE namespaces
+        SET canonical = ${canonical}
+        WHERE index_block_hash = ${indexBlockHash} AND canonical != ${canonical}
+      `;
+      if (canonical) {
+        updatedEntities.markedCanonical.namespaces += namespaceResult.count;
+      } else {
+        updatedEntities.markedNonCanonical.namespaces += namespaceResult.count;
+      }
+    });
+    q.enqueue(async () => {
+      const subdomainResult = await sql`
+        UPDATE subdomains
+        SET canonical = ${canonical}
+        WHERE index_block_hash = ${indexBlockHash} AND canonical != ${canonical}
+      `;
+      if (canonical) {
+        updatedEntities.markedCanonical.subdomains += subdomainResult.count;
+      } else {
+        updatedEntities.markedNonCanonical.subdomains += subdomainResult.count;
+      }
+    });
+    await q.done();
 
-    const pox2Result = await sql`
-      UPDATE pox2_events
-      SET canonical = ${canonical}
-      WHERE index_block_hash = ${indexBlockHash} AND canonical != ${canonical}
-    `;
-    if (canonical) {
-      updatedEntities.markedCanonical.pox2Events += pox2Result.count;
-    } else {
-      updatedEntities.markedNonCanonical.pox2Events += pox2Result.count;
-    }
-
-    const pox3Result = await sql`
-      UPDATE pox3_events
-      SET canonical = ${canonical}
-      WHERE index_block_hash = ${indexBlockHash} AND canonical != ${canonical}
-    `;
-    if (canonical) {
-      updatedEntities.markedCanonical.pox3Events += pox3Result.count;
-    } else {
-      updatedEntities.markedNonCanonical.pox3Events += pox3Result.count;
-    }
-
-    const pox4Result = await sql`
-      UPDATE pox4_events
-      SET canonical = ${canonical}
-      WHERE index_block_hash = ${indexBlockHash} AND canonical != ${canonical}
-    `;
-    if (canonical) {
-      updatedEntities.markedCanonical.pox4Events += pox4Result.count;
-    } else {
-      updatedEntities.markedNonCanonical.pox4Events += pox4Result.count;
-    }
-
-    const contractLogResult = await sql`
-      UPDATE contract_logs
-      SET canonical = ${canonical}
-      WHERE index_block_hash = ${indexBlockHash} AND canonical != ${canonical}
-    `;
-    if (canonical) {
-      updatedEntities.markedCanonical.contractLogs += contractLogResult.count;
-    } else {
-      updatedEntities.markedNonCanonical.contractLogs += contractLogResult.count;
-    }
-
-    const smartContractResult = await sql`
-      UPDATE smart_contracts
-      SET canonical = ${canonical}
-      WHERE index_block_hash = ${indexBlockHash} AND canonical != ${canonical}
-    `;
-    if (canonical) {
-      updatedEntities.markedCanonical.smartContracts += smartContractResult.count;
-    } else {
-      updatedEntities.markedNonCanonical.smartContracts += smartContractResult.count;
-    }
-
-    const nameResult = await sql`
-      UPDATE names
-      SET canonical = ${canonical}
-      WHERE index_block_hash = ${indexBlockHash} AND canonical != ${canonical}
-    `;
-    if (canonical) {
-      updatedEntities.markedCanonical.names += nameResult.count;
-    } else {
-      updatedEntities.markedNonCanonical.names += nameResult.count;
-    }
-
-    const namespaceResult = await sql`
-      UPDATE namespaces
-      SET canonical = ${canonical}
-      WHERE index_block_hash = ${indexBlockHash} AND canonical != ${canonical}
-    `;
-    if (canonical) {
-      updatedEntities.markedCanonical.namespaces += namespaceResult.count;
-    } else {
-      updatedEntities.markedNonCanonical.namespaces += namespaceResult.count;
-    }
-
-    const subdomainResult = await sql`
-      UPDATE subdomains
-      SET canonical = ${canonical}
-      WHERE index_block_hash = ${indexBlockHash} AND canonical != ${canonical}
-    `;
-    if (canonical) {
-      updatedEntities.markedCanonical.subdomains += subdomainResult.count;
-    } else {
-      updatedEntities.markedNonCanonical.subdomains += subdomainResult.count;
-    }
-
-    return {
-      txsMarkedCanonical: canonical ? txIds.map(t => t.tx_id) : [],
-      txsMarkedNonCanonical: canonical ? [] : txIds.map(t => t.tx_id),
-    };
+    return result;
   }
 
   async restoreOrphanedChain(
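Editor's note: the thirteen enqueue blocks above share one shape: flip `canonical` on a table, then bump the matching counter. A hypothetical factoring (not in the diff, using the module's existing PgSqlClient and ReOrgUpdatedEntities types) that shows the pattern once:

// Hypothetical helper: one canonical-flip block, generalized. Assumes the
// markedCanonical and markedNonCanonical records share the same numeric keys.
function enqueueCanonicalUpdate(
  q: { enqueue(task: () => Promise<void>): void },
  sql: PgSqlClient,
  updatedEntities: ReOrgUpdatedEntities,
  indexBlockHash: string,
  canonical: boolean,
  table: string,
  counter: keyof ReOrgUpdatedEntities['markedCanonical']
): void {
  q.enqueue(async () => {
    // sql(table) interpolates a quoted identifier in postgres.js.
    const res = await sql`
      UPDATE ${sql(table)}
      SET canonical = ${canonical}
      WHERE index_block_hash = ${indexBlockHash} AND canonical != ${canonical}
    `;
    const bucket = canonical ? updatedEntities.markedCanonical : updatedEntities.markedNonCanonical;
    bucket[counter] += res.count;
  });
}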
diff --git a/src/event-replay/parquet-based/importers/new-block-importer.ts b/src/event-replay/parquet-based/importers/new-block-importer.ts
index ff7e76a005..a0b3ba4f82 100644
--- a/src/event-replay/parquet-based/importers/new-block-importer.ts
+++ b/src/event-replay/parquet-based/importers/new-block-importer.ts
@@ -376,7 +376,7 @@ const populateBatchInserters = (db: PgWriteStore) => {
 
   const insertStxLockEvents = async (dbData: DataStoreBlockUpdateData) => {
     for (const entry of dbData.txs) {
-      await db.updateStxLockEvents(db.sql, entry.tx, entry.stxLockEvents);
+      await db.updateStxLockEvents(db.sql, [entry]);
     }
   };
 
@@ -386,19 +386,19 @@ const populateBatchInserters = (db: PgWriteStore) => {
 
   const insertPox2Events = async (dbData: DataStoreBlockUpdateData) => {
     for (const entry of dbData.txs) {
-      await db.updatePoxSyntheticEvents(db.sql, entry.tx, 'pox2_events', entry.pox2Events);
+      await db.updatePoxSyntheticEvents(db.sql, 'pox2_events', [entry]);
     }
   };
 
   const insertPox3Events = async (dbData: DataStoreBlockUpdateData) => {
     for (const entry of dbData.txs) {
-      await db.updatePoxSyntheticEvents(db.sql, entry.tx, 'pox3_events', entry.pox3Events);
+      await db.updatePoxSyntheticEvents(db.sql, 'pox3_events', [entry]);
    }
   };
 
   const insertPox4Events = async (dbData: DataStoreBlockUpdateData) => {
     for (const entry of dbData.txs) {
-      await db.updatePoxSyntheticEvents(db.sql, entry.tx, 'pox4_events', entry.pox4Events);
+      await db.updatePoxSyntheticEvents(db.sql, 'pox4_events', [entry]);
     }
   };
 
diff --git a/src/event-stream/event-server.ts b/src/event-stream/event-server.ts
index 56b6b648d9..0082604f25 100644
--- a/src/event-stream/event-server.ts
+++ b/src/event-stream/event-server.ts
@@ -46,6 +46,7 @@ import {
   CoreNodeMsgBlockData,
   parseMicroblocksFromTxs,
   isPoxPrintEvent,
+  newCoreNodeBlockEventCounts,
 } from './reader';
 import {
   decodeTransaction,
@@ -230,6 +231,7 @@ async function handleBlockMessage(
   db: PgWriteStore
 ): Promise<void> {
   const ingestionTimer = stopwatch();
+  const counts = newCoreNodeBlockEventCounts();
   const parsedTxs: CoreNodeParsedTxMessage[] = [];
   const blockData: CoreNodeMsgBlockData = {
     ...msg,
@@ -238,8 +240,42 @@ async function handleBlockMessage(
     const parsedTx = parseMessageTransaction(chainId, item, blockData, msg.events);
     if (parsedTx) {
       parsedTxs.push(parsedTx);
+      counts.tx_total += 1;
+      switch (parsedTx.parsed_tx.payload.type_id) {
+        case TxPayloadTypeID.Coinbase:
+          counts.txs.coinbase += 1;
+          break;
+        case TxPayloadTypeID.CoinbaseToAltRecipient:
+          counts.txs.coinbase_to_alt_recipient += 1;
+          break;
+        case TxPayloadTypeID.ContractCall:
+          counts.txs.contract_call += 1;
+          break;
+        case TxPayloadTypeID.NakamotoCoinbase:
+          counts.txs.nakamoto_coinbase += 1;
+          break;
+        case TxPayloadTypeID.PoisonMicroblock:
+          counts.txs.poison_microblock += 1;
+          break;
+        case TxPayloadTypeID.SmartContract:
+          counts.txs.smart_contract += 1;
+          break;
+        case TxPayloadTypeID.TenureChange:
+          counts.txs.tenure_change += 1;
+          break;
+        case TxPayloadTypeID.TokenTransfer:
+          counts.txs.token_transfer += 1;
+          break;
+        case TxPayloadTypeID.VersionedSmartContract:
+          counts.txs.versioned_smart_contract += 1;
+          break;
+      }
     }
   });
+  for (const event of msg.events) {
+    counts.event_total += 1;
+    counts.events[event.type] += 1;
+  }
 
   const dbBlock: DbBlock = {
     canonical: true,
@@ -281,6 +317,7 @@ async function handleBlockMessage(
       tx_fees_streamed_produced: BigInt(minerReward.tx_fees_streamed_produced),
     };
     dbMinerRewards.push(dbMinerReward);
+    counts.miner_rewards += 1;
   }
 
   logger.debug(`Received ${dbMinerRewards.length} matured miner rewards`);
@@ -304,18 +341,10 @@ async function handleBlockMessage(
       index_block_hash: msg.index_block_hash,
       block_hash: msg.block_hash,
     };
+    counts.microblocks += 1;
     return microblock;
   });
 
-  parsedTxs.forEach(tx => {
-    logger.debug(`Received anchor block mined tx: ${tx.core_tx.txid}`);
-    logger.info('Transaction confirmed', {
-      txid: tx.core_tx.txid,
-      in_microblock: tx.microblock_hash != '',
-      stacks_height: dbBlock.block_height,
-    });
-  });
-
   const dbData: DataStoreBlockUpdateData = {
     block: dbBlock,
     microblocks: dbMicroblocks,
@@ -328,7 +357,10 @@ async function handleBlockMessage(
   await db.update(dbData);
 
   const ingestionTime = ingestionTimer.getElapsed();
-  logger.info(`Ingested block ${msg.block_height} (${msg.block_hash}) in ${ingestionTime}ms`);
+  logger.info(
+    counts,
+    `Ingested block ${msg.block_height} (${msg.block_hash}) in ${ingestionTime}ms`
+  );
 }
 
 function parseDataStoreTxEventData(
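Editor's note: the payload-type switch above silently skips any variant it does not name. If the nine cases are in fact exhaustive over TxPayloadTypeID, a hypothetical `default` guard (not in the diff) would turn future enum additions into compile errors:

// Hypothetical exhaustiveness guard: call from a `default` branch. When every
// case is handled, the switched value narrows to `never` and this type-checks;
// a new TxPayloadTypeID variant makes the call a compile error.
function assertUnreachable(payload: never): never {
  throw new Error(`Unhandled tx payload type: ${String(payload)}`);
}

Whether an unhandled variant should throw or merely log at runtime is a separate choice; the value here is the compile-time check.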
diff --git a/src/event-stream/reader.ts b/src/event-stream/reader.ts
index 1a2eb06374..7ebb922b3e 100644
--- a/src/event-stream/reader.ts
+++ b/src/event-stream/reader.ts
@@ -847,3 +847,67 @@ export function isPoxPrintEvent(event: SmartContractEvent): boolean {
   if (event.contract_event.topic !== 'print') return false;
   return PoxContractIdentifiers.includes(event.contract_event.contract_identifier);
 }
+
+interface CoreNodeBlockEventCounts {
+  microblocks: number;
+  tx_total: number;
+  txs: {
+    token_transfer: number;
+    smart_contract: number;
+    contract_call: number;
+    poison_microblock: number;
+    coinbase: number;
+    coinbase_to_alt_recipient: number;
+    versioned_smart_contract: number;
+    tenure_change: number;
+    nakamoto_coinbase: number;
+  };
+  event_total: number;
+  events: {
+    contract_event: number;
+    stx_transfer_event: number;
+    stx_mint_event: number;
+    stx_burn_event: number;
+    stx_lock_event: number;
+    nft_transfer_event: number;
+    nft_mint_event: number;
+    nft_burn_event: number;
+    ft_transfer_event: number;
+    ft_mint_event: number;
+    ft_burn_event: number;
+  };
+  miner_rewards: number;
+}
+
+export function newCoreNodeBlockEventCounts(): CoreNodeBlockEventCounts {
+  return {
+    microblocks: 0,
+    tx_total: 0,
+    txs: {
+      token_transfer: 0,
+      smart_contract: 0,
+      contract_call: 0,
+      poison_microblock: 0,
+      coinbase: 0,
+      coinbase_to_alt_recipient: 0,
+      versioned_smart_contract: 0,
+      tenure_change: 0,
+      nakamoto_coinbase: 0,
+    },
+    event_total: 0,
+    events: {
+      contract_event: 0,
+      stx_transfer_event: 0,
+      stx_mint_event: 0,
+      stx_burn_event: 0,
+      stx_lock_event: 0,
+      nft_transfer_event: 0,
+      nft_mint_event: 0,
+      nft_burn_event: 0,
+      ft_transfer_event: 0,
+      ft_mint_event: 0,
+      ft_burn_event: 0,
+    },
+    miner_rewards: 0,
+  };
+}
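Editor's note: the indexed increment in handleBlockMessage (`counts.events[event.type] += 1`) only type-checks because each core node event's `type` tag is assumed to equal a key of the `events` record above. A small sketch making that assumption explicit:

// Editor's sketch: derive the accepted tags from the counts shape, so a tag
// that is not a key of CoreNodeBlockEventCounts['events'] is rejected.
type CoreNodeEventType = keyof CoreNodeBlockEventCounts['events'];

function tallyEvent(counts: CoreNodeBlockEventCounts, eventType: CoreNodeEventType): void {
  counts.event_total += 1;
  counts.events[eventType] += 1;
}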
diff --git a/src/tests/datastore-tests.ts b/src/tests/datastore-tests.ts
index f5466b7762..54012798ae 100644
--- a/src/tests/datastore-tests.ts
+++ b/src/tests/datastore-tests.ts
@@ -167,7 +167,7 @@ describe('postgres datastore', () => {
       createStxEvent('addrA', 'addrC', 35),
     ];
     for (const event of events) {
-      await db.updateStxEvent(client, tx, event);
+      await db.updateStxEvents(client, [{ tx, stxEvents: [event] }]);
     }
 
     const createStxLockEvent = (
@@ -195,7 +195,7 @@ describe('postgres datastore', () => {
       createStxLockEvent('addrA', 222n, 1),
       createStxLockEvent('addrB', 333n, 1),
     ];
-    await db.updateStxLockEvents(client, tx, stxLockEvents);
+    await db.updateStxLockEvents(client, [{ tx, stxLockEvents }]);
     await db.updateTx(client, tx);
     await db.updateTx(client, tx2);
 
@@ -3546,7 +3546,7 @@ describe('postgres datastore', () => {
     }
 
     // insert stx lock events directly
-    await db.updateStxLockEvents(client, tx1, [stxLockEvent1]);
+    await db.updateStxLockEvents(client, [{ tx: tx1, stxLockEvents: [stxLockEvent1] }]);
 
     const block5: DbBlock = {
       block_hash: '0x55',
diff --git a/src/tests/other-tests.ts b/src/tests/other-tests.ts
index d64bf877de..19489d4d47 100644
--- a/src/tests/other-tests.ts
+++ b/src/tests/other-tests.ts
@@ -157,7 +157,7 @@ describe('other tests', () => {
       event_type: DbEventTypeId.StxAsset,
       amount: 10_000_000_000_000n,
     };
-    await db.updateStxEvent(client, tx, stxBurnEvent1);
+    await db.updateStxEvents(client, [{ tx, stxEvents: [stxBurnEvent1] }]);
     const expectedTotalStx2 = stxMintEvent1.amount + stxMintEvent2.amount - stxBurnEvent1.amount;
     const result2 = await supertest(api.server).get(`/extended/v1/stx_supply`);
     expect(result2.status).toBe(200);
diff --git a/src/tests/search-tests.ts b/src/tests/search-tests.ts
index 5135630497..ada372a422 100644
--- a/src/tests/search-tests.ts
+++ b/src/tests/search-tests.ts
@@ -717,7 +717,7 @@ describe('search tests', () => {
       recipient: addr3,
       sender: 'none',
     };
-    await db.updateStxEvent(client, stxTx1, stxEvent1);
+    await db.updateStxEvents(client, [{ tx: stxTx1, stxEvents: [stxEvent1] }]);
 
     // test address as a stx event recipient
     const searchResult3 = await supertest(api.server).get(`/extended/v1/search/${addr3}`);
@@ -745,7 +745,7 @@ describe('search tests', () => {
       sender: addr4,
     };
 
-    await db.updateStxEvent(client, stxTx1, stxEvent2);
+    await db.updateStxEvents(client, [{ tx: stxTx1, stxEvents: [stxEvent2] }]);
 
     // test address as a stx event sender
     const searchResult4 = await supertest(api.server).get(`/extended/v1/search/${addr4}`);
@@ -773,7 +773,7 @@ describe('search tests', () => {
       recipient: addr5,
       sender: 'none',
     };
-    await db.updateFtEvents(client, stxTx1, [ftEvent1]);
+    await db.updateFtEvents(client, [{ tx: stxTx1, ftEvents: [ftEvent1] }]);
 
     // test address as a ft event recipient
     const searchResult5 = await supertest(api.server).get(`/extended/v1/search/${addr5}`);
@@ -801,7 +801,7 @@ describe('search tests', () => {
       recipient: 'none',
       sender: addr6,
     };
-    await db.updateFtEvents(client, stxTx1, [ftEvent2]);
+    await db.updateFtEvents(client, [{ tx: stxTx1, ftEvents: [ftEvent2] }]);
 
     // test address as a ft event sender
     const searchResult6 = await supertest(api.server).get(`/extended/v1/search/${addr6}`);
diff --git a/src/tests/smart-contract-tests.ts b/src/tests/smart-contract-tests.ts
index d3b32161af..2cab93e7ec 100644
--- a/src/tests/smart-contract-tests.ts
+++ b/src/tests/smart-contract-tests.ts
@@ -13,6 +13,7 @@ import { I32_MAX } from '../helpers';
 import { PgWriteStore } from '../datastore/pg-write-store';
 import { bufferToHex, PgSqlClient, waiter } from '@hirosystems/api-toolkit';
 import { migrate } from '../test-utils/test-helpers';
+import { TestBlockBuilder, testMempoolTx } from '../test-utils/test-builders';
 
 describe('smart contract tests', () => {
   let db: PgWriteStore;
@@ -1715,4 +1716,112 @@ describe('smart contract tests', () => {
     );
     expect(query.status).toBe(431);
   });
+
+  test('status for multiple contracts', async () => {
+    const block1 = new TestBlockBuilder({ block_height: 1, index_block_hash: '0x01' })
+      .addTx({
+        tx_id: '0x1234',
+        type_id: DbTxTypeId.SmartContract,
+        smart_contract_contract_id: 'SP3K8BC0PPEVCV7NZ6QSRWPQ2JE9E5B6N3PA0KBR9.contract-1',
+        smart_contract_source_code: '(some-contract-src)',
+      })
+      .addTxSmartContract({
+        tx_id: '0x1234',
+        block_height: 1,
+        contract_id: 'SP3K8BC0PPEVCV7NZ6QSRWPQ2JE9E5B6N3PA0KBR9.contract-1',
+        contract_source: '(some-contract-src)',
+      })
+      .build();
+    await db.update(block1);
+    const block2 = new TestBlockBuilder({
+      block_height: 2,
+      index_block_hash: '0x02',
+      parent_index_block_hash: '0x01',
+    })
+      .addTx({
+        tx_id: '0x1222',
+        type_id: DbTxTypeId.SmartContract,
+        smart_contract_contract_id: 'SP3K8BC0PPEVCV7NZ6QSRWPQ2JE9E5B6N3PA0KBR9.contract-2',
+        smart_contract_source_code: '(some-contract-src)',
+      })
+      .addTxSmartContract({
+        tx_id: '0x1222',
+        block_height: 2,
+        contract_id: 'SP3K8BC0PPEVCV7NZ6QSRWPQ2JE9E5B6N3PA0KBR9.contract-2',
+        contract_source: '(some-contract-src)',
+      })
+      .build();
+    await db.update(block2);
+
+    // Contracts are found
+    let query = await supertest(api.server).get(
+      `/extended/v2/smart-contracts/status?contract_id=SP3K8BC0PPEVCV7NZ6QSRWPQ2JE9E5B6N3PA0KBR9.contract-1&contract_id=SP3K8BC0PPEVCV7NZ6QSRWPQ2JE9E5B6N3PA0KBR9.contract-2`
+    );
+    expect(query.status).toBe(200);
+    let json = JSON.parse(query.text);
+    expect(json).toStrictEqual({
+      'SP3K8BC0PPEVCV7NZ6QSRWPQ2JE9E5B6N3PA0KBR9.contract-1': {
+        found: true,
+        result: {
+          block_height: 1,
+          contract_id: 'SP3K8BC0PPEVCV7NZ6QSRWPQ2JE9E5B6N3PA0KBR9.contract-1',
+          status: 'success',
+          tx_id: '0x1234',
+        },
+      },
+      'SP3K8BC0PPEVCV7NZ6QSRWPQ2JE9E5B6N3PA0KBR9.contract-2': {
+        found: true,
+        result: {
+          block_height: 2,
+          contract_id: 'SP3K8BC0PPEVCV7NZ6QSRWPQ2JE9E5B6N3PA0KBR9.contract-2',
+          status: 'success',
+          tx_id: '0x1222',
+        },
+      },
+    });
+
+    // Assume two contract attempts on the mempool
+    const mempoolTx1 = testMempoolTx({
+      tx_id: '0x111111',
+      type_id: DbTxTypeId.SmartContract,
+      nonce: 5,
+      smart_contract_contract_id: 'SP3K8BC0PPEVCV7NZ6QSRWPQ2JE9E5B6N3PA0KBR9.contract-3',
+    });
+    const mempoolTx2 = testMempoolTx({
+      tx_id: '0x111122',
+      type_id: DbTxTypeId.SmartContract,
+      nonce: 6,
+      smart_contract_contract_id: 'SP3K8BC0PPEVCV7NZ6QSRWPQ2JE9E5B6N3PA0KBR9.contract-3',
+    });
+    await db.updateMempoolTxs({ mempoolTxs: [mempoolTx1, mempoolTx2] });
+    query = await supertest(api.server).get(
+      `/extended/v2/smart-contracts/status?contract_id=SP3K8BC0PPEVCV7NZ6QSRWPQ2JE9E5B6N3PA0KBR9.contract-3`
+    );
+    expect(query.status).toBe(200);
+    json = JSON.parse(query.text);
+    // Only the first one is reported.
+    expect(json).toStrictEqual({
+      'SP3K8BC0PPEVCV7NZ6QSRWPQ2JE9E5B6N3PA0KBR9.contract-3': {
+        found: true,
+        result: {
+          contract_id: 'SP3K8BC0PPEVCV7NZ6QSRWPQ2JE9E5B6N3PA0KBR9.contract-3',
+          status: 'pending',
+          tx_id: '0x111111',
+        },
+      },
+    });
+
+    // Check found = false
+    query = await supertest(api.server).get(
+      `/extended/v2/smart-contracts/status?contract_id=SP3K8BC0PPEVCV7NZ6QSRWPQ2JE9E5B6N3PA0KBR9.abcde`
+    );
+    expect(query.status).toBe(200);
+    json = JSON.parse(query.text);
+    // The unknown contract is reported as not found.
+    expect(json).toStrictEqual({
+      'SP3K8BC0PPEVCV7NZ6QSRWPQ2JE9E5B6N3PA0KBR9.abcde': {
+        found: false,
+      },
+    });
+  });
 });
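Editor's note: for reference, a hypothetical client call against the endpoint this test exercises, using Node 20's global fetch (which this release upgrades to). The host and port below are placeholders for a local API instance:

// Hypothetical usage of the /extended/v2/smart-contracts/status endpoint;
// repeat `contract_id` to query several contracts in one request.
// Requires an ESM module (top-level await) and a running API at the given URL.
const params = new URLSearchParams();
for (const id of [
  'SP3K8BC0PPEVCV7NZ6QSRWPQ2JE9E5B6N3PA0KBR9.contract-1',
  'SP3K8BC0PPEVCV7NZ6QSRWPQ2JE9E5B6N3PA0KBR9.contract-2',
]) {
  params.append('contract_id', id);
}
const res = await fetch(`http://localhost:3999/extended/v2/smart-contracts/status?${params}`);
// Response is keyed by contract_id; each entry is { found, result? }.
console.log(await res.json());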