fix: optimize re-org queries and indexes (#1821)
rafaelcr authored Jan 12, 2024
1 parent: c2bc6a6 · commit: 5505d35
Showing 5 changed files with 113 additions and 46 deletions.
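
For context: during a re-org, the store flips the canonical flag on every row tied to a given index_block_hash, and these queries filter on both columns. The old standalone hash index only served equality lookups on index_block_hash; the replacement composite b-tree on (index_block_hash, canonical) matches the combined predicate. A minimal sketch of the query shape with postgres.js follows; the connection string is a hypothetical placeholder and markTxsCanonical is an illustrative name, not this repo's API.

import postgres from 'postgres';

// Hypothetical local connection; the real service wires up its own config.
const sql = postgres('postgres://localhost/stacks_blockchain_api');

// The update shape this commit optimizes: both WHERE predicates line up with
// the new composite index on (index_block_hash, canonical).
async function markTxsCanonical(indexBlockHash: string, canonical: boolean): Promise<string[]> {
  const rows = await sql<{ tx_id: string }[]>`
    UPDATE txs
    SET canonical = ${canonical}
    WHERE index_block_hash = ${indexBlockHash} AND canonical != ${canonical}
    RETURNING tx_id
  `;
  return rows.map(r => r.tx_id);
}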
91 changes: 91 additions & 0 deletions migrations/1705013096459_update-re-org-indexes.js
@@ -0,0 +1,91 @@
+/* eslint-disable camelcase */
+
+exports.shorthands = undefined;
+
+exports.up = pgm => {
+  pgm.dropIndex('txs', 'index_block_hash');
+  pgm.createIndex('txs', ['index_block_hash', 'canonical']);
+
+  pgm.dropIndex('miner_rewards', 'index_block_hash');
+  pgm.createIndex('miner_rewards', ['index_block_hash', 'canonical']);
+
+  pgm.dropIndex('stx_lock_events', 'index_block_hash');
+  pgm.createIndex('stx_lock_events', ['index_block_hash', 'canonical']);
+
+  pgm.dropIndex('stx_events', 'index_block_hash');
+  pgm.createIndex('stx_events', ['index_block_hash', 'canonical']);
+
+  pgm.dropIndex('ft_events', 'index_block_hash');
+  pgm.createIndex('ft_events', ['index_block_hash', 'canonical']);
+
+  pgm.dropIndex('nft_events', 'index_block_hash');
+  pgm.createIndex('nft_events', ['index_block_hash', 'canonical']);
+
+  pgm.dropIndex('pox2_events', 'index_block_hash');
+  pgm.createIndex('pox2_events', ['index_block_hash', 'canonical']);
+
+  pgm.dropIndex('pox3_events', 'index_block_hash');
+  pgm.createIndex('pox3_events', ['index_block_hash', 'canonical']);
+
+  pgm.dropIndex('pox4_events', 'index_block_hash');
+  pgm.createIndex('pox4_events', ['index_block_hash', 'canonical']);
+
+  pgm.dropIndex('contract_logs', 'index_block_hash');
+  pgm.createIndex('contract_logs', ['index_block_hash', 'canonical']);
+
+  pgm.dropIndex('smart_contracts', 'index_block_hash');
+  pgm.createIndex('smart_contracts', ['index_block_hash', 'canonical']);
+
+  pgm.dropIndex('names', 'index_block_hash');
+  pgm.createIndex('names', ['index_block_hash', 'canonical']);
+
+  pgm.dropIndex('namespaces', 'index_block_hash');
+  pgm.createIndex('namespaces', ['index_block_hash', 'canonical']);
+
+  pgm.dropIndex('subdomains', 'index_block_hash');
+  pgm.createIndex('subdomains', ['index_block_hash', 'canonical']);
+};
+
+exports.down = pgm => {
+  pgm.dropIndex('txs', ['index_block_hash', 'canonical']);
+  pgm.createIndex('txs', 'index_block_hash', { method: 'hash' });
+
+  pgm.dropIndex('miner_rewards', ['index_block_hash', 'canonical']);
+  pgm.createIndex('miner_rewards', 'index_block_hash', { method: 'hash' });
+
+  pgm.dropIndex('stx_lock_events', ['index_block_hash', 'canonical']);
+  pgm.createIndex('stx_lock_events', 'index_block_hash', { method: 'hash' });
+
+  pgm.dropIndex('stx_events', ['index_block_hash', 'canonical']);
+  pgm.createIndex('stx_events', 'index_block_hash', { method: 'hash' });
+
+  pgm.dropIndex('ft_events', ['index_block_hash', 'canonical']);
+  pgm.createIndex('ft_events', 'index_block_hash', { method: 'hash' });
+
+  pgm.dropIndex('nft_events', ['index_block_hash', 'canonical']);
+  pgm.createIndex('nft_events', 'index_block_hash', { method: 'hash' });
+
+  pgm.dropIndex('pox2_events', ['index_block_hash', 'canonical']);
+  pgm.createIndex('pox2_events', 'index_block_hash', { method: 'hash' });
+
+  pgm.dropIndex('pox3_events', ['index_block_hash', 'canonical']);
+  pgm.createIndex('pox3_events', 'index_block_hash', { method: 'hash' });
+
+  pgm.dropIndex('pox4_events', ['index_block_hash', 'canonical']);
+  pgm.createIndex('pox4_events', 'index_block_hash', { method: 'hash' });
+
+  pgm.dropIndex('contract_logs', ['index_block_hash', 'canonical']);
+  pgm.createIndex('contract_logs', 'index_block_hash', { method: 'hash' });
+
+  pgm.dropIndex('smart_contracts', ['index_block_hash', 'canonical']);
+  pgm.createIndex('smart_contracts', 'index_block_hash', { method: 'hash' });
+
+  pgm.dropIndex('names', ['index_block_hash', 'canonical']);
+  pgm.createIndex('names', 'index_block_hash', { method: 'hash' });
+
+  pgm.dropIndex('namespaces', ['index_block_hash', 'canonical']);
+  pgm.createIndex('namespaces', 'index_block_hash', { method: 'hash' });
+
+  pgm.dropIndex('subdomains', ['index_block_hash', 'canonical']);
+  pgm.createIndex('subdomains', 'index_block_hash', { method: 'hash' });
+};
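
To spot-check the migration after it runs, the pg_indexes view shows the index definitions. A hedged sketch: the expected index name assumes node-pg-migrate's default {table}_{columns}_index naming, which this migration does not override.

import postgres from 'postgres';

// Hypothetical connection; point it at the API's database.
const sql = postgres('postgres://localhost/stacks_blockchain_api');

// After `up`, expect something like:
//   txs_index_block_hash_canonical_index: CREATE INDEX ... USING btree (index_block_hash, canonical)
// After `down`, expect a hash index on index_block_hash alone.
const rows = await sql<{ indexname: string; indexdef: string }[]>`
  SELECT indexname, indexdef
  FROM pg_indexes
  WHERE tablename = 'txs' AND indexdef LIKE '%index_block_hash%'
`;
for (const row of rows) console.log(`${row.indexname}: ${row.indexdef}`);
await sql.end();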
60 changes: 18 additions & 42 deletions src/datastore/pg-write-store.ts
@@ -1134,29 +1134,6 @@ export class PgWriteStore extends PgStore {
     });
   }
 
-  async updateStxEvent(sql: PgSqlClient, tx: DbTx, event: DbStxEvent) {
-    const values: StxEventInsertValues = {
-      event_index: event.event_index,
-      tx_id: event.tx_id,
-      tx_index: event.tx_index,
-      block_height: event.block_height,
-      index_block_hash: tx.index_block_hash,
-      parent_index_block_hash: tx.parent_index_block_hash,
-      microblock_hash: tx.microblock_hash,
-      microblock_sequence: tx.microblock_sequence,
-      microblock_canonical: tx.microblock_canonical,
-      canonical: event.canonical,
-      asset_event_type_id: event.asset_event_type_id,
-      sender: event.sender ?? null,
-      recipient: event.recipient ?? null,
-      amount: event.amount,
-      memo: event.memo ?? null,
-    };
-    await sql`
-      INSERT INTO stx_events ${sql(values)}
-    `;
-  }
-
   async updateFtEvents(sql: PgSqlClient, entries: { tx: DbTx; ftEvents: DbFtEvent[] }[]) {
     const values: FtEventInsertValues[] = [];
     for (const { tx, ftEvents } of entries) {
@@ -1438,8 +1415,10 @@ export class PgWriteStore extends PgStore {
     acceptedMicroblocks: string[];
     orphanedMicroblocks: string[];
   }> {
-    // Find the parent microblock if this anchor block points to one. If not, perform a sanity check for expected block headers in this case:
-    // > Anchored blocks that do not have parent microblock streams will have their parent microblock header hashes set to all 0's, and the parent microblock sequence number set to 0.
+    // Find the parent microblock if this anchor block points to one. If not, perform a sanity check
+    // for expected block headers in this case: Anchored blocks that do not have parent microblock
+    // streams will have their parent microblock header hashes set to all 0's, and the parent
+    // microblock sequence number set to 0.
     let acceptedMicroblockTip: DbMicroblock | undefined;
     if (BigInt(blockData.parentMicroblockHash) === 0n) {
       if (blockData.parentMicroblockSequence !== 0) {
@@ -2521,29 +2500,25 @@
     canonical: boolean,
     updatedEntities: ReOrgUpdatedEntities
   ): Promise<{ txsMarkedCanonical: string[]; txsMarkedNonCanonical: string[] }> {
-    const txResult = await sql<TxQueryResult[]>`
+    const txResult = await sql<{ tx_id: string }[]>`
       UPDATE txs
       SET canonical = ${canonical}
       WHERE index_block_hash = ${indexBlockHash} AND canonical != ${canonical}
-      RETURNING ${sql(TX_COLUMNS)}
+      RETURNING tx_id
     `;
-    const txIds = txResult.map(row => parseTxQueryResult(row));
+    const txIds = txResult.map(row => row.tx_id);
     if (canonical) {
-      updatedEntities.markedCanonical.txs += txResult.length;
+      updatedEntities.markedCanonical.txs += txResult.count;
     } else {
-      updatedEntities.markedNonCanonical.txs += txResult.length;
+      updatedEntities.markedNonCanonical.txs += txResult.count;
     }
-    for (const txId of txIds) {
-      logger.debug(`Marked tx as ${canonical ? 'canonical' : 'non-canonical'}: ${txId.tx_id}`);
-    }
-    if (txIds.length) {
+    if (txResult.count)
       await sql`
         UPDATE principal_stx_txs
        SET canonical = ${canonical}
-        WHERE tx_id IN ${sql(txIds.map(tx => tx.tx_id))}
+        WHERE tx_id IN ${sql(txIds)}
        AND index_block_hash = ${indexBlockHash} AND canonical != ${canonical}
      `;
-    }
 
     const minerRewardResults = await sql`
       UPDATE miner_rewards
@@ -2599,10 +2574,11 @@
     } else {
       updatedEntities.markedNonCanonical.nftEvents += nftResult.count;
     }
-    await this.updateNftCustodyFromReOrg(sql, {
-      index_block_hash: indexBlockHash,
-      microblocks: [],
-    });
+    if (nftResult.count)
+      await this.updateNftCustodyFromReOrg(sql, {
+        index_block_hash: indexBlockHash,
+        microblocks: [],
+      });
 
     const pox2Result = await sql`
       UPDATE pox2_events
@@ -2693,8 +2669,8 @@
     }
 
     return {
-      txsMarkedCanonical: canonical ? txIds.map(t => t.tx_id) : [],
-      txsMarkedNonCanonical: canonical ? [] : txIds.map(t => t.tx_id),
+      txsMarkedCanonical: canonical ? txIds : [],
+      txsMarkedNonCanonical: canonical ? [] : txIds,
     };
   }
 
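A note on the .length to .count switch in the hunk above: postgres.js query results are array-like RowList objects whose count property reports how many rows the command affected. With RETURNING the two values agree, but count also works for statements that return no rows, which keeps the accounting uniform across these re-org updates. A small sketch, reusing the hypothetical sql client from the earlier notes:

// With RETURNING, rows are materialized and `count` matches `length`.
const updated = await sql<{ index_block_hash: string }[]>`
  UPDATE miner_rewards SET canonical = true
  WHERE canonical = false
  RETURNING index_block_hash
`;
console.log(updated.length, updated.count); // both equal the affected-row count

// Without RETURNING, `length` is 0 but `count` still holds the affected rows.
const bare = await sql`
  UPDATE miner_rewards SET canonical = true WHERE canonical = false
`;
console.log(bare.length, bare.count); // 0, then the affected-row count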
2 changes: 1 addition & 1 deletion src/tests/datastore-tests.ts
@@ -167,7 +167,7 @@ describe('postgres datastore', () => {
       createStxEvent('addrA', 'addrC', 35),
     ];
     for (const event of events) {
-      await db.updateStxEvent(client, tx, event);
+      await db.updateStxEvents(client, [{ tx, stxEvents: [event] }]);
     }
 
     const createStxLockEvent = (
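The test change above follows from removing the single-row updateStxEvent (first hunk in pg-write-store.ts) in favor of the batched updateStxEvents, whose entries mirror the { tx, ftEvents } shape of updateFtEvents. The tests wrap one event per call, but the entry type suggests a whole transaction's events can share one round trip. A hedged sketch, reusing the db, client, tx, and events values set up in this test:

// One batched call for all of a tx's STX events, instead of one call per event.
await db.updateStxEvents(client, [{ tx, stxEvents: events }]);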
2 changes: 1 addition & 1 deletion src/tests/other-tests.ts
@@ -157,7 +157,7 @@ describe('other tests', () => {
       event_type: DbEventTypeId.StxAsset,
       amount: 10_000_000_000_000n,
     };
-    await db.updateStxEvent(client, tx, stxBurnEvent1);
+    await db.updateStxEvents(client, [{ tx, stxEvents: [stxBurnEvent1] }]);
     const expectedTotalStx2 = stxMintEvent1.amount + stxMintEvent2.amount - stxBurnEvent1.amount;
     const result2 = await supertest(api.server).get(`/extended/v1/stx_supply`);
     expect(result2.status).toBe(200);
4 changes: 2 additions & 2 deletions src/tests/search-tests.ts
@@ -717,7 +717,7 @@ describe('search tests', () => {
       recipient: addr3,
       sender: 'none',
     };
-    await db.updateStxEvent(client, stxTx1, stxEvent1);
+    await db.updateStxEvents(client, [{ tx: stxTx1, stxEvents: [stxEvent1] }]);
 
     // test address as a stx event recipient
     const searchResult3 = await supertest(api.server).get(`/extended/v1/search/${addr3}`);
@@ -745,7 +745,7 @@
       sender: addr4,
     };
 
-    await db.updateStxEvent(client, stxTx1, stxEvent2);
+    await db.updateStxEvents(client, [{ tx: stxTx1, stxEvents: [stxEvent2] }]);
 
     // test address as a stx event sender
     const searchResult4 = await supertest(api.server).get(`/extended/v1/search/${addr4}`);
