From 23e17aa57d462d61bc24a2b384e93bb0777cfa78 Mon Sep 17 00:00:00 2001
From: Ivo Yankov
Date: Fri, 14 Feb 2025 19:01:59 +0200
Subject: [PATCH] refactor: update solo node start to support multiple
 clusters (#1385)

Signed-off-by: Ivo Yankov
---
 src/commands/account.ts                        |  25 ++++-
 src/commands/mirror_node.ts                    |  29 ++++-
 src/commands/node/configs.ts                   |  47 ++++++--
 src/commands/node/tasks.ts                     | 103 ++++++++++++------
 src/commands/relay.ts                          |  14 ++-
 src/core/account_manager.ts                    |  85 ++++++++++-----
 src/core/helpers.ts                            |   2 +
 src/core/model/consensus_node.ts               |   3 +-
 src/core/model/solo_service.ts                 |  31 ++++++
 src/core/network_node_services.ts              |  25 +++++
 test/e2e/commands/account.test.ts              |   9 +-
 test/e2e/commands/mirror_node.test.ts          |   4 +-
 test/e2e/commands/node_local_hedera.test.ts    |   6 +-
 test/e2e/commands/node_update.test.ts          |   6 +-
 test/e2e/commands/separate_node_add.test.ts    |   6 +-
 .../e2e/commands/separate_node_update.test.ts  |   6 +-
 .../commands/separate_node_upgrade.test.ts     |   1 -
 test/test_add.ts                               |   6 +-
 test/test_util.ts                              |  16 ++-
 test/unit/commands/base.test.ts                |   2 +-
 20 files changed, 331 insertions(+), 95 deletions(-)
 create mode 100644 src/core/model/solo_service.ts

diff --git a/src/commands/account.ts b/src/commands/account.ts
index f2f252f64..4451165c8 100644
--- a/src/commands/account.ts
+++ b/src/commands/account.ts
@@ -16,6 +16,7 @@ import {sleep} from '../core/helpers.js';
 import {resolveNamespaceFromDeployment} from '../core/resolvers.js';
 import {Duration} from '../core/time/duration.js';
 import {type NamespaceName} from '../core/kube/resources/namespace/namespace_name.js';
+import {type DeploymentName} from '../core/config/remote/types.js';
 
 export class AccountCommand extends BaseCommand {
   private readonly accountManager: AccountManager;
@@ -178,7 +179,11 @@ export class AccountCommand extends BaseCommand {
 
         self.logger.debug('Initialized config', {config});
 
-        await self.accountManager.loadNodeClient(ctx.config.namespace);
+        await self.accountManager.loadNodeClient(
+          ctx.config.namespace,
+          self.getClusterRefs(),
+          self.configManager.getFlag<DeploymentName>(flags.deployment),
+        );
       },
     },
     {
@@ -345,7 +350,11 @@ export class AccountCommand extends BaseCommand {
 
         self.logger.debug('Initialized config', {config});
 
-        await self.accountManager.loadNodeClient(ctx.config.namespace);
+        await self.accountManager.loadNodeClient(
+          ctx.config.namespace,
+          self.getClusterRefs(),
+          self.configManager.getFlag<DeploymentName>(flags.deployment),
+        );
 
         return ListrLease.newAcquireLeaseTask(lease, task);
       },
@@ -421,7 +430,11 @@ export class AccountCommand extends BaseCommand {
         // set config in the context for later tasks to use
         ctx.config = config;
 
-        await self.accountManager.loadNodeClient(config.namespace);
+        await self.accountManager.loadNodeClient(
+          config.namespace,
+          self.getClusterRefs(),
+          self.configManager.getFlag<DeploymentName>(flags.deployment),
+        );
 
         self.logger.debug('Initialized config', {config});
       },
@@ -503,7 +516,11 @@ export class AccountCommand extends BaseCommand {
         // set config in the context for later tasks to use
         ctx.config = config;
 
-        await self.accountManager.loadNodeClient(config.namespace);
+        await self.accountManager.loadNodeClient(
+          config.namespace,
+          self.getClusterRefs(),
+          self.configManager.getFlag<DeploymentName>(flags.deployment),
+        );
 
         self.logger.debug('Initialized config', {config});
       },
diff --git a/src/commands/mirror_node.ts b/src/commands/mirror_node.ts
index 9e1f09570..e12b9513d 100644
--- a/src/commands/mirror_node.ts
+++ b/src/commands/mirror_node.ts
@@ -28,6 +28,7 @@ import chalk from 'chalk';
 import {type CommandFlag} from '../types/flag_types.js';
 import {PvcRef} from '../core/kube/resources/pvc/pvc_ref.js';
 import {PvcName} from '../core/kube/resources/pvc/pvc_name.js';
+import {type DeploymentName} from '../core/config/remote/types.js';
 
 interface MirrorNodeDeployConfigClass {
   chartDirectory: string;
@@ -243,7 +244,11 @@ export class MirrorNodeCommand extends BaseCommand {
             // user defined values later to override predefined values
             ctx.config.valuesArg += await self.prepareValuesArg(ctx.config);
 
-            await self.accountManager.loadNodeClient(ctx.config.namespace);
+            await self.accountManager.loadNodeClient(
+              ctx.config.namespace,
+              self.getClusterRefs(),
+              self.configManager.getFlag<DeploymentName>(flags.deployment),
+            );
             if (ctx.config.pinger) {
               const startAccId = constants.HEDERA_NODE_ACCOUNT_ID_START;
@@ -459,8 +464,20 @@ export class MirrorNodeCommand extends BaseCommand {
             const exchangeRatesFileIdNum = 112;
             const timestamp = Date.now();
 
-            const fees = await this.accountManager.getFileContents(namespace, feesFileIdNum);
-            const exchangeRates = await this.accountManager.getFileContents(namespace, exchangeRatesFileIdNum);
+            const clusterRefs = this.getClusterRefs();
+            const deployment = this.configManager.getFlag<DeploymentName>(flags.deployment);
+            const fees = await this.accountManager.getFileContents(
+              namespace,
+              feesFileIdNum,
+              clusterRefs,
+              deployment,
+            );
+            const exchangeRates = await this.accountManager.getFileContents(
+              namespace,
+              exchangeRatesFileIdNum,
+              clusterRefs,
+              deployment,
+            );
 
             const importFeesQuery = `INSERT INTO public.file_data(file_data, consensus_timestamp, entity_id,
 transaction_type)
@@ -616,7 +633,11 @@ export class MirrorNodeCommand extends BaseCommand {
               isChartInstalled,
             };
 
-            await self.accountManager.loadNodeClient(ctx.config.namespace);
+            await self.accountManager.loadNodeClient(
+              ctx.config.namespace,
+              self.getClusterRefs(),
+              self.configManager.getFlag<DeploymentName>(flags.deployment),
+            );
 
             return ListrLease.newAcquireLeaseTask(lease, task);
           },
diff --git a/src/commands/node/configs.ts b/src/commands/node/configs.ts
index 29d1a455a..c53151ad4 100644
--- a/src/commands/node/configs.ts
+++ b/src/commands/node/configs.ts
@@ -19,6 +19,7 @@ import {type NamespaceName} from '../../core/kube/resources/namespace/namespace_name.js';
 import {type PodRef} from '../../core/kube/resources/pod/pod_ref.js';
 import {type K8Factory} from '../../core/kube/k8_factory.js';
 import {type ConsensusNode} from '../../core/model/consensus_node.js';
+import {type DeploymentName} from '../../core/config/remote/types.js';
 
 export const PREPARE_UPGRADE_CONFIGS_NAME = 'prepareUpgradeConfig';
 export const DOWNLOAD_GENERATED_FILES_CONFIGS_NAME = 'downloadGeneratedFilesConfig';
@@ -64,7 +65,11 @@ export const prepareUpgradeConfigBuilder = async function (argv, ctx, task) {
   config.namespace = await resolveNamespaceFromDeployment(this.parent.localConfig, this.configManager, task);
   await initializeSetup(config, this.k8Factory);
 
-  config.nodeClient = await this.accountManager.loadNodeClient(config.namespace);
+  config.nodeClient = await this.accountManager.loadNodeClient(
+    config.namespace,
+    this.parent.getClusterRefs(),
+    config.deployment,
+  );
 
   const accountKeys = await this.accountManager.getAccountKeysFromSecret(FREEZE_ADMIN_ACCOUNT, config.namespace);
   config.freezeAdminPrivateKey = accountKeys.privateKey;
@@ -119,7 +124,11 @@ export const upgradeConfigBuilder = async function (argv, ctx, task, shouldLoadNodeClient) {
     constants.SOLO_DEPLOYMENT_CHART,
   );
   if (shouldLoadNodeClient) {
-    ctx.config.nodeClient = await this.accountManager.loadNodeClient(ctx.config.namespace);
+    ctx.config.nodeClient = await this.accountManager.loadNodeClient(
+      ctx.config.namespace,
+      this.parent.getClusterRefs(),
+      config.deployment,
+    );
   }
 
   const accountKeys = await this.accountManager.getAccountKeysFromSecret(FREEZE_ADMIN_ACCOUNT, config.namespace);
@@ -160,7 +169,11 @@ export const updateConfigBuilder = async function (argv, ctx, task, shouldLoadNodeClient) {
   );
 
   if (shouldLoadNodeClient) {
-    ctx.config.nodeClient = await this.accountManager.loadNodeClient(ctx.config.namespace);
+    ctx.config.nodeClient = await this.accountManager.loadNodeClient(
+      ctx.config.namespace,
+      this.parent.getClusterRefs(),
+      config.deployment,
+    );
   }
 
   const accountKeys = await this.accountManager.getAccountKeysFromSecret(FREEZE_ADMIN_ACCOUNT, config.namespace);
@@ -207,7 +220,11 @@ export const deleteConfigBuilder = async function (argv, ctx, task, shouldLoadNodeClient) {
   );
 
   if (shouldLoadNodeClient) {
-    ctx.config.nodeClient = await this.accountManager.loadNodeClient(ctx.config.namespace);
+    ctx.config.nodeClient = await this.accountManager.loadNodeClient(
+      ctx.config.namespace,
+      this.parent.getClusterRefs(),
+      config.deployment,
+    );
   }
 
   const accountKeys = await this.accountManager.getAccountKeysFromSecret(FREEZE_ADMIN_ACCOUNT, config.namespace);
@@ -260,7 +277,11 @@ export const addConfigBuilder = async function (argv, ctx, task, shouldLoadNodeClient) {
   );
 
   if (shouldLoadNodeClient) {
-    ctx.config.nodeClient = await this.accountManager.loadNodeClient(ctx.config.namespace);
+    ctx.config.nodeClient = await this.accountManager.loadNodeClient(
+      ctx.config.namespace,
+      this.parent.getClusterRefs(),
+      config.deployment,
+    );
   }
 
   const accountKeys = await this.accountManager.getAccountKeysFromSecret(FREEZE_ADMIN_ACCOUNT, config.namespace);
@@ -270,7 +291,11 @@ export const addConfigBuilder = async function (argv, ctx, task, shouldLoadNodeClient) {
   const treasuryAccountPrivateKey = treasuryAccount.privateKey;
   config.treasuryKey = PrivateKey.fromStringED25519(treasuryAccountPrivateKey);
 
-  config.serviceMap = await this.accountManager.getNodeServiceMap(config.namespace);
+  config.serviceMap = await this.accountManager.getNodeServiceMap(
+    config.namespace,
+    this.parent.getClusterRefs(),
+    config.deployment,
+  );
 
   config.consensusNodes = this.parent.getConsensusNodes();
   config.contexts = this.parent.getContexts();
@@ -373,9 +398,13 @@ export const startConfigBuilder = async function (argv, ctx, task) {
     'contexts',
   ]) as NodeStartConfigClass;
   config.namespace = await resolveNamespaceFromDeployment(this.parent.localConfig, this.configManager, task);
+  config.consensusNodes = this.parent.getConsensusNodes();
 
-  if (!(await this.k8Factory.default().namespaces().has(config.namespace))) {
-    throw new SoloError(`namespace ${config.namespace} does not exist`);
+  for (const consensusNode of config.consensusNodes) {
+    const k8 = this.k8Factory.getK8(consensusNode.context);
+    if (!(await k8.namespaces().has(config.namespace))) {
+      throw new SoloError(`namespace ${config.namespace} does not exist`);
+    }
   }
 
   config.nodeAliases = helpers.parseNodeAliases(config.nodeAliasesUnparsed);
@@ -445,6 +474,7 @@ export interface NodeKeysConfigClass {
 export interface NodeStartConfigClass {
   app: string;
   cacheDir: string;
+  consensusNodes: ConsensusNode[];
   debugNodeAlias: NodeAlias;
   namespace: NamespaceName;
   deployment: string;
@@ -452,7 +482,6 @@ export interface NodeStartConfigClass {
   stagingDir: string;
   podRefs: Record<NodeAlias, PodRef>;
   nodeAliasesUnparsed: string;
-  consensusNodes: ConsensusNode[];
   contexts: string[];
 }
 
diff --git a/src/commands/node/tasks.ts b/src/commands/node/tasks.ts
index 3ab00a992..07bc24109 100644
--- a/src/commands/node/tasks.ts
+++ b/src/commands/node/tasks.ts
@@ -68,7 +68,8 @@ import {ContainerRef} from '../../core/kube/resources/container/container_ref.js';
 import {NetworkNodes} from '../../core/network_nodes.js';
 import {container} from 'tsyringe-neo';
 import * as helpers from '../../core/helpers.js';
-import {type Optional} from '../../types/index.js';
+import {type Optional, type SoloListrTask, type SoloListrTaskWrapper} from '../../types/index.js';
+import {type DeploymentName} from '../../core/config/remote/types.js';
 import {ConsensusNode} from '../../core/model/consensus_node.js';
 import {type K8} from '../../core/kube/k8.js';
@@ -461,16 +462,18 @@
     for (const nodeAlias of nodeAliases) {
       subTasks.push({
         title: `Check proxy for node: ${chalk.yellow(nodeAlias)}`,
-        task: async ctx =>
-          await this.k8Factory
-            .default()
+        task: async ctx => {
+          const context = helpers.extractContextFromConsensusNodes(nodeAlias, ctx.config.consensusNodes);
+          const k8 = this.k8Factory.getK8(context);
+          await k8
             .pods()
             .waitForReadyStatus(
               ctx.config.namespace,
               [`app=haproxy-${nodeAlias}`, 'solo.hedera.com/type=haproxy'],
               constants.NETWORK_PROXY_MAX_ATTEMPTS,
               constants.NETWORK_PROXY_DELAY,
-            ),
+            );
+        },
       });
     }
@@ -557,7 +560,8 @@
     stakeAmount: number = HEDERA_NODE_DEFAULT_STAKE_AMOUNT,
   ) {
     try {
-      await this.accountManager.loadNodeClient(namespace);
+      const deploymentName = this.configManager.getFlag<DeploymentName>(flags.deployment);
+      await this.accountManager.loadNodeClient(namespace, this.parent.getClusterRefs(), deploymentName);
       const client = this.accountManager._nodeClient;
       const treasuryKey = await this.accountManager.getTreasuryAccountKeys(namespace);
       const treasuryPrivateKey = PrivateKey.fromStringED25519(treasuryKey.privateKey);
@@ -900,7 +904,8 @@
     return new Task('Identify existing network nodes', async (ctx: any, task: ListrTaskWrapper<any, any, any>) => {
       const config = ctx.config;
       config.existingNodeAliases = [];
-      config.serviceMap = await self.accountManager.getNodeServiceMap(config.namespace);
+      const clusterRefs = this.parent.getClusterRefs();
+      config.serviceMap = await self.accountManager.getNodeServiceMap(config.namespace, clusterRefs, config.deployment);
       for (const networkNodeServices of config.serviceMap.values()) {
         config.existingNodeAliases.push(networkNodeServices.nodeAlias);
       }
@@ -919,25 +924,21 @@
       const zipFile = config.stateFile;
       self.logger.debug(`zip file: ${zipFile}`);
       for (const nodeAlias of ctx.config.nodeAliases) {
+        const context = helpers.extractContextFromConsensusNodes(nodeAlias, config.consensusNodes);
+        const k8 = this.k8Factory.getK8(context);
         const podRef = ctx.config.podRefs[nodeAlias];
         const containerRef = ContainerRef.of(podRef, constants.ROOT_CONTAINER);
         self.logger.debug(`Uploading state files to pod ${podRef.name}`);
-        await self.k8Factory
-          .default()
-          .containers()
-          .readByRef(containerRef)
-          .copyTo(zipFile, `${constants.HEDERA_HAPI_PATH}/data`);
+        await k8.containers().readByRef(containerRef).copyTo(zipFile, `${constants.HEDERA_HAPI_PATH}/data`);
 
         self.logger.info(
           `Deleting the previous state files in pod ${podRef.name} directory ${constants.HEDERA_HAPI_PATH}/data/saved`,
         );
-        await self.k8Factory
-          .default()
+        await k8
           .containers()
           .readByRef(containerRef)
           .execContainer(['rm', '-rf', `${constants.HEDERA_HAPI_PATH}/data/saved/*`]);
-        await self.k8Factory
-          .default()
+        await k8
           .containers()
           .readByRef(containerRef)
           .execContainer([
@@ -986,7 +987,11 @@
 
   populateServiceMap() {
     return new Task('Populate serviceMap', async (ctx: any, task: ListrTaskWrapper<any, any, any>) => {
-      ctx.config.serviceMap = await this.accountManager.getNodeServiceMap(ctx.config.namespace);
+      ctx.config.serviceMap = await this.accountManager.getNodeServiceMap(
+        ctx.config.namespace,
+        this.parent.getClusterRefs(),
+        ctx.config.deployment,
+      );
       ctx.config.podRefs[ctx.config.nodeAlias] = PodRef.of(
         ctx.config.namespace,
         ctx.config.serviceMap.get(ctx.config.nodeAlias).nodePodName,
@@ -1032,7 +1037,12 @@
   }
 
   private async generateNodeOverridesJson(namespace: NamespaceName, nodeAliases: NodeAliases, stagingDir: string) {
-    const networkNodeServiceMap = await this.accountManager.getNodeServiceMap(namespace);
+    const deploymentName = this.configManager.getFlag<DeploymentName>(flags.deployment);
+    const networkNodeServiceMap = await this.accountManager.getNodeServiceMap(
+      namespace,
+      this.parent.getClusterRefs(),
+      deploymentName,
+    );
 
     const nodeOverridesModel = new NodeOverridesModel(nodeAliases, networkNodeServiceMap);
 
@@ -1054,7 +1064,12 @@
     keysDir: string,
     stagingDir: string,
   ) {
-    const networkNodeServiceMap = await this.accountManager.getNodeServiceMap(namespace);
+    const deploymentName = this.configManager.getFlag<DeploymentName>(flags.deployment);
+    const networkNodeServiceMap = await this.accountManager.getNodeServiceMap(
+      namespace,
+      this.parent.getClusterRefs(),
+      deploymentName,
+    );
 
     const adminPublicKeys = splitFlagInput(this.configManager.getFlag(flags.adminPublicKeys));
     const genesisNetworkData = await GenesisNetworkDataConstructor.initialize(
@@ -1102,9 +1117,7 @@
     return new Task('Starting nodes', (ctx: any, task: ListrTaskWrapper<any, any, any>) => {
       const config = ctx.config;
       const nodeAliases = config[nodeAliasesProperty];
-
       const subTasks = [];
-      // ctx.config.allNodeAliases = ctx.config.existingNodeAliases
 
       for (const nodeAlias of nodeAliases) {
         const podRef = config.podRefs[nodeAlias];
@@ -1112,11 +1125,9 @@
         subTasks.push({
           title: `Start node: ${chalk.yellow(nodeAlias)}`,
           task: async () => {
-            await this.k8Factory
-              .default()
-              .containers()
-              .readByRef(containerRef)
-              .execContainer(['systemctl', 'restart', 'network-node']);
+            const context = helpers.extractContextFromConsensusNodes(nodeAlias, config.consensusNodes);
+            const k8 = this.k8Factory.getK8(context);
+            await k8.containers().readByRef(containerRef).execContainer(['systemctl', 'restart', 'network-node']);
           },
         });
       }
@@ -1209,7 +1220,12 @@
         }
       }
 
-      config.nodeClient = await self.accountManager.refreshNodeClient(config.namespace, skipNodeAlias);
+      config.nodeClient = await self.accountManager.refreshNodeClient(
+        config.namespace,
+        skipNodeAlias,
+        this.parent.getClusterRefs(),
+        this.configManager.getFlag<DeploymentName>(flags.deployment),
+      );
 
       // send some write transactions to invoke the handler that will trigger the stake weight recalculate
       for (const nodeAlias of accountMap.keys()) {
@@ -1254,7 +1270,12 @@
   stakeNewNode() {
     const self = this;
     return new Task('Stake new node', async (ctx: any, task: ListrTaskWrapper<any, any, any>) => {
-      await self.accountManager.refreshNodeClient(ctx.config.namespace, ctx.config.nodeAlias);
+      await self.accountManager.refreshNodeClient(
+        ctx.config.namespace,
+        ctx.config.nodeAlias,
+        this.parent.getClusterRefs(),
+        this.configManager.getFlag<DeploymentName>(flags.deployment),
+      );
       await this._addStake(ctx.config.namespace, ctx.newNode.accountId, ctx.config.nodeAlias);
     });
   }
@@ -1487,7 +1508,12 @@
       self.logger.info(`nodeId: ${nodeId}, config.newAccountNumber: ${config.newAccountNumber}`);
 
       if (config.existingNodeAliases.length > 1) {
-        config.nodeClient = await self.accountManager.refreshNodeClient(config.namespace, config.nodeAlias);
+        config.nodeClient = await self.accountManager.refreshNodeClient(
+          config.namespace,
+          config.nodeAlias,
+          this.parent.getClusterRefs(),
+          this.configManager.getFlag<DeploymentName>(flags.deployment),
+        );
       }
 
       try {
@@ -1592,7 +1618,11 @@
       const config = ctx.config;
 
       if (!config.serviceMap) {
-        config.serviceMap = await self.accountManager.getNodeServiceMap(config.namespace);
+        config.serviceMap = await self.accountManager.getNodeServiceMap(
+          config.namespace,
+          this.parent.getClusterRefs(),
+          config.deployment,
+        );
       }
 
       let maxNodeId = 0;
@@ -1714,15 +1744,24 @@
       'Kill nodes to pick up updated configMaps',
       async (ctx: any, task: ListrTaskWrapper<any, any, any>) => {
         const config = ctx.config;
+        const clusterRefs = this.parent.getClusterRefs();
         // the updated node will have a new pod ID if its account ID changed which is a label
-        config.serviceMap = await this.accountManager.getNodeServiceMap(config.namespace);
+        config.serviceMap = await this.accountManager.getNodeServiceMap(
+          config.namespace,
+          clusterRefs,
+          config.deployment,
+        );
 
         for (const service of config.serviceMap.values()) {
           await this.k8Factory.default().pods().readByRef(PodRef.of(config.namespace, service.nodePodName)).killPod();
         }
 
         // again, the pod names will change after the pods are killed
-        config.serviceMap = await this.accountManager.getNodeServiceMap(config.namespace);
+        config.serviceMap = await this.accountManager.getNodeServiceMap(
+          config.namespace,
+          clusterRefs,
+          config.deployment,
+        );
 
         config.podRefs = {};
         for (const service of config.serviceMap.values()) {
diff --git a/src/commands/relay.ts b/src/commands/relay.ts
index 5e704b44e..b3186ca51 100644
--- a/src/commands/relay.ts
+++ b/src/commands/relay.ts
@@ -147,8 +147,12 @@ export class RelayCommand extends BaseCommand {
     const networkIds = {};
 
     const accountMap = getNodeAccountMap(nodeAliases);
-
-    const networkNodeServicesMap = await this.accountManager.getNodeServiceMap(namespace);
+    const deploymentName = this.configManager.getFlag(flags.deployment);
+    const networkNodeServicesMap = await this.accountManager.getNodeServiceMap(
+      namespace,
+      this.getClusterRefs(),
+      deploymentName,
+    );
     nodeAliases.forEach(nodeAlias => {
       const haProxyClusterIp = networkNodeServicesMap.get(nodeAlias).haProxyClusterIp;
       const haProxyGrpcPort = networkNodeServicesMap.get(nodeAlias).haProxyGrpcPort;
@@ -242,7 +246,11 @@ export class RelayCommand extends BaseCommand {
             constants.JSON_RPC_RELAY_CHART,
             constants.JSON_RPC_RELAY_CHART,
           );
-          await self.accountManager.loadNodeClient(ctx.config.namespace);
+          await self.accountManager.loadNodeClient(
+            ctx.config.namespace,
+            self.getClusterRefs(),
+            self.configManager.getFlag(flags.deployment),
+          );
           config.valuesArg = await self.prepareValuesArg(
             config.valuesFile,
             config.nodeAliases,
diff --git a/src/core/account_manager.ts b/src/core/account_manager.ts
index 241b22ac0..6e79a8a14 100644
--- a/src/core/account_manager.ts
+++ b/src/core/account_manager.ts
@@ -41,6 +41,9 @@ import {PodRef} from './kube/resources/pod/pod_ref.js';
 import {SecretType} from './kube/resources/secret/secret_type.js';
 import {type V1Pod} from '@kubernetes/client-node';
 import {InjectTokens} from './dependency_injection/inject_tokens.js';
+import {type ClusterRef, type DeploymentName, type ClusterRefs} from './config/remote/types.js';
+import {type Service} from './kube/resources/service/service.js';
+import {SoloService} from './model/solo_service.js';
 
 const REASON_FAILED_TO_GET_KEYS = 'failed to get keys for accountId';
 const REASON_SKIPPED = 'skipped since it does not have a genesis key';
@@ -160,8 +163,10 @@
   /**
    * loads and initializes the Node Client
    * @param namespace - the namespace of the network
+   * @param clusterRefs
+   * @param deployment
    */
-  async loadNodeClient(namespace: NamespaceName) {
+  async loadNodeClient(namespace: NamespaceName, clusterRefs?: ClusterRefs, deployment?: DeploymentName) {
     try {
       this.logger.debug(
         `loading node client: [!this._nodeClient=${!this._nodeClient}, this._nodeClient.isClientShutDown=${this._nodeClient?.isClientShutDown}]`,
@@ -170,7 +175,7 @@
         this.logger.debug(
           `refreshing node client: [!this._nodeClient=${!this._nodeClient}, this._nodeClient.isClientShutDown=${this._nodeClient?.isClientShutDown}]`,
         );
-        await this.refreshNodeClient(namespace);
+        await this.refreshNodeClient(namespace, undefined, clusterRefs, deployment);
       } else {
         try {
           if (!constants.SKIP_NODE_PING) {
@@ -178,7 +183,7 @@
           }
         } catch {
           this.logger.debug('node client ping failed, refreshing node client');
-          await this.refreshNodeClient(namespace);
+          await this.refreshNodeClient(namespace, undefined, clusterRefs, deployment);
         }
       }
 
@@ -195,11 +200,16 @@
    * @param namespace - the namespace of the network
    * @param skipNodeAlias - the node alias to skip
    */
-  async refreshNodeClient(namespace: NamespaceName, skipNodeAlias?: NodeAlias) {
+  async refreshNodeClient(
+    namespace: NamespaceName,
+    skipNodeAlias?: NodeAlias,
+    clusterRefs?: ClusterRefs,
+    deployment?: DeploymentName,
+  ) {
     try {
       await this.close();
       const treasuryAccountInfo = await this.getTreasuryAccountKeys(namespace);
-      const networkNodeServicesMap = await this.getNodeServiceMap(namespace);
+      const networkNodeServicesMap = await this.getNodeServiceMap(namespace, clusterRefs, deployment);
 
       this._nodeClient = await this._getNodeClient(
         namespace,
@@ -424,19 +434,29 @@
   /**
    * Gets a Map of the Hedera node services and the attributes needed, throws a SoloError if anything fails
    * @param namespace - the namespace of the solo network deployment
+   * @param clusterRefs - the cluster references to use
+   * @param deployment - the deployment to use
    * @returns a map of the network node services
    */
-  async getNodeServiceMap(namespace: NamespaceName) {
+  async getNodeServiceMap(namespace: NamespaceName, clusterRefs?: ClusterRefs, deployment?: string) {
     const labelSelector = 'solo.hedera.com/node-name';
 
     const serviceBuilderMap = new Map();
 
    try {
-      const serviceList = await this.k8Factory.default().services().list(namespace, [labelSelector]);
+      const services: SoloService[] = [];
+      for (const [clusterRef, context] of Object.entries(clusterRefs)) {
+        const serviceList: Service[] = await this.k8Factory.getK8(context).services().list(namespace, [labelSelector]);
+        services.push(
+          ...serviceList.map(service => SoloService.getFromK8Service(service, clusterRef, context, deployment)),
+        );
+      }
 
-      let nodeId = '0';
       // retrieve the list of services and build custom objects for the attributes we need
-      for (const service of serviceList) {
+      for (const service of services) {
+        let nodeId;
+        const clusterRef = service.clusterRef;
+
         let serviceBuilder = new NetworkNodeServicesBuilder(
           service.metadata.labels['solo.hedera.com/node-name'] as NodeAlias,
         );
@@ -448,6 +468,9 @@
           service.metadata.labels['solo.hedera.com/node-name'] as NodeAlias,
         );
         serviceBuilder.withNamespace(namespace);
+        serviceBuilder.withClusterRef(clusterRef);
+        serviceBuilder.withContext(clusterRefs[clusterRef]);
+        serviceBuilder.withDeployment(deployment);
       }
 
       const serviceType = service.metadata.labels['solo.hedera.com/type'];
@@ -493,7 +516,6 @@
         }
 
         serviceBuilder
-          .withNodeId(nodeId)
           .withAccountId(service.metadata!.labels!['solo.hedera.com/account-id'])
           .withNodeServiceName(service.metadata!.name as string)
           .withNodeServiceClusterIp(service.spec!.clusterIP as string)
@@ -503,6 +525,8 @@
           .withNodeServiceGossipPort(service.spec!.ports!.filter(port => port.name === 'gossip')[0].port)
           .withNodeServiceGrpcPort(service.spec!.ports!.filter(port => port.name === 'grpc-non-tls')[0].port)
           .withNodeServiceGrpcsPort(service.spec!.ports!.filter(port => port.name === 'grpc-tls')[0].port);
+
+        if (nodeId) serviceBuilder.withNodeId(nodeId);
         break;
       }
       serviceBuilderMap.set(serviceBuilder.key(), serviceBuilder);
@@ -511,27 +535,29 @@
 
       // get the pod name for the service to use with portForward if needed
      for (const serviceBuilder of serviceBuilderMap.values()) {
         const podList: V1Pod[] = await this.k8Factory
-          .default()
+          .getK8(serviceBuilder.context)
           .pods()
           .list(namespace, [`app=${serviceBuilder.haProxyAppSelector}`]);
         serviceBuilder.withHaProxyPodName(PodName.of(podList[0].metadata.name));
       }
 
-      // get the pod name of the network node
-      const pods: V1Pod[] = await this.k8Factory
-        .default()
-        .pods()
-        .list(namespace, ['solo.hedera.com/type=network-node']);
-      for (const pod of pods) {
-        // eslint-disable-next-line no-prototype-builtins
-        if (!pod.metadata?.labels?.hasOwnProperty('solo.hedera.com/node-name')) {
-          // TODO Review why this fixes issue
-          continue;
+      for (const [clusterRef, context] of Object.entries(clusterRefs)) {
+        // get the pod name of the network node
+        const pods: V1Pod[] = await this.k8Factory
+          .getK8(context)
+          .pods()
+          .list(namespace, ['solo.hedera.com/type=network-node']);
+        for (const pod of pods) {
+          // eslint-disable-next-line no-prototype-builtins
+          if (!pod.metadata?.labels?.hasOwnProperty('solo.hedera.com/node-name')) {
+            // TODO Review why this fixes issue
+            continue;
+          }
+          const podName = PodName.of(pod.metadata!.name);
+          const nodeAlias = pod.metadata!.labels!['solo.hedera.com/node-name'] as NodeAlias;
+          const serviceBuilder = serviceBuilderMap.get(nodeAlias) as NetworkNodeServicesBuilder;
+          serviceBuilder.withNodePodName(podName);
         }
-        const podName = PodName.of(pod.metadata!.name);
-        const nodeAlias = pod.metadata!.labels!['solo.hedera.com/node-name'] as NodeAlias;
-        const serviceBuilder = serviceBuilderMap.get(nodeAlias) as NetworkNodeServicesBuilder;
-        serviceBuilder.withNodePodName(podName);
       }
 
       const serviceMap = new Map();
@@ -928,8 +954,13 @@
     return Base64.encode(addressBookBytes);
   }
 
-  async getFileContents(namespace: NamespaceName, fileNum: number) {
-    await this.loadNodeClient(namespace);
+  async getFileContents(
+    namespace: NamespaceName,
+    fileNum: number,
+    clusterRefs?: ClusterRefs,
+    deployment?: DeploymentName,
+  ) {
+    await this.loadNodeClient(namespace, clusterRefs, deployment);
     const client = this._nodeClient;
     const fileId = FileId.fromString(`0.0.${fileNum}`);
     const queryFees = new FileContentsQuery().setFileId(fileId);
diff --git a/src/core/helpers.ts b/src/core/helpers.ts
index 8eb2e5c37..f1fc21f08 100644
--- a/src/core/helpers.ts
+++ b/src/core/helpers.ts
@@ -19,6 +19,8 @@ import {type NodeAddConfigClass} from '../commands/node/node_add_config.js';
 import {type ConsensusNode} from './model/consensus_node.js';
 import {type Optional} from '../types/index.js';
 import {type NamespaceName} from './kube/resources/namespace/namespace_name.js';
+import {type K8Factory} from './kube/k8_factory.js';
+import {type Context} from './config/remote/types.js';
 
 export function getInternalIp(releaseVersion: semver.SemVer, namespaceName: NamespaceName, nodeAlias: NodeAlias) {
   //? Explanation: for v0.59.x the internal IP address is set to 127.0.0.1 to avoid an ISS
diff --git a/src/core/model/consensus_node.ts b/src/core/model/consensus_node.ts
index b6b1e6f8e..41193e40a 100644
--- a/src/core/model/consensus_node.ts
+++ b/src/core/model/consensus_node.ts
@@ -1,11 +1,10 @@
 /**
  * SPDX-License-Identifier: Apache-2.0
  */
-import {type NodeAlias} from '../../types/aliases.js';
 
 export class ConsensusNode {
   constructor(
-    public readonly name: NodeAlias,
+    public readonly name: string,
     public readonly nodeId: number,
     public readonly namespace: string,
     public readonly cluster: string,
diff --git a/src/core/model/solo_service.ts b/src/core/model/solo_service.ts
new file mode 100644
index 000000000..543e6f61a
--- /dev/null
+++ b/src/core/model/solo_service.ts
@@ -0,0 +1,31 @@
+/**
+ * SPDX-License-Identifier: Apache-2.0
+ */
+import {type ClusterRef, type DeploymentName} from '../config/remote/types.js';
+import {type ObjectMeta} from '../kube/resources/object_meta.js';
+import {type ServiceSpec} from '../kube/resources/service/service_spec.js';
+import {type ServiceStatus} from '../kube/resources/service/service_status.js';
+import {type Service} from '../kube/resources/service/service.js';
+import {K8ClientService} from '../kube/k8_client/resources/service/k8_client_service.js';
+
+export class SoloService extends K8ClientService {
+  constructor(
+    public readonly metadata: ObjectMeta,
+    public readonly spec: ServiceSpec,
+    public readonly status?: ServiceStatus,
+    public readonly clusterRef?: ClusterRef,
+    public readonly context?: string,
+    public readonly deployment?: string,
+  ) {
+    super(metadata, spec, status);
+  }
+
+  public static getFromK8Service(
+    service: Service,
+    clusterRef: ClusterRef,
+    context: string,
+    deployment: DeploymentName,
+  ) {
+    return new SoloService(service.metadata, service.spec, service.status, clusterRef, context, deployment);
+  }
+}
diff --git a/src/core/network_node_services.ts b/src/core/network_node_services.ts
index 19793fba7..288762c31 100644
--- a/src/core/network_node_services.ts
+++ b/src/core/network_node_services.ts
@@ -5,8 +5,12 @@ import {type NodeAlias} from '../types/aliases.js';
 import {type PodName} from './kube/resources/pod/pod_name.js';
 import {type NamespaceName} from './kube/resources/namespace/namespace_name.js';
+import {type ClusterRef, type Context, type DeploymentName} from './config/remote/types.js';
 
 export class NetworkNodeServices {
+  public readonly clusterRef?: ClusterRef;
+  public readonly context?: Context;
+  public readonly deployment?: DeploymentName;
   public readonly nodeAlias: NodeAlias;
   public readonly namespace: NamespaceName;
   public readonly nodeId: string | number;
@@ -31,6 +35,9 @@ export class NetworkNodeServices {
   public readonly envoyProxyGrpcWebPort: number;
 
   constructor(builder: NetworkNodeServicesBuilder) {
+    this.clusterRef = builder.clusterRef;
+    this.context = builder.context;
+    this.deployment = builder.deployment;
     this.nodeAlias = builder.nodeAlias;
     this.namespace = builder.namespace;
     this.nodeId = builder.nodeId;
@@ -62,6 +69,9 @@ export class NetworkNodeServices {
 
 export class NetworkNodeServicesBuilder {
   public namespace?: NamespaceName;
+  public clusterRef?: ClusterRef;
+  public context?: Context;
+  public deployment?: DeploymentName;
   public nodeId?: string | number;
   public haProxyName?: string;
   public accountId?: string;
@@ -91,6 +101,21 @@ export class NetworkNodeServicesBuilder {
     return this;
   }
 
+  withClusterRef(clusterRef: ClusterRef) {
+    this.clusterRef = clusterRef;
+    return this;
+  }
+
+  withContext(context: Context) {
+    this.context = context;
+    return this;
+  }
+
+  withDeployment(deployment: DeploymentName) {
+    this.deployment = deployment;
+    return this;
+  }
+
   withNodeId(nodeId: string | number) {
     this.nodeId = nodeId;
     return this;
diff --git a/test/e2e/commands/account.test.ts b/test/e2e/commands/account.test.ts
index 639cd414a..17417f9b0 100644
--- a/test/e2e/commands/account.test.ts
+++ b/test/e2e/commands/account.test.ts
@@ -103,7 +103,8 @@ e2eTestSuite(testName, argv, undefined, undefined, undefined, undefined, undefin
     before(async function () {
       this.timeout(Duration.ofSeconds(20).toMillis());
 
-      await accountManager.loadNodeClient(namespace);
+      const clusterRefs = accountCmd.getClusterRefs();
+      await accountManager.loadNodeClient(namespace, clusterRefs, argv[flags.deployment.name]);
     });
 
     after(async function () {
@@ -280,7 +281,8 @@ e2eTestSuite(testName, argv, undefined, undefined, undefined, undefined, undefin
           `${accountId.realm}.${accountId.shard}.${ecdsaPrivateKey.publicKey.toEvmAddress()}`,
         );
 
-        await accountManager.loadNodeClient(namespace);
+        const clusterRefs = accountCmd.getClusterRefs();
+        await accountManager.loadNodeClient(namespace, clusterRefs, argv[flags.deployment.name]);
         const accountAliasInfo = await accountManager.accountInfoQuery(newAccountInfo.accountAlias);
         expect(accountAliasInfo).not.to.be.null;
       } catch (e) {
@@ -306,7 +308,8 @@ e2eTestSuite(testName, argv, undefined, undefined, undefined, undefined, undefin
 
     it('Create new account', async () => {
       try {
-        await accountManager.loadNodeClient(namespace);
+        const clusterRefs = accountCmd.getClusterRefs();
+        await accountManager.loadNodeClient(namespace, clusterRefs, argv[flags.deployment.name]);
         const privateKey = PrivateKey.generate();
         const amount = 100;
 
diff --git a/test/e2e/commands/mirror_node.test.ts b/test/e2e/commands/mirror_node.test.ts
index 20be958bb..1a3f4839e 100644
--- a/test/e2e/commands/mirror_node.test.ts
+++ b/test/e2e/commands/mirror_node.test.ts
@@ -28,6 +28,7 @@ import {type NetworkNodes} from '../../../src/core/network_nodes.js';
 import {container} from 'tsyringe-neo';
 import {type V1Pod} from '@kubernetes/client-node';
 import {InjectTokens} from '../../../src/core/dependency_injection/inject_tokens.js';
+import {type ClusterRefs} from '../../../src/core/config/remote/types.js';
 
 const testName = 'mirror-cmd-e2e';
 const namespace = NamespaceName.of(testName);
@@ -112,7 +113,8 @@ e2eTestSuite(testName, argv, undefined, undefined, undefined, undefined, undefin
     }).timeout(Duration.ofMinutes(10).toMillis());
 
     it('mirror node API should be running', async () => {
-      await accountManager.loadNodeClient(namespace);
+      const clusterRefs: ClusterRefs = mirrorNodeCmd.getClusterRefs();
+      await accountManager.loadNodeClient(namespace, clusterRefs, argv[flags.deployment.name]);
       try {
         // find hedera explorer pod
         const pods: V1Pod[] = await k8Factory
diff --git a/test/e2e/commands/node_local_hedera.test.ts b/test/e2e/commands/node_local_hedera.test.ts
index 80904897f..2ab8ce194 100644
--- a/test/e2e/commands/node_local_hedera.test.ts
+++ b/test/e2e/commands/node_local_hedera.test.ts
@@ -20,6 +20,7 @@ import {NamespaceName} from '../../../src/core/kube/resources/namespace/namespace_name.js';
 import {type NetworkNodes} from '../../../src/core/network_nodes.js';
 import {container} from 'tsyringe-neo';
 import {InjectTokens} from '../../../src/core/dependency_injection/inject_tokens.js';
+import {type ClusterRefs} from '../../../src/core/config/remote/types.js';
 
 const namespace = NamespaceName.of('local-hedera-app');
 const argv = getDefaultArgv(namespace);
@@ -62,7 +63,8 @@ e2eTestSuite(
 
     it('save the state and restart the node with saved state', async () => {
       // create an account so later we can verify its balance after restart
-      await accountManager.loadNodeClient(namespace);
+      const clusterRefs: ClusterRefs = nodeCmd.getClusterRefs();
+      await accountManager.loadNodeClient(namespace, clusterRefs, argv[flags.deployment.name]);
       const privateKey = PrivateKey.generate();
       // get random integer between 100 and 1000
       const amount = Math.floor(Math.random() * (1000 - 100) + 100);
@@ -93,7 +95,7 @@ e2eTestSuite(
       await nodeCmd.handlers.start(argv);
 
       // check balance of accountInfo.accountId
-      await accountManager.loadNodeClient(namespace);
+      await accountManager.loadNodeClient(namespace, clusterRefs, argv[flags.deployment.name]);
       const balance = await new AccountBalanceQuery()
         .setAccountId(accountInfo.accountId)
         .execute(accountManager._nodeClient);
diff --git a/test/e2e/commands/node_update.test.ts b/test/e2e/commands/node_update.test.ts
index b63ac97b6..09b8cbb56 100644
--- a/test/e2e/commands/node_update.test.ts
+++ b/test/e2e/commands/node_update.test.ts
@@ -76,7 +76,11 @@ e2eTestSuite(
     });
 
     it('cache current version of private keys', async () => {
-      existingServiceMap = await bootstrapResp.opts.accountManager.getNodeServiceMap(namespace);
+      existingServiceMap = await bootstrapResp.opts.accountManager.getNodeServiceMap(
+        namespace,
+        nodeCmd.getClusterRefs(),
+        argv[flags.deployment.name],
+      );
       existingNodeIdsPrivateKeysHash = await getNodeAliasesPrivateKeysHash(
         existingServiceMap,
         k8Factory,
diff --git a/test/e2e/commands/separate_node_add.test.ts b/test/e2e/commands/separate_node_add.test.ts
index b60e8ef36..c8b8b0af2 100644
--- a/test/e2e/commands/separate_node_add.test.ts
+++ b/test/e2e/commands/separate_node_add.test.ts
@@ -78,7 +78,11 @@ e2eTestSuite(
 
     it('cache current version of private keys', async () => {
       // @ts-ignore
-      existingServiceMap = await nodeCmd.accountManager.getNodeServiceMap(namespace);
+      existingServiceMap = await nodeCmd.accountManager.getNodeServiceMap(
+        namespace,
+        nodeCmd.getClusterRefs(),
+        argv[flags.deployment.name],
+      );
       existingNodeIdsPrivateKeysHash = await getNodeAliasesPrivateKeysHash(
         existingServiceMap,
         k8Factory,
diff --git a/test/e2e/commands/separate_node_update.test.ts b/test/e2e/commands/separate_node_update.test.ts
index 928aa3f43..b7c6774a9 100644
--- a/test/e2e/commands/separate_node_update.test.ts
+++ b/test/e2e/commands/separate_node_update.test.ts
@@ -76,7 +76,11 @@ e2eTestSuite(
     });
 
     it('cache current version of private keys', async () => {
-      existingServiceMap = await bootstrapResp.opts.accountManager.getNodeServiceMap(namespace);
+      existingServiceMap = await bootstrapResp.opts.accountManager.getNodeServiceMap(
+        namespace,
+        nodeCmd.getClusterRefs(),
+        argv[flags.deployment.name],
+      );
       existingNodeIdsPrivateKeysHash = await getNodeAliasesPrivateKeysHash(
         existingServiceMap,
         k8Factory,
diff --git a/test/e2e/commands/separate_node_upgrade.test.ts b/test/e2e/commands/separate_node_upgrade.test.ts
index f453eceb7..a82613c72 100644
--- a/test/e2e/commands/separate_node_upgrade.test.ts
+++ b/test/e2e/commands/separate_node_upgrade.test.ts
@@ -90,7 +90,6 @@ e2eTestSuite(
         flags.localBuildPath.constName,
         flags.force.constName,
         'nodeClient',
-        'consensusNodes',
         'contexts',
       ]);
     }).timeout(Duration.ofMinutes(5).toMillis());
diff --git a/test/test_add.ts b/test/test_add.ts
index b75ab8d90..93d047469 100644
--- a/test/test_add.ts
+++ b/test/test_add.ts
@@ -78,7 +78,11 @@ export function testNodeAdd(
     });
 
     it('cache current version of private keys', async () => {
-      existingServiceMap = await bootstrapResp.opts.accountManager.getNodeServiceMap(namespace);
+      existingServiceMap = await bootstrapResp.opts.accountManager.getNodeServiceMap(
+        namespace,
+        nodeCmd.getClusterRefs(),
+        argv[flags.deployment.name],
+      );
       existingNodeIdsPrivateKeysHash = await getNodeAliasesPrivateKeysHash(
         existingServiceMap,
         k8Factory,
diff --git a/test/test_util.ts b/test/test_util.ts
index 01165d7ac..263d9fdc2 100644
--- a/test/test_util.ts
+++ b/test/test_util.ts
@@ -366,8 +366,14 @@ export function balanceQueryShouldSucceed(
 ) {
   it('Balance query should succeed', async () => {
     try {
+      const argv = getDefaultArgv(namespace);
       expect(accountManager._nodeClient).to.be.null;
-      await accountManager.refreshNodeClient(namespace, skipNodeAlias);
+      await accountManager.refreshNodeClient(
+        namespace,
+        skipNodeAlias,
+        cmd.getClusterRefs(),
+        argv[flags.deployment.name],
+      );
       expect(accountManager._nodeClient).not.to.be.null;
 
       const balance = await new AccountBalanceQuery()
@@ -391,7 +397,13 @@ export function accountCreationShouldSucceed(
 ) {
   it('Account creation should succeed', async () => {
     try {
-      await accountManager.refreshNodeClient(namespace, skipNodeAlias);
+      const argv = getDefaultArgv(namespace);
+      await accountManager.refreshNodeClient(
+        namespace,
+        skipNodeAlias,
+        nodeCmd.getClusterRefs(),
+        argv[flags.deployment.name],
+      );
       expect(accountManager._nodeClient).not.to.be.null;
       const privateKey = PrivateKey.generate();
       const amount = 100;
diff --git a/test/unit/commands/base.test.ts b/test/unit/commands/base.test.ts
index 8fa787c97..64587c922 100644
--- a/test/unit/commands/base.test.ts
+++ b/test/unit/commands/base.test.ts
@@ -19,7 +19,7 @@ import {resetForTest} from '../../test_container.js';
 import {InjectTokens} from '../../../src/core/dependency_injection/inject_tokens.js';
 import {ComponentsDataWrapper} from '../../../src/core/config/remote/components_data_wrapper.js';
 import {createComponentsDataWrapper} from '../core/config/remote/components_data_wrapper.test.js';
-import {type ClusterRefs} from '../../../src/core/config/remote/types.js';
+import {type ClusterRefs, type ClusterRef} from '../../../src/core/config/remote/types.js';
 import {Cluster} from '../../../src/core/config/remote/cluster.js';
 
 describe('BaseCommand', () => {
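--
Usage sketch (illustrative; not part of the commit): after this refactor the
AccountManager entry points take the cluster-ref map and the deployment name
explicitly instead of assuming the single default cluster. The sketch below
shows the new calling convention. `loadClientForDeployment` and the loosely
typed `cmd` host object are hypothetical; `getClusterRefs()`, `configManager`,
`accountManager`, and `flags.deployment` are the collaborators used throughout
this patch, and the import paths are assumed from the repository layout.

  import {flags} from '../commands/flags.js'; // assumed path
  import {type ClusterRefs, type DeploymentName} from '../core/config/remote/types.js';
  import {type NamespaceName} from '../core/kube/resources/namespace/namespace_name.js';

  // Hypothetical caller; `cmd` stands in for any BaseCommand subclass.
  async function loadClientForDeployment(cmd: any, namespace: NamespaceName): Promise<void> {
    // ClusterRefs maps each cluster reference to its kubernetes context.
    const clusterRefs: ClusterRefs = cmd.getClusterRefs();
    const deployment = cmd.configManager.getFlag<DeploymentName>(flags.deployment);

    // loadNodeClient -> refreshNodeClient -> getNodeServiceMap now fans out
    // over every entry of `clusterRefs` (via k8Factory.getK8(context))
    // instead of querying only k8Factory.default().
    await cmd.accountManager.loadNodeClient(namespace, clusterRefs, deployment);
  }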