diff --git a/fullstack-network-manager/src/commands/base.mjs b/fullstack-network-manager/src/commands/base.mjs
index a35ff4dd7..0a3846b6b 100644
--- a/fullstack-network-manager/src/commands/base.mjs
+++ b/fullstack-network-manager/src/commands/base.mjs
@@ -20,6 +20,7 @@ export class BaseCommand extends ShellRunner {
     if (!opts || !opts.logger) throw new Error('An instance of core/Logger is required')
     if (!opts || !opts.helm) throw new Error('An instance of core/Helm is required')
     if (!opts || !opts.kubectl) throw new Error('An instance of core/Kubectl is required')
+    if (!opts || !opts.kubectl2) throw new Error('An instance of core/Kubectl2 is required')
     if (!opts || !opts.chartManager) throw new Error('An instance of core/ChartManager is required')
     if (!opts || !opts.configManager) throw new Error('An instance of core/ConfigManager is required')
     if (!opts || !opts.depManager) throw new Error('An instance of core/DependencyManager is required')
@@ -29,6 +30,7 @@ export class BaseCommand extends ShellRunner {
     this.helm = opts.helm
     this.kubectl = opts.kubectl
+    this.kubectl2 = opts.kubectl2
     this.chartManager = opts.chartManager
     this.configManager = opts.configManager
     this.depManager = opts.depManager
diff --git a/fullstack-network-manager/src/commands/chart.mjs b/fullstack-network-manager/src/commands/chart.mjs
index 663f94ba6..0f1de3f1a 100644
--- a/fullstack-network-manager/src/commands/chart.mjs
+++ b/fullstack-network-manager/src/commands/chart.mjs
@@ -60,6 +60,7 @@ export class ChartCommand extends BaseCommand {
   async prepareConfig (task, argv) {
     this.configManager.load(argv)
     const namespace = this.configManager.flagValue(flags.namespace)
+    const nodeIds = this.configManager.flagValue(flags.nodeIDs)
     const chartDir = this.configManager.flagValue(flags.chartDirectory)
     const valuesFile = this.configManager.flagValue(flags.valuesFile)
     const deployMirrorNode = this.configManager.flagValue(flags.deployMirrorNode)
@@ -74,6 +75,7 @@ export class ChartCommand extends BaseCommand {
     // prompt if values are missing and create a config object
     const config = {
       namespace: await prompts.promptNamespaceArg(task, namespace),
+      nodeIds: await prompts.promptNodeIdsArg(task, nodeIds),
       chartDir: await prompts.promptChartDir(task, chartDir),
       valuesFile: await prompts.promptChartDir(task, valuesFile),
       deployMirrorNode: await prompts.promptDeployMirrorNode(task, deployMirrorNode),
@@ -84,7 +86,7 @@ export class ChartCommand extends BaseCommand {
       enableHederaExplorerTls: await prompts.promptEnableHederaExplorerTls(task, enableHederaExplorerTls),
       acmeClusterIssuer: await prompts.promptAcmeClusterIssuer(task, acmeClusterIssuer),
       selfSignedClusterIssuer: await prompts.promptSelfSignedClusterIssuer(task, selfSignedClusterIssuer),
-      timeout: '900s',
+      timeout: 900,
       version: this.configManager.getVersion()
     }
@@ -124,12 +126,10 @@ export class ChartCommand extends BaseCommand {
       {
         title: 'Waiting for network pods to be ready',
         task: async (ctx, _) => {
-          const timeout = ctx.config.timeout || '900s'
-          await this.kubectl.wait('pod',
-            '--for=jsonpath=\'{.status.phase}\'=Running',
-            '-l fullstack.hedera.com/type=network-node',
-            `--timeout=${timeout}`
-          )
+          const timeout = ctx.config.timeout || 900
+          await this.kubectl2.waitForPod(constants.POD_STATUS_RUNNING, [
+            'fullstack.hedera.com/type=network-node'
+          ], ctx.config.nodeIds.length, timeout * 1000, 1000)
        }
      }
    ], {
@@ -203,12 +203,10 @@ export class ChartCommand extends BaseCommand {
       {
         title: 'Waiting for network pods to be ready',
         task: async (ctx, _) => {
-          const timeout = ctx.config.timeout || '900s'
-          await this.kubectl.wait('pod',
-            '--for=jsonpath=\'{.status.phase}\'=Running',
-            '-l fullstack.hedera.com/type=network-node',
-            `--timeout=${timeout}`
-          )
+          const timeout = ctx.config.timeout || 900
+          await this.kubectl2.waitForPod(constants.POD_STATUS_RUNNING, [
+            'fullstack.hedera.com/type=network-node'
+          ], ctx.config.nodeIds.length, timeout * 1000, 1000)
        }
      }
    ], {
@@ -237,6 +235,7 @@ export class ChartCommand extends BaseCommand {
      builder: y => {
        flags.setCommandFlags(y,
          flags.namespace,
+          flags.nodeIDs,
          flags.deployMirrorNode,
          flags.deployHederaExplorer,
          flags.deployJsonRpcRelay,
diff --git a/fullstack-network-manager/src/core/kubectl2.mjs b/fullstack-network-manager/src/core/kubectl2.mjs
index 6a98db665..6091174cf 100644
--- a/fullstack-network-manager/src/core/kubectl2.mjs
+++ b/fullstack-network-manager/src/core/kubectl2.mjs
@@ -24,6 +24,10 @@ export class Kubectl2 {
   init () {
     this.kubeConfig = new k8s.KubeConfig()
     this.kubeConfig.loadFromDefault()
+
+    if (!this.kubeConfig.getCurrentContext()) throw new FullstackTestingError('No active context!')
+    if (!this.kubeConfig.getCurrentCluster()) throw new FullstackTestingError('No active cluster!')
+
     this.kubeClient = this.kubeConfig.makeApiClient(k8s.CoreV1Api)
     this.kubeCopy = new k8s.Cp(this.kubeConfig)
   }
@@ -440,22 +444,27 @@ export class Kubectl2 {
    * Wait for pod
    * @param status phase of the pod
    * @param labels pod labels
-   * @param timeoutSeconds timeout in seconds
+   * @param podCount number of pod expected
+   * @param timeout timeout in milliseconds
+   * @param delay delay between checks in milliseconds
    * @return {Promise}
    */
-  async waitForPod (status = 'Running', labels = [], timeoutSeconds = 1) {
+  async waitForPod (status = 'Running', labels = [], podCount = 1, timeout = 1000, delay = 200) {
     const ns = this._getNamespace()
     const fieldSelector = `status.phase=${status}`
     const labelSelector = labels.join(',')

-    const delay = 200
-    const maxAttempts = Math.round(timeoutSeconds * 1000 / delay)
-    if (maxAttempts <= 0) {
-      throw new FullstackTestingError(`invalid timeoutSeconds '${timeoutSeconds}'. maxAttempts calculated to be negative or zero`)
+    timeout = Number.parseInt(`${timeout}`)
+    if (timeout <= 0 || timeout < delay) {
+      throw new FullstackTestingError(`invalid timeout '${timeout}' and delay '${delay}'`)
     }

+    const maxAttempts = Math.round(timeout / delay)
+
+    this.logger.debug(`WaitForPod [${fieldSelector}, ${labelSelector}], maxAttempts: ${maxAttempts}`)
+
+    // wait for the pod to be available with the given status and labels
     for (let attempts = 0; attempts < maxAttempts; attempts++) {
+      this.logger.debug(`Checking for pod ${fieldSelector}, ${labelSelector} [attempt: ${attempts}/${maxAttempts}]`)
       const resp = await this.kubeClient.listNamespacedPod(
         ns,
         false,
@@ -465,15 +474,15 @@ export class Kubectl2 {
         labelSelector
       )

-      const found = resp.body && resp.body.items && resp.body.items.length
-      if (found) {
+      if (resp.body && resp.body.items && resp.body.items.length === podCount) {
+        this.logger.debug(`Found ${resp.body.items.length} pod with ${fieldSelector}, ${labelSelector} [attempt: ${attempts}/${maxAttempts}]`)
         return true
       }

       await sleep(delay)
     }

-    throw new FullstackTestingError('pod not found')
+    throw new FullstackTestingError(`Expected number of pod (${podCount}) not found ${fieldSelector} ${labelSelector} [maxAttempts = ${maxAttempts}]`)
   }

   _getNamespace () {
diff --git a/fullstack-network-manager/src/index.mjs b/fullstack-network-manager/src/index.mjs
index aaadf7d7e..865628447 100644
--- a/fullstack-network-manager/src/index.mjs
+++ b/fullstack-network-manager/src/index.mjs
@@ -16,6 +16,7 @@ import {
   logging
 } from './core/index.mjs'
 import 'dotenv/config'
+import { Kubectl2 } from './core/kubectl2.mjs'

 export function main (argv) {
   const logger = logging.NewLogger('debug')
@@ -28,12 +29,14 @@ export function main (argv) {
   const configManager = new ConfigManager(logger)
   const depManager = new DependencyManager(logger)
   const clusterManager = new ClusterManager(kind, kubectl)
+  const kubectl2 = new Kubectl2(configManager, logger)

   const opts = {
     logger,
     kind,
     helm,
     kubectl,
+    kubectl2,
     downloader,
     platformInstaller,
     chartManager,
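
For clarity, a minimal usage sketch of the new waitForPod contract introduced above. Assumptions are marked in the comments: the node ID list is hypothetical (it would normally come from the flags.nodeIDs prompt), configManager and logger are assumed to exist as wired in index.mjs, and constants.POD_STATUS_RUNNING is assumed to resolve to the string 'Running' used in the status.phase field selector.

// Sketch only, not part of the change set.
import { Kubectl2 } from './core/kubectl2.mjs'

const kubectl2 = new Kubectl2(configManager, logger) // instances created elsewhere, as in index.mjs

const nodeIds = ['node0', 'node1', 'node2'] // hypothetical; normally prompted via flags.nodeIDs
const timeout = 900 // seconds, as stored in config.timeout by prepareConfig

// Poll once per second until every network-node pod reports status.phase=Running,
// giving up after timeout * 1000 milliseconds.
await kubectl2.waitForPod(
  'Running',                                  // constants.POD_STATUS_RUNNING in chart.mjs
  ['fullstack.hedera.com/type=network-node'], // label selector entries, joined with ','
  nodeIds.length,                             // podCount: one pod expected per node
  timeout * 1000,                             // timeout in milliseconds
  1000                                        // delay between checks in milliseconds
)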