fix: replace old kubectl with kubectl2 in chart command
Signed-off-by: Lenin Mehedy <lenin.mehedy@swirldslabs.com>
leninmehedy committed Jan 19, 2024
1 parent f24f9d3 commit 1415260
Showing 4 changed files with 35 additions and 22 deletions.
2 changes: 2 additions & 0 deletions fullstack-network-manager/src/commands/base.mjs
@@ -20,6 +20,7 @@ export class BaseCommand extends ShellRunner {
if (!opts || !opts.logger) throw new Error('An instance of core/Logger is required')
if (!opts || !opts.helm) throw new Error('An instance of core/Helm is required')
if (!opts || !opts.kubectl) throw new Error('An instance of core/Kubectl is required')
if (!opts || !opts.kubectl2) throw new Error('An instance of core/Kubectl2 is required')
if (!opts || !opts.chartManager) throw new Error('An instance of core/ChartManager is required')
if (!opts || !opts.configManager) throw new Error('An instance of core/ConfigManager is required')
if (!opts || !opts.depManager) throw new Error('An instance of core/DependencyManager is required')
@@ -29,6 +30,7 @@ export class BaseCommand extends ShellRunner {

this.helm = opts.helm
this.kubectl = opts.kubectl
this.kubectl2 = opts.kubectl2
this.chartManager = opts.chartManager
this.configManager = opts.configManager
this.depManager = opts.depManager
25 changes: 12 additions & 13 deletions fullstack-network-manager/src/commands/chart.mjs
@@ -60,6 +60,7 @@ export class ChartCommand extends BaseCommand {
async prepareConfig (task, argv) {
this.configManager.load(argv)
const namespace = this.configManager.flagValue(flags.namespace)
const nodeIds = this.configManager.flagValue(flags.nodeIDs)
const chartDir = this.configManager.flagValue(flags.chartDirectory)
const valuesFile = this.configManager.flagValue(flags.valuesFile)
const deployMirrorNode = this.configManager.flagValue(flags.deployMirrorNode)
@@ -74,6 +75,7 @@
// prompt if values are missing and create a config object
const config = {
namespace: await prompts.promptNamespaceArg(task, namespace),
nodeIds: await prompts.promptNodeIdsArg(task, nodeIds),
chartDir: await prompts.promptChartDir(task, chartDir),
valuesFile: await prompts.promptChartDir(task, valuesFile),
deployMirrorNode: await prompts.promptDeployMirrorNode(task, deployMirrorNode),
@@ -84,7 +86,7 @@
enableHederaExplorerTls: await prompts.promptEnableHederaExplorerTls(task, enableHederaExplorerTls),
acmeClusterIssuer: await prompts.promptAcmeClusterIssuer(task, acmeClusterIssuer),
selfSignedClusterIssuer: await prompts.promptSelfSignedClusterIssuer(task, selfSignedClusterIssuer),
timeout: '900s',
timeout: 900,
version: this.configManager.getVersion()
}
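For orientation, a rough sketch of the config shape this now produces; the concrete values below are illustrative assumptions, not taken from the commit:

// Hypothetical example of a prepared config (values invented for illustration)
const exampleConfig = {
  namespace: 'fullstack',
  nodeIds: ['node0', 'node1', 'node2'], // promptNodeIdsArg is assumed to return an array; its length later drives the expected pod count
  deployMirrorNode: true,
  timeout: 900, // now a plain number of seconds rather than the kubectl-style string '900s'
  version: '0.21.0' // hypothetical value from configManager.getVersion()
}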

@@ -124,12 +126,10 @@
{
title: 'Waiting for network pods to be ready',
task: async (ctx, _) => {
const timeout = ctx.config.timeout || '900s'
await this.kubectl.wait('pod',
'--for=jsonpath=\'{.status.phase}\'=Running',
'-l fullstack.hedera.com/type=network-node',
`--timeout=${timeout}`
)
const timeout = ctx.config.timeout || 900
await this.kubectl2.waitForPod(constants.POD_STATUS_RUNNING, [
'fullstack.hedera.com/type=network-node'
], ctx.config.nodeIds.length, timeout * 1000, 1000)
}
}
], {
@@ -203,12 +203,10 @@
{
title: 'Waiting for network pods to be ready',
task: async (ctx, _) => {
const timeout = ctx.config.timeout || '900s'
await this.kubectl.wait('pod',
'--for=jsonpath=\'{.status.phase}\'=Running',
'-l fullstack.hedera.com/type=network-node',
`--timeout=${timeout}`
)
const timeout = ctx.config.timeout || 900
await this.kubectl2.waitForPod(constants.POD_STATUS_RUNNING, [
'fullstack.hedera.com/type=network-node'
], ctx.config.nodeIds.length, timeout * 1000, 1000)
}
}
], {
@@ -237,6 +235,7 @@
builder: y => {
flags.setCommandFlags(y,
flags.namespace,
flags.nodeIDs,
flags.deployMirrorNode,
flags.deployHederaExplorer,
flags.deployJsonRpcRelay,
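Both 'Waiting for network pods to be ready' tasks above now go through kubectl2.waitForPod instead of shelling out to kubectl wait. Under the five-parameter signature defined in kubectl2.mjs below (status, labels, podCount, timeout in milliseconds, delay in milliseconds), the call maps onto the parameters as in this sketch; the named locals are added here purely for readability and are not part of the commit:

// Readability sketch of the chart command's waitForPod call
const podCount = ctx.config.nodeIds.length            // one network-node pod expected per configured node ID
const timeoutMs = (ctx.config.timeout || 900) * 1000  // config timeout is in seconds; waitForPod expects milliseconds
const delayMs = 1000                                   // poll once per second
await this.kubectl2.waitForPod(
  constants.POD_STATUS_RUNNING,
  ['fullstack.hedera.com/type=network-node'],
  podCount, timeoutMs, delayMs
)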
27 changes: 18 additions & 9 deletions fullstack-network-manager/src/core/kubectl2.mjs
@@ -24,6 +24,10 @@ export class Kubectl2 {
init () {
this.kubeConfig = new k8s.KubeConfig()
this.kubeConfig.loadFromDefault()

if (!this.kubeConfig.getCurrentContext()) throw new FullstackTestingError('No active context!')
if (!this.kubeConfig.getCurrentCluster()) throw new FullstackTestingError('No active cluster!')

this.kubeClient = this.kubeConfig.makeApiClient(k8s.CoreV1Api)
this.kubeCopy = new k8s.Cp(this.kubeConfig)
}
@@ -440,22 +444,27 @@ export class Kubectl2 {
* Wait for pod
* @param status phase of the pod
* @param labels pod labels
* @param timeoutSeconds timeout in seconds
* @param podCount number of pod expected
* @param timeout timeout in milliseconds
* @param delay delay between checks in milliseconds
* @return {Promise<boolean>}
*/
async waitForPod (status = 'Running', labels = [], timeoutSeconds = 1) {
async waitForPod (status = 'Running', labels = [], podCount = 1, timeout = 1000, delay = 200) {
const ns = this._getNamespace()
const fieldSelector = `status.phase=${status}`
const labelSelector = labels.join(',')

const delay = 200
const maxAttempts = Math.round(timeoutSeconds * 1000 / delay)
if (maxAttempts <= 0) {
throw new FullstackTestingError(`invalid timeoutSeconds '${timeoutSeconds}'. maxAttempts calculated to be negative or zero`)
timeout = Number.parseInt(`${timeout}`)
if (timeout <= 0 || timeout < delay) {
throw new FullstackTestingError(`invalid timeout '${timeout}' and delay '${delay}'`)
}

const maxAttempts = Math.round(timeout / delay)
this.logger.debug(`WaitForPod [${fieldSelector}, ${labelSelector}], maxAttempts: ${maxAttempts}`)

// wait for the pod to be available with the given status and labels
for (let attempts = 0; attempts < maxAttempts; attempts++) {
this.logger.debug(`Checking for pod ${fieldSelector}, ${labelSelector} [attempt: ${attempts}/${maxAttempts}]`)
const resp = await this.kubeClient.listNamespacedPod(
ns,
false,
@@ -465,15 +474,15 @@
labelSelector
)

const found = resp.body && resp.body.items && resp.body.items.length
if (found) {
if (resp.body && resp.body.items && resp.body.items.length === podCount) {
this.logger.debug(`Found ${resp.body.items.length} pod with ${fieldSelector}, ${labelSelector} [attempt: ${attempts}/${maxAttempts}]`)
return true
}

await sleep(delay)
}

throw new FullstackTestingError('pod not found')
throw new FullstackTestingError(`Expected number of pod (${podCount}) not found ${fieldSelector} ${labelSelector} [maxAttempts = ${maxAttempts}]`)
}

_getNamespace () {
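For reference, a minimal standalone usage sketch of the reworked waitForPod; the pod count and label are illustrative, and init() is called explicitly here because the diff does not show whether the constructor does so:

import { Kubectl2 } from './core/kubectl2.mjs'

// Assumes configManager and logger instances wired up as in src/index.mjs
const kubectl2 = new Kubectl2(configManager, logger)
kubectl2.init() // throws FullstackTestingError if the kubeconfig has no active context or cluster

// Wait up to 15 minutes (900 * 1000 ms), polling every second, for 3 Running pods
// carrying the network-node label; throws FullstackTestingError if they never all appear.
await kubectl2.waitForPod('Running', [
  'fullstack.hedera.com/type=network-node'
], 3, 900 * 1000, 1000)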
3 changes: 3 additions & 0 deletions fullstack-network-manager/src/index.mjs
@@ -16,6 +16,7 @@ import {
logging
} from './core/index.mjs'
import 'dotenv/config'
import { Kubectl2 } from './core/kubectl2.mjs'

export function main (argv) {
const logger = logging.NewLogger('debug')
@@ -28,12 +29,14 @@ export function main (argv) {
const configManager = new ConfigManager(logger)
const depManager = new DependencyManager(logger)
const clusterManager = new ClusterManager(kind, kubectl)
const kubectl2 = new Kubectl2(configManager, logger)

const opts = {
logger,
kind,
helm,
kubectl,
kubectl2,
downloader,
platformInstaller,
chartManager,
