diff --git a/src/commands/cluster/tasks.ts b/src/commands/cluster/tasks.ts index 224b91de9..9ac2f23d1 100644 --- a/src/commands/cluster/tasks.ts +++ b/src/commands/cluster/tasks.ts @@ -29,7 +29,6 @@ import {SoloError} from '../../core/errors.js'; import {RemoteConfigManager} from '../../core/config/remote/remote_config_manager.js'; import type {RemoteConfigDataWrapper} from '../../core/config/remote/remote_config_data_wrapper.js'; import type {K8} from '../../core/k8.js'; -import type {Cluster} from '@kubernetes/client-node/dist/config_types.js'; import type {SoloListrTask, SoloListrTaskWrapper} from '../../types/index.js'; import type {SelectClusterContextContext} from './configs.js'; import type {Namespace} from '../../core/config/remote/types.js'; @@ -72,7 +71,7 @@ export class ClusterCommandTasks { validateRemoteConfigForCluster( cluster: string, - currentCluster: Cluster, + currentClusterName: string, localConfig: LocalConfig, currentRemoteConfig: RemoteConfigDataWrapper, ) { @@ -84,7 +83,7 @@ export class ClusterCommandTasks { self.parent.getK8().setCurrentContext(context); const remoteConfigFromOtherCluster = await self.parent.getRemoteConfigManager().get(); if (!RemoteConfigManager.compare(currentRemoteConfig, remoteConfigFromOtherCluster)) { - throw new SoloError(ErrorMessages.REMOTE_CONFIGS_DO_NOT_MATCH(currentCluster.name, cluster)); + throw new SoloError(ErrorMessages.REMOTE_CONFIGS_DO_NOT_MATCH(currentClusterName, cluster)); } }, }; @@ -96,7 +95,6 @@ export class ClusterCommandTasks { title: 'Read clusters from remote config', task: async (ctx, task) => { const localConfig = this.parent.getLocalConfig(); - const currentCluster = this.parent.getK8().getCurrentCluster(); const currentClusterName = this.parent.getK8().getCurrentClusterName(); const currentRemoteConfig: RemoteConfigDataWrapper = await this.parent.getRemoteConfigManager().get(); const subTasks = []; @@ -110,7 +108,9 @@ export class ClusterCommandTasks { // Pull and validate RemoteConfigs from the other clusters for (const cluster of otherRemoteConfigClusters) { - subTasks.push(self.validateRemoteConfigForCluster(cluster, currentCluster, localConfig, currentRemoteConfig)); + subTasks.push( + self.validateRemoteConfigForCluster(cluster, currentClusterName, localConfig, currentRemoteConfig), + ); } return task.newListr(subTasks, { @@ -196,12 +196,8 @@ export class ClusterCommandTasks { } private async promptForContext(task: SoloListrTaskWrapper, cluster: string) { - const kubeContexts = this.parent.getK8().getContexts(); - return flags.context.prompt( - task, - kubeContexts.map(c => c.name), - cluster, - ); + const kubeContexts = this.parent.getK8().getContextNames(); + return flags.context.prompt(task, kubeContexts, cluster); } private async selectContextForFirstCluster( @@ -366,8 +362,8 @@ export class ClusterCommandTasks { getClusterInfo() { return new Task('Get cluster info', async (ctx: any, task: ListrTaskWrapper) => { try { - const cluster = this.parent.getK8().getCurrentCluster(); - this.parent.logger.showJSON(`Cluster Information (${cluster.name})`, cluster); + const clusterName = this.parent.getK8().getCurrentClusterName(); + this.parent.logger.showUser(`Cluster Name (${clusterName})`); this.parent.logger.showUser('\n'); } catch (e: Error | unknown) { this.parent.logger.showUserError(e); diff --git a/src/commands/deployment.ts b/src/commands/deployment.ts index 2f7c888f3..139dcc7e7 100644 --- a/src/commands/deployment.ts +++ b/src/commands/deployment.ts @@ -53,17 +53,18 @@ export class DeploymentCommand 
extends BaseCommand { private async create(argv: any): Promise { const self = this; - interface Config { + interface DeploymentCreateConfig { context: string; namespace: Namespace; contextClusterUnparsed: string; contextCluster: ContextClusterStructure; } - interface Context { - config: Config; + + interface DeploymentCreateContext { + config: DeploymentCreateConfig; } - const tasks = new Listr( + const tasks = new Listr( [ { title: 'Initialize', @@ -80,7 +81,7 @@ export class DeploymentCommand extends BaseCommand { ctx.config = { contextClusterUnparsed: self.configManager.getFlag(flags.contextClusterUnparsed), namespace: self.configManager.getFlag(flags.namespace), - } as Config; + } as DeploymentCreateConfig; ctx.config.contextCluster = Templates.parseContextCluster(ctx.config.contextClusterUnparsed); @@ -110,7 +111,7 @@ export class DeploymentCommand extends BaseCommand { { title: 'Validate cluster connections', task: async (ctx, task) => { - const subTasks: SoloListrTask[] = []; + const subTasks: SoloListrTask[] = []; for (const context of Object.keys(ctx.config.contextCluster)) { const cluster = ctx.config.contextCluster[context]; diff --git a/src/commands/explorer.ts b/src/commands/explorer.ts index 366355c04..c7e24c480 100644 --- a/src/commands/explorer.ts +++ b/src/commands/explorer.ts @@ -21,7 +21,7 @@ import * as constants from '../core/constants.js'; import {type ProfileManager} from '../core/profile_manager.js'; import {BaseCommand} from './base.js'; import {Flags as flags} from './flags.js'; -import {RemoteConfigTasks} from '../core/config/remote/remote_config_tasks.js'; +import {ListrRemoteConfig} from '../core/config/remote/listr_config_tasks.js'; import {type CommandBuilder} from '../types/aliases.js'; import {type Opts} from '../types/command_types.js'; import {ListrLease} from '../core/lease/listr_lease.js'; @@ -191,7 +191,7 @@ export class ExplorerCommand extends BaseCommand { return ListrLease.newAcquireLeaseTask(lease, task); }, }, - RemoteConfigTasks.loadRemoteConfig.bind(this)(argv), + ListrRemoteConfig.loadRemoteConfig.bind(this)(argv), { title: 'Upgrade solo-setup chart', task: async ctx => { @@ -355,7 +355,7 @@ export class ExplorerCommand extends BaseCommand { return ListrLease.newAcquireLeaseTask(lease, task); }, }, - RemoteConfigTasks.loadRemoteConfig.bind(this)(argv), + ListrRemoteConfig.loadRemoteConfig.bind(this)(argv), { title: 'Destroy explorer', task: async ctx => { diff --git a/src/commands/mirror_node.ts b/src/commands/mirror_node.ts index d7a60335a..e20ab0800 100644 --- a/src/commands/mirror_node.ts +++ b/src/commands/mirror_node.ts @@ -55,11 +55,18 @@ interface MirrorNodeDeployConfigClass { storageBucket: string; } -interface Context { +interface MirrorNodeDeployContext { config: MirrorNodeDeployConfigClass; addressBook: string; } +interface MirrorNodeDestroyContext { + config: { + namespace: string; + isChartInstalled: boolean; + }; +} + export class MirrorNodeCommand extends BaseCommand { private readonly accountManager: AccountManager; private readonly profileManager: ProfileManager; @@ -142,7 +149,7 @@ export class MirrorNodeCommand extends BaseCommand { const self = this; const lease = await self.leaseManager.create(); - const tasks = new Listr( + const tasks = new Listr( [ { title: 'Initialize', @@ -222,7 +229,7 @@ export class MirrorNodeCommand extends BaseCommand { { title: 'Enable mirror-node', task: (_, parentTask) => { - return parentTask.newListr( + return parentTask.newListr( [ { title: 'Prepare address book', @@ -425,14 +432,7 @@ export 
class MirrorNodeCommand extends BaseCommand { const self = this; const lease = await self.leaseManager.create(); - interface Context { - config: { - namespace: string; - isChartInstalled: boolean; - }; - } - - const tasks = new Listr( + const tasks = new Listr( [ { title: 'Initialize', @@ -570,7 +570,7 @@ export class MirrorNodeCommand extends BaseCommand { } /** Removes the mirror node components from remote config. */ - public removeMirrorNodeComponents(): SoloListrTask { + public removeMirrorNodeComponents(): SoloListrTask { return { title: 'Remove mirror node from remote config', skip: (): boolean => !this.remoteConfigManager.isLoaded(), @@ -583,7 +583,7 @@ export class MirrorNodeCommand extends BaseCommand { } /** Adds the mirror node components to remote config. */ - public addMirrorNodeComponents(): SoloListrTask<{config: {namespace: Namespace}}> { + public addMirrorNodeComponents(): SoloListrTask<{config: MirrorNodeDeployConfigClass; addressBook: string}> { return { title: 'Add mirror node to remote config', skip: (): boolean => !this.remoteConfigManager.isLoaded(), diff --git a/src/commands/network.ts b/src/commands/network.ts index 2aead3e95..465e6aa33 100644 --- a/src/commands/network.ts +++ b/src/commands/network.ts @@ -81,6 +81,10 @@ export interface NetworkDeployConfigClass { googleCredential: string; } +interface NetworkDeployContext { + config: NetworkDeployConfigClass; +} + export class NetworkCommand extends BaseCommand { private readonly keyManager: KeyManager; private readonly platformInstaller: PlatformInstaller; @@ -495,11 +499,7 @@ export class NetworkCommand extends BaseCommand { const self = this; const lease = await self.leaseManager.create(); - interface Context { - config: NetworkDeployConfigClass; - } - - const tasks = new Listr( + const tasks = new Listr( [ { title: 'Initialize', @@ -952,7 +952,7 @@ export class NetworkCommand extends BaseCommand { } /** Adds the consensus node, envoy and haproxy components to remote config. 
*/ - public addNodesAndProxies(): SoloListrTask<{config: {namespace: Namespace; nodeAliases: NodeAliases}}> { + public addNodesAndProxies(): SoloListrTask { return { title: 'Add node and proxies to remote config', skip: (): boolean => !this.remoteConfigManager.isLoaded(), diff --git a/src/core/config/local_config.ts b/src/core/config/local_config.ts index e9b465e93..b7b39ca0f 100644 --- a/src/core/config/local_config.ts +++ b/src/core/config/local_config.ts @@ -208,12 +208,8 @@ export class LocalConfig implements LocalConfigData { if (!isQuiet) { const promptedContexts: string[] = []; for (const cluster of parsedClusters) { - const kubeContexts = k8.getContexts(); - const context: string = await flags.context.prompt( - task, - kubeContexts.map(c => c.name), - cluster, - ); + const kubeContexts = k8.getContextNames(); + const context: string = await flags.context.prompt(task, kubeContexts, cluster); self.clusterContextMapping[cluster] = context; promptedContexts.push(context); diff --git a/src/core/config/remote/remote_config_manager.ts b/src/core/config/remote/remote_config_manager.ts index a905dd8b9..477182c6b 100644 --- a/src/core/config/remote/remote_config_manager.ts +++ b/src/core/config/remote/remote_config_manager.ts @@ -29,18 +29,13 @@ import {SoloLogger} from '../../logging.js'; import {ConfigManager} from '../../config_manager.js'; import {LocalConfig} from '../local_config.js'; import type {DeploymentStructure} from '../local_config_data.js'; -import {type ContextClusterStructure} from '../../../types/config_types.js'; -import {type EmptyContextConfig, type Optional, type SoloListrTask} from '../../../types/index.js'; +import {type Optional} from '../../../types/index.js'; import type * as k8s from '@kubernetes/client-node'; import {StatusCodes} from 'http-status-codes'; import {inject, injectable} from 'tsyringe-neo'; import {patchInject} from '../../container_helper.js'; import {ErrorMessages} from '../../error_messages.js'; -interface ListrContext { - config: {contextCluster: ContextClusterStructure}; -} - /** * Uses Kubernetes ConfigMaps to manage the remote configuration data by creating, loading, modifying, * and saving the configuration data to and from a Kubernetes cluster. 
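The command changes earlier in this diff (deployment.ts, mirror_node.ts, network.ts) follow one pattern: file-local Config/Context interfaces are renamed to command-specific ones (DeploymentCreateContext, MirrorNodeDeployContext, MirrorNodeDestroyContext, NetworkDeployContext) and, where they were declared inside a method, hoisted to module scope, so the named context can be used as the type parameter for the corresponding Listr and SoloListrTask declarations. A minimal, self-contained TypeScript sketch of what that pattern buys (compile-time checking of ctx.config fields) follows; the ExampleCreateContext interface, task titles, and field values are illustrative only and are not taken from this diff.

import {Listr} from 'listr2';

// Hypothetical command-scoped Listr context, mirroring the shape of the
// named context interfaces introduced in this diff.
interface ExampleCreateContext {
  config: {
    namespace: string;
    clusterNames: string[];
  };
}

const tasks = new Listr<ExampleCreateContext>(
  [
    {
      title: 'Initialize',
      task: ctx => {
        // ctx is strongly typed, so misspelled config fields fail at compile time
        ctx.config = {namespace: 'solo-e2e', clusterNames: ['cluster-1']};
      },
    },
    {
      title: 'Validate cluster connections',
      task: ctx => {
        console.log(`validating ${ctx.config.clusterNames.length} cluster(s) in ${ctx.config.namespace}`);
      },
    },
  ],
  {concurrent: false},
);

await tasks.run();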
diff --git a/src/core/k8.ts b/src/core/k8.ts index 6d0fc23fb..d611109eb 100644 --- a/src/core/k8.ts +++ b/src/core/k8.ts @@ -15,40 +15,33 @@ * */ import * as k8s from '@kubernetes/client-node'; +import {type Context, type V1Lease, V1ObjectMeta, type V1Pod, V1Secret} from '@kubernetes/client-node'; import fs from 'fs'; import net from 'net'; import os from 'os'; import path from 'path'; import {Flags as flags} from '../commands/flags.js'; -import {SoloError, IllegalArgumentError, MissingArgumentError} from './errors.js'; +import {IllegalArgumentError, MissingArgumentError, SoloError} from './errors.js'; import * as tar from 'tar'; import {v4 as uuid4} from 'uuid'; -import {type V1Lease, V1ObjectMeta, V1Secret, type Context, type V1Pod} from '@kubernetes/client-node'; import * as stream from 'node:stream'; import type * as http from 'node:http'; import type * as WebSocket from 'ws'; import {getReasonPhrase, StatusCodes} from 'http-status-codes'; import {sleep} from './helpers.js'; import * as constants from './constants.js'; +import {HEDERA_HAPI_PATH, ROOT_CONTAINER, SOLO_LOGS_DIR} from './constants.js'; import {ConfigManager} from './config_manager.js'; import {SoloLogger} from './logging.js'; import {type PodName, type TarCreateFilter} from '../types/aliases.js'; import type {ExtendedNetServer, LocalContextObject, Optional} from '../types/index.js'; -import {HEDERA_HAPI_PATH, ROOT_CONTAINER, SOLO_LOGS_DIR} from './constants.js'; import {Duration} from './time/duration.js'; import {inject, injectable} from 'tsyringe-neo'; import {patchInject} from './container_helper.js'; import type {Namespace} from './config/remote/types.js'; -import {type Cluster} from '@kubernetes/client-node/dist/config_types.js'; - -interface TDirectoryData { - directory: boolean; - owner: string; - group: string; - size: string; - modifiedAt: string; - name: string; -} +import type TK8 from './kube/tk8.js'; +import type TDirectoryData from './kube/t_directory_data.js'; +import {type Namespaces} from './kube/namespaces.js'; /** * A kubernetes API wrapper class providing custom functionalities required by solo @@ -56,9 +49,10 @@ interface TDirectoryData { * Note: Take care if the same instance is used for parallel execution, as the behaviour may be unpredictable. * For parallel execution, create separate instances by invoking clone() */ +// TODO rename to K8Client and move to kube folder @injectable() -export class K8 { - private _cachedContexts: Context[]; +export class K8 implements TK8 { + private cachedContexts: Context[]; static PodReadyCondition = new Map().set( constants.POD_CONDITION_READY, @@ -71,15 +65,25 @@ export class K8 { constructor( @inject(ConfigManager) private readonly configManager?: ConfigManager, - @inject(SoloLogger) public readonly logger?: SoloLogger, + @inject(SoloLogger) private readonly logger?: SoloLogger, ) { this.configManager = patchInject(configManager, ConfigManager, this.constructor.name); this.logger = patchInject(logger, SoloLogger, this.constructor.name); this.init(); - } - - init() { + // const k8 : K8 (interface) = K8Factory.getK8('cluster-123'); + // k8.pod.execInstance(....); + // TODO how do we want to do Pod/K8Pod? to specify the pod? team is leaning towards just providing podName as the + // - first parameter versus a new instance. Is there another good option? + // pod = new K8Pod(kubeConfig, kubeClient, pods, ...); + // private pod: Pod = new K8Pod(kubeConfig, kubeClient, pods : Pods (interface), namespaces : Namespaces (this is interface), ...); + // now in pod.... 
+ // need to list namespaces + // this(pod).namespaces.list(); + } + + // TODO make private, but first we need to require a cluster to be set and address the test cases using this + init(): TK8 { this.kubeConfig = new k8s.KubeConfig(); this.kubeConfig.loadFromDefault(); @@ -98,13 +102,17 @@ export class K8 { return this; // to enable chaining } + namespaces(): Namespaces { + return null; + } + /** * Apply filters to metadata * @param items - list of items * @param [filters] - an object with metadata fields and value * @returns a list of items that match the filters */ - applyMetadataFilter(items: (object | any)[], filters: Record = {}) { + private applyMetadataFilter(items: (object | any)[], filters: Record = {}) { if (!filters) throw new MissingArgumentError('filters are required'); const matched = []; @@ -135,7 +143,7 @@ export class K8 { * @param items - list of items * @param [filters] - an object with metadata fields and value */ - filterItem(items: (object | any)[], filters: Record = {}) { + private filterItem(items: (object | any)[], filters: Record = {}) { const filtered = this.applyMetadataFilter(items, filters); if (filtered.length > 1) throw new SoloError('multiple items found with filters', {filters}); return filtered[0]; @@ -154,6 +162,7 @@ export class K8 { const resp = await this.kubeClient.createNamespace(payload); return resp.response.statusCode === StatusCodes.CREATED; + // return this.namespaces().create(name); } /** @@ -194,7 +203,7 @@ export class K8 { * @param name - podName name */ async getPodByName(name: string): Promise { - const ns = this._getNamespace(); + const ns = this.getNamespace(); const fieldSelector = `metadata.name=${name}`; const resp = await this.kubeClient.listNamespacedPod( ns, @@ -218,7 +227,7 @@ export class K8 { * @param labels - list of labels */ async getPodsByLabel(labels: string[] = []) { - const ns = this._getNamespace(); + const ns = this.getNamespace(); const labelSelector = labels.join(','); const result = await this.kubeClient.listNamespacedPod( ns, @@ -242,7 +251,7 @@ export class K8 { * @param labels - list of labels */ async getSecretsByLabel(labels: string[] = []) { - const ns = this._getNamespace(); + const ns = this.getNamespace(); const labelSelector = labels.join(','); const result = await this.kubeClient.listNamespacedSecret( ns, @@ -277,7 +286,7 @@ export class K8 { * @param name - svc name */ async getSvcByName(name: string): Promise { - const ns = this._getNamespace(); + const ns = this.getNamespace(); const fieldSelector = `metadata.name=${name}`; const resp = await this.kubeClient.listNamespacedService( ns, @@ -296,20 +305,6 @@ export class K8 { return this.filterItem(resp.body.items, {name}); } - /** - * Get cluster IP of a service - * @param svcName - name of the service - * @returns cluster IP - */ - async getClusterIP(svcName: string) { - const svc = await this.getSvcByName(svcName); - if (svc && svc.spec && svc.spec.clusterIP) { - return svc.spec.clusterIP; - } - - throw new SoloError(`unable to find cluster IP for svc: ${svcName}`); - } - /** * Get a list of clusters * @returns a list of cluster names @@ -337,12 +332,12 @@ export class K8 { return contexts; } - getContexts(): Context[] { - if (!this._cachedContexts) { - this._cachedContexts = this.kubeConfig.getContexts(); + private getContexts(): Context[] { + if (!this.cachedContexts) { + this.cachedContexts = this.kubeConfig.getContexts(); } - return this._cachedContexts; + return this.cachedContexts; } /** @@ -365,6 +360,12 @@ export class K8 { * @returns a 
promise that returns array of directory entries, custom object */ async listDir(podName: PodName, containerName: string, destPath: string) { + // TODO ask Nathan about should podName just be the first parameter in the function versus creating an instance of K8Pod and supplying it in the constructor? + // - flyweight pattern, just pass the podName: https://chatgpt.com/share/67927ea7-d614-8006-b059-4947e8ac90ca + // return pod(podName).listDir(containerName, destPath); + // below implementation moves to K8Pod class + // current usage would still compile. + // future refactor to remove this K8.listDir method and update its usage from k8.listDir to k8.pod(podName).listDir(...) try { const output = (await this.execContainer(podName, containerName, ['ls', '-la', destPath])) as string; if (!output) return []; @@ -474,7 +475,7 @@ export class K8 { return this.execContainer(podName, containerName, ['bash', '-c', 'mkdir -p "' + destPath + '"']); } - exitWithError(localContext: LocalContextObject, errorMessage: string) { + private exitWithError(localContext: LocalContextObject, errorMessage: string) { localContext.errorMessage = localContext.errorMessage ? `${localContext.errorMessage}:${errorMessage}` : errorMessage; @@ -482,24 +483,28 @@ export class K8 { return localContext.reject(new SoloError(localContext.errorMessage)); } - handleCallback(status: string, localContext: LocalContextObject, messagePrefix: string) { + private handleCallback(status: string, localContext: LocalContextObject, messagePrefix: string) { if (status === 'Failure') { return this.exitWithError(localContext, `${messagePrefix} Failure occurred`); } this.logger.debug(`${messagePrefix} callback(status)=${status}`); } - registerConnectionOnError(localContext: LocalContextObject, messagePrefix: string, conn: WebSocket.WebSocket) { + private registerConnectionOnError( + localContext: LocalContextObject, + messagePrefix: string, + conn: WebSocket.WebSocket, + ) { conn.on('error', e => { return this.exitWithError(localContext, `${messagePrefix} failed, connection error: ${e.message}`); }); } - registerConnectionOnMessage(messagePrefix: string) { + private registerConnectionOnMessage(messagePrefix: string) { this.logger.debug(`${messagePrefix} received message`); } - registerErrorStreamOnData(localContext: LocalContextObject, stream: stream.PassThrough) { + private registerErrorStreamOnData(localContext: LocalContextObject, stream: stream.PassThrough) { stream.on('data', data => { localContext.errorMessage = localContext.errorMessage ? 
`${localContext.errorMessage}${data.toString()}` @@ -507,7 +512,7 @@ export class K8 { }); } - registerErrorStreamOnError( + private registerErrorStreamOnError( localContext: LocalContextObject, messagePrefix: string, stream: stream.PassThrough | fs.WriteStream, @@ -517,7 +522,7 @@ export class K8 { }); } - registerOutputPassthroughStreamOnData( + private registerOutputPassthroughStreamOnData( localContext: LocalContextObject, messagePrefix: string, outputPassthroughStream: stream.PassThrough, @@ -533,7 +538,7 @@ export class K8 { }); } - registerOutputFileStreamOnDrain( + private registerOutputFileStreamOnDrain( localContext: LocalContextObject, messagePrefix: string, outputPassthroughStream: stream.PassThrough, @@ -565,7 +570,7 @@ export class K8 { filter: TarCreateFilter | undefined = undefined, ) { const self = this; - const namespace = this._getNamespace(); + const namespace = this.getNamespace(); const guid = uuid4(); const messagePrefix = `copyTo[${podName},${guid}]: `; @@ -587,7 +592,7 @@ export class K8 { const srcDir = path.dirname(srcPath); // Create a temporary tar file for the source file - const tmpFile = self._tempFileFor(srcFile); + const tmpFile = self.tempFileFor(srcFile); await tar.c({file: tmpFile, cwd: srcDir, filter}, [srcFile]); @@ -630,7 +635,7 @@ export class K8 { // Cleanup temp file after successful copy inputPassthroughStream.end(); // End the passthrough stream - self._deleteTempFile(tmpFile); // Cleanup temp file + self.deleteTempFile(tmpFile); // Cleanup temp file self.logger.info(`${messagePrefix} Successfully copied!`); return resolve(true); }); @@ -659,7 +664,7 @@ export class K8 { */ async copyFrom(podName: PodName, containerName: string, srcPath: string, destDir: string) { const self = this; - const namespace = self._getNamespace(); + const namespace = self.getNamespace(); const guid = uuid4(); const messagePrefix = `copyFrom[${podName},${guid}]: `; @@ -698,7 +703,7 @@ export class K8 { const destPath = path.join(destDir, srcFile); // download the tar file to a temp location - const tmpFile = self._tempFileFor(srcFile); + const tmpFile = self.tempFileFor(srcFile); return new Promise((resolve, reject) => { localContext.reject = reject; @@ -732,7 +737,7 @@ export class K8 { false, ({status}) => { if (status === 'Failure') { - self._deleteTempFile(tmpFile); + self.deleteTempFile(tmpFile); return self.exitWithError(localContext, `${messagePrefix} Failure occurred`); } self.logger.debug(`${messagePrefix} callback(status)=${status}`); @@ -742,7 +747,7 @@ export class K8 { localContext.connection = conn; conn.on('error', e => { - self._deleteTempFile(tmpFile); + self.deleteTempFile(tmpFile); return self.exitWithError(localContext, `${messagePrefix} failed, connection error: ${e.message}`); }); @@ -760,7 +765,7 @@ export class K8 { try { fs.copyFileSync(tmpFile, destPath); - self._deleteTempFile(tmpFile); + self.deleteTempFile(tmpFile); const stat = fs.statSync(destPath); if (stat && stat.size === srcFileSize) { @@ -799,7 +804,7 @@ export class K8 { */ async execContainer(podName: string, containerName: string, command: string | string[]) { const self = this; - const namespace = self._getNamespace(); + const namespace = self.getNamespace(); const guid = uuid4(); const messagePrefix = `execContainer[${podName},${guid}]:`; @@ -816,7 +821,7 @@ export class K8 { const localContext = {} as LocalContextObject; localContext.reject = reject; const execInstance = new k8s.Exec(self.kubeConfig); - const tmpFile = self._tempFileFor(`${podName}-output.txt`); + const tmpFile 
= self.tempFileFor(`${podName}-output.txt`); const outputFileStream = fs.createWriteStream(tmpFile); const outputPassthroughStream = new stream.PassThrough({highWaterMark: 10 * 1024 * 1024}); const errPassthroughStream = new stream.PassThrough(); @@ -885,7 +890,7 @@ export class K8 { async portForward(podName: PodName, localPort: number, podPort: number) { try { this.logger.debug(`Creating port-forwarder for ${podName}:${podPort} -> ${constants.LOCAL_HOST}:${localPort}`); - const ns = this._getNamespace(); + const ns = this.getNamespace(); const forwarder = new k8s.PortForward(this.kubeConfig, false); const server = (await net.createServer(socket => { forwarder.portForward(ns, podName, [podPort], socket, null, socket, 3); @@ -1009,7 +1014,7 @@ export class K8 { podItemPredicate?: (items: k8s.V1Pod) => boolean, namespace?: string, ): Promise { - const ns = namespace || this._getNamespace(); + const ns = namespace || this.getNamespace(); const labelSelector = labels.join(','); this.logger.info(`WaitForPod [labelSelector: ${labelSelector}, namespace:${ns}, maxAttempts: ${maxAttempts}]`); @@ -1210,7 +1215,8 @@ export class K8 { // --------------------------------------- Utility Methods --------------------------------------- // - public async testClusterConnection(context: string, cluster: string): Promise { + // TODO this can be removed once K8 is context/cluster specific when instantiating + async testClusterConnection(context: string, cluster: string): Promise { this.kubeConfig.setCurrentContext(context); const tempKubeClient = this.kubeConfig.makeApiClient(k8s.CoreV1Api); @@ -1322,8 +1328,8 @@ export class K8 { * @returns the configmap if found * @throws SoloError - if the response if not found or the response is not OK */ - public async getNamespacedConfigMap(name: string): Promise { - const {response, body} = await this.kubeClient.readNamespacedConfigMap(name, this._getNamespace()).catch(e => e); + async getNamespacedConfigMap(name: string): Promise { + const {response, body} = await this.kubeClient.readNamespacedConfigMap(name, this.getNamespace()).catch(e => e); this.handleKubernetesClientError(response, body, 'Failed to get namespaced configmap'); @@ -1335,12 +1341,12 @@ export class K8 { * @param labels - for the config metadata * @param data - to contain in the config */ - public async createNamespacedConfigMap( + async createNamespacedConfigMap( name: string, labels: Record, data: Record, ): Promise { - const namespace = this._getNamespace(); + const namespace = this.getNamespace(); const configMap = new k8s.V1ConfigMap(); configMap.data = data; @@ -1367,12 +1373,12 @@ export class K8 { * @param labels - for the config metadata * @param data - to contain in the config */ - public async replaceNamespacedConfigMap( + async replaceNamespacedConfigMap( name: string, labels: Record, data: Record, ): Promise { - const namespace = this._getNamespace(); + const namespace = this.getNamespace(); const configMap = new k8s.V1ConfigMap(); configMap.data = data; @@ -1394,7 +1400,7 @@ export class K8 { } } - public async deleteNamespacedConfigMap(name: string, namespace: string): Promise { + async deleteNamespacedConfigMap(name: string, namespace: string): Promise { try { const resp = await this.kubeClient.deleteNamespacedConfigMap(name, namespace); @@ -1487,7 +1493,7 @@ export class K8 { * Check if cert-manager is installed inside any namespace. 
* @returns if cert-manager is found */ - public async isCertManagerInstalled(): Promise { + async isCertManagerInstalled(): Promise { try { const pods = await this.kubeClient.listPodForAllNamespaces(undefined, undefined, undefined, 'app=cert-manager'); @@ -1503,7 +1509,7 @@ export class K8 { * Check if minio is installed inside the namespace. * @returns if minio is found */ - public async isMinioInstalled(namespace: Namespace): Promise { + async isMinioInstalled(namespace: Namespace): Promise { try { // TODO DETECT THE OPERATOR const pods = await this.kubeClient.listNamespacedPod( @@ -1527,7 +1533,7 @@ export class K8 { * Check if the ingress controller is installed inside any namespace. * @returns if ingress controller is found */ - public async isIngressControllerInstalled(): Promise { + async isIngressControllerInstalled(): Promise { try { const response = await this.networkingApi.listIngressClass(); @@ -1539,7 +1545,7 @@ export class K8 { } } - public async isRemoteConfigPresentInAnyNamespace() { + async isRemoteConfigPresentInAnyNamespace() { try { const configmaps = await this.kubeClient.listConfigMapForAllNamespaces( undefined, @@ -1556,7 +1562,7 @@ export class K8 { } } - public async isPrometheusInstalled(namespace: Namespace) { + async isPrometheusInstalled(namespace: Namespace) { try { const pods = await this.kubeClient.listNamespacedPod( namespace, @@ -1598,18 +1604,18 @@ export class K8 { throw new SoloError(errorMessage, errorMessage, {statusCode: statusCode}); } - private _getNamespace(): Namespace { + private getNamespace(): Namespace { const ns = this.configManager.getFlag(flags.namespace); if (!ns) throw new MissingArgumentError('namespace is not set'); return ns; } - private _tempFileFor(fileName: string) { + private tempFileFor(fileName: string) { const tmpFile = `${fileName}-${uuid4()}`; return path.join(os.tmpdir(), tmpFile); } - private _deleteTempFile(tmpFile: string) { + private deleteTempFile(tmpFile: string) { if (fs.existsSync(tmpFile)) { fs.rmSync(tmpFile); } @@ -1718,7 +1724,7 @@ export class K8 { return await Promise.all(promises); } - async getNodeState(pod: V1Pod, namespace: string) { + private async getNodeState(pod: V1Pod, namespace: string) { const podName = pod.metadata!.name as PodName; this.logger.debug(`getNodeState(${pod.metadata.name}): begin...`); const targetDir = path.join(SOLO_LOGS_DIR, namespace); @@ -1736,7 +1742,8 @@ export class K8 { this.logger.debug(`getNodeState(${pod.metadata.name}): ...end`); } - public setCurrentContext(context: string) { + // TODO make private once we are instantiating multiple K8 instances + setCurrentContext(context: string) { this.kubeConfig.setCurrentContext(context); // Reinitialize clients @@ -1744,19 +1751,15 @@ export class K8 { this.coordinationApiClient = this.kubeConfig.makeApiClient(k8s.CoordinationV1Api); } - public getCurrentContext(): string { + getCurrentContext(): string { return this.kubeConfig.getCurrentContext(); } - public getCurrentContextObject(): Context { - return this.kubeConfig.getContextObject(this.getCurrentContext()); - } - - public getCurrentCluster(): Cluster { - return this.kubeConfig.getCurrentCluster(); + getCurrentContextNamespace(): Namespace { + return this.kubeConfig.getContextObject(this.getCurrentContext()).namespace; } - public getCurrentClusterName(): string { + getCurrentClusterName(): string { const currentCluster = this.kubeConfig.getCurrentCluster(); if (!currentCluster) return ''; return currentCluster.name; diff --git a/src/core/kube/check.ts b/src/core/kube/check.ts 
new file mode 100644 index 000000000..abd39e616 --- /dev/null +++ b/src/core/kube/check.ts @@ -0,0 +1,25 @@ +/** + * Copyright (C) 2024 Hedera Hashgraph, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ +import {type Namespace} from '../config/remote/types.js'; + +export default interface check { + isCertManagerInstalled(): Promise; + isIngressControllerInstalled(): Promise; + isMinioInstalled(namespace: Namespace): Promise; + isPrometheusInstalled(namespace: Namespace): Promise; + isRemoteConfigPresentInAnyNamespace(): Promise; +} diff --git a/src/core/kube/cluster.ts b/src/core/kube/cluster.ts new file mode 100644 index 000000000..b2d30dbed --- /dev/null +++ b/src/core/kube/cluster.ts @@ -0,0 +1,20 @@ +/** + * Copyright (C) 2024 Hedera Hashgraph, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ +export default interface Cluster { + list(): Promise; + getCurrent(): Promise; +} diff --git a/src/core/kube/context.ts b/src/core/kube/context.ts new file mode 100644 index 000000000..a08282790 --- /dev/null +++ b/src/core/kube/context.ts @@ -0,0 +1,23 @@ +/** + * Copyright (C) 2024 Hedera Hashgraph, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ +export default interface Context { + list(): Promise; + getCurrent(): Promise; + getCurrentNamespace(): Promise; + // TODO delete this once we are instantiating multiple K8 instances + setCurrent(): Promise; +} diff --git a/src/core/kube/namespaces.ts b/src/core/kube/namespaces.ts new file mode 100644 index 000000000..4bb8f40a6 --- /dev/null +++ b/src/core/kube/namespaces.ts @@ -0,0 +1,30 @@ +/** + * Copyright (C) 2024 Hedera Hashgraph, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ +export interface Namespaces { + // TODO what should the name be if want to create multiple namespaces (theoretical and bad example), create multiple, should it be creates? createNamespaces? or what? createList? createMultiple? + // - TypeScript allows overloading at the interface level, but not at the implementation level, we would need to check the type if doing that + create(names: string): Promise; + // create(names: string[]): Promise; // overloading example, have multiple interfaces, but only one implementation that infers the type + // create(names: string | string[]): Promise; // using alternative types + // create({ name: string, names: string[]}) // using object with values + + delete(name: string): Promise; + + list(): Promise; + + has(namespace: string): Promise; +} diff --git a/src/core/kube/pod.ts b/src/core/kube/pod.ts new file mode 100644 index 000000000..7f31ef6a6 --- /dev/null +++ b/src/core/kube/pod.ts @@ -0,0 +1,37 @@ +/** + * Copyright (C) 2024 Hedera Hashgraph, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ +import {type ExtendedNetServer} from '../../types/index.js'; +import type TDirectoryData from './t_directory_data.js'; +import {type TarCreateFilter} from '../../types/aliases.js'; + +export interface Pod { + copyFrom(containerName: string, srcPath: string, destDir: string): Promise; + + copyTo( + containerName: string, + srcPath: string, + destDir: string, + filter: TarCreateFilter | undefined, + ): Promise; + + execContainer(containerName: string, command: string | string[]): Promise; + hasDir(containerName: string, destPath: string): Promise; + hasFile(containerName: string, destPath: string, filters: object): Promise; + listDir(containerName: string, destPath: string): Promise; + mkdir(containerName: string, destPath: string): Promise; + portForward(localPort: number, podPort: number): Promise; +} diff --git a/src/core/kube/t_directory_data.ts b/src/core/kube/t_directory_data.ts new file mode 100644 index 000000000..27173fa2a --- /dev/null +++ b/src/core/kube/t_directory_data.ts @@ -0,0 +1,24 @@ +/** + * Copyright (C) 2024 Hedera Hashgraph, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ +export default interface TDirectoryData { + directory: boolean; + owner: string; + group: string; + size: string; + modifiedAt: string; + name: string; +} diff --git a/src/core/kube/tk8.ts b/src/core/kube/tk8.ts new file mode 100644 index 000000000..121eea083 --- /dev/null +++ b/src/core/kube/tk8.ts @@ -0,0 +1,405 @@ +/** + * Copyright (C) 2024 Hedera Hashgraph, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ +import type * as k8s from '@kubernetes/client-node'; +import type {PodName, TarCreateFilter} from '../../types/aliases.js'; +import type {ExtendedNetServer, Optional} from '../../types/index.js'; +import type TDirectoryData from './t_directory_data.js'; +import {type V1Lease} from '@kubernetes/client-node'; +import {type Namespace} from '../config/remote/types.js'; +import {type Namespaces} from './namespaces.js'; + +// TODO rename TK8 to K8 and K8 to K8Client +// TODO talk about get and set implies local with no integration, versus read and update implies remote should we use +// - read and update or something besides get/set to avoid confusion +export default interface TK8 { + namespaces(): Namespaces; + + /** + * Create a new namespace + */ + createNamespace(name: string): Promise; + + /** + * Delete a namespace + * @param name - name of the namespace + */ + deleteNamespace(name: string): Promise; + + /** Get a list of namespaces */ + getNamespaces(): Promise; + + /** + * Returns true if a namespace exists with the given name + * @param namespace namespace name + */ + hasNamespace(namespace: string): Promise; + + /** + * Get a podName by name + * @param name - podName name + */ + getPodByName(name: string): Promise; + + /** + * Get pods by labels + * @param labels - list of labels + */ + getPodsByLabel(labels: string[]): Promise; + + /** + * Get secrets by labels + * @param labels - list of labels + */ + getSecretsByLabel(labels: string[]): Promise; + + /** + * Get host IP of a podName + * @param podNameName - name of the podName + * @returns podName IP + */ + getPodIP(podNameName: string): Promise; + + /** + * Get a svc by name + * @param name - svc name + */ + getSvcByName(name: string): Promise; + + /** + * Get a list of clusters + * @returns a list of cluster names + */ + getClusters(): string[]; + + /** + * Get a list of contexts + * @returns a list of context names + */ + getContextNames(): string[]; + + /** + * List files and directories in a container + * + * It runs ls -la on the specified path and returns a list of object containing the entries. 
+ * For example: + * [{ + * directory: false, + * owner: hedera, + * group: hedera, + * size: 121, + * modifiedAt: Jan 15 13:50 + * name: config.txt + * }] + * + * @param podName + * @param containerName + * @param destPath - path inside the container + * @returns a promise that returns array of directory entries, custom object + */ + listDir(podName: PodName, containerName: string, destPath: string): Promise; + + /** + * Check if a filepath exists in the container + * @param podName + * @param containerName + * @param destPath - path inside the container + * @param [filters] - an object with metadata fields and value + */ + hasFile(podName: PodName, containerName: string, destPath: string, filters: object): Promise; + + /** + * Check if a directory path exists in the container + * @param podName + * @param containerName + * @param destPath - path inside the container + */ + hasDir(podName: string, containerName: string, destPath: string): Promise; + + mkdir(podName: PodName, containerName: string, destPath: string): Promise; + + /** + * Copy a file into a container + * + * It overwrites any existing file inside the container at the destination directory + * + * @param podName + * @param containerName + * @param srcPath - source file path in the local + * @param destDir - destination directory in the container + * @param [filter] - the filter to pass to tar to keep or skip files or directories + * @returns a Promise that performs the copy operation + */ + copyTo( + podName: PodName, + containerName: string, + srcPath: string, + destDir: string, + filter: TarCreateFilter | undefined, + ): Promise; + + /** + * Copy a file from a container + * + * It overwrites any existing file at the destination directory + * + * @param podName + * @param containerName + * @param srcPath - source file path in the container + * @param destDir - destination directory in the local + */ + copyFrom(podName: PodName, containerName: string, srcPath: string, destDir: string): Promise; + + /** + * Invoke sh command within a container and return the console output as string + * @param podName + * @param containerName + * @param command - sh commands as an array to be run within the containerName (e.g 'ls -la /opt/hgcapp') + * @returns console output as string + */ + execContainer(podName: string, containerName: string, command: string | string[]): Promise; + + /** + * Port forward a port from a pod to localhost + * + * This simple server just forwards traffic from itself to a service running in kubernetes + * -> localhost:localPort -> port-forward-tunnel -> kubernetes-pod:targetPort + */ + portForward(podName: PodName, localPort: number, podPort: number): Promise; + + /** + * to test the connection to a pod within the network + * @param host - the host of the target connection + * @param port - the port of the target connection + */ + testSocketConnection(host: string, port: number): Promise; + + /** + * Stop the port forwarder server + * + * @param server - an instance of server returned by portForward method + * @param [maxAttempts] - the maximum number of attempts to check if the server is stopped + * @param [timeout] - the delay between checks in milliseconds + */ + stopPortForward(server: ExtendedNetServer, maxAttempts, timeout): Promise; + + waitForPods( + phases, + labels: string[], + podCount, + maxAttempts, + delay, + podItemPredicate?: (items: k8s.V1Pod) => boolean, + namespace?: string, + ): Promise; + + /** + * Check if pod is ready + * @param [labels] - pod labels + * @param [podCount] - number of pod 
expected + * @param [maxAttempts] - maximum attempts to check + * @param [delay] - delay between checks in milliseconds + * @param [namespace] - namespace + */ + waitForPodReady(labels: string[], podCount, maxAttempts, delay, namespace?: string): Promise; + + /** + * Check pods for conditions + * @param conditionsMap - a map of conditions and values + * @param [labels] - pod labels + * @param [podCount] - number of pod expected + * @param [maxAttempts] - maximum attempts to check + * @param [delay] - delay between checks in milliseconds + */ + waitForPodConditions( + conditionsMap: Map, + labels: string[], + podCount, + maxAttempts, + delay, + namespace?: string, + ): Promise; + + /** + * Get a list of persistent volume claim names for the given namespace + * @param namespace - the namespace of the persistent volume claims to return + * @param [labels] - labels + * @returns list of persistent volume claim names + */ + listPvcsByNamespace(namespace: string, labels: string[]): Promise; + + /** + * Get a list of secrets for the given namespace + * @param namespace - the namespace of the secrets to return + * @param [labels] - labels + * @returns list of secret names + */ + listSecretsByNamespace(namespace: string, labels: string[]): Promise; + + /** + * Delete a persistent volume claim + * @param name - the name of the persistent volume claim to delete + * @param namespace - the namespace of the persistent volume claim to delete + * @returns true if the persistent volume claim was deleted + */ + deletePvc(name: string, namespace: string): Promise; + + testClusterConnection(context: string, cluster: string): Promise; + + /** + * retrieve the secret of the given namespace and label selector, if there is more than one, it returns the first + * @param namespace - the namespace of the secret to search for + * @param labelSelector - the label selector used to fetch the Kubernetes secret + * @returns a custom secret object with the relevant attributes, the values of the data key:value pair + * objects must be base64 decoded + */ + getSecret( + namespace: string, + labelSelector: string, + ): Promise<{ + data: Record; + name: string; + namespace: string; + type: string; + labels: Record; + }>; + + /** + * creates a new Kubernetes secret with the provided attributes + * @param name - the name of the new secret + * @param namespace - the namespace to store the secret + * @param secretType - the secret type + * @param data - the secret, any values of a key:value pair must be base64 encoded + * @param labels - the label to use for future label selector queries + * @param recreate - if we should first run delete in the case that there the secret exists from a previous install + * @returns whether the secret was created successfully + */ + createSecret( + name: string, + namespace: string, + secretType: string, + data: Record, + labels: Optional>, + recreate: boolean, + ): Promise; + + /** + * Delete a secret from the namespace + * @param name - the name of the existing secret + * @param namespace - the namespace to store the secret + * @returns whether the secret was deleted successfully + */ + deleteSecret(name: string, namespace: string): Promise; + + /** + * @param name - name of the configmap + * @returns the configmap if found + * @throws SoloError - if the response if not found or the response is not OK + */ + getNamespacedConfigMap(name: string): Promise; + + /** + * @param name - for the config name + * @param labels - for the config metadata + * @param data - to contain in the config + */ + 
createNamespacedConfigMap( + name: string, + labels: Record, + data: Record, + ): Promise; + + /** + * @param name - for the config name + * @param labels - for the config metadata + * @param data - to contain in the config + */ + replaceNamespacedConfigMap( + name: string, + labels: Record, + data: Record, + ): Promise; + + deleteNamespacedConfigMap(name: string, namespace: string): Promise; + + createNamespacedLease( + namespace: string, + leaseName: string, + holderName: string, + durationSeconds, + ): Promise; + + readNamespacedLease(leaseName: string, namespace: string, timesCalled): Promise; + + renewNamespaceLease(leaseName: string, namespace: string, lease: k8s.V1Lease): Promise; + + transferNamespaceLease(lease: k8s.V1Lease, newHolderName: string): Promise; + + deleteNamespacedLease(name: string, namespace: string): Promise; + + /** + * Check if cert-manager is installed inside any namespace. + * @returns if cert-manager is found + */ + isCertManagerInstalled(): Promise; + + /** + * Check if minio is installed inside the namespace. + * @returns if minio is found + */ + isMinioInstalled(namespace: Namespace): Promise; + + /** + * Check if the ingress controller is installed inside any namespace. + * @returns if ingress controller is found + */ + isIngressControllerInstalled(): Promise; + + isRemoteConfigPresentInAnyNamespace(): Promise; + + isPrometheusInstalled(namespace: Namespace): Promise; + + /** + * Get a pod by name and namespace, will check every 1 second until the pod is no longer found. + * Can throw a SoloError if there is an error while deleting the pod. + * @param podName - the name of the pod + * @param namespace - the namespace of the pod + */ + killPod(podName: string, namespace: string): Promise; + + /** + * Download logs files from all network pods and save to local solo log directory + * @param namespace - the namespace of the network + * @returns a promise that resolves when the logs are downloaded + */ + getNodeLogs(namespace: string): Promise[]>; + + /** + * Download state files from a pod + * @param namespace - the namespace of the network + * @param nodeAlias - the pod name + * @returns a promise that resolves when the state files are downloaded + */ + getNodeStatesFromPod(namespace: string, nodeAlias: string): Promise[]>; + + setCurrentContext(context: string): void; + + getCurrentContext(): string; + + getCurrentContextNamespace(): Namespace; + + getCurrentClusterName(): string; +} diff --git a/src/index.ts b/src/index.ts index 11e831515..4d3fc0295 100644 --- a/src/index.ts +++ b/src/index.ts @@ -47,6 +47,7 @@ import {CustomProcessOutput} from './core/process_output.js'; import {type Opts} from './types/command_types.js'; import {SoloLogger} from './core/logging.js'; import {Container} from './core/container_init.js'; +import type {Namespace} from './core/config/remote/types.js'; export function main(argv: any) { Container.getInstance().init(); @@ -79,7 +80,7 @@ export function main(argv: any) { // set cluster and namespace in the global configManager from kubernetes context // so that we don't need to prompt the user - const context = k8.getCurrentContextObject(); + const contextNamespace = k8.getCurrentContextNamespace(); const currentClusterName = k8.getCurrentClusterName(); const opts: Opts = { @@ -107,8 +108,8 @@ export function main(argv: any) { const clusterName = configManager.getFlag(flags.clusterName) || currentClusterName; - if (context.namespace) { - configManager.setFlag(flags.namespace, context.namespace); + if (contextNamespace) { + 
configManager.setFlag(flags.namespace, contextNamespace); } // apply precedence for flags diff --git a/test/e2e/e2e_node_util.ts b/test/e2e/e2e_node_util.ts index fd6585eae..e8b4f76c9 100644 --- a/test/e2e/e2e_node_util.ts +++ b/test/e2e/e2e_node_util.ts @@ -199,7 +199,7 @@ export function e2eNodeKeyRefreshTest(testName: string, mode: string, releaseTag if (podArray.length > 0) { const podName = podArray[0].metadata.name; - k8.logger.info(`nodeRefreshTestSetup: podName: ${podName}`); + nodeCmd.logger.info(`nodeRefreshTestSetup: podName: ${podName}`); return podName; } throw new Error(`pod for ${nodeAliases} not found`); diff --git a/test/e2e/integration/core/k8_e2e.test.ts b/test/e2e/integration/core/k8_e2e.test.ts index 95f513f2d..5730028f3 100644 --- a/test/e2e/integration/core/k8_e2e.test.ts +++ b/test/e2e/integration/core/k8_e2e.test.ts @@ -146,11 +146,6 @@ describe('K8', () => { expect(contexts).not.to.have.lengthOf(0); }).timeout(defaultTimeout); - it('should be able to list contexts', () => { - const contexts = k8.getContexts(); - expect(contexts).not.to.have.lengthOf(0); - }).timeout(defaultTimeout); - it('should be able to create and delete a namespaces', async () => { const name = uuid4(); expect(await k8.createNamespace(name)).to.be.true; @@ -189,11 +184,6 @@ describe('K8', () => { await expect(k8.getPodIP('INVALID')).to.be.rejectedWith(SoloError); }).timeout(defaultTimeout); - it('should be able to detect cluster IP', async () => { - await expect(k8.getClusterIP(serviceName)).to.eventually.not.be.null; - await expect(k8.getClusterIP('INVALID')).to.be.rejectedWith(SoloError); - }).timeout(defaultTimeout); - it('should be able to check if a path is directory inside a container', async () => { const pods = await k8.getPodsByLabel([`app=${podLabelValue}`]); const podName = pods[0].metadata.name; diff --git a/test/unit/commands/cluster.test.ts b/test/unit/commands/cluster.test.ts index 48b18770a..a277b6de3 100644 --- a/test/unit/commands/cluster.test.ts +++ b/test/unit/commands/cluster.test.ts @@ -37,7 +37,6 @@ import {ROOT_DIR} from '../../../src/core/constants.js'; import path from 'path'; import {container} from 'tsyringe-neo'; import {resetTestContainer} from '../../test_container.js'; -import * as test from 'node:test'; import {ClusterCommandTasks} from '../../../src/commands/cluster/tasks.js'; import type {BaseCommand} from '../../../src/commands/base.js'; import {LocalConfig} from '../../../src/core/config/local_config.js'; @@ -58,8 +57,6 @@ import type {ListrTaskWrapper} from 'listr2'; import fs from 'fs'; import {stringify} from 'yaml'; import {ErrorMessages} from '../../../src/core/error_messages.js'; -import {SoloError} from '../../../src/core/errors.js'; -import {RemoteConfigDataWrapper} from '../../../src/core/config/remote/remote_config_data_wrapper.js'; const getBaseCommandOpts = () => ({ logger: sinon.stub(), @@ -167,11 +164,7 @@ describe('ClusterCommand unit tests', () => { ) => { const loggerStub = sandbox.createStubInstance(SoloLogger); k8Stub = sandbox.createStubInstance(K8); - k8Stub.getContexts.returns([ - {cluster: 'cluster-1', user: 'user-1', name: 'context-1', namespace: 'deployment-1'}, - {cluster: 'cluster-2', user: 'user-2', name: 'context-2', namespace: 'deployment-2'}, - {cluster: 'cluster-3', user: 'user-3', name: 'context-3', namespace: 'deployment-3'}, - ]); + k8Stub.getContextNames.returns(['context-1', 'context-2', 'context-3']); k8Stub.isMinioInstalled.returns(new Promise(() => true)); k8Stub.isPrometheusInstalled.returns(new Promise(() => true)); 
k8Stub.isCertManagerInstalled.returns(new Promise(() => true)); @@ -202,7 +195,6 @@ describe('ClusterCommand unit tests', () => { remoteConfigManagerStub.get.resolves(remoteConfig); k8Stub.getCurrentClusterName.returns(kubeConfigClusterObject.name); - k8Stub.getCurrentCluster.returns(kubeConfigClusterObject); k8Stub.getCurrentContext.returns('context-from-kubeConfig'); const configManager = sandbox.createStubInstance(ConfigManager);
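The TODO comments in src/core/k8.ts (constructor and listDir) weigh two shapes for the future pod API: keep passing podName as the first argument of every method, or expose a pod(podName) accessor that returns a per-pod handle, as sketched in the listDir comment. A hypothetical TypeScript sketch of the accessor shape follows; K8Client, PodHandle, and the stubbed listDir body are assumptions for illustration and are not part of this diff.

// Sketch only: a pod(podName) accessor returning a lightweight per-pod handle.
type PodName = string;

interface PodHandle {
  listDir(containerName: string, destPath: string): Promise<string[]>;
}

class K8Client {
  // The handle closes over the pod name; shared clients and config stay on K8Client.
  pod(podName: PodName): PodHandle {
    return {
      listDir: async (containerName, destPath) => {
        // A real implementation would exec `ls -la` in the container, as K8.listDir does today.
        return [`(stub) ls -la ${destPath} in ${podName}/${containerName}`];
      },
    };
  }
}

// Usage: k8.pod(podName).listDir(...) instead of k8.listDir(podName, ...)
const k8 = new K8Client();
await k8.pod('network-node1-0').listDir('root-container', '/opt/hgcapp');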
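The TODO in the new src/core/kube/namespaces.ts asks how a create-many variant could be exposed and notes that TypeScript allows overloading at the interface level with a single implementation that checks the argument type. A small sketch of that option follows; NamespacesApi shows only the create overloads, and the in-memory class and its createOne helper are hypothetical stand-ins for a real Kubernetes-backed implementation, not part of this diff.

// Sketch only: interface-level overloads with one implementation that narrows the type.
interface NamespacesApi {
  create(name: string): Promise<boolean>;
  create(names: string[]): Promise<boolean[]>;
}

class InMemoryNamespaces implements NamespacesApi {
  private readonly store = new Set<string>();

  // Overload signatures, followed by the single implementation signature.
  create(name: string): Promise<boolean>;
  create(names: string[]): Promise<boolean[]>;
  async create(names: string | string[]): Promise<boolean | boolean[]> {
    if (Array.isArray(names)) {
      // create-many path funnels through the single-name path
      return Promise.all(names.map(name => this.createOne(name)));
    }
    return this.createOne(names);
  }

  private async createOne(name: string): Promise<boolean> {
    if (this.store.has(name)) return false;
    this.store.add(name);
    return true;
  }
}

// Usage: both call shapes are type-checked against the matching overload.
const namespaces = new InMemoryNamespaces();
await namespaces.create('solo-e2e');        // Promise<boolean>
await namespaces.create(['alpha', 'beta']); // Promise<boolean[]>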