diff --git a/.github/workflows/flow-deploy-release-artifact.yaml b/.github/workflows/flow-deploy-release-artifact.yaml index 2a4cc13ec..0b9def156 100644 --- a/.github/workflows/flow-deploy-release-artifact.yaml +++ b/.github/workflows/flow-deploy-release-artifact.yaml @@ -153,7 +153,7 @@ jobs: npm run build - name: Setup JFrog CLI - uses: jfrog/setup-jfrog-cli@f0a84f35b0e0bd21838c5fb3e6788072d6540d13 # v4.5.5 + uses: jfrog/setup-jfrog-cli@f748a0599171a192a2668afee8d0497f7c1069df # v4.5.6 env: JF_URL: ${{ vars.JF_URL }} JF_ACCESS_TOKEN: ${{ secrets.JF_ACCESS_TOKEN }} diff --git a/.github/workflows/flow-gcs-test.yaml b/.github/workflows/flow-gcs-test.yaml new file mode 100644 index 000000000..ba339c3a6 --- /dev/null +++ b/.github/workflows/flow-gcs-test.yaml @@ -0,0 +1,113 @@ +## +# Copyright (C) 2023-2024 Hedera Hashgraph, LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+## + +name: "Test GCS as bucket storage" + +on: + workflow_dispatch: + workflow_call: + +defaults: + run: + shell: bash + +permissions: + id-token: write + contents: read + actions: read + +jobs: + gcs-storage-test: + timeout-minutes: 20 + runs-on: solo-linux-large + strategy: + matrix: + storageType: ["gcs_only", "gcs_and_minio"] + steps: + - name: Harden Runner + uses: step-security/harden-runner@0080882f6c36860b6ba35c610c98ce87d4e2f26f # v2.10.2 + with: + egress-policy: audit + + - name: Checkout Code + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + + - name: Authenticate to Google Cloud + id: google-auth + uses: google-github-actions/auth@6fc4af4b145ae7821d527454aa9bd537d1f2dc5f # v2.1.7 + with: + workload_identity_provider: "projects/652966097426/locations/global/workloadIdentityPools/solo-bucket-dev-pool/providers/gh-provider" + service_account: "solo-bucket-reader-writer@solo-bucket-dev.iam.gserviceaccount.com" + + - name: Setup Google Cloud SDK + uses: google-github-actions/setup-gcloud@6189d56e4096ee891640bb02ac264be376592d6a # v2.1.2 + + - name: Get Current Job Log URL + uses: Tiryoh/gha-jobid-action@v1 + id: jobs + with: + github_token: ${{ secrets.GITHUB_TOKEN }} + job_name: "gcs-storage-test (${{ matrix.storageType }})" + + - name: Create GCS bucket + # create a new bucket and use job runner id as prefix + run: | + export BUCKET_NAME=${{ steps.jobs.outputs.job_id }}-solo-streams + gcloud storage buckets create gs://${BUCKET_NAME} --project=${{ vars.GCP_S3_PROJECT_ID }} + echo "BUCKET_NAME=${BUCKET_NAME}" >> $GITHUB_ENV + + - name: Setup Node + uses: actions/setup-node@39370e3970a6d050c480ffad4ff0ed4d3fdee5af # v4.1.0 + with: + node-version: 20 + cache: npm + + - name: Setup Helm + uses: azure/setup-helm@fe7b79cd5ee1e45176fcad797de68ecaf3ca4814 # v4.2.0 + with: + version: "v3.12.3" # helm version + + - name: Setup Kind + uses: helm/kind-action@a1b0e391336a6ee6713a0583f8c6240d70863de3 # v1.12.0 + with: + install_only: true + 
node_image: kindest/node:v1.27.3@sha256:3966ac761ae0136263ffdb6cfd4db23ef8a83cba8a463690e98317add2c9ba72 + version: v0.21.0 + kubectl_version: v1.28.6 + verbosity: 3 + wait: 120s + + - name: Install Dependencies + id: npm-deps + run: | + npm ci + npm install -g @hashgraph/solo + + - name: Compile Project + run: npm run build + + - name: Run GCS Test Script for type ${{ matrix.storageType }} + env: + GCS_ACCESS_KEY: ${{ secrets.GCP_S3_ACCESS_KEY }} + GCS_SECRET_KEY: ${{ secrets.GCP_S3_SECRET_KEY }} + BUCKET_NAME: ${{ env.BUCKET_NAME }} + STORAGE_TYPE: ${{ matrix.storageType }} + run: | + .github/workflows/script/gcs_test.sh + + - name: Delete Bucket after Test + run: | + gcloud storage rm --recursive gs://${BUCKET_NAME} --project=${{ vars.GCP_S3_PROJECT_ID }} diff --git a/.github/workflows/flow-task-test.yaml b/.github/workflows/flow-task-test.yaml index 6aa191d33..8d4792dca 100644 --- a/.github/workflows/flow-task-test.yaml +++ b/.github/workflows/flow-task-test.yaml @@ -63,13 +63,6 @@ jobs: verbosity: 3 wait: 120s - - name: Install Dependencies - id: npm-deps - run: npm ci - - - name: Compile Project - run: npm run build - - name: Run Example Task File Test run: | task default-with-relay diff --git a/.github/workflows/script/gcs_test.sh b/.github/workflows/script/gcs_test.sh new file mode 100755 index 000000000..1a773134c --- /dev/null +++ b/.github/workflows/script/gcs_test.sh @@ -0,0 +1,60 @@ +#!/bin/bash +set -eo pipefail + +source .github/workflows/script/helper.sh + +if [ -z "${GCS_ACCESS_KEY}" ]; then + echo "GCS_ACCESS_KEY is not set. Exiting..." + exit 1 +fi + +if [ -z "${GCS_SECRET_KEY}" ]; then + echo "GCS_SECRET_KEY is not set. Exiting..." 
+ exit 1 +fi + +if [ -z "${BUCKET_NAME}" ]; then + streamBucket="solo-ci-test-streams" +else + streamBucket=${BUCKET_NAME} +fi + +if [ -z "${STORAGE_TYPE}" ]; then + storageType="gcs_and_minio" +else + storageType=${STORAGE_TYPE} +fi + +echo "Using bucket name: ${streamBucket}" +echo "Test storage type: ${storageType}" + +SOLO_CLUSTER_NAME=solo-e2e +SOLO_NAMESPACE=solo-e2e +SOLO_CLUSTER_SETUP_NAMESPACE=solo-setup + +kind delete cluster -n "${SOLO_CLUSTER_NAME}" +kind create cluster -n "${SOLO_CLUSTER_NAME}" +npm run solo-test -- init +npm run solo-test -- cluster setup \ + -s "${SOLO_CLUSTER_SETUP_NAMESPACE}" +npm run solo-test -- node keys --gossip-keys --tls-keys -i node1 +npm run solo-test -- network deploy -i node1 -n "${SOLO_NAMESPACE}" \ + --storage-endpoint "https://storage.googleapis.com" \ + --storage-access-key "${GCS_ACCESS_KEY}" --storage-secrets "${GCS_SECRET_KEY}" \ + --storage-type "${storageType}" --storage-bucket "${streamBucket}" + +npm run solo-test -- node setup -i node1 -n "${SOLO_NAMESPACE}" +npm run solo-test -- node start -i node1 -n "${SOLO_NAMESPACE}" +npm run solo-test -- mirror-node deploy --namespace "${SOLO_NAMESPACE}" \ + --storage-endpoint "https://storage.googleapis.com" \ + --storage-access-key "${GCS_ACCESS_KEY}" --storage-secrets "${GCS_SECRET_KEY}" \ + --storage-type "${storageType}" --storage-bucket "${streamBucket}" + +kubectl port-forward -n "${SOLO_NAMESPACE}" svc/haproxy-node1-svc 50211:50211 > /dev/null 2>&1 & +kubectl port-forward -n "${SOLO_NAMESPACE}" svc/hedera-explorer 8080:80 > /dev/null 2>&1 & + +cd ..; create_test_account ; cd - + +node examples/create-topic.js + +npm run solo-test -- node stop -i node1 -n "${SOLO_NAMESPACE}" diff --git a/.github/workflows/script/helper.sh b/.github/workflows/script/helper.sh new file mode 100644 index 000000000..ffed2745d --- /dev/null +++ b/.github/workflows/script/helper.sh @@ -0,0 +1,37 @@ +#!/bin/bash +set -eo pipefail + +function create_test_account () +{ + echo "Create test 
account with solo network" + cd solo + + # create new account and extract account id + npm run solo-test -- account create -n solo-e2e --hbar-amount 100 --generate-ecdsa-key --set-alias > test.log + export OPERATOR_ID=$(grep "accountId" test.log | awk '{print $2}' | sed 's/"//g'| sed 's/,//g') + echo "OPERATOR_ID=${OPERATOR_ID}" + rm test.log + + # get private key of the account + npm run solo-test -- account get -n solo-e2e --account-id ${OPERATOR_ID} --private-key > test.log + export OPERATOR_KEY=$(grep "privateKey" test.log | awk '{print $2}' | sed 's/"//g'| sed 's/,//g') + export CONTRACT_TEST_KEY_ONE=0x$(grep "privateKeyRaw" test.log | awk '{print $2}' | sed 's/"//g'| sed 's/,//g') + echo "CONTRACT_TEST_KEY_ONE=${CONTRACT_TEST_KEY_ONE}" + rm test.log + + npm run solo-test -- account create -n solo-e2e --hbar-amount 100 --generate-ecdsa-key --set-alias > test.log + export SECOND_KEY=$(grep "accountId" test.log | awk '{print $2}' | sed 's/"//g'| sed 's/,//g') + npm run solo-test -- account get -n solo-e2e --account-id ${SECOND_KEY} --private-key > test.log + export CONTRACT_TEST_KEY_TWO=0x$(grep "privateKeyRaw" test.log | awk '{print $2}' | sed 's/"//g'| sed 's/,//g') + echo "CONTRACT_TEST_KEY_TWO=${CONTRACT_TEST_KEY_TWO}" + rm test.log + + export CONTRACT_TEST_KEYS=${CONTRACT_TEST_KEY_ONE},$'\n'${CONTRACT_TEST_KEY_TWO} + export HEDERA_NETWORK="local-node" + + echo "OPERATOR_KEY=${OPERATOR_KEY}" + echo "HEDERA_NETWORK=${HEDERA_NETWORK}" + echo "CONTRACT_TEST_KEYS=${CONTRACT_TEST_KEYS}" + + cd - +} diff --git a/.github/workflows/script/solo_smoke_test.sh b/.github/workflows/script/solo_smoke_test.sh index b7b0f17fc..199d714a4 100755 --- a/.github/workflows/script/solo_smoke_test.sh +++ b/.github/workflows/script/solo_smoke_test.sh @@ -9,7 +9,7 @@ set -eo pipefail # Then run smart contract test, and also javascript sdk sample test to interact with solo network # -function_name="" +source .github/workflows/script/helper.sh function enable_port_forward () { @@ 
-73,41 +73,6 @@ function start_contract_test () return $result } -function create_test_account () -{ - echo "Create test account with solo network" - cd solo - - # create new account and extract account id - npm run solo-test -- account create -n solo-e2e --hbar-amount 100 --generate-ecdsa-key --set-alias > test.log - export OPERATOR_ID=$(grep "accountId" test.log | awk '{print $2}' | sed 's/"//g'| sed 's/,//g') - echo "OPERATOR_ID=${OPERATOR_ID}" - rm test.log - - # get private key of the account - npm run solo-test -- account get -n solo-e2e --account-id ${OPERATOR_ID} --private-key > test.log - export OPERATOR_KEY=$(grep "privateKey" test.log | awk '{print $2}' | sed 's/"//g'| sed 's/,//g') - export CONTRACT_TEST_KEY_ONE=0x$(grep "privateKeyRaw" test.log | awk '{print $2}' | sed 's/"//g'| sed 's/,//g') - echo "CONTRACT_TEST_KEY_ONE=${CONTRACT_TEST_KEY_ONE}" - rm test.log - - npm run solo-test -- account create -n solo-e2e --hbar-amount 100 --generate-ecdsa-key --set-alias > test.log - export SECOND_KEY=$(grep "accountId" test.log | awk '{print $2}' | sed 's/"//g'| sed 's/,//g') - npm run solo-test -- account get -n solo-e2e --account-id ${SECOND_KEY} --private-key > test.log - export CONTRACT_TEST_KEY_TWO=0x$(grep "privateKeyRaw" test.log | awk '{print $2}' | sed 's/"//g'| sed 's/,//g') - echo "CONTRACT_TEST_KEY_TWO=${CONTRACT_TEST_KEY_TWO}" - rm test.log - - export CONTRACT_TEST_KEYS=${CONTRACT_TEST_KEY_ONE},$'\n'${CONTRACT_TEST_KEY_TWO} - export HEDERA_NETWORK="local-node" - - echo "OPERATOR_KEY=${OPERATOR_KEY}" - echo "HEDERA_NETWORK=${HEDERA_NETWORK}" - echo "CONTRACT_TEST_KEYS=${CONTRACT_TEST_KEYS}" - - cd - -} - function start_sdk_test () { cd solo diff --git a/.github/workflows/zxc-update-readme.yaml b/.github/workflows/zxc-update-readme.yaml index 49dac6020..903e4a1ef 100644 --- a/.github/workflows/zxc-update-readme.yaml +++ b/.github/workflows/zxc-update-readme.yaml @@ -146,7 +146,7 @@ jobs: if : ${{ github.event.inputs.dry-run-enabled != 'true' && 
!cancelled() && !failure() && inputs.commit-changes }} uses: stefanzweifel/git-auto-commit-action@8621497c8c39c72f3e2a999a26b4ca1b5058a842 # v5.0.1 with: - commit_message: "auto update docs/content/User/StepByStepGuide.md [skip ci]" + commit_message: "chore: auto update docs/content/User/StepByStepGuide.md" commit_options: '--no-verify --signoff' add_options: '-u' file_pattern: 'docs/content/User/StepByStepGuide.md' diff --git a/.prettierrc.json b/.prettierrc.json index 2399e7ec7..677231d93 100644 --- a/.prettierrc.json +++ b/.prettierrc.json @@ -2,5 +2,6 @@ "bracketSpacing": false, "singleQuote": true, "trailingComma": "all", - "arrowParens": "avoid" + "arrowParens": "avoid", + "printWidth": 120 } diff --git a/README.md b/README.md index f52604dd4..ee4bb850b 100644 --- a/README.md +++ b/README.md @@ -29,11 +29,22 @@ To run a three-node network, you will need to set up Docker Desktop with at leas ## Setup -* Install [Node](https://nodejs.org/en/download). You may also use [nvm](https://github.com/nvm-sh/nvm) to manage different Node versions locally: +* Install [Node](https://nodejs.org/en/download). You may also use [nvm](https://github.com/nvm-sh/nvm) to manage different Node versions locally, some examples: ``` -nvm install lts/hydrogen -nvm use lts/hydrogen +# install specific nodejs version +# nvm install + +# install nodejs version 20.18.0 +nvm install v20.18.0 + +# lists available node versions already installed +nvm ls + +# switch to selected node version +# nvm use +nvm use v20.18.0 + ``` * Useful tools: @@ -63,4 +74,4 @@ expected to uphold this code of conduct. 
## License -[Apache License 2.0](LICENSE) +[Apache License 2.0](https://www.apache.org/licenses/LICENSE-2.0) diff --git a/Taskfile.helper.yml b/Taskfile.helper.yml index ccc17a41d..b358f6f84 100644 --- a/Taskfile.helper.yml +++ b/Taskfile.helper.yml @@ -14,15 +14,13 @@ vars: solo_keys_dir: "{{ .solo_cache_dir }}/keys" solo_bin_dir: "{{ .solo_user_dir }}/bin" run_build_file: - sh: (echo "/tmp/run-build-$(date +%Y%m%d%H%M%S)") + sh: (echo "/tmp/${USER}-run-build-$(date +%Y%m%d%H%M%S)") var_check_file: - sh: (echo "/tmp/var-check-$(date +%Y%m%d%H%M%S)") + sh: (echo "/tmp/${USER}-var-check-$(date +%Y%m%d%H%M%S)") minio_flag_file: - sh: (echo "/tmp/minio-flag-$(date +%Y%m%d%H%M%S)") - solo_chart_file: - sh: (echo "/tmp/solo-chart-$(date +%Y%m%d%H%M%S)") - solo_consensus_file: - sh: (echo "/tmp/solo-consensus-$(date +%Y%m%d%H%M%S)") + sh: (echo "/tmp/${USER}-minio-flag-$(date +%Y%m%d%H%M%S)") + solo_install_file: + sh: (echo "/tmp/${USER}-solo-install-$(date +%Y%m%d%H%M%S)") env: SOLO_CLUSTER_SETUP_NAMESPACE: solo-setup @@ -33,6 +31,7 @@ env: tasks: init: cmds: + - task: "install:solo" - task: "var:check" - task: "run:build" @@ -74,6 +73,8 @@ tasks: - echo "LOG4J2_FLAG=${LOG4J2_FLAG}" - echo "APPLICATION_PROPERTIES_FLAG=${APPLICATION_PROPERTIES_FLAG}" - echo "LOCAL_BUILD_FLAG=${LOCAL_BUILD_FLAG}" + - echo "DEBUG_NODE_ALIAS=${DEBUG_NODE_ALIAS}" + - echo "SOLO_CHARTS_DIR_FLAG=${SOLO_CHARTS_DIR_FLAG}" - touch {{ .var_check_file }} readme: @@ -91,10 +92,18 @@ tasks: - echo "Use command 'task default-with-relay' to deploy the network with a relay node." install:solo: + silent: true internal: true + status: + - test -f {{ .solo_install_file }} cmds: - - cd .. + - | + if [[ "$(ls -1 package.json)" == "" ]]; then + cd .. 
+ fi + pwd - npm install + - touch {{ .solo_install_file }} install:kubectl:darwin: internal: true @@ -156,14 +165,16 @@ tasks: - task: "init" cmds: - | - unset RELEASE_FLAG - if [[ "${LOCAL_BUILD_FLAG}" == "" ]]; then - export RELEASE_FLAG='--release-tag {{.CONSENSUS_NODE_VERSION}}' + if [[ "${DEBUG_NODE_ALIAS}" != "" ]]; then + export DEBUG_NODE_FLAG="--debug-node-alias {{ .DEBUG_NODE_ALIAS }}" + fi + if [[ "${CONSENSUS_NODE_VERSION}" != "" ]]; then + export CONSENSUS_NODE_FLAG='--release-tag {{.CONSENSUS_NODE_VERSION}}' fi if [[ "${SOLO_CHART_VERSION}" != "" ]]; then export SOLO_CHART_FLAG='--solo-chart-version ${SOLO_CHART_VERSION}' fi - SOLO_HOME_DIR=${SOLO_HOME_DIR} npm run solo -- network deploy --namespace "${SOLO_NAMESPACE}" --node-aliases-unparsed {{.node_identifiers}} ${RELEASE_FLAG} ${SOLO_CHART_FLAG} ${VALUES_FLAG} ${SETTINGS_FLAG} ${LOG4J2_FLAG} ${APPLICATION_PROPERTIES_FLAG} ${GENESIS_THROTTLES_FLAG} -q + SOLO_HOME_DIR=${SOLO_HOME_DIR} npm run solo -- network deploy --namespace "${SOLO_NAMESPACE}" --node-aliases-unparsed {{.node_identifiers}} ${CONSENSUS_NODE_FLAG} ${SOLO_CHART_FLAG} ${VALUES_FLAG} ${SETTINGS_FLAG} ${LOG4J2_FLAG} ${APPLICATION_PROPERTIES_FLAG} ${GENESIS_THROTTLES_FLAG} ${DEBUG_NODE_FLAG} ${SOLO_CHARTS_DIR_FLAG} -q - | if [[ "${CONSENSUS_NODE_VERSION}" != "" ]]; then export CONSENSUS_NODE_FLAG='--release-tag ${CONSENSUS_NODE_VERSION}' @@ -182,7 +193,11 @@ tasks: deps: - task: "init" cmds: - - SOLO_HOME_DIR=${SOLO_HOME_DIR} npm run solo -- node start --namespace "${SOLO_NAMESPACE}" --node-aliases-unparsed {{.node_identifiers}} -q {{ .CLI_ARGS }} + - | + if [[ "${DEBUG_NODE_ALIAS}" != "" ]]; then + export DEBUG_NODE_FLAG="--debug-node-alias {{ .DEBUG_NODE_ALIAS }}" + fi + SOLO_HOME_DIR=${SOLO_HOME_DIR} npm run solo -- node start --namespace "${SOLO_NAMESPACE}" --node-aliases-unparsed {{.node_identifiers}} ${DEBUG_NODE_FLAG} -q {{ .CLI_ARGS }} - | if [[ "{{ .use_port_forwards }}" == "true" ]];then echo "Enable port forwarding for Hedera 
Node" @@ -258,7 +273,7 @@ tasks: deps: - task: "init" cmds: - - SOLO_HOME_DIR=${SOLO_HOME_DIR} npm run solo -- cluster setup --cluster-setup-namespace "${SOLO_CLUSTER_SETUP_NAMESPACE}" -q + - SOLO_HOME_DIR=${SOLO_HOME_DIR} npm run solo -- cluster setup --cluster-setup-namespace "${SOLO_CLUSTER_SETUP_NAMESPACE}" ${SOLO_CHARTS_DIR_FLAG} -q cluster:destroy: cmds: @@ -408,8 +423,7 @@ tasks: silent: true cmds: - echo "Cleaning up temporary files..." - - rm -f /tmp/run-build-* - - rm -f /tmp/var-check-* - - rm -f /tmp/minio-flag-* - - rm -f /tmp/solo-chart-* - - rm -f /tmp/solo-consensus-* + - rm -f /tmp/${USER}-run-build-* || true + - rm -f /tmp/${USER}-var-check-* || true + - rm -f /tmp/${USER}-minio-flag-* || true + - rm -f /tmp/${USER}-solo-install-* || true diff --git a/Taskfile.yml b/Taskfile.yml index 1f7261e03..f6e15388f 100644 --- a/Taskfile.yml +++ b/Taskfile.yml @@ -8,6 +8,10 @@ env: SOLO_NAMESPACE: solo-e2e # SOLO_CHART_VERSION: 0.39.0 # CONSENSUS_NODE_VERSION: v0.58.0 + HEDERA_SERVICES_ROOT: "/Users/user/source/hedera-services" + # LOCAL_BUILD_FLAG: "--local-build-path {{.HEDERA_SERVICES_ROOT}}/hedera-node/data" + # DEBUG_NODE_ALIAS: "node2" + # SOLO_CHARTS_DIR_FLAG: "-d /Users/user/source/solo-charts/charts" vars: use_port_forwards: "true" @@ -21,7 +25,6 @@ tasks: - echo "This command is meant to deploy a Solo network to a Kind cluster on your local machine, " - echo "ctrl-c if this is not what you want to do." - sleep 5 - - task: "install:solo" - task: "install" - task: "start" diff --git a/docs/content/User/SDK.md b/docs/content/User/SDK.md index 3156e1974..cbb5ef1f8 100644 --- a/docs/content/User/SDK.md +++ b/docs/content/User/SDK.md @@ -1,8 +1,8 @@ # Using Solo with Hedera JavaScript SDK First, please follow solo repository README to install solo and Docker Desktop. 
-You also need to install the Taskfile tool following the instructions here: -https://taskfile.dev/installation/ +You also need to install the Taskfile tool following the instructions [here](https://taskfile.dev/installation/). + Then we start with launching a local Solo network with the following commands: diff --git a/docs/layouts/_default/_markup/render-link.html b/docs/layouts/_default/_markup/render-link.html index 22e9e912d..aebd36bc9 100644 --- a/docs/layouts/_default/_markup/render-link.html +++ b/docs/layouts/_default/_markup/render-link.html @@ -1,2 +1,6 @@ +{{ $destination := .Destination }} +{{ if strings.HasPrefix $destination "http" }} +{{ .Text | safeHTML }} +{{ else }} {{ .Text | safeHTML }} - +{{ end }} diff --git a/examples/Taskfile.examples.yml b/examples/Taskfile.examples.yml index 38ac0d4db..f8db6a49b 100644 --- a/examples/Taskfile.examples.yml +++ b/examples/Taskfile.examples.yml @@ -11,7 +11,6 @@ tasks: cmds: - task: "install:kubectl:darwin" - task: "install:kubectl:linux" - - task: "install:solo" - task: "install" - task: "start" diff --git a/examples/create-topic.js b/examples/create-topic.js index f44a957a3..605d3759d 100644 --- a/examples/create-topic.js +++ b/examples/create-topic.js @@ -14,9 +14,18 @@ * limitations under the License. 
* */ -import {Wallet, LocalProvider, TopicCreateTransaction, TopicMessageSubmitTransaction} from '@hashgraph/sdk'; +import { + Wallet, + LocalProvider, + TopicCreateTransaction, + TopicMessageSubmitTransaction, + AccountCreateTransaction, + PrivateKey, + Hbar, +} from '@hashgraph/sdk'; import dotenv from 'dotenv'; +import http from 'http'; dotenv.config(); @@ -30,12 +39,11 @@ async function main() { const wallet = new Wallet(process.env.OPERATOR_ID, process.env.OPERATOR_KEY, provider); + const TEST_MESSAGE = 'Hello World'; try { - console.log('before create topic'); // create topic let transaction = await new TopicCreateTransaction().freezeWithSigner(wallet); transaction = await transaction.signWithSigner(wallet); - console.log('after sign transaction'); const createResponse = await transaction.executeWithSigner(wallet); const createReceipt = await createResponse.getReceiptWithSigner(wallet); @@ -44,7 +52,7 @@ async function main() { // send one message let topicMessageSubmitTransaction = await new TopicMessageSubmitTransaction({ topicId: createReceipt.topicId, - message: 'Hello World', + message: TEST_MESSAGE, }).freezeWithSigner(wallet); topicMessageSubmitTransaction = await topicMessageSubmitTransaction.signWithSigner(wallet); const sendResponse = await topicMessageSubmitTransaction.executeWithSigner(wallet); @@ -52,10 +60,65 @@ async function main() { const sendReceipt = await sendResponse.getReceiptWithSigner(wallet); console.log(`topic sequence number = ${sendReceipt.topicSequenceNumber.toString()}`); + + await new Promise(resolve => setTimeout(resolve, 1000)); + + // send a create account transaction to push record stream files to mirror node + const newKey = PrivateKey.generate(); + let accountCreateTransaction = await new AccountCreateTransaction() + .setInitialBalance(new Hbar(10)) + .setKey(newKey.publicKey) + .freezeWithSigner(wallet); + accountCreateTransaction = await accountCreateTransaction.signWithSigner(wallet); + const accountCreationResponse = 
await accountCreateTransaction.executeWithSigner(wallet); + const accountCreationReceipt = await accountCreationResponse.getReceiptWithSigner(wallet); + console.log(`account id = ${accountCreationReceipt.accountId.toString()}`); + + await new Promise(resolve => setTimeout(resolve, 1000)); + + // Check submit message result should success + const queryURL = `http://localhost:8080/api/v1/topics/${createReceipt.topicId}/messages`; + let received = false; + let receivedMessage = ''; + + // wait until the transaction reached consensus and retrievable from the mirror node API + let retry = 0; + while (!received && retry < 10) { + const req = http.request(queryURL, {method: 'GET', timeout: 100, headers: {Connection: 'close'}}, res => { + res.setEncoding('utf8'); + res.on('data', chunk => { + // convert chunk to json object + const obj = JSON.parse(chunk); + if (obj.messages.length === 0) { + console.log('No messages yet'); + } else { + // convert message from base64 to utf-8 + const base64 = obj.messages[0].message; + const buff = Buffer.from(base64, 'base64'); + receivedMessage = buff.toString('utf-8'); + console.log(`Received message: ${receivedMessage}`); + received = true; + } + }); + }); + req.on('error', e => { + console.log(`problem with request: ${e.message}`); + }); + req.end(); // make the request + // wait and try again + await new Promise(resolve => setTimeout(resolve, 1000)); + retry++; + } + if (receivedMessage === TEST_MESSAGE) { + console.log('Message received successfully'); + } else { + console.error('Message received but not match: ' + receivedMessage); + // eslint-disable-next-line n/no-process-exit + process.exit(1); + } } catch (error) { console.error(error); } - provider.close(); } diff --git a/examples/custom-network-config/init-containers-values.yaml b/examples/custom-network-config/init-containers-values.yaml index 657aa388c..a5b553376 100644 --- a/examples/custom-network-config/init-containers-values.yaml +++ 
b/examples/custom-network-config/init-containers-values.yaml @@ -9,6 +9,7 @@ hedera: mountPath: /data-saved nodes: - name: node1 + nodeId: 0 accountId: 0.0.3 root: resources: @@ -19,6 +20,7 @@ hedera: cpu: 24 memory: 256Gi - name: node2 + nodeId: 1 accountId: 0.0.4 root: resources: @@ -29,6 +31,7 @@ hedera: cpu: 24 memory: 256Gi - name: node3 + nodeId: 2 accountId: 0.0.5 root: resources: @@ -39,6 +42,7 @@ hedera: cpu: 24 memory: 256Gi - name: node4 + nodeId: 3 accountId: 0.0.6 root: resources: @@ -49,6 +53,7 @@ hedera: cpu: 24 memory: 256Gi - name: node5 + nodeId: 4 accountId: 0.0.7 root: resources: @@ -59,6 +64,7 @@ hedera: cpu: 24 memory: 256Gi - name: node6 + nodeId: 5 accountId: 0.0.8 root: resources: @@ -69,6 +75,7 @@ hedera: cpu: 24 memory: 256Gi - name: node7 + nodeId: 6 accountId: 0.0.9 root: resources: @@ -79,6 +86,7 @@ hedera: cpu: 24 memory: 256Gi - name: node8 + nodeId: 7 accountId: 0.0.10 root: resources: @@ -89,6 +97,7 @@ hedera: cpu: 24 memory: 256Gi - name: node9 + nodeId: 8 accountId: 0.0.11 root: resources: @@ -99,6 +108,7 @@ hedera: cpu: 24 memory: 256Gi - name: node10 + nodeId: 9 accountId: 0.0.12 root: resources: @@ -109,8 +119,6 @@ hedera: cpu: 24 memory: 256Gi defaults: - envoyProxy: - loadBalancerEnabled: true sidecars: recordStreamUploader: resources: diff --git a/examples/performance-tuning/latitude/init-containers-values.yaml b/examples/performance-tuning/latitude/init-containers-values.yaml index 55713bd70..12888072d 100644 --- a/examples/performance-tuning/latitude/init-containers-values.yaml +++ b/examples/performance-tuning/latitude/init-containers-values.yaml @@ -9,6 +9,7 @@ hedera: mountPath: /data-saved nodes: - name: node1 + nodeId: 0 accountId: 0.0.3 root: resources: @@ -19,6 +20,7 @@ hedera: cpu: 24 memory: 256Gi - name: node2 + nodeId: 1 accountId: 0.0.4 root: resources: @@ -29,6 +31,7 @@ hedera: cpu: 24 memory: 256Gi - name: node3 + nodeId: 2 accountId: 0.0.5 root: resources: @@ -39,6 +42,7 @@ hedera: cpu: 24 memory: 256Gi - 
name: node4 + nodeId: 3 accountId: 0.0.6 root: resources: @@ -49,6 +53,7 @@ hedera: cpu: 24 memory: 256Gi - name: node5 + nodeId: 4 accountId: 0.0.7 root: resources: @@ -59,6 +64,7 @@ hedera: cpu: 24 memory: 256Gi - name: node6 + nodeId: 5 accountId: 0.0.8 root: resources: @@ -69,6 +75,7 @@ hedera: cpu: 24 memory: 256Gi - name: node7 + nodeId: 6 accountId: 0.0.9 root: resources: @@ -79,6 +86,7 @@ hedera: cpu: 24 memory: 256Gi - name: node8 + nodeId: 7 accountId: 0.0.10 root: resources: @@ -89,6 +97,7 @@ hedera: cpu: 24 memory: 256Gi - name: node9 + nodeId: 8 accountId: 0.0.11 root: resources: @@ -99,6 +108,7 @@ hedera: cpu: 24 memory: 256Gi - name: node10 + nodeId: 9 accountId: 0.0.12 root: resources: @@ -111,8 +121,6 @@ hedera: defaults: haproxy: serviceType: NodePort - envoyProxy: - loadBalancerEnabled: true sidecars: recordStreamUploader: resources: diff --git a/examples/performance-tuning/solo-perf-test/init-containers-values.yaml b/examples/performance-tuning/solo-perf-test/init-containers-values.yaml index 1ed08a4ca..98846be9e 100644 --- a/examples/performance-tuning/solo-perf-test/init-containers-values.yaml +++ b/examples/performance-tuning/solo-perf-test/init-containers-values.yaml @@ -9,6 +9,7 @@ hedera: mountPath: /data-saved nodes: - name: node0 + nodeId: 0 accountId: 0.0.3 root: resources: @@ -19,6 +20,7 @@ hedera: cpu: 4 memory: 31Gi - name: node1 + nodeId: 1 accountId: 0.0.4 root: resources: @@ -29,6 +31,7 @@ hedera: cpu: 4 memory: 31Gi - name: node2 + nodeId: 2 accountId: 0.0.5 root: resources: @@ -39,6 +42,7 @@ hedera: cpu: 4 memory: 31Gi - name: node3 + nodeId: 3 accountId: 0.0.6 root: resources: @@ -49,6 +53,7 @@ hedera: cpu: 4 memory: 31Gi - name: node4 + nodeId: 4 accountId: 0.0.7 root: resources: @@ -59,6 +64,7 @@ hedera: cpu: 4 memory: 31Gi - name: node5 + nodeId: 5 accountId: 0.0.8 root: resources: @@ -69,6 +75,7 @@ hedera: cpu: 4 memory: 31Gi - name: node6 + nodeId: 6 accountId: 0.0.9 root: resources: @@ -79,8 +86,6 @@ hedera: cpu: 4 
memory: 31Gi defaults: - envoyProxy: - loadBalancerEnabled: true sidecars: recordStreamUploader: resources: diff --git a/examples/solo-gke-test/Taskfile.yml b/examples/solo-gke-test/Taskfile.yml index e5899e35e..daeaeebd2 100644 --- a/examples/solo-gke-test/Taskfile.yml +++ b/examples/solo-gke-test/Taskfile.yml @@ -18,3 +18,4 @@ env: HEDERA_SERVICES_ROOT: "/Users/user/source/hedera-services" LOCAL_BUILD_FLAG: "--local-build-path {{.HEDERA_SERVICES_ROOT}}/hedera-node/data" GENESIS_THROTTLES_FLAG: "--genesis-throttles-file {{.USER_WORKING_DIR}}/throttles.json" + # SOLO_CHARTS_DIR_FLAG: "-d /Users/user/source/solo-charts/charts" diff --git a/examples/solo-gke-test/init-containers-values.yaml b/examples/solo-gke-test/init-containers-values.yaml index 76e8437a8..f69c67714 100644 --- a/examples/solo-gke-test/init-containers-values.yaml +++ b/examples/solo-gke-test/init-containers-values.yaml @@ -9,6 +9,7 @@ hedera: mountPath: /data-saved nodes: - name: node1 + nodeId: 0 accountId: 0.0.3 root: resources: @@ -19,6 +20,7 @@ hedera: cpu: 4 memory: 31Gi - name: node2 + nodeId: 1 accountId: 0.0.4 root: resources: @@ -29,6 +31,7 @@ hedera: cpu: 4 memory: 31Gi - name: node3 + nodeId: 2 accountId: 0.0.5 root: resources: @@ -39,6 +42,7 @@ hedera: cpu: 4 memory: 31Gi - name: node4 + nodeId: 3 accountId: 0.0.6 root: resources: @@ -49,6 +53,7 @@ hedera: cpu: 4 memory: 31Gi - name: node5 + nodeId: 4 accountId: 0.0.7 root: resources: @@ -59,8 +64,6 @@ hedera: cpu: 4 memory: 31Gi defaults: - envoyProxy: - loadBalancerEnabled: true sidecars: recordStreamUploader: resources: diff --git a/package-lock.json b/package-lock.json index 70fd5c570..0771cb67a 100644 --- a/package-lock.json +++ b/package-lock.json @@ -37,7 +37,7 @@ "stream-buffers": "^3.0.3", "tar": "^7.4.3", "tsyringe-neo": "^5.1.0", - "uuid": "^11.0.3", + "uuid": "^11.0.4", "validator": "^13.12.0", "winston": "^3.17.0", "ws": "^8.18.0", @@ -71,7 +71,7 @@ "@types/uuid": "^10.0.0", "@types/ws": "^8.5.13", "@types/yargs": 
"^17.0.33", - "@typescript-eslint/utils": "^8.19.0", + "@typescript-eslint/utils": "^8.19.1", "c8": "^10.1.3", "chai": "^5.1.2", "chai-as-promised": "^8.0.1", @@ -105,7 +105,7 @@ "tsx": "^4.19.2", "typedoc": "^0.27.6", "typescript": "^5.7.2", - "typescript-eslint": "^8.19.0" + "typescript-eslint": "^8.19.1" }, "engines": { "node": ">=20.14.0", @@ -2863,20 +2863,20 @@ "license": "MIT" }, "node_modules/@typescript-eslint/eslint-plugin": { - "version": "8.19.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-8.19.0.tgz", - "integrity": "sha512-NggSaEZCdSrFddbctrVjkVZvFC6KGfKfNK0CU7mNK/iKHGKbzT4Wmgm08dKpcZECBu9f5FypndoMyRHkdqfT1Q==", + "version": "8.19.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-8.19.1.tgz", + "integrity": "sha512-tJzcVyvvb9h/PB96g30MpxACd9IrunT7GF9wfA9/0TJ1LxGOJx1TdPzSbBBnNED7K9Ka8ybJsnEpiXPktolTLg==", "dev": true, "dependencies": { "@eslint-community/regexpp": "^4.10.0", - "@typescript-eslint/scope-manager": "8.19.0", - "@typescript-eslint/type-utils": "8.19.0", - "@typescript-eslint/utils": "8.19.0", - "@typescript-eslint/visitor-keys": "8.19.0", + "@typescript-eslint/scope-manager": "8.19.1", + "@typescript-eslint/type-utils": "8.19.1", + "@typescript-eslint/utils": "8.19.1", + "@typescript-eslint/visitor-keys": "8.19.1", "graphemer": "^1.4.0", "ignore": "^5.3.1", "natural-compare": "^1.4.0", - "ts-api-utils": "^1.3.0" + "ts-api-utils": "^2.0.0" }, "engines": { "node": "^18.18.0 || ^20.9.0 || >=21.1.0" @@ -2891,16 +2891,28 @@ "typescript": ">=4.8.4 <5.8.0" } }, + "node_modules/@typescript-eslint/eslint-plugin/node_modules/ts-api-utils": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ts-api-utils/-/ts-api-utils-2.0.0.tgz", + "integrity": "sha512-xCt/TOAc+EOHS1XPnijD3/yzpH6qg2xppZO1YDqGoVsNXfQfzHpOdNuXwrwOU8u4ITXJyDCTyt8w5g1sZv9ynQ==", + "dev": true, + "engines": { + "node": ">=18.12" + }, + "peerDependencies": { + "typescript": ">=4.8.4" 
+ } + }, "node_modules/@typescript-eslint/parser": { - "version": "8.19.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-8.19.0.tgz", - "integrity": "sha512-6M8taKyOETY1TKHp0x8ndycipTVgmp4xtg5QpEZzXxDhNvvHOJi5rLRkLr8SK3jTgD5l4fTlvBiRdfsuWydxBw==", + "version": "8.19.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-8.19.1.tgz", + "integrity": "sha512-67gbfv8rAwawjYx3fYArwldTQKoYfezNUT4D5ioWetr/xCrxXxvleo3uuiFuKfejipvq+og7mjz3b0G2bVyUCw==", "dev": true, "dependencies": { - "@typescript-eslint/scope-manager": "8.19.0", - "@typescript-eslint/types": "8.19.0", - "@typescript-eslint/typescript-estree": "8.19.0", - "@typescript-eslint/visitor-keys": "8.19.0", + "@typescript-eslint/scope-manager": "8.19.1", + "@typescript-eslint/types": "8.19.1", + "@typescript-eslint/typescript-estree": "8.19.1", + "@typescript-eslint/visitor-keys": "8.19.1", "debug": "^4.3.4" }, "engines": { @@ -2916,13 +2928,13 @@ } }, "node_modules/@typescript-eslint/scope-manager": { - "version": "8.19.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-8.19.0.tgz", - "integrity": "sha512-hkoJiKQS3GQ13TSMEiuNmSCvhz7ujyqD1x3ShbaETATHrck+9RaDdUbt+osXaUuns9OFwrDTTrjtwsU8gJyyRA==", + "version": "8.19.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-8.19.1.tgz", + "integrity": "sha512-60L9KIuN/xgmsINzonOcMDSB8p82h95hoBfSBtXuO4jlR1R9L1xSkmVZKgCPVfavDlXihh4ARNjXhh1gGnLC7Q==", "dev": true, "dependencies": { - "@typescript-eslint/types": "8.19.0", - "@typescript-eslint/visitor-keys": "8.19.0" + "@typescript-eslint/types": "8.19.1", + "@typescript-eslint/visitor-keys": "8.19.1" }, "engines": { "node": "^18.18.0 || ^20.9.0 || >=21.1.0" @@ -2933,15 +2945,15 @@ } }, "node_modules/@typescript-eslint/type-utils": { - "version": "8.19.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-8.19.0.tgz", - "integrity": 
"sha512-TZs0I0OSbd5Aza4qAMpp1cdCYVnER94IziudE3JU328YUHgWu9gwiwhag+fuLeJ2LkWLXI+F/182TbG+JaBdTg==", + "version": "8.19.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-8.19.1.tgz", + "integrity": "sha512-Rp7k9lhDKBMRJB/nM9Ksp1zs4796wVNyihG9/TU9R6KCJDNkQbc2EOKjrBtLYh3396ZdpXLtr/MkaSEmNMtykw==", "dev": true, "dependencies": { - "@typescript-eslint/typescript-estree": "8.19.0", - "@typescript-eslint/utils": "8.19.0", + "@typescript-eslint/typescript-estree": "8.19.1", + "@typescript-eslint/utils": "8.19.1", "debug": "^4.3.4", - "ts-api-utils": "^1.3.0" + "ts-api-utils": "^2.0.0" }, "engines": { "node": "^18.18.0 || ^20.9.0 || >=21.1.0" @@ -2955,10 +2967,22 @@ "typescript": ">=4.8.4 <5.8.0" } }, + "node_modules/@typescript-eslint/type-utils/node_modules/ts-api-utils": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ts-api-utils/-/ts-api-utils-2.0.0.tgz", + "integrity": "sha512-xCt/TOAc+EOHS1XPnijD3/yzpH6qg2xppZO1YDqGoVsNXfQfzHpOdNuXwrwOU8u4ITXJyDCTyt8w5g1sZv9ynQ==", + "dev": true, + "engines": { + "node": ">=18.12" + }, + "peerDependencies": { + "typescript": ">=4.8.4" + } + }, "node_modules/@typescript-eslint/types": { - "version": "8.19.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-8.19.0.tgz", - "integrity": "sha512-8XQ4Ss7G9WX8oaYvD4OOLCjIQYgRQxO+qCiR2V2s2GxI9AUpo7riNwo6jDhKtTcaJjT8PY54j2Yb33kWtSJsmA==", + "version": "8.19.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-8.19.1.tgz", + "integrity": "sha512-JBVHMLj7B1K1v1051ZaMMgLW4Q/jre5qGK0Ew6UgXz1Rqh+/xPzV1aW581OM00X6iOfyr1be+QyW8LOUf19BbA==", "dev": true, "engines": { "node": "^18.18.0 || ^20.9.0 || >=21.1.0" @@ -2969,19 +2993,19 @@ } }, "node_modules/@typescript-eslint/typescript-estree": { - "version": "8.19.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-8.19.0.tgz", - "integrity": 
"sha512-WW9PpDaLIFW9LCbucMSdYUuGeFUz1OkWYS/5fwZwTA+l2RwlWFdJvReQqMUMBw4yJWJOfqd7An9uwut2Oj8sLw==", + "version": "8.19.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-8.19.1.tgz", + "integrity": "sha512-jk/TZwSMJlxlNnqhy0Eod1PNEvCkpY6MXOXE/WLlblZ6ibb32i2We4uByoKPv1d0OD2xebDv4hbs3fm11SMw8Q==", "dev": true, "dependencies": { - "@typescript-eslint/types": "8.19.0", - "@typescript-eslint/visitor-keys": "8.19.0", + "@typescript-eslint/types": "8.19.1", + "@typescript-eslint/visitor-keys": "8.19.1", "debug": "^4.3.4", "fast-glob": "^3.3.2", "is-glob": "^4.0.3", "minimatch": "^9.0.4", "semver": "^7.6.0", - "ts-api-utils": "^1.3.0" + "ts-api-utils": "^2.0.0" }, "engines": { "node": "^18.18.0 || ^20.9.0 || >=21.1.0" @@ -3018,16 +3042,28 @@ "url": "https://github.com/sponsors/isaacs" } }, + "node_modules/@typescript-eslint/typescript-estree/node_modules/ts-api-utils": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ts-api-utils/-/ts-api-utils-2.0.0.tgz", + "integrity": "sha512-xCt/TOAc+EOHS1XPnijD3/yzpH6qg2xppZO1YDqGoVsNXfQfzHpOdNuXwrwOU8u4ITXJyDCTyt8w5g1sZv9ynQ==", + "dev": true, + "engines": { + "node": ">=18.12" + }, + "peerDependencies": { + "typescript": ">=4.8.4" + } + }, "node_modules/@typescript-eslint/utils": { - "version": "8.19.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-8.19.0.tgz", - "integrity": "sha512-PTBG+0oEMPH9jCZlfg07LCB2nYI0I317yyvXGfxnvGvw4SHIOuRnQ3kadyyXY6tGdChusIHIbM5zfIbp4M6tCg==", + "version": "8.19.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-8.19.1.tgz", + "integrity": "sha512-IxG5gLO0Ne+KaUc8iW1A+XuKLd63o4wlbI1Zp692n1xojCl/THvgIKXJXBZixTh5dd5+yTJ/VXH7GJaaw21qXA==", "dev": true, "dependencies": { "@eslint-community/eslint-utils": "^4.4.0", - "@typescript-eslint/scope-manager": "8.19.0", - "@typescript-eslint/types": "8.19.0", - "@typescript-eslint/typescript-estree": "8.19.0" + 
"@typescript-eslint/scope-manager": "8.19.1", + "@typescript-eslint/types": "8.19.1", + "@typescript-eslint/typescript-estree": "8.19.1" }, "engines": { "node": "^18.18.0 || ^20.9.0 || >=21.1.0" @@ -3042,12 +3078,12 @@ } }, "node_modules/@typescript-eslint/visitor-keys": { - "version": "8.19.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-8.19.0.tgz", - "integrity": "sha512-mCFtBbFBJDCNCWUl5y6sZSCHXw1DEFEk3c/M3nRK2a4XUB8StGFtmcEMizdjKuBzB6e/smJAAWYug3VrdLMr1w==", + "version": "8.19.1", + "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-8.19.1.tgz", + "integrity": "sha512-fzmjU8CHK853V/avYZAvuVut3ZTfwN5YtMaoi+X9Y9MA9keaWNHC3zEQ9zvyX/7Hj+5JkNyK1l7TOR2hevHB6Q==", "dev": true, "dependencies": { - "@typescript-eslint/types": "8.19.0", + "@typescript-eslint/types": "8.19.1", "eslint-visitor-keys": "^4.2.0" }, "engines": { @@ -12357,14 +12393,14 @@ } }, "node_modules/typescript-eslint": { - "version": "8.19.0", - "resolved": "https://registry.npmjs.org/typescript-eslint/-/typescript-eslint-8.19.0.tgz", - "integrity": "sha512-Ni8sUkVWYK4KAcTtPjQ/UTiRk6jcsuDhPpxULapUDi8A/l8TSBk+t1GtJA1RsCzIJg0q6+J7bf35AwQigENWRQ==", + "version": "8.19.1", + "resolved": "https://registry.npmjs.org/typescript-eslint/-/typescript-eslint-8.19.1.tgz", + "integrity": "sha512-LKPUQpdEMVOeKluHi8md7rwLcoXHhwvWp3x+sJkMuq3gGm9yaYJtPo8sRZSblMFJ5pcOGCAak/scKf1mvZDlQw==", "dev": true, "dependencies": { - "@typescript-eslint/eslint-plugin": "8.19.0", - "@typescript-eslint/parser": "8.19.0", - "@typescript-eslint/utils": "8.19.0" + "@typescript-eslint/eslint-plugin": "8.19.1", + "@typescript-eslint/parser": "8.19.1", + "@typescript-eslint/utils": "8.19.1" }, "engines": { "node": "^18.18.0 || ^20.9.0 || >=21.1.0" @@ -12754,14 +12790,13 @@ "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==" }, "node_modules/uuid": { - "version": "11.0.3", - "resolved": 
"https://registry.npmjs.org/uuid/-/uuid-11.0.3.tgz", - "integrity": "sha512-d0z310fCWv5dJwnX1Y/MncBAqGMKEzlBb1AOf7z9K8ALnd0utBX/msg/fA0+sbyN1ihbMsLhrBlnl1ak7Wa0rg==", + "version": "11.0.4", + "resolved": "https://registry.npmjs.org/uuid/-/uuid-11.0.4.tgz", + "integrity": "sha512-IzL6VtTTYcAhA/oghbFJ1Dkmqev+FpQWnCBaKq/gUluLxliWvO8DPFWfIviRmYbtaavtSQe4WBL++rFjdcGWEg==", "funding": [ "https://github.com/sponsors/broofa", "https://github.com/sponsors/ctavan" ], - "license": "MIT", "bin": { "uuid": "dist/esm/bin/uuid" } diff --git a/package.json b/package.json index 8f9cec81b..72b73686f 100644 --- a/package.json +++ b/package.json @@ -69,7 +69,7 @@ "stream-buffers": "^3.0.3", "tar": "^7.4.3", "tsyringe-neo": "^5.1.0", - "uuid": "^11.0.3", + "uuid": "^11.0.4", "validator": "^13.12.0", "winston": "^3.17.0", "ws": "^8.18.0", @@ -100,7 +100,7 @@ "@types/uuid": "^10.0.0", "@types/ws": "^8.5.13", "@types/yargs": "^17.0.33", - "@typescript-eslint/utils": "^8.19.0", + "@typescript-eslint/utils": "^8.19.1", "c8": "^10.1.3", "chai": "^5.1.2", "chai-as-promised": "^8.0.1", @@ -134,7 +134,7 @@ "tsx": "^4.19.2", "typedoc": "^0.27.6", "typescript": "^5.7.2", - "typescript-eslint": "^8.19.0" + "typescript-eslint": "^8.19.1" }, "repository": { "type": "git", diff --git a/resources/templates/application.properties b/resources/templates/application.properties index 202d86c31..0aa32ff23 100644 --- a/resources/templates/application.properties +++ b/resources/templates/application.properties @@ -15,3 +15,5 @@ hedera.profiles.active=TEST staking.periodMins=1 nodes.updateAccountIdAllowed=true blockStream.streamMode=BOTH +# TODO: uncomment this when we are ready to use genesis-network.json +#addressBook.useRosterLifecycle=true diff --git a/resources/templates/settings.txt b/resources/templates/settings.txt index 94d252e95..c35797abc 100644 --- a/resources/templates/settings.txt +++ b/resources/templates/settings.txt @@ -1,16 +1,2 @@ -checkSignedStateFromDisk, 1 -csvFileName, MainNetStats 
-doUpnp, false -loadKeysFromPfxFiles, 0 -maxOutgoingSyncs, 1 -reconnect.active, 1 -reconnect.reconnectWindowSeconds, -1 -showInternalStats, 1 -state.saveStatePeriod, 900 -useLoopbackIp, false -waitAtStartup, false state.mainClassNameOverride, com.hedera.services.ServicesMain -maxEventQueueForCons, 1000 -merkleDb.hashesRamToDiskThreshold, 8388608 -event.creation.maxCreationRate, 20 -virtualMap.familyThrottleThreshold, 6000000000 +state.saveStatePeriod, 60 diff --git a/src/commands/base.ts b/src/commands/base.ts index cecdcbaca..a49241663 100644 --- a/src/commands/base.ts +++ b/src/commands/base.ts @@ -31,6 +31,8 @@ import {type CommandFlag} from '../types/flag_types.js'; import {type Lease} from '../core/lease/lease.js'; import {Listr} from 'listr2'; import path from 'path'; +import * as constants from '../core/constants.js'; +import fs from 'fs'; export interface CommandHandlers { parent: BaseCommand; @@ -98,6 +100,10 @@ export abstract class BaseCommand extends ShellRunner { return this.configManager; } + getChartManager(): ChartManager { + return this.chartManager; + } + /** * Dynamically builds a class with properties from the provided list of flags * and extra properties, will keep track of which properties are used. 
Call @@ -171,6 +177,10 @@ export abstract class BaseCommand extends ShellRunner { return newConfigInstance; } + getLeaseManager(): LeaseManager { + return this.leaseManager; + } + /** * Get the list of unused configurations that were not accessed * @returns an array of unused configurations @@ -212,4 +222,33 @@ export abstract class BaseCommand extends ShellRunner { } }; } + + /** + * Setup home directories + * @param dirs a list of directories that need to be created in sequence + */ + setupHomeDirectory( + dirs: string[] = [ + constants.SOLO_HOME_DIR, + constants.SOLO_LOGS_DIR, + constants.SOLO_CACHE_DIR, + constants.SOLO_VALUES_DIR, + ], + ) { + const self = this; + + try { + dirs.forEach(dirPath => { + if (!fs.existsSync(dirPath)) { + fs.mkdirSync(dirPath, {recursive: true}); + } + self.logger.debug(`OK: setup directory: ${dirPath}`); + }); + } catch (e: Error | any) { + this.logger.error(e); + throw new SoloError(`failed to create directory: ${e.message}`, e); + } + + return dirs; + } } diff --git a/src/commands/cluster.ts b/src/commands/cluster.ts deleted file mode 100644 index ee9beb889..000000000 --- a/src/commands/cluster.ts +++ /dev/null @@ -1,394 +0,0 @@ -/** - * Copyright (C) 2024 Hedera Hashgraph, LLC - * - * Licensed under the Apache License, Version 2.0 (the ""License""); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an ""AS IS"" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ -import {ListrEnquirerPromptAdapter} from '@listr2/prompt-adapter-enquirer'; -import {Listr} from 'listr2'; -import {SoloError} from '../core/errors.js'; -import {Flags as flags} from './flags.js'; -import {BaseCommand} from './base.js'; -import chalk from 'chalk'; -import * as constants from '../core/constants.js'; -import path from 'path'; -import {ListrLease} from '../core/lease/listr_lease.js'; -import {type CommandBuilder} from '../types/aliases.js'; - -/** - * Define the core functionalities of 'cluster' command - */ -export class ClusterCommand extends BaseCommand { - showClusterList() { - this.logger.showList('Clusters', this.k8.getClusters()); - return true; - } - - /** Get cluster-info for the given cluster name */ - getClusterInfo() { - try { - const cluster = this.k8.getKubeConfig().getCurrentCluster(); - this.logger.showJSON(`Cluster Information (${cluster.name})`, cluster); - this.logger.showUser('\n'); - return true; - } catch (e: Error | any) { - this.logger.showUserError(e); - } - - return false; - } - - /** Show list of installed chart */ - async showInstalledChartList(clusterSetupNamespace: string) { - this.logger.showList('Installed Charts', await this.chartManager.getInstalledCharts(clusterSetupNamespace)); - } - - /** Setup cluster with shared components */ - async setup(argv: any) { - const self = this; - - interface Context { - config: { - chartDir: string; - clusterSetupNamespace: string; - deployCertManager: boolean; - deployCertManagerCrds: boolean; - deployMinio: boolean; - deployPrometheusStack: boolean; - soloChartVersion: string; - }; - isChartInstalled: boolean; - chartPath: string; - valuesArg: string; - } - - const tasks = new Listr( - [ - { - title: 'Initialize', - task: async (ctx, task) => { - self.configManager.update(argv); - flags.disablePrompts([flags.chartDirectory]); - - await self.configManager.executePrompt(task, [ - flags.chartDirectory, - flags.clusterSetupNamespace, - flags.deployCertManager, - 
flags.deployCertManagerCrds, - flags.deployMinio, - flags.deployPrometheusStack, - ]); - - ctx.config = { - chartDir: self.configManager.getFlag(flags.chartDirectory) as string, - clusterSetupNamespace: self.configManager.getFlag(flags.clusterSetupNamespace) as string, - deployCertManager: self.configManager.getFlag(flags.deployCertManager) as boolean, - deployCertManagerCrds: self.configManager.getFlag(flags.deployCertManagerCrds) as boolean, - deployMinio: self.configManager.getFlag(flags.deployMinio) as boolean, - deployPrometheusStack: self.configManager.getFlag(flags.deployPrometheusStack) as boolean, - soloChartVersion: self.configManager.getFlag(flags.soloChartVersion) as string, - }; - - self.logger.debug('Prepare ctx.config', {config: ctx.config, argv}); - - ctx.isChartInstalled = await this.chartManager.isChartInstalled( - ctx.config.clusterSetupNamespace, - constants.SOLO_CLUSTER_SETUP_CHART, - ); - }, - }, - { - title: 'Prepare chart values', - task: async (ctx, _) => { - ctx.chartPath = await this.prepareChartPath( - ctx.config.chartDir, - constants.SOLO_TESTING_CHART_URL, - constants.SOLO_CLUSTER_SETUP_CHART, - ); - ctx.valuesArg = this.prepareValuesArg( - ctx.config.chartDir, - ctx.config.deployPrometheusStack, - ctx.config.deployMinio, - ctx.config.deployCertManager, - ctx.config.deployCertManagerCrds, - ); - }, - skip: ctx => ctx.isChartInstalled, - }, - { - title: `Install '${constants.SOLO_CLUSTER_SETUP_CHART}' chart`, - task: async (ctx, _) => { - const clusterSetupNamespace = ctx.config.clusterSetupNamespace; - const version = ctx.config.soloChartVersion; - const valuesArg = ctx.valuesArg; - - try { - self.logger.debug(`Installing chart chartPath = ${ctx.chartPath}, version = ${version}`); - await self.chartManager.install( - clusterSetupNamespace, - constants.SOLO_CLUSTER_SETUP_CHART, - ctx.chartPath, - version, - valuesArg, - ); - } catch (e: Error | any) { - // if error, uninstall the chart and rethrow the error - self.logger.debug( - `Error 
on installing ${constants.SOLO_CLUSTER_SETUP_CHART}. attempting to rollback by uninstalling the chart`, - e, - ); - try { - await self.chartManager.uninstall(clusterSetupNamespace, constants.SOLO_CLUSTER_SETUP_CHART); - } catch (ex) { - // ignore error during uninstall since we are doing the best-effort uninstall here - } - - throw e; - } - - if (argv.dev) { - await self.showInstalledChartList(clusterSetupNamespace); - } - }, - skip: ctx => ctx.isChartInstalled, - }, - ], - { - concurrent: false, - rendererOptions: constants.LISTR_DEFAULT_RENDERER_OPTION, - }, - ); - - try { - await tasks.run(); - } catch (e: Error | any) { - throw new SoloError('Error on cluster setup', e); - } - - return true; - } - - async reset(argv: any) { - const self = this; - const lease = await self.leaseManager.create(); - - interface Context { - config: { - clusterName: string; - clusterSetupNamespace: string; - }; - isChartInstalled: boolean; - } - - const tasks = new Listr( - [ - { - title: 'Initialize', - task: async (ctx, task) => { - if (!argv[flags.force.name]) { - const confirm = await task.prompt(ListrEnquirerPromptAdapter).run({ - type: 'toggle', - default: false, - message: 'Are you sure you would like to uninstall solo-cluster-setup chart?', - }); - - if (!confirm) { - process.exit(0); - } - } - - self.configManager.update(argv); - ctx.config = { - clusterName: self.configManager.getFlag(flags.clusterName) as string, - clusterSetupNamespace: self.configManager.getFlag(flags.clusterSetupNamespace) as string, - }; - - ctx.isChartInstalled = await this.chartManager.isChartInstalled( - ctx.config.clusterSetupNamespace, - constants.SOLO_CLUSTER_SETUP_CHART, - ); - if (!ctx.isChartInstalled) { - throw new SoloError('No chart found for the cluster'); - } - - return ListrLease.newAcquireLeaseTask(lease, task); - }, - }, - { - title: `Uninstall '${constants.SOLO_CLUSTER_SETUP_CHART}' chart`, - task: async (ctx, _) => { - const clusterSetupNamespace = ctx.config.clusterSetupNamespace; - 
await self.chartManager.uninstall(clusterSetupNamespace, constants.SOLO_CLUSTER_SETUP_CHART); - if (argv.dev) { - await self.showInstalledChartList(clusterSetupNamespace); - } - }, - skip: ctx => !ctx.isChartInstalled, - }, - ], - { - concurrent: false, - rendererOptions: constants.LISTR_DEFAULT_RENDERER_OPTION, - }, - ); - - try { - await tasks.run(); - } catch (e: Error | any) { - throw new SoloError('Error on cluster reset', e); - } finally { - await lease.release(); - } - - return true; - } - - /** Return Yargs command definition for 'cluster' command */ - getCommandDefinition(): {command: string; desc: string; builder: CommandBuilder} { - const self = this; - return { - command: 'cluster', - desc: 'Manage solo testing cluster', - builder: (yargs: any) => { - return yargs - .command({ - command: 'list', - desc: 'List all available clusters', - handler: (argv: any) => { - self.logger.debug("==== Running 'cluster list' ===", {argv}); - - try { - const r = self.showClusterList(); - self.logger.debug('==== Finished running `cluster list`===='); - - if (!r) process.exit(1); - } catch (err) { - self.logger.showUserError(err); - process.exit(1); - } - }, - }) - .command({ - command: 'info', - desc: 'Get cluster info', - handler: (argv: any) => { - self.logger.debug("==== Running 'cluster info' ===", {argv}); - try { - const r = this.getClusterInfo(); - self.logger.debug('==== Finished running `cluster info`===='); - - if (!r) process.exit(1); - } catch (err: Error | any) { - self.logger.showUserError(err); - process.exit(1); - } - }, - }) - .command({ - command: 'setup', - desc: 'Setup cluster with shared components', - builder: (y: any) => - flags.setCommandFlags( - y, - flags.chartDirectory, - flags.clusterName, - flags.clusterSetupNamespace, - flags.deployCertManager, - flags.deployCertManagerCrds, - flags.deployMinio, - flags.deployPrometheusStack, - flags.quiet, - flags.soloChartVersion, - ), - handler: (argv: any) => { - self.logger.debug("==== Running 'cluster 
setup' ===", {argv}); - - self - .setup(argv) - .then(r => { - self.logger.debug('==== Finished running `cluster setup`===='); - - if (!r) process.exit(1); - }) - .catch(err => { - self.logger.showUserError(err); - process.exit(1); - }); - }, - }) - .command({ - command: 'reset', - desc: 'Uninstall shared components from cluster', - builder: (y: any) => - flags.setCommandFlags(y, flags.clusterName, flags.clusterSetupNamespace, flags.force, flags.quiet), - handler: (argv: any) => { - self.logger.debug("==== Running 'cluster reset' ===", {argv}); - - self - .reset(argv) - .then(r => { - self.logger.debug('==== Finished running `cluster reset`===='); - - if (!r) process.exit(1); - }) - .catch(err => { - self.logger.showUserError(err); - process.exit(1); - }); - }, - }) - .demandCommand(1, 'Select a cluster command'); - }, - }; - } - - /** - * Prepare values arg for cluster setup command - * - * @param [chartDir] - local charts directory (default is empty) - * @param [prometheusStackEnabled] - a bool to denote whether to install prometheus stack - * @param [minioEnabled] - a bool to denote whether to install minio - * @param [certManagerEnabled] - a bool to denote whether to install cert manager - * @param [certManagerCrdsEnabled] - a bool to denote whether to install cert manager CRDs - */ - prepareValuesArg( - chartDir = flags.chartDirectory.definition.defaultValue as string, - prometheusStackEnabled = flags.deployPrometheusStack.definition.defaultValue as boolean, - minioEnabled = flags.deployMinio.definition.defaultValue as boolean, - certManagerEnabled = flags.deployCertManager.definition.defaultValue as boolean, - certManagerCrdsEnabled = flags.deployCertManagerCrds.definition.defaultValue as boolean, - ) { - let valuesArg = chartDir ? 
`-f ${path.join(chartDir, 'solo-cluster-setup', 'values.yaml')}` : ''; - - valuesArg += ` --set cloud.prometheusStack.enabled=${prometheusStackEnabled}`; - valuesArg += ` --set cloud.minio.enabled=${minioEnabled}`; - valuesArg += ` --set cloud.certManager.enabled=${certManagerEnabled}`; - valuesArg += ` --set cert-manager.installCRDs=${certManagerCrdsEnabled}`; - - if (certManagerEnabled && !certManagerCrdsEnabled) { - this.logger.showUser( - chalk.yellowBright('> WARNING:'), - chalk.yellow( - 'cert-manager CRDs are required for cert-manager, please enable it if you have not installed it independently.', - ), - ); - } - - return valuesArg; - } - - close(): Promise { - // no-op - return Promise.resolve(); - } -} diff --git a/src/commands/cluster/configs.ts b/src/commands/cluster/configs.ts new file mode 100644 index 000000000..2aea57685 --- /dev/null +++ b/src/commands/cluster/configs.ts @@ -0,0 +1,125 @@ +/** + * Copyright (C) 2024 Hedera Hashgraph, LLC + * + * Licensed under the Apache License, Version 2.0 (the ""License""); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an ""AS IS"" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +import {type NodeAlias} from '../../types/aliases.js'; +import {Flags as flags} from '../flags.js'; +import * as constants from '../../core/constants.js'; +import {ListrEnquirerPromptAdapter} from '@listr2/prompt-adapter-enquirer'; +import {SoloError} from '../../core/errors.js'; + +export const CONNECT_CONFIGS_NAME = 'connectConfig'; + +export const connectConfigBuilder = async function (argv, ctx, task) { + const config = this.getConfig(CONNECT_CONFIGS_NAME, argv.flags, [ + 'currentDeploymentName', + ]) as ClusterConnectConfigClass; + + // set config in the context for later tasks to use + ctx.config = config; + + return ctx.config; +}; + +export const setupConfigBuilder = async function (argv, ctx, task) { + const parent = this.parent; + const configManager = parent.getConfigManager(); + configManager.update(argv); + flags.disablePrompts([flags.chartDirectory]); + + await configManager.executePrompt(task, [ + flags.chartDirectory, + flags.clusterSetupNamespace, + flags.deployCertManager, + flags.deployCertManagerCrds, + flags.deployMinio, + flags.deployPrometheusStack, + ]); + + ctx.config = { + chartDir: configManager.getFlag(flags.chartDirectory) as string, + clusterSetupNamespace: configManager.getFlag(flags.clusterSetupNamespace) as string, + deployCertManager: configManager.getFlag(flags.deployCertManager) as boolean, + deployCertManagerCrds: configManager.getFlag(flags.deployCertManagerCrds) as boolean, + deployMinio: configManager.getFlag(flags.deployMinio) as boolean, + deployPrometheusStack: configManager.getFlag(flags.deployPrometheusStack) as boolean, + soloChartVersion: configManager.getFlag(flags.soloChartVersion) as string, + } as ClusterSetupConfigClass; + + parent.logger.debug('Prepare ctx.config', {config: ctx.config, argv}); + + ctx.isChartInstalled = await parent + .getChartManager() + .isChartInstalled(ctx.config.clusterSetupNamespace, constants.SOLO_CLUSTER_SETUP_CHART); + + return ctx.config; +}; + +export const 
resetConfigBuilder = async function (argv, ctx, task) { + if (!argv[flags.force.name]) { + const confirm = await task.prompt(ListrEnquirerPromptAdapter).run({ + type: 'toggle', + default: false, + message: 'Are you sure you would like to uninstall solo-cluster-setup chart?', + }); + + if (!confirm) { + // eslint-disable-next-line n/no-process-exit + process.exit(0); + } + } + + this.parent.getConfigManager().update(argv); + + ctx.config = { + clusterName: this.parent.getConfigManager().getFlag(flags.clusterName) as string, + clusterSetupNamespace: this.parent.getConfigManager().getFlag(flags.clusterSetupNamespace) as string, + } as ClusterResetConfigClass; + + ctx.isChartInstalled = await this.parent + .getChartManager() + .isChartInstalled(ctx.config.clusterSetupNamespace, constants.SOLO_CLUSTER_SETUP_CHART); + if (!ctx.isChartInstalled) { + throw new SoloError('No chart found for the cluster'); + } + + return ctx.config; +}; + +export interface ClusterConnectConfigClass { + app: string; + cacheDir: string; + devMode: boolean; + namespace: string; + nodeAlias: NodeAlias; + context: string; + clusterName: string; +} + +export interface ClusterSetupConfigClass { + chartDir: string; + clusterSetupNamespace: string; + deployCertManager: boolean; + deployCertManagerCrds: boolean; + deployMinio: boolean; + deployPrometheusStack: boolean; + soloChartVersion: string; +} + +export interface ClusterResetConfigClass { + clusterName: string; + clusterSetupNamespace: string; +} diff --git a/src/commands/context/flags.ts b/src/commands/cluster/flags.ts similarity index 57% rename from src/commands/context/flags.ts rename to src/commands/cluster/flags.ts index 2dc606063..fd318e757 100644 --- a/src/commands/context/flags.ts +++ b/src/commands/cluster/flags.ts @@ -17,6 +17,34 @@ import {Flags as flags} from '../flags.js'; +export const DEFAULT_FLAGS = { + requiredFlags: [], + requiredFlagsWithDisabledPrompt: [], + optionalFlags: [], +}; + +export const SETUP_FLAGS = { + 
requiredFlags: [], + requiredFlagsWithDisabledPrompt: [], + optionalFlags: [ + flags.chartDirectory, + flags.clusterName, + flags.clusterSetupNamespace, + flags.deployCertManager, + flags.deployCertManagerCrds, + flags.deployMinio, + flags.deployPrometheusStack, + flags.quiet, + flags.soloChartVersion, + ], +}; + +export const RESET_FLAGS = { + requiredFlags: [], + requiredFlagsWithDisabledPrompt: [], + optionalFlags: [flags.clusterName, flags.clusterSetupNamespace, flags.force, flags.quiet], +}; + export const USE_FLAGS = { requiredFlags: [], requiredFlagsWithDisabledPrompt: [], diff --git a/src/commands/cluster/handlers.ts b/src/commands/cluster/handlers.ts new file mode 100644 index 000000000..b1dd0b6c6 --- /dev/null +++ b/src/commands/cluster/handlers.ts @@ -0,0 +1,148 @@ +/** + * Copyright (C) 2024 Hedera Hashgraph, LLC + * + * Licensed under the Apache License, Version 2.0 (the ""License""); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an ""AS IS"" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ +import {type BaseCommand, type CommandHandlers} from '../base.js'; +import {type ClusterCommandTasks} from './tasks.js'; +import * as helpers from '../../core/helpers.js'; +import * as constants from '../../core/constants.js'; +import * as ContextFlags from './flags.js'; +import {RemoteConfigTasks} from '../../core/config/remote/remote_config_tasks.js'; +import type {RemoteConfigManager} from '../../core/config/remote/remote_config_manager.js'; +import {connectConfigBuilder, resetConfigBuilder, setupConfigBuilder} from './configs.js'; +import {SoloError} from '../../core/errors.js'; + +export class ClusterCommandHandlers implements CommandHandlers { + readonly parent: BaseCommand; + readonly tasks: ClusterCommandTasks; + public readonly remoteConfigManager: RemoteConfigManager; + private getConfig: any; + + constructor(parent: BaseCommand, tasks: ClusterCommandTasks, remoteConfigManager: RemoteConfigManager) { + this.parent = parent; + this.tasks = tasks; + this.remoteConfigManager = remoteConfigManager; + this.getConfig = parent.getConfig.bind(parent); + } + + async connect(argv: any) { + argv = helpers.addFlagsToArgv(argv, ContextFlags.USE_FLAGS); + + const action = this.parent.commandActionBuilder( + [ + this.tasks.initialize(argv, connectConfigBuilder.bind(this)), + this.tasks.setupHomeDirectory(), + this.parent.getLocalConfig().promptLocalConfigTask(this.parent.getK8()), + this.tasks.selectContext(argv), + RemoteConfigTasks.loadRemoteConfig.bind(this)(argv), + this.tasks.updateLocalConfig(argv), + ], + { + concurrent: false, + rendererOptions: constants.LISTR_DEFAULT_RENDERER_OPTION, + }, + 'cluster connect', + null, + ); + + await action(argv, this); + return true; + } + + async list(argv: any) { + argv = helpers.addFlagsToArgv(argv, ContextFlags.USE_FLAGS); + + const action = this.parent.commandActionBuilder( + [this.tasks.showClusterList()], + { + concurrent: false, + rendererOptions: constants.LISTR_DEFAULT_RENDERER_OPTION, + }, + 'cluster list', 
+ null, + ); + + await action(argv, this); + return true; + } + + async info(argv: any) { + argv = helpers.addFlagsToArgv(argv, ContextFlags.USE_FLAGS); + + const action = this.parent.commandActionBuilder( + [this.tasks.getClusterInfo()], + { + concurrent: false, + rendererOptions: constants.LISTR_DEFAULT_RENDERER_OPTION, + }, + 'cluster info', + null, + ); + + await action(argv, this); + return true; + } + + async setup(argv: any) { + argv = helpers.addFlagsToArgv(argv, ContextFlags.USE_FLAGS); + + const action = this.parent.commandActionBuilder( + [ + this.tasks.initialize(argv, setupConfigBuilder.bind(this)), + this.tasks.prepareChartValues(argv), + this.tasks.installClusterChart(argv), + ], + { + concurrent: false, + rendererOptions: constants.LISTR_DEFAULT_RENDERER_OPTION, + }, + 'cluster setup', + null, + ); + + try { + await action(argv, this); + } catch (e: Error | any) { + throw new SoloError('Error on cluster setup', e); + } + + return true; + } + + async reset(argv: any) { + argv = helpers.addFlagsToArgv(argv, ContextFlags.USE_FLAGS); + + const action = this.parent.commandActionBuilder( + [ + this.tasks.initialize(argv, resetConfigBuilder.bind(this)), + this.tasks.acquireNewLease(argv), + this.tasks.uninstallClusterChart(argv), + ], + { + concurrent: false, + rendererOptions: constants.LISTR_DEFAULT_RENDERER_OPTION, + }, + 'cluster reset', + null, + ); + + try { + await action(argv, this); + } catch (e: Error | any) { + throw new SoloError('Error on cluster reset', e); + } + return true; + } +} diff --git a/src/commands/cluster/index.ts b/src/commands/cluster/index.ts new file mode 100644 index 000000000..1c1fd474d --- /dev/null +++ b/src/commands/cluster/index.ts @@ -0,0 +1,108 @@ +/** + * Copyright (C) 2024 Hedera Hashgraph, LLC + * + * Licensed under the Apache License, Version 2.0 (the ""License""); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an ""AS IS"" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +import * as ContextFlags from './flags.js'; +import {YargsCommand} from '../../core/yargs_command.js'; +import {BaseCommand} from './../base.js'; +import {type Opts} from '../../types/command_types.js'; +import {ClusterCommandTasks} from './tasks.js'; +import {ClusterCommandHandlers} from './handlers.js'; +import {DEFAULT_FLAGS, RESET_FLAGS, SETUP_FLAGS} from './flags.js'; + +/** + * Defines the core functionalities of 'node' command + */ +export class ClusterCommand extends BaseCommand { + public handlers: ClusterCommandHandlers; + + constructor(opts: Opts) { + super(opts); + + this.handlers = new ClusterCommandHandlers(this, new ClusterCommandTasks(this, this.k8), this.remoteConfigManager); + } + + getCommandDefinition() { + return { + command: 'cluster', + desc: 'Manage solo testing cluster', + builder: (yargs: any) => { + return yargs + .command( + new YargsCommand( + { + command: 'connect', + description: 'updates the local configuration by connecting a deployment to a k8s context', + commandDef: this, + handler: 'connect', + }, + ContextFlags.USE_FLAGS, + ), + ) + .command( + new YargsCommand( + { + command: 'list', + description: 'List all available clusters', + commandDef: this, + handler: 'list', + }, + DEFAULT_FLAGS, + ), + ) + .command( + new YargsCommand( + { + command: 'info', + description: 'Get cluster info', + commandDef: this, + handler: 'info', + }, + DEFAULT_FLAGS, + ), + ) + .command( + new YargsCommand( + { + command: 'setup', + description: 'Setup cluster with shared components', + commandDef: this, + handler: 
'setup', + }, + SETUP_FLAGS, + ), + ) + .command( + new YargsCommand( + { + command: 'reset', + description: 'Uninstall shared components from cluster', + commandDef: this, + handler: 'reset', + }, + RESET_FLAGS, + ), + ) + .demandCommand(1, 'Select a context command'); + }, + }; + } + + close(): Promise<void> { + // no-op + return Promise.resolve(); + } +} diff --git a/src/commands/cluster/tasks.ts b/src/commands/cluster/tasks.ts new file mode 100644 index 000000000..fca930eff --- /dev/null +++ b/src/commands/cluster/tasks.ts @@ -0,0 +1,399 @@ +/** + * Copyright (C) 2024 Hedera Hashgraph, LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ +import {Task} from '../../core/task.js'; +import {Flags as flags} from '../flags.js'; +import type {ListrTaskWrapper} from 'listr2'; +import type {ConfigBuilder} from '../../types/aliases.js'; +import {type BaseCommand} from '../base.js'; +import {splitFlagInput} from '../../core/helpers.js'; +import * as constants from '../../core/constants.js'; +import path from 'path'; +import chalk from 'chalk'; +import {ListrLease} from '../../core/lease/listr_lease.js'; +import {type K8} from '../../core/k8.js'; +import {ListrEnquirerPromptAdapter} from '@listr2/prompt-adapter-enquirer'; + +export class ClusterCommandTasks { + private readonly parent: BaseCommand; + + constructor( + parent, + private readonly k8: K8, + ) { + this.parent = parent; + } + + updateLocalConfig(argv) { + return new Task('Update local configuration', async (ctx: any, task: ListrTaskWrapper) => { + this.parent.logger.info('Compare local and remote configuration...'); + const configManager = this.parent.getConfigManager(); + const isQuiet = configManager.getFlag(flags.quiet); + + await this.parent.getRemoteConfigManager().modify(async remoteConfig => { + // Update current deployment with cluster list from remoteConfig + const localConfig = this.parent.getLocalConfig(); + const localDeployments = localConfig.deployments; + const remoteClusterList = []; + for (const cluster of Object.keys(remoteConfig.clusters)) { + if (localConfig.currentDeploymentName === remoteConfig.clusters[cluster]) { + remoteClusterList.push(cluster); + } + } + ctx.config.clusters = remoteClusterList; + localDeployments[localConfig.currentDeploymentName].clusters = ctx.config.clusters; + localConfig.setDeployments(localDeployments); + + const contexts = splitFlagInput(configManager.getFlag(flags.context)); + + for (let i = 0; i < ctx.config.clusters.length; i++) { + const cluster = ctx.config.clusters[i]; + const context = contexts[i]; + + // If a context is provided use it to update the mapping + if (context) { + 
localConfig.clusterContextMapping[cluster] = context; + } else if (!localConfig.clusterContextMapping[cluster]) { + // In quiet mode use the currently selected context to update the mapping + if (isQuiet) { + localConfig.clusterContextMapping[cluster] = this.parent.getK8().getKubeConfig().getCurrentContext(); + } + + // Prompt the user to select a context if mapping value is missing + else { + localConfig.clusterContextMapping[cluster] = await this.promptForContext(task, cluster); + } + } + } + this.parent.logger.info('Update local configuration...'); + await localConfig.write(); + }); + }); + } + + private async getSelectedContext(task, selectedCluster, localConfig, isQuiet) { + let selectedContext; + if (isQuiet) { + selectedContext = this.parent.getK8().getKubeConfig().getCurrentContext(); + } else { + selectedContext = await this.promptForContext(task, selectedCluster); + localConfig.clusterContextMapping[selectedCluster] = selectedContext; + } + return selectedContext; + } + + private async promptForContext(task, cluster) { + const kubeContexts = this.parent.getK8().getContexts(); + return flags.context.prompt( + task, + kubeContexts.map(c => c.name), + cluster, + ); + } + + private async selectContextForFirstCluster(task, clusters, localConfig, isQuiet) { + const selectedCluster = clusters[0]; + + if (localConfig.clusterContextMapping[selectedCluster]) { + return localConfig.clusterContextMapping[selectedCluster]; + } + + // If cluster does not exist in LocalConfig mapping prompt the user to select a context or use the current one + else { + return this.getSelectedContext(task, selectedCluster, localConfig, isQuiet); + } + } + + /** + * Prepare values arg for cluster setup command + * + * @param [chartDir] - local charts directory (default is empty) + * @param [prometheusStackEnabled] - a bool to denote whether to install prometheus stack + * @param [minioEnabled] - a bool to denote whether to install minio + * @param [certManagerEnabled] - a bool to denote 
whether to install cert manager + * @param [certManagerCrdsEnabled] - a bool to denote whether to install cert manager CRDs + */ + private prepareValuesArg( + chartDir = flags.chartDirectory.definition.defaultValue as string, + prometheusStackEnabled = flags.deployPrometheusStack.definition.defaultValue as boolean, + minioEnabled = flags.deployMinio.definition.defaultValue as boolean, + certManagerEnabled = flags.deployCertManager.definition.defaultValue as boolean, + certManagerCrdsEnabled = flags.deployCertManagerCrds.definition.defaultValue as boolean, + ) { + let valuesArg = chartDir ? `-f ${path.join(chartDir, 'solo-cluster-setup', 'values.yaml')}` : ''; + + valuesArg += ` --set cloud.prometheusStack.enabled=${prometheusStackEnabled}`; + valuesArg += ` --set cloud.certManager.enabled=${certManagerEnabled}`; + valuesArg += ` --set cert-manager.installCRDs=${certManagerCrdsEnabled}`; + valuesArg += ` --set cloud.minio.enabled=${minioEnabled}`; + + if (certManagerEnabled && !certManagerCrdsEnabled) { + this.parent.logger.showUser( + chalk.yellowBright('> WARNING:'), + chalk.yellow( + 'cert-manager CRDs are required for cert-manager, please enable it if you have not installed it independently.', + ), + ); + } + + return valuesArg; + } + + /** Show list of installed chart */ + private async showInstalledChartList(clusterSetupNamespace: string) { + this.parent.logger.showList( + 'Installed Charts', + await this.parent.getChartManager().getInstalledCharts(clusterSetupNamespace), + ); + } + + selectContext(argv) { + return new Task('Read local configuration settings', async (ctx: any, task: ListrTaskWrapper) => { + this.parent.logger.info('Read local configuration settings...'); + const configManager = this.parent.getConfigManager(); + const isQuiet = configManager.getFlag(flags.quiet); + const deploymentName: string = configManager.getFlag(flags.namespace); + let clusters = splitFlagInput(configManager.getFlag(flags.clusterName)); + const contexts = 
splitFlagInput(configManager.getFlag(flags.context)); + const localConfig = this.parent.getLocalConfig(); + let selectedContext; + + // If one or more contexts are provided use the first one + if (contexts.length) { + selectedContext = contexts[0]; + } + + // If one or more clusters are provided use the first one to determine the context + // from the mapping in the LocalConfig + else if (clusters.length) { + selectedContext = await this.selectContextForFirstCluster(task, clusters, localConfig, isQuiet); + } + + // If a deployment name is provided get the clusters associated with the deployment from the LocalConfig + // and select the context from the mapping, corresponding to the first deployment cluster + else if (deploymentName) { + const deployment = localConfig.deployments[deploymentName]; + + if (deployment && deployment.clusters.length) { + selectedContext = await this.selectContextForFirstCluster(task, deployment.clusters, localConfig, isQuiet); + } + + // The provided deployment does not exist in the LocalConfig + else { + // Add the deployment to the LocalConfig with the currently selected cluster and context in KubeConfig + if (isQuiet) { + selectedContext = this.parent.getK8().getKubeConfig().getCurrentContext(); + const selectedCluster = this.parent.getK8().getKubeConfig().getCurrentCluster().name; + localConfig.deployments[deploymentName] = { + clusters: [selectedCluster], + }; + + if (!localConfig.clusterContextMapping[selectedCluster]) { + localConfig.clusterContextMapping[selectedCluster] = selectedContext; + } + } + + // Prompt user for clusters and contexts + else { + clusters = splitFlagInput(await flags.clusterName.prompt(task, clusters)); + + for (const cluster of clusters) { + if (!localConfig.clusterContextMapping[cluster]) { + localConfig.clusterContextMapping[cluster] = await this.promptForContext(task, cluster); + } + } + + selectedContext = localConfig.clusterContextMapping[clusters[0]]; + } + } + } + + 
this.parent.getK8().getKubeConfig().setCurrentContext(selectedContext); + }); + } + + initialize(argv: any, configInit: ConfigBuilder) { + const {requiredFlags, optionalFlags} = argv; + + argv.flags = [...requiredFlags, ...optionalFlags]; + + return new Task('Initialize', async (ctx: any, task: ListrTaskWrapper) => { + if (argv[flags.devMode.name]) { + this.parent.logger.setDevMode(true); + } + + ctx.config = await configInit(argv, ctx, task); + }); + } + + showClusterList() { + return new Task('List all available clusters', async (ctx: any, task: ListrTaskWrapper) => { + this.parent.logger.showList('Clusters', this.parent.getK8().getClusters()); + }); + } + + getClusterInfo() { + return new Task('Get cluster info', async (ctx: any, task: ListrTaskWrapper) => { + try { + const cluster = this.parent.getK8().getKubeConfig().getCurrentCluster(); + this.parent.logger.showJSON(`Cluster Information (${cluster.name})`, cluster); + this.parent.logger.showUser('\n'); + } catch (e: Error | unknown) { + this.parent.logger.showUserError(e); + } + }); + } + + prepareChartValues(argv) { + const self = this; + + return new Task( + 'Prepare chart values', + async (ctx: any, task: ListrTaskWrapper) => { + ctx.chartPath = await this.parent.prepareChartPath( + ctx.config.chartDir, + constants.SOLO_TESTING_CHART_URL, + constants.SOLO_CLUSTER_SETUP_CHART, + ); + + // if minio is already present, don't deploy it + if (ctx.config.deployMinio && (await self.k8.isMinioInstalled(ctx.config.clusterSetupNamespace))) { + ctx.config.deployMinio = false; + } + + // if prometheus is found, don't deploy it + if ( + ctx.config.deployPrometheusStack && + !(await self.k8.isPrometheusInstalled(ctx.config.clusterSetupNamespace)) + ) { + ctx.config.deployPrometheusStack = false; + } + + // if cert manager is installed, don't deploy it + if ( + (ctx.config.deployCertManager || ctx.config.deployCertManagerCrds) && + (await self.k8.isCertManagerInstalled()) + ) { + ctx.config.deployCertManager = false; + 
ctx.config.deployCertManagerCrds = false; + } + + // If all are already present or not wanted, skip installation + if ( + !ctx.config.deployPrometheusStack && + !ctx.config.deployMinio && + !ctx.config.deployCertManager && + !ctx.config.deployCertManagerCrds + ) { + ctx.isChartInstalled = true; + return; + } + + ctx.valuesArg = this.prepareValuesArg( + ctx.config.chartDir, + ctx.config.deployPrometheusStack, + ctx.config.deployMinio, + ctx.config.deployCertManager, + ctx.config.deployCertManagerCrds, + ); + }, + ctx => ctx.isChartInstalled, + ); + } + + installClusterChart(argv) { + const parent = this.parent; + return new Task( + `Install '${constants.SOLO_CLUSTER_SETUP_CHART}' chart`, + async (ctx: any, task: ListrTaskWrapper) => { + const clusterSetupNamespace = ctx.config.clusterSetupNamespace; + const version = ctx.config.soloChartVersion; + const valuesArg = ctx.valuesArg; + + try { + parent.logger.debug(`Installing chart chartPath = ${ctx.chartPath}, version = ${version}`); + await parent + .getChartManager() + .install(clusterSetupNamespace, constants.SOLO_CLUSTER_SETUP_CHART, ctx.chartPath, version, valuesArg); + } catch (e: Error | unknown) { + // if error, uninstall the chart and rethrow the error + parent.logger.debug( + `Error on installing ${constants.SOLO_CLUSTER_SETUP_CHART}. 
attempting to rollback by uninstalling the chart`, + e, + ); + try { + await parent.getChartManager().uninstall(clusterSetupNamespace, constants.SOLO_CLUSTER_SETUP_CHART); + } catch { + // ignore error during uninstall since we are doing the best-effort uninstall here + } + + throw e; + } + + if (argv.dev) { + await this.showInstalledChartList(clusterSetupNamespace); + } + }, + ctx => ctx.isChartInstalled, + ); + } + + acquireNewLease(argv) { + return new Task('Acquire new lease', async (ctx: any, task: ListrTaskWrapper) => { + const lease = await this.parent.getLeaseManager().create(); + return ListrLease.newAcquireLeaseTask(lease, task); + }); + } + + uninstallClusterChart(argv) { + const parent = this.parent; + const self = this; + + return new Task( + `Uninstall '${constants.SOLO_CLUSTER_SETUP_CHART}' chart`, + async (ctx: any, task: ListrTaskWrapper) => { + const clusterSetupNamespace = ctx.config.clusterSetupNamespace; + + if (!argv.force && (await self.k8.isRemoteConfigPresentInAnyNamespace())) { + const confirm = await task.prompt(ListrEnquirerPromptAdapter).run({ + type: 'toggle', + default: false, + message: + 'There is remote config for one of the deployments' + + 'Are you sure you would like to uninstall the cluster?', + }); + + if (!confirm) { + // eslint-disable-next-line n/no-process-exit + process.exit(0); + } + } + + await parent.getChartManager().uninstall(clusterSetupNamespace, constants.SOLO_CLUSTER_SETUP_CHART); + if (argv.dev) { + await this.showInstalledChartList(clusterSetupNamespace); + } + }, + ctx => !ctx.isChartInstalled, + ); + } + + setupHomeDirectory() { + return new Task('Setup home directory', async () => { + this.parent.setupHomeDirectory(); + }); + } +} diff --git a/src/commands/context/handlers.ts b/src/commands/context/handlers.ts deleted file mode 100644 index 3c183ca08..000000000 --- a/src/commands/context/handlers.ts +++ /dev/null @@ -1,61 +0,0 @@ -/** - * Copyright (C) 2024 Hedera Hashgraph, LLC - * - * Licensed under the 
Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -import {type BaseCommand, type CommandHandlers} from '../base.js'; -import {type ContextCommandTasks} from './tasks.js'; -import * as helpers from '../../core/helpers.js'; -import * as constants from '../../core/constants.js'; -import * as ContextFlags from './flags.js'; -import {RemoteConfigTasks} from '../../core/config/remote/remote_config_tasks.js'; -import type {RemoteConfigManager} from '../../core/config/remote/remote_config_manager.js'; -import {connectConfigBuilder} from './configs.js'; - -export class ContextCommandHandlers implements CommandHandlers { - readonly parent: BaseCommand; - readonly tasks: ContextCommandTasks; - public readonly remoteConfigManager: RemoteConfigManager; - private getConfig: any; - - constructor(parent: BaseCommand, tasks: ContextCommandTasks, remoteConfigManager: RemoteConfigManager) { - this.parent = parent; - this.tasks = tasks; - this.remoteConfigManager = remoteConfigManager; - this.getConfig = parent.getConfig.bind(parent); - } - - async connect(argv: any) { - argv = helpers.addFlagsToArgv(argv, ContextFlags.USE_FLAGS); - - const action = this.parent.commandActionBuilder( - [ - this.tasks.initialize(argv, connectConfigBuilder.bind(this)), - this.parent.getLocalConfig().promptLocalConfigTask(this.parent.getK8()), - this.tasks.selectContext(argv), - RemoteConfigTasks.loadRemoteConfig.bind(this)(argv), - this.tasks.updateLocalConfig(argv), - ], - { - concurrent: false, - 
rendererOptions: constants.LISTR_DEFAULT_RENDERER_OPTION, - }, - 'context use', - null, - ); - - await action(argv, this); - return true; - } -} diff --git a/src/commands/context/index.ts b/src/commands/context/index.ts deleted file mode 100644 index 54fee60ad..000000000 --- a/src/commands/context/index.ts +++ /dev/null @@ -1,63 +0,0 @@ -/** - * Copyright (C) 2024 Hedera Hashgraph, LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -import * as ContextFlags from './flags.js'; -import {YargsCommand} from '../../core/yargs_command.js'; -import {BaseCommand} from './../base.js'; -import {type Opts} from '../../types/command_types.js'; -import {ContextCommandTasks} from './tasks.js'; -import {ContextCommandHandlers} from './handlers.js'; - -/** - * Defines the core functionalities of 'node' command - */ -export class ContextCommand extends BaseCommand { - private handlers: ContextCommandHandlers; - - constructor(opts: Opts) { - super(opts); - - this.handlers = new ContextCommandHandlers(this, new ContextCommandTasks(this), this.remoteConfigManager); - } - - getCommandDefinition() { - return { - command: 'context', - desc: 'Manage local and remote configurations', - builder: (yargs: any) => { - return yargs - .command( - new YargsCommand( - { - command: 'connect', - description: 'updates the local configuration by connecting a deployment to a k8s context', - commandDef: this, - handler: 'connect', - }, - ContextFlags.USE_FLAGS, - ), - ) - 
.demandCommand(1, 'Select a context command'); - }, - }; - } - - close(): Promise<void> { - // no-op - return Promise.resolve(); - } -} diff --git a/src/commands/context/tasks.ts b/src/commands/context/tasks.ts deleted file mode 100644 index e8474931c..000000000 --- a/src/commands/context/tasks.ts +++ /dev/null @@ -1,189 +0,0 @@ -/** - * Copyright (C) 2024 Hedera Hashgraph, LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -import {Task} from '../../core/task.js'; -import {Flags as flags} from '../flags.js'; -import type {ListrTaskWrapper} from 'listr2'; -import type {ConfigBuilder} from '../../types/aliases.js'; -import {type BaseCommand} from '../base.js'; -import {splitFlagInput} from '../../core/helpers.js'; - -export class ContextCommandTasks { - private readonly parent: BaseCommand; - - constructor(parent) { - this.parent = parent; - } - - updateLocalConfig(argv) { - return new Task('Update local configuration', async (ctx: any, task: ListrTaskWrapper) => { - this.parent.logger.info('Compare local and remote configuration...'); - const configManager = this.parent.getConfigManager(); - const isQuiet = configManager.getFlag(flags.quiet); - - await this.parent.getRemoteConfigManager().modify(async remoteConfig => { - // Update current deployment with cluster list from remoteConfig - const localConfig = this.parent.getLocalConfig(); - const localDeployments = localConfig.deployments; - const remoteClusterList = []; - for (const cluster of 
Object.keys(remoteConfig.clusters)) { - if (localConfig.currentDeploymentName === remoteConfig.clusters[cluster]) { - remoteClusterList.push(cluster); - } - } - ctx.config.clusters = remoteClusterList; - localDeployments[localConfig.currentDeploymentName].clusters = ctx.config.clusters; - localConfig.setDeployments(localDeployments); - - const contexts = splitFlagInput(configManager.getFlag(flags.context)); - - for (let i = 0; i < ctx.config.clusters.length; i++) { - const cluster = ctx.config.clusters[i]; - const context = contexts[i]; - - // If a context is provided use it to update the mapping - if (context) { - localConfig.clusterContextMapping[cluster] = context; - } else if (!localConfig.clusterContextMapping[cluster]) { - // In quiet mode use the currently selected context to update the mapping - if (isQuiet) { - localConfig.clusterContextMapping[cluster] = this.parent.getK8().getKubeConfig().getCurrentContext(); - } - - // Prompt the user to select a context if mapping value is missing - else { - localConfig.clusterContextMapping[cluster] = await this.promptForContext(task, cluster); - } - } - } - this.parent.logger.info('Update local configuration...'); - await localConfig.write(); - }); - }); - } - - private async getSelectedContext(task, selectedCluster, localConfig, isQuiet) { - let selectedContext; - if (isQuiet) { - selectedContext = this.parent.getK8().getKubeConfig().getCurrentContext(); - } else { - selectedContext = await this.promptForContext(task, selectedCluster); - localConfig.clusterContextMapping[selectedCluster] = selectedContext; - } - return selectedContext; - } - - private async promptForContext(task, cluster) { - const kubeContexts = this.parent.getK8().getContexts(); - return flags.context.prompt( - task, - kubeContexts.map(c => c.name), - cluster, - ); - } - - private async selectContextForFirstCluster(task, clusters, localConfig, isQuiet) { - const selectedCluster = clusters[0]; - - if 
(localConfig.clusterContextMapping[selectedCluster]) { - return localConfig.clusterContextMapping[selectedCluster]; - } - - // If cluster does not exist in LocalConfig mapping prompt the user to select a context or use the current one - else { - return this.getSelectedContext(task, selectedCluster, localConfig, isQuiet); - } - } - - selectContext(argv) { - return new Task('Read local configuration settings', async (ctx: any, task: ListrTaskWrapper) => { - this.parent.logger.info('Read local configuration settings...'); - const configManager = this.parent.getConfigManager(); - const isQuiet = configManager.getFlag(flags.quiet); - const deploymentName: string = configManager.getFlag(flags.namespace); - let clusters = splitFlagInput(configManager.getFlag(flags.clusterName)); - const contexts = splitFlagInput(configManager.getFlag(flags.context)); - const localConfig = this.parent.getLocalConfig(); - let selectedContext; - - // If one or more contexts are provided use the first one - if (contexts.length) { - selectedContext = contexts[0]; - } - - // If one or more clusters are provided use the first one to determine the context - // from the mapping in the LocalConfig - else if (clusters.length) { - selectedContext = await this.selectContextForFirstCluster(task, clusters, localConfig, isQuiet); - } - - // If a deployment name is provided get the clusters associated with the deployment from the LocalConfig - // and select the context from the mapping, corresponding to the first deployment cluster - else if (deploymentName) { - const deployment = localConfig.deployments[deploymentName]; - - if (deployment && deployment.clusters.length) { - selectedContext = await this.selectContextForFirstCluster(task, deployment.clusters, localConfig, isQuiet); - } - - // The provided deployment does not exist in the LocalConfig - else { - // Add the deployment to the LocalConfig with the currently selected cluster and context in KubeConfig - if (isQuiet) { - selectedContext = 
this.parent.getK8().getKubeConfig().getCurrentContext(); - const selectedCluster = this.parent.getK8().getKubeConfig().getCurrentCluster().name; - localConfig.deployments[deploymentName] = { - clusters: [selectedCluster], - }; - - if (!localConfig.clusterContextMapping[selectedCluster]) { - localConfig.clusterContextMapping[selectedCluster] = selectedContext; - } - } - - // Prompt user for clusters and contexts - else { - clusters = splitFlagInput(await flags.clusterName.prompt(task, clusters)); - - for (const cluster of clusters) { - if (!localConfig.clusterContextMapping[cluster]) { - localConfig.clusterContextMapping[cluster] = await this.promptForContext(task, cluster); - } - } - - selectedContext = localConfig.clusterContextMapping[clusters[0]]; - } - } - } - - this.parent.getK8().getKubeConfig().setCurrentContext(selectedContext); - }); - } - - initialize(argv: any, configInit: ConfigBuilder) { - const {requiredFlags, optionalFlags} = argv; - - argv.flags = [...requiredFlags, ...optionalFlags]; - - return new Task('Initialize', async (ctx: any, task: ListrTaskWrapper) => { - if (argv[flags.devMode.name]) { - this.parent.logger.setDevMode(true); - } - - ctx.config = await configInit(argv, ctx, task); - }); - } -} diff --git a/src/commands/deployment.ts b/src/commands/deployment.ts index 4e6e0c0a5..74deb0f06 100644 --- a/src/commands/deployment.ts +++ b/src/commands/deployment.ts @@ -60,7 +60,11 @@ export class DeploymentCommand extends BaseCommand { self.configManager.update(argv); self.logger.debug('Updated config with argv', {config: self.configManager.config}); - await self.configManager.executePrompt(task, DeploymentCommand.DEPLOY_FLAGS_LIST); + await self.configManager.executePrompt(task, [ + flags.contextClusterUnparsed, + flags.namespace, + flags.deploymentClusters, + ]); ctx.config = { contextClusterUnparsed: self.configManager.getFlag(flags.contextClusterUnparsed), diff --git a/src/commands/flags.ts b/src/commands/flags.ts index e73cee854..e5f54b9a7 
100644 --- a/src/commands/flags.ts +++ b/src/commands/flags.ts @@ -1619,6 +1619,73 @@ export class Flags { prompt: undefined, }; + static readonly storageType: CommandFlag = { + constName: 'storageType', + name: 'storage-type', + definition: { + defaultValue: constants.StorageType.MINIO_ONLY, + describe: + 'storage type for saving stream files, available options are minio_only, gcs_and_minio, s3_only, gcs_only, s3_and_gcs', + type: 'StorageType', + }, + prompt: undefined, + }; + + static readonly storageAccessKey: CommandFlag = { + constName: 'storageAccessKey', + name: 'storage-access-key', + definition: { + defaultValue: '', + describe: 'storage access key', + type: 'string', + }, + prompt: undefined, + }; + + static readonly storageSecrets: CommandFlag = { + constName: 'storageSecrets', + name: 'storage-secrets', + definition: { + defaultValue: '', + describe: 'storage secret key', + type: 'string', + }, + prompt: undefined, + }; + + static readonly storageEndpoint: CommandFlag = { + constName: 'storageEndpoint', + name: 'storage-endpoint', + definition: { + defaultValue: '', + describe: 'storage endpoint URL', + type: 'string', + }, + prompt: undefined, + }; + + static readonly storageBucket: CommandFlag = { + constName: 'storageBucket', + name: 'storage-bucket', + definition: { + defaultValue: '', + describe: 'name of storage bucket', + type: 'string', + }, + prompt: undefined, + }; + + static readonly loadBalancerEnabled: CommandFlag = { + constName: 'loadBalancerEnabled', + name: 'load-balancer', + definition: { + describe: 'Enable load balancer for network node proxies', + defaultValue: false, + type: 'boolean', + }, + prompt: undefined, + }; + static readonly allFlags: CommandFlag[] = [ Flags.accountId, Flags.amount, @@ -1671,6 +1738,7 @@ export class Flags { Flags.hederaExplorerTlsLoadBalancerIp, Flags.hederaExplorerVersion, Flags.inputDir, + Flags.loadBalancerEnabled, Flags.localBuildPath, Flags.log4j2Xml, Flags.mirrorNodeVersion, @@ -1696,6 +1764,11 @@ 
export class Flags { Flags.soloChartVersion, Flags.stakeAmounts, Flags.stateFile, + Flags.storageType, + Flags.storageAccessKey, + Flags.storageSecrets, + Flags.storageEndpoint, + Flags.storageBucket, Flags.tlsClusterIssuerType, Flags.tlsPrivateKey, Flags.tlsPublicKey, diff --git a/src/commands/index.ts b/src/commands/index.ts index 4b717ca83..dec7a7de5 100644 --- a/src/commands/index.ts +++ b/src/commands/index.ts @@ -14,8 +14,7 @@ * limitations under the License. * */ -import {ClusterCommand} from './cluster.js'; -import {ContextCommand} from './context/index.js'; +import {ClusterCommand} from './cluster/index.js'; import {InitCommand} from './init.js'; import {MirrorNodeCommand} from './mirror_node.js'; import {NetworkCommand} from './network.js'; @@ -33,7 +32,6 @@ import {type Opts} from '../types/command_types.js'; export function Initialize(opts: Opts) { const initCmd = new InitCommand(opts); const clusterCmd = new ClusterCommand(opts); - const contextCmd = new ContextCommand(opts); const networkCommand = new NetworkCommand(opts); const nodeCmd = new NodeCommand(opts); const relayCmd = new RelayCommand(opts); @@ -45,7 +43,6 @@ export function Initialize(opts: Opts) { initCmd.getCommandDefinition(), accountCmd.getCommandDefinition(), clusterCmd.getCommandDefinition(), - contextCmd.getCommandDefinition(), networkCommand.getCommandDefinition(), nodeCmd.getCommandDefinition(), relayCmd.getCommandDefinition(), diff --git a/src/commands/init.ts b/src/commands/init.ts index 9859e433e..3ad318c0c 100644 --- a/src/commands/init.ts +++ b/src/commands/init.ts @@ -27,35 +27,6 @@ import chalk from 'chalk'; * Defines the core functionalities of 'init' command */ export class InitCommand extends BaseCommand { - /** - * Setup home directories - * @param dirs a list of directories that need to be created in sequence - */ - setupHomeDirectory( - dirs: string[] = [ - constants.SOLO_HOME_DIR, - constants.SOLO_LOGS_DIR, - constants.SOLO_CACHE_DIR, - constants.SOLO_VALUES_DIR, - ], 
- ) { - const self = this; - - try { - dirs.forEach(dirPath => { - if (!fs.existsSync(dirPath)) { - fs.mkdirSync(dirPath, {recursive: true}); - } - self.logger.debug(`OK: setup directory: ${dirPath}`); - }); - } catch (e: Error | any) { - this.logger.error(e); - throw new SoloError(`failed to create directory: ${e.message}`, e); - } - - return dirs; - } - /** Executes the init CLI command */ async init(argv: any) { const self = this; diff --git a/src/commands/mirror_node.ts b/src/commands/mirror_node.ts index 937ab6790..15f1dc70c 100644 --- a/src/commands/mirror_node.ts +++ b/src/commands/mirror_node.ts @@ -51,6 +51,11 @@ interface MirrorNodeDeployConfigClass { clusterSetupNamespace: string; soloChartVersion: string; pinger: boolean; + storageType: constants.StorageType; + storageAccessKey: string; + storageSecrets: string; + storageEndpoint: string; + storageBucket: string; } interface Context { @@ -95,6 +100,11 @@ export class MirrorNodeCommand extends BaseCommand { flags.pinger, flags.clusterSetupNamespace, flags.soloChartVersion, + flags.storageType, + flags.storageAccessKey, + flags.storageSecrets, + flags.storageEndpoint, + flags.storageBucket, ]; } @@ -118,52 +128,44 @@ export class MirrorNodeCommand extends BaseCommand { /** * @param config * @param config.tlsClusterIssuerType - must be one of - acme-staging, acme-prod, or self-signed - * @param config.enableHederaExplorerTls * @param config.namespace - used for classname ingress class name prefix * @param config.hederaExplorerTlsLoadBalancerIp - can be an empty string * @param config.hederaExplorerTlsHostName */ - private prepareSoloChartSetupValuesArg(config: MirrorNodeDeployConfigClass) { - if (!config.enableHederaExplorerTls) return ''; - - const { - tlsClusterIssuerType, - enableHederaExplorerTls, - namespace, - hederaExplorerTlsLoadBalancerIp, - hederaExplorerTlsHostName, - } = config; + private async prepareSoloChartSetupValuesArg(config: MirrorNodeDeployConfigClass) { + const {tlsClusterIssuerType, 
namespace, hederaExplorerTlsLoadBalancerIp, hederaExplorerTlsHostName} = config; let valuesArg = ''; - if (enableHederaExplorerTls) { - if (!['acme-staging', 'acme-prod', 'self-signed'].includes(tlsClusterIssuerType)) { - throw new Error( - `Invalid TLS cluster issuer type: ${tlsClusterIssuerType}, must be one of: "acme-staging", "acme-prod", or "self-signed"`, - ); - } + if (!['acme-staging', 'acme-prod', 'self-signed'].includes(tlsClusterIssuerType)) { + throw new Error( + `Invalid TLS cluster issuer type: ${tlsClusterIssuerType}, must be one of: "acme-staging", "acme-prod", or "self-signed"`, + ); + } + // Install ingress controller only if it's not already present + if (!(await this.k8.isIngressControllerInstalled())) { valuesArg += ' --set ingress.enabled=true'; valuesArg += ' --set haproxyIngressController.enabled=true'; valuesArg += ` --set ingressClassName=${namespace}-hedera-explorer-ingress-class`; valuesArg += ` --set-json 'ingress.hosts[0]={"host":"${hederaExplorerTlsHostName}","paths":[{"path":"/","pathType":"Prefix"}]}'`; + } - if (hederaExplorerTlsLoadBalancerIp !== '') { - valuesArg += ` --set haproxy-ingress.controller.service.loadBalancerIP=${hederaExplorerTlsLoadBalancerIp}`; - } + if (hederaExplorerTlsLoadBalancerIp !== '') { + valuesArg += ` --set haproxy-ingress.controller.service.loadBalancerIP=${hederaExplorerTlsLoadBalancerIp}`; + } - if (tlsClusterIssuerType === 'self-signed') { - valuesArg += ' --set selfSignedClusterIssuer.enabled=true'; - } else { - valuesArg += ' --set acmeClusterIssuer.enabled=true'; - valuesArg += ` --set certClusterIssuerType=${tlsClusterIssuerType}`; - } + if (tlsClusterIssuerType === 'self-signed') { + valuesArg += ' --set selfSignedClusterIssuer.enabled=true'; + } else { + valuesArg += ' --set acmeClusterIssuer.enabled=true'; + valuesArg += ` --set certClusterIssuerType=${tlsClusterIssuerType}`; } return valuesArg; } - async prepareValuesArg(config: {valuesFile: string}) { + async prepareValuesArg(config: 
MirrorNodeDeployConfigClass) { let valuesArg = ''; const profileName = this.configManager.getFlag(flags.profileName) as string; @@ -176,6 +178,28 @@ export class MirrorNodeCommand extends BaseCommand { valuesArg += this.prepareValuesFiles(config.valuesFile); } + if (config.storageBucket) { + valuesArg += ` --set importer.config.hedera.mirror.importer.downloader.bucketName=${config.storageBucket}`; + } + + let storageType = ''; + if (config.storageType && config.storageAccessKey && config.storageSecrets && config.storageEndpoint) { + if ( + config.storageType === constants.StorageType.GCS_ONLY || + config.storageType === constants.StorageType.S3_AND_GCS || + config.storageType === constants.StorageType.GCS_AND_MINIO + ) { + storageType = 'gcp'; + } else if (config.storageType === constants.StorageType.S3_ONLY) { + storageType = 's3'; + } else { + throw new IllegalArgumentError(`Invalid cloud storage type: ${config.storageType}`); + } + valuesArg += ` --set importer.env.HEDERA_MIRROR_IMPORTER_DOWNLOADER_SOURCES_0_TYPE=${storageType}`; + valuesArg += ` --set importer.env.HEDERA_MIRROR_IMPORTER_DOWNLOADER_SOURCES_0_URI=${config.storageEndpoint}`; + valuesArg += ` --set importer.env.HEDERA_MIRROR_IMPORTER_DOWNLOADER_SOURCES_0_CREDENTIALS_ACCESSKEY=${config.storageAccessKey}`; + valuesArg += ` --set importer.env.HEDERA_MIRROR_IMPORTER_DOWNLOADER_SOURCES_0_CREDENTIALS_SECRETKEY=${config.storageSecrets}`; + } return valuesArg; } @@ -217,9 +241,10 @@ export class MirrorNodeCommand extends BaseCommand { constants.MIRROR_NODE_CHART, ); - ctx.config.valuesArg = await self.prepareValuesArg(ctx.config); - + // predefined values first ctx.config.valuesArg += this.prepareValuesFiles(constants.MIRROR_NODE_VALUES_FILE); + // user defined values later to override predefined values + ctx.config.valuesArg += await self.prepareValuesArg(ctx.config); if (ctx.config.pinger) { const startAccId = constants.HEDERA_NODE_ACCOUNT_ID_START; @@ -281,7 +306,7 @@ export class MirrorNodeCommand 
extends BaseCommand { constants.SOLO_CLUSTER_SETUP_CHART, ); - const soloChartSetupValuesArg = self.prepareSoloChartSetupValuesArg(config); + const soloChartSetupValuesArg = await self.prepareSoloChartSetupValuesArg(config); await self.chartManager.upgrade( clusterSetupNamespace, constants.SOLO_CLUSTER_SETUP_CHART, diff --git a/src/commands/network.ts b/src/commands/network.ts index b3fd759f9..573875fa4 100644 --- a/src/commands/network.ts +++ b/src/commands/network.ts @@ -38,13 +38,15 @@ import {ConsensusNodeComponent} from '../core/config/remote/components/consensus import {ConsensusNodeStates} from '../core/config/remote/enumerations.js'; import {EnvoyProxyComponent} from '../core/config/remote/components/envoy_proxy_component.js'; import {HaProxyComponent} from '../core/config/remote/components/ha_proxy_component.js'; -import {GenesisNetworkDataConstructor} from '../core/genesis_network_models/genesis_network_data_constructor.js'; +import {v4 as uuidv4} from 'uuid'; +import * as Base64 from 'js-base64'; export interface NetworkDeployConfigClass { applicationEnv: string; cacheDir: string; chartDirectory: string; enablePrometheusSvcMonitor: boolean; + loadBalancerEnabled: boolean; soloChartVersion: string; namespace: string; nodeAliasesUnparsed: string; @@ -62,7 +64,6 @@ export interface NetworkDeployConfigClass { grpcWebTlsCertificatePath: string; grpcTlsKeyPath: string; grpcWebTlsKeyPath: string; - genesisNetworkData: GenesisNetworkDataConstructor; genesisThrottlesFile: string; resolvedThrottlesFile: string; getUnusedConfigs: () => string[]; @@ -70,6 +71,11 @@ export interface NetworkDeployConfigClass { envoyIps: string; haproxyIpsParsed?: Record; envoyIpsParsed?: Record; + storageType: constants.StorageType; + storageAccessKey: string; + storageSecrets: string; + storageEndpoint: string; + storageBucket: string; } export class NetworkCommand extends BaseCommand { @@ -116,6 +122,7 @@ export class NetworkCommand extends BaseCommand { 
flags.enablePrometheusSvcMonitor, flags.soloChartVersion, flags.debugNodeAlias, + flags.loadBalancerEnabled, flags.log4j2Xml, flags.namespace, flags.nodeAliasesUnparsed, @@ -132,9 +139,81 @@ export class NetworkCommand extends BaseCommand { flags.grpcWebTlsKeyPath, flags.haproxyIps, flags.envoyIps, + flags.storageType, + flags.storageAccessKey, + flags.storageSecrets, + flags.storageEndpoint, + flags.storageBucket, ]; } + async prepareStorageSecrets(config: NetworkDeployConfigClass) { + try { + const minioAccessKey = uuidv4(); + const minioSecretKey = uuidv4(); + const minioData = {}; + const namespace = config.namespace; + + // Generating new minio credentials + const envString = `MINIO_ROOT_USER=${minioAccessKey}\nMINIO_ROOT_PASSWORD=${minioSecretKey}`; + minioData['config.env'] = Base64.encode(envString); + const isMinioSecretCreated = await this.k8.createSecret( + constants.MINIO_SECRET_NAME, + namespace, + 'Opaque', + minioData, + undefined, + true, + ); + if (!isMinioSecretCreated) { + throw new SoloError('ailed to create new minio secret'); + } + + // Generating cloud storage secrets + const {storageAccessKey, storageSecrets, storageEndpoint} = config; + const cloudData = {}; + if ( + config.storageType === constants.StorageType.S3_ONLY || + config.storageType === constants.StorageType.S3_AND_GCS + ) { + cloudData['S3_ACCESS_KEY'] = Base64.encode(storageAccessKey); + cloudData['S3_SECRET_KEY'] = Base64.encode(storageSecrets); + cloudData['S3_ENDPOINT'] = Base64.encode(storageEndpoint); + } + if ( + config.storageType === constants.StorageType.GCS_ONLY || + config.storageType === constants.StorageType.S3_AND_GCS || + config.storageType === constants.StorageType.GCS_AND_MINIO + ) { + cloudData['GCS_ACCESS_KEY'] = Base64.encode(storageAccessKey); + cloudData['GCS_SECRET_KEY'] = Base64.encode(storageSecrets); + cloudData['GCS_ENDPOINT'] = Base64.encode(storageEndpoint); + } + if (config.storageType === constants.StorageType.GCS_AND_MINIO) { + 
cloudData['S3_ACCESS_KEY'] = Base64.encode(minioAccessKey); + cloudData['S3_SECRET_KEY'] = Base64.encode(minioSecretKey); + } + + const isCloudSecretCreated = await this.k8.createSecret( + constants.UPLOADER_SECRET_NAME, + namespace, + 'Opaque', + cloudData, + undefined, + true, + ); + if (!isCloudSecretCreated) { + throw new SoloError( + `failed to create Kubernetes secret for storage credentials of type '${config.storageType}'`, + ); + } + } catch (e: Error | any) { + const errorMessage = 'failed to create Kubernetes storage secret '; + this.logger.error(errorMessage, e); + throw new SoloError(errorMessage, e); + } + } + async prepareValuesArg(config: { chartDirectory?: string; app?: string; @@ -146,8 +225,13 @@ export class NetworkCommand extends BaseCommand { valuesFile?: string; haproxyIpsParsed?: Record; envoyIpsParsed?: Record; - genesisNetworkData: GenesisNetworkDataConstructor; + storageType: constants.StorageType; resolvedThrottlesFile: string; + storageAccessKey: string; + storageSecrets: string; + storageEndpoint: string; + storageBucket: string; + loadBalancerEnabled: boolean; }) { let valuesArg = config.chartDirectory ? 
`-f ${path.join(config.chartDirectory, 'solo-deployment', 'values.yaml')}` @@ -164,16 +248,43 @@ export class NetworkCommand extends BaseCommand { valuesArg = addDebugOptions(valuesArg, config.debugNodeAlias); } + if ( + config.storageType === constants.StorageType.S3_AND_GCS || + config.storageType === constants.StorageType.GCS_ONLY || + config.storageType === constants.StorageType.GCS_AND_MINIO + ) { + valuesArg += ' --set cloud.gcs.enabled=true'; + } + + if ( + config.storageType === constants.StorageType.S3_AND_GCS || + config.storageType === constants.StorageType.S3_ONLY + ) { + valuesArg += ' --set cloud.s3.enabled=true'; + } + + if ( + config.storageType === constants.StorageType.GCS_ONLY || + config.storageType === constants.StorageType.S3_ONLY || + config.storageType === constants.StorageType.S3_AND_GCS + ) { + valuesArg += ' --set cloud.minio.enabled=false'; + } + + if (config.storageType !== constants.StorageType.MINIO_ONLY) { + valuesArg += ' --set cloud.generateNewSecrets=false'; + } + + if (config.storageBucket) { + valuesArg += ` --set cloud.buckets.streamBucket=${config.storageBucket}`; + valuesArg += ` --set minio-server.tenant.buckets[0].name=${config.storageBucket}`; + } const profileName = this.configManager.getFlag(flags.profileName) as string; - this.profileValuesFile = await this.profileManager.prepareValuesForSoloChart( - profileName, - config.genesisNetworkData, - ); + this.profileValuesFile = await this.profileManager.prepareValuesForSoloChart(profileName); if (this.profileValuesFile) { valuesArg += this.prepareValuesFiles(this.profileValuesFile); } - // do not deploy mirror node until after we have the updated address book valuesArg += ` --set "telemetry.prometheus.svcMonitor.enabled=${config.enablePrometheusSvcMonitor}"`; valuesArg += ` --set "defaults.volumeClaims.enabled=${config.persistentVolumeClaims}"`; @@ -200,6 +311,11 @@ export class NetworkCommand extends BaseCommand { valuesArg += ` --set-file 
"hedera.configMaps.genesisThrottlesJson=${config.resolvedThrottlesFile}"`; } + if (config.loadBalancerEnabled) { + valuesArg += ' --set "defaults.haproxy.serviceType=LoadBalancer"'; + valuesArg += ' --set "defaults.envoyProxy.serviceType=LoadBalancer"'; + } + if (config.valuesFile) { valuesArg += this.prepareValuesFiles(config.valuesFile); } @@ -224,6 +340,7 @@ export class NetworkCommand extends BaseCommand { flags.chainId, flags.chartDirectory, flags.debugNodeAlias, + flags.loadBalancerEnabled, flags.log4j2Xml, flags.persistentVolumeClaims, flags.profileName, @@ -235,6 +352,11 @@ export class NetworkCommand extends BaseCommand { flags.grpcWebTlsKeyPath, flags.haproxyIps, flags.envoyIps, + flags.storageType, + flags.storageAccessKey, + flags.storageSecrets, + flags.storageEndpoint, + flags.storageBucket, ]); await this.configManager.executePrompt(task, NetworkCommand.DEPLOY_FLAGS_LIST); @@ -272,12 +394,6 @@ export class NetworkCommand extends BaseCommand { config.stagingDir = Templates.renderStagingDir(config.cacheDir, config.releaseTag); config.stagingKeysDir = path.join(validatePath(config.stagingDir), 'keys'); - config.genesisNetworkData = await GenesisNetworkDataConstructor.initialize( - config.nodeAliases, - this.keyManager, - config.keysDir, - ); - config.resolvedThrottlesFile = resolveValidJsonFilePath( config.genesisThrottlesFile, flags.genesisThrottlesFile.definition.defaultValue as string, @@ -299,6 +415,17 @@ export class NetworkCommand extends BaseCommand { fs.mkdirSync(config.keysDir); } + // if storageType is set, then we need to set the storage secrets + if ( + this.configManager.getFlag(flags.storageType) && + this.configManager.getFlag(flags.storageAccessKey) && + this.configManager.getFlag(flags.storageSecrets) && + this.configManager.getFlag(flags.storageEndpoint) + ) { + this.logger.debug('Preparing storage secrets'); + await this.prepareStorageSecrets(config); + } + this.logger.debug('Prepared config', { config, cachedConfig: 
this.configManager.config, @@ -525,6 +652,11 @@ export class NetworkCommand extends BaseCommand { constants.PODS_RUNNING_MAX_ATTEMPTS, constants.PODS_RUNNING_DELAY, ), + // skip if only cloud storage is/are used + skip: ctx => + ctx.config.storageType === constants.StorageType.GCS_ONLY || + ctx.config.storageType === constants.StorageType.S3_ONLY || + ctx.config.storageType === constants.StorageType.S3_AND_GCS, }); // set up the subtasks @@ -676,7 +808,11 @@ export class NetworkCommand extends BaseCommand { { title: 'Waiting for network pods to be running', task: async () => { - await this.k8.waitForPods([constants.POD_PHASE_RUNNING], ['solo.hedera.com/type=network-node'], 1); + await this.k8.waitForPods( + [constants.POD_PHASE_RUNNING], + ['solo.hedera.com/type=network-node', 'solo.hedera.com/type=network-node'], + 1, + ); }, }, ], diff --git a/src/commands/node/handlers.ts b/src/commands/node/handlers.ts index 89313f85c..5f0b64b31 100644 --- a/src/commands/node/handlers.ts +++ b/src/commands/node/handlers.ts @@ -744,7 +744,8 @@ export class NodeCommandHandlers implements CommandHandlers { }), this.tasks.identifyNetworkPods(), this.tasks.fetchPlatformSoftware('nodeAliases'), - this.tasks.setupNetworkNodes('nodeAliases'), + // TODO: change to isGenesis: true once we are ready to use genesis-network.json: this.tasks.setupNetworkNodes('nodeAliases', true), + this.tasks.setupNetworkNodes('nodeAliases', false), this.changeAllNodeStates(ConsensusNodeStates.SETUP), ], { diff --git a/src/commands/node/tasks.ts b/src/commands/node/tasks.ts index 482dbca28..951f4939e 100644 --- a/src/commands/node/tasks.ts +++ b/src/commands/node/tasks.ts @@ -24,8 +24,6 @@ import {type ChartManager} from '../../core/chart_manager.js'; import {type CertificateManager} from '../../core/certificate_manager.js'; import {Zippy} from '../../core/zippy.js'; import * as constants from '../../core/constants.js'; -import {Templates} from '../../core/templates.js'; -import {Task} from 
'../../core/task.js'; import { DEFAULT_NETWORK_NODE_NAME, FREEZE_ADMIN_ACCOUNT, @@ -33,6 +31,8 @@ import { IGNORED_NODE_ACCOUNT_ID, TREASURY_ACCOUNT_ID, } from '../../core/constants.js'; +import {Templates} from '../../core/templates.js'; +import {Task} from '../../core/task.js'; import { AccountBalanceQuery, AccountId, @@ -42,10 +42,10 @@ import { FreezeTransaction, FreezeType, Long, - PrivateKey, NodeCreateTransaction, NodeDeleteTransaction, NodeUpdateTransaction, + PrivateKey, Timestamp, } from '@hashgraph/sdk'; import {IllegalArgumentError, MissingArgumentError, SoloError} from '../../core/errors.js'; @@ -68,18 +68,17 @@ import { type ConfigBuilder, type NodeAlias, type NodeAliases, - type NodeId, type PodName, type SkipCheck, } from '../../types/aliases.js'; import {NodeStatusCodes, NodeStatusEnums, NodeSubcommandType} from '../../core/enumerations.js'; -import * as x509 from '@peculiar/x509'; import type {NodeDeleteConfigClass, NodeRefreshConfigClass, NodeUpdateConfigClass} from './configs.js'; import {type Lease} from '../../core/lease/lease.js'; import {ListrLease} from '../../core/lease/listr_lease.js'; import {Duration} from '../../core/time/duration.js'; import {type BaseCommand} from '../base.js'; import {type NodeAddConfigClass} from './node_add_config.js'; +import {GenesisNetworkDataConstructor} from '../../core/genesis_network_models/genesis_network_data_constructor.js'; export class NodeCommandTasks { private readonly accountManager: AccountManager; @@ -305,16 +304,21 @@ export class NodeCommandTasks { config: {namespace}, } = ctx; + const enableDebugger = ctx.config.debugNodeAlias && status !== NodeStatusCodes.FREEZE_COMPLETE; + const subTasks = nodeAliases.map((nodeAlias, i) => { const reminder = 'debugNodeAlias' in ctx.config && ctx.config.debugNodeAlias === nodeAlias && status !== NodeStatusCodes.FREEZE_COMPLETE - ? 'Please attach JVM debugger now.' + ? 'Please attach JVM debugger now. Sleeping for 1 hour, hit ctrl-c once debugging is complete.' 
: ''; const title = `Check network pod: ${chalk.yellow(nodeAlias)} ${chalk.red(reminder)}`; const subTask = async (ctx: any, task: ListrTaskWrapper) => { + if (enableDebugger) { + await sleep(Duration.ofHours(1)); + } ctx.config.podNames[nodeAlias] = await this._checkNetworkNodeActiveness( namespace, nodeAlias, @@ -511,15 +515,6 @@ export class NodeCommandTasks { ); } - _loadPermCertificate(certFullPath: string) { - const certPem = fs.readFileSync(certFullPath).toString(); - const decodedDers = x509.PemConverter.decode(certPem); - if (!decodedDers || decodedDers.length === 0) { - throw new SoloError('unable to load perm key: ' + certFullPath); - } - return new Uint8Array(decodedDers[0]); - } - async _addStake( namespace: string, accountId: string, @@ -547,7 +542,7 @@ export class NodeCommandTasks { // Create the transaction const transaction = new AccountUpdateTransaction() .setAccountId(accountId) - .setStakedNodeId(Templates.nodeIdFromNodeAlias(nodeAlias) - 1) + .setStakedNodeId(Templates.nodeIdFromNodeAlias(nodeAlias)) .freezeWith(client); // Sign the transaction with the account's private key @@ -875,14 +870,23 @@ export class NodeCommandTasks { }); } - setupNetworkNodes(nodeAliasesProperty: string) { - return new Task('Setup network nodes', (ctx: any, task: ListrTaskWrapper) => { + setupNetworkNodes(nodeAliasesProperty: string, isGenesis: boolean = false) { + return new Task('Setup network nodes', async (ctx: any, task: ListrTaskWrapper) => { + if (isGenesis) { + await this.generateGenesisNetworkJson( + ctx.config.namespace, + ctx.config.nodeAliases, + ctx.config.keysDir, + ctx.config.stagingDir, + ); + } + const subTasks = []; for (const nodeAlias of ctx.config[nodeAliasesProperty]) { const podName = ctx.config.podNames[nodeAlias]; subTasks.push({ title: `Node: ${chalk.yellow(nodeAlias)}`, - task: () => this.platformInstaller.taskSetup(podName), + task: () => this.platformInstaller.taskSetup(podName, ctx.config.stagingDir, isGenesis), }); } @@ -894,6 +898,33 
@@ export class NodeCommandTasks { }); } + /** + * Generate genesis network json file + * @private + * @param namespace - namespace + * @param nodeAliases - node aliases + * @param keysDir - keys directory + * @param stagingDir - staging directory + */ + private async generateGenesisNetworkJson( + namespace: string, + nodeAliases: NodeAliases, + keysDir: string, + stagingDir: string, + ) { + const networkNodeServiceMap = await this.accountManager.getNodeServiceMap(namespace); + + const genesisNetworkData = await GenesisNetworkDataConstructor.initialize( + nodeAliases, + this.keyManager, + keysDir, + networkNodeServiceMap, + ); + + const genesisNetworkJson = path.join(stagingDir, 'genesis-network.json'); + fs.writeFileSync(genesisNetworkJson, genesisNetworkData.toJSON()); + } + prepareStagingDirectory(nodeAliasesProperty: any) { return new Task('Prepare staging directory', (ctx: any, task: ListrTaskWrapper) => { const config = ctx.config; @@ -1005,6 +1036,7 @@ export class NodeCommandTasks { ); await sleep(Duration.ofSeconds(60)); const accountMap = getNodeAccountMap(config.allNodeAliases); + let skipNodeAlias: NodeAlias; switch (transactionType) { case NodeSubcommandType.ADD: @@ -1013,18 +1045,17 @@ export class NodeCommandTasks { if (config.newAccountNumber) { // update map with current account ids accountMap.set(config.nodeAlias, config.newAccountNumber); - - // update _nodeClient with the new service map since one of the account number has changed - await self.accountManager.refreshNodeClient(config.namespace); + skipNodeAlias = config.nodeAlias; } break; case NodeSubcommandType.DELETE: if (config.nodeAlias) { accountMap.delete(config.nodeAlias); + skipNodeAlias = config.nodeAlias; } } - config.nodeClient = await self.accountManager.loadNodeClient(config.namespace); + config.nodeClient = await self.accountManager.refreshNodeClient(config.namespace, skipNodeAlias); // send some write transactions to invoke the handler that will trigger the stake weight 
recalculate for (const nodeAlias of accountMap.keys()) { @@ -1170,6 +1201,7 @@ export class NodeCommandTasks { values.hedera.nodes.push({ accountId: networkNodeServices.accountId, name: networkNodeServices.nodeAlias, + nodeId: networkNodeServices.nodeId, }); maxNum = maxNum > AccountId.fromString(networkNodeServices.accountId).num @@ -1215,7 +1247,7 @@ export class NodeCommandTasks { const config = ctx.config; const signingCertFile = Templates.renderGossipPemPublicKeyFile(config.nodeAlias); const signingCertFullPath = path.join(config.keysDir, signingCertFile); - ctx.signingCertDer = this._loadPermCertificate(signingCertFullPath); + ctx.signingCertDer = this.keyManager.getDerFromPemCertificate(signingCertFullPath); }); } @@ -1224,7 +1256,7 @@ export class NodeCommandTasks { const config = ctx.config; const tlsCertFile = Templates.renderTLSPemPublicKeyFile(config.nodeAlias); const tlsCertFullPath = path.join(config.keysDir, tlsCertFile); - const tlsCertDer = this._loadPermCertificate(tlsCertFullPath); + const tlsCertDer = this.keyManager.getDerFromPemCertificate(tlsCertFullPath); ctx.tlsCertHash = crypto.createHash('sha384').update(tlsCertDer).digest(); }); } @@ -1292,12 +1324,11 @@ export class NodeCommandTasks { return new Task('Send node update transaction', async (ctx: any, task: ListrTaskWrapper) => { const config: NodeUpdateConfigClass = ctx.config; - const nodeId = Templates.nodeIdFromNodeAlias(config.nodeAlias) - 1; + const nodeId = Templates.nodeIdFromNodeAlias(config.nodeAlias); self.logger.info(`nodeId: ${nodeId}, config.newAccountNumber: ${config.newAccountNumber}`); if (config.existingNodeAliases.length > 1) { - await self.accountManager.refreshNodeClient(config.namespace, config.nodeAlias); - config.nodeClient = await this.accountManager.loadNodeClient(config.namespace); + config.nodeClient = await self.accountManager.refreshNodeClient(config.namespace, config.nodeAlias); } try { @@ -1305,7 +1336,7 @@ export class NodeCommandTasks { if 
(config.tlsPublicKey && config.tlsPrivateKey) { self.logger.info(`config.tlsPublicKey: ${config.tlsPublicKey}`); - const tlsCertDer = self._loadPermCertificate(config.tlsPublicKey); + const tlsCertDer = self.keyManager.getDerFromPemCertificate(config.tlsPublicKey); const tlsCertHash = crypto.createHash('sha384').update(tlsCertDer).digest(); nodeUpdateTx = nodeUpdateTx.setCertificateHash(tlsCertHash); @@ -1317,7 +1348,7 @@ export class NodeCommandTasks { if (config.gossipPublicKey && config.gossipPrivateKey) { self.logger.info(`config.gossipPublicKey: ${config.gossipPublicKey}`); - const signingCertDer = self._loadPermCertificate(config.gossipPublicKey); + const signingCertDer = self.keyManager.getDerFromPemCertificate(config.gossipPublicKey); nodeUpdateTx = nodeUpdateTx.setGossipCaCertificate(signingCertDer); const publicKeyFile = Templates.renderGossipPemPublicKeyFile(config.nodeAlias); @@ -1377,26 +1408,32 @@ export class NodeCommandTasks { config.serviceMap = await self.accountManager.getNodeServiceMap(config.namespace); } + let maxNodeId = 0; + for (const nodeAlias of config.existingNodeAliases) { + const nodeId = config.serviceMap.get(nodeAlias).nodeId; + maxNodeId = Math.max(nodeId, maxNodeId); + } + + const nodeId = maxNodeId + 1; const index = config.existingNodeAliases.length; - const nodeId = Templates.nodeIdFromNodeAlias(config.nodeAlias) - 1; let valuesArg = ''; for (let i = 0; i < index; i++) { if (transactionType === NodeSubcommandType.UPDATE && config.newAccountNumber && i === nodeId) { // for the case of updating node // use new account number for this node id - valuesArg += ` --set "hedera.nodes[${i}].accountId=${config.newAccountNumber}" --set "hedera.nodes[${i}].name=${config.existingNodeAliases[i]}"`; + valuesArg += ` --set "hedera.nodes[${i}].accountId=${config.newAccountNumber}" --set "hedera.nodes[${i}].name=${config.existingNodeAliases[i]}" --set "hedera.nodes[${i}].nodeId=${i}" `; } else if (transactionType !== NodeSubcommandType.DELETE || 
i !== nodeId) { // for the case of deleting node - valuesArg += ` --set "hedera.nodes[${i}].accountId=${config.serviceMap.get(config.existingNodeAliases[i]).accountId}" --set "hedera.nodes[${i}].name=${config.existingNodeAliases[i]}"`; + valuesArg += ` --set "hedera.nodes[${i}].accountId=${config.serviceMap.get(config.existingNodeAliases[i]).accountId}" --set "hedera.nodes[${i}].name=${config.existingNodeAliases[i]}" --set "hedera.nodes[${i}].nodeId=${i}" `; } else if (transactionType === NodeSubcommandType.DELETE && i === nodeId) { - valuesArg += ` --set "hedera.nodes[${i}].accountId=${IGNORED_NODE_ACCOUNT_ID}" --set "hedera.nodes[${i}].name=${config.existingNodeAliases[i]}"`; + valuesArg += ` --set "hedera.nodes[${i}].accountId=${IGNORED_NODE_ACCOUNT_ID}" --set "hedera.nodes[${i}].name=${config.existingNodeAliases[i]}" --set "hedera.nodes[${i}].nodeId=${i}" `; } } // for the case of adding a new node if (transactionType === NodeSubcommandType.ADD && ctx.newNode && ctx.newNode.accountId) { - valuesArg += ` --set "hedera.nodes[${index}].accountId=${ctx.newNode.accountId}" --set "hedera.nodes[${index}].name=${ctx.newNode.name}"`; + valuesArg += ` --set "hedera.nodes[${index}].accountId=${ctx.newNode.accountId}" --set "hedera.nodes[${index}].name=${ctx.newNode.name}" --set "hedera.nodes[${index}].nodeId=${nodeId}" `; if (config.haproxyIps) { config.haproxyIpsParsed = Templates.parseNodeAliasToIpMapping(config.haproxyIps); @@ -1407,8 +1444,7 @@ export class NodeCommandTasks { } const nodeAlias: NodeAlias = config.nodeAlias; - const nodeId: NodeId = Templates.nodeIdFromNodeAlias(nodeAlias); - const nodeIndexInValues = nodeId - 1; + const nodeIndexInValues = Templates.nodeIdFromNodeAlias(nodeAlias); // Set static IPs for HAProxy if (config.haproxyIpsParsed) { @@ -1569,7 +1605,7 @@ export class NodeCommandTasks { async (ctx: any, task: ListrTaskWrapper) => { const config = ctx.config; const newNodeFullyQualifiedPodName = Templates.renderNetworkPodName(config.nodeAlias); 
- const nodeId = Templates.nodeIdFromNodeAlias(config.nodeAlias) - 1; + const nodeId = Templates.nodeIdFromNodeAlias(config.nodeAlias); const savedStateDir = config.lastStateZipPath.match(/\/(\d+)\.zip$/)[1]; const savedStatePath = `${constants.HEDERA_HAPI_PATH}/data/saved/com.hedera.services.ServicesMain/${nodeId}/123/${savedStateDir}`; await this.k8.execContainer(newNodeFullyQualifiedPodName, constants.ROOT_CONTAINER, [ @@ -1601,7 +1637,7 @@ export class NodeCommandTasks { const accountMap = getNodeAccountMap(config.existingNodeAliases); const deleteAccountId = accountMap.get(config.nodeAlias); this.logger.debug(`Deleting node: ${config.nodeAlias} with account: ${deleteAccountId}`); - const nodeId = Templates.nodeIdFromNodeAlias(config.nodeAlias) - 1; + const nodeId = Templates.nodeIdFromNodeAlias(config.nodeAlias); const nodeDeleteTx = new NodeDeleteTransaction().setNodeId(nodeId).freezeWith(config.nodeClient); const signedTx = await nodeDeleteTx.sign(config.adminKey); diff --git a/src/core/account_manager.ts b/src/core/account_manager.ts index 2a9af9ec9..b0435bd3b 100644 --- a/src/core/account_manager.ts +++ b/src/core/account_manager.ts @@ -45,7 +45,7 @@ import {K8} from './k8.js'; import {type AccountIdWithKeyPairObject, type ExtendedNetServer} from '../types/index.js'; import {type NodeAlias, type PodName, type SdkNetworkEndpoint} from '../types/aliases.js'; import {IGNORED_NODE_ACCOUNT_ID} from './constants.js'; -import {sleep} from './helpers.js'; +import {isNumeric, sleep} from './helpers.js'; import {Duration} from './time/duration.js'; import {inject, injectable} from 'tsyringe-neo'; import {patchInject} from './container_helper.js'; @@ -194,6 +194,7 @@ export class AccountManager { treasuryAccountInfo.privateKey, skipNodeAlias, ); + return this._nodeClient; } /** @@ -388,17 +389,23 @@ export class AccountManager { labelSelector, ); + let nodeId = '0'; // retrieve the list of services and build custom objects for the attributes we need for (const 
service of serviceList.body.items) { - const serviceType = service.metadata.labels['solo.hedera.com/type']; let serviceBuilder = new NetworkNodeServicesBuilder( service.metadata.labels['solo.hedera.com/node-name'] as NodeAlias, ); if (serviceBuilderMap.has(serviceBuilder.key())) { serviceBuilder = serviceBuilderMap.get(serviceBuilder.key()) as NetworkNodeServicesBuilder; + } else { + serviceBuilder = new NetworkNodeServicesBuilder( + service.metadata.labels['solo.hedera.com/node-name'] as NodeAlias, + ); + serviceBuilder.withNamespace(namespace); } + const serviceType = service.metadata.labels['solo.hedera.com/type']; switch (serviceType) { // solo.hedera.com/type: envoy-proxy-svc case 'envoy-proxy-svc': @@ -413,7 +420,6 @@ export class AccountManager { // solo.hedera.com/type: haproxy-svc case 'haproxy-svc': serviceBuilder - .withAccountId(service.metadata!.labels!['solo.hedera.com/account-id']) .withHaProxyAppSelector(service.spec!.selector!.app) .withHaProxyName(service.metadata!.name as string) .withHaProxyClusterIp(service.spec!.clusterIP as string) @@ -426,7 +432,22 @@ export class AccountManager { break; // solo.hedera.com/type: network-node-svc case 'network-node-svc': + if ( + service.metadata!.labels!['solo.hedera.com/node-id'] !== '' && + isNumeric(service.metadata!.labels!['solo.hedera.com/node-id']) + ) { + nodeId = service.metadata!.labels!['solo.hedera.com/node-id']; + } else { + nodeId = `${Templates.nodeIdFromNodeAlias(service.metadata.labels['solo.hedera.com/node-name'] as NodeAlias)}`; + this.logger.warn( + `received an incorrect node id of ${service.metadata!.labels!['solo.hedera.com/node-id']} for ` + + `${service.metadata.labels['solo.hedera.com/node-name']}`, + ); + } + serviceBuilder + .withNodeId(nodeId) + .withAccountId(service.metadata!.labels!['solo.hedera.com/account-id']) .withNodeServiceName(service.metadata!.name as string) .withNodeServiceClusterIp(service.spec!.clusterIP as string) .withNodeServiceLoadBalancerIp( diff --git 
a/src/core/config/local_config.ts b/src/core/config/local_config.ts index ce0712568..9dbfb910d 100644 --- a/src/core/config/local_config.ts +++ b/src/core/config/local_config.ts @@ -15,7 +15,6 @@ * */ import {IsEmail, IsNotEmpty, IsObject, IsString, validateSync} from 'class-validator'; -import type {ListrTask, ListrTaskWrapper} from 'listr2'; import fs from 'fs'; import * as yaml from 'yaml'; import {Flags as flags} from '../../commands/flags.js'; @@ -35,6 +34,7 @@ import {type K8} from '../k8.js'; import {splitFlagInput} from '../helpers.js'; import {inject, injectable} from 'tsyringe-neo'; import {patchInject} from '../container_helper.js'; +import type {SoloListrTask, SoloListrTaskWrapper} from '../../types/index.js'; @injectable() export class LocalConfig implements LocalConfigData { @@ -162,13 +162,17 @@ export class LocalConfig implements LocalConfigData { this.logger.info(`Wrote local config to ${this.filePath}: ${yamlContent}`); } - public promptLocalConfigTask(k8: K8): ListrTask { + public promptLocalConfigTask(k8: K8): SoloListrTask { const self = this; return { title: 'Prompt local configuration', skip: this.skipPromptTask, - task: async (_: any, task: ListrTaskWrapper): Promise => { + task: async (_: any, task: SoloListrTaskWrapper): Promise => { + if (self.configFileExists) { + self.configManager.setFlag(flags.userEmailAddress, self.userEmailAddress); + } + const isQuiet = self.configManager.getFlag(flags.quiet); const contexts = self.configManager.getFlag(flags.context); const deploymentName = self.configManager.getFlag(flags.namespace); diff --git a/src/core/config/remote/remote_config_manager.ts b/src/core/config/remote/remote_config_manager.ts index 884893056..d611d357a 100644 --- a/src/core/config/remote/remote_config_manager.ts +++ b/src/core/config/remote/remote_config_manager.ts @@ -231,7 +231,7 @@ export class RemoteConfigManager { * @returns the remote configuration data. 
* @throws {@link SoloError} if the ConfigMap could not be read and the error is not a 404 status. */ - private async getConfigMap(): Promise { + public async getConfigMap(): Promise { try { return await this.k8.getNamespacedConfigMap(constants.SOLO_REMOTE_CONFIGMAP_NAME); } catch (error: any) { diff --git a/src/core/config_manager.ts b/src/core/config_manager.ts index 178264200..168ea14f7 100644 --- a/src/core/config_manager.ts +++ b/src/core/config_manager.ts @@ -24,6 +24,7 @@ import type * as yargs from 'yargs'; import {type CommandFlag} from '../types/flag_types.js'; import {type ListrTaskWrapper} from 'listr2'; import {patchInject} from './container_helper.js'; +import * as constants from '../core/constants.js'; /** * ConfigManager cache command flag values so that user doesn't need to enter the same values repeatedly. @@ -110,6 +111,14 @@ export class ConfigManager { this.config.flags[flag.name] = val === true || val === 'true'; // use comparison to enforce boolean value break; + case 'StorageType': + // @ts-ignore + if (!Object.values(constants.StorageType).includes(`${val}`)) { + throw new SoloError(`Invalid storage type value '${val}'`); + } else { + this.config.flags[flag.name] = val; + } + break; default: throw new SoloError(`Unsupported field type for flag '${flag.name}': ${flag.definition.type}`); } diff --git a/src/core/constants.ts b/src/core/constants.ts index eaf91d2c6..9daaaef33 100644 --- a/src/core/constants.ts +++ b/src/core/constants.ts @@ -35,6 +35,7 @@ export const ROOT_CONTAINER = 'root-container'; export const SOLO_REMOTE_CONFIGMAP_NAME = 'solo-remote-config'; export const SOLO_REMOTE_CONFIGMAP_LABELS = {'solo.hedera.com/type': 'remote-config'}; export const SOLO_REMOTE_CONFIG_MAX_COMMAND_IN_HISTORY = 50; +export const SOLO_REMOTE_CONFIGMAP_LABEL_SELECTOR = 'solo.hedera.com/type=remote-config'; // --------------- Hedera network and node related constants -------------------------------------------------------------------- export const 
HEDERA_CHAIN_ID = process.env.SOLO_CHAIN_ID || '298'; @@ -211,3 +212,14 @@ export const NETWORK_DESTROY_WAIT_TIMEOUT = +process.env.NETWORK_DESTROY_WAIT_TI export const DEFAULT_LOCAL_CONFIG_FILE = 'local-config.yaml'; export const IGNORED_NODE_ACCOUNT_ID = '0.0.0'; + +export const UPLOADER_SECRET_NAME = 'uploader-mirror-secrets'; +export const MINIO_SECRET_NAME = 'minio-secrets'; + +export const enum StorageType { + MINIO_ONLY = 'minio_only', + GCS_AND_MINIO = 'gcs_and_minio', + S3_ONLY = 's3_only', + GCS_ONLY = 'gcs_only', + S3_AND_GCS = 's3_and_gcs', +} diff --git a/src/core/genesis_network_models/genesis_network_data_constructor.ts b/src/core/genesis_network_models/genesis_network_data_constructor.ts index 4498c6e3d..3e3b4bd6e 100644 --- a/src/core/genesis_network_models/genesis_network_data_constructor.ts +++ b/src/core/genesis_network_models/genesis_network_data_constructor.ts @@ -14,34 +14,58 @@ * limitations under the License. * */ -import crypto from 'node:crypto'; -import {PrivateKey} from '@hashgraph/sdk'; -import {Templates} from '../templates.js'; +import {AccountId, PrivateKey} from '@hashgraph/sdk'; import {GenesisNetworkNodeDataWrapper} from './genesis_network_node_data_wrapper.js'; -import * as x509 from '@peculiar/x509'; import * as constants from '../constants.js'; import type {KeyManager} from '../key_manager.js'; import type {ToJSON} from '../../types/index.js'; import type {JsonString, NodeAlias, NodeAliases} from '../../types/aliases.js'; +import {GenesisNetworkRosterEntryDataWrapper} from './genesis_network_roster_entry_data_wrapper.js'; +import {Templates} from '../templates.js'; +import path from 'path'; +import type {NetworkNodeServices} from '../network_node_services.js'; /** * Used to construct the nodes data and convert them to JSON */ export class GenesisNetworkDataConstructor implements ToJSON { public readonly nodes: Record = {}; + public readonly rosters: Record = {}; private constructor( private readonly nodeAliases: NodeAliases, 
private readonly keyManager: KeyManager, private readonly keysDir: string, + private readonly networkNodeServiceMap: Map, ) { - nodeAliases.forEach((nodeAlias, nodeId) => { - // TODO: get nodeId from label in pod. + nodeAliases.forEach(nodeAlias => { const adminPrivateKey = PrivateKey.fromStringED25519(constants.GENESIS_KEY); const adminPubKey = adminPrivateKey.publicKey; - this.nodes[nodeAlias] = new GenesisNetworkNodeDataWrapper(nodeId, adminPubKey, nodeAlias); + const nodeDataWrapper = new GenesisNetworkNodeDataWrapper( + +networkNodeServiceMap.get(nodeAlias).nodeId, + adminPubKey, + nodeAlias, + ); + this.nodes[nodeAlias] = nodeDataWrapper; + nodeDataWrapper.accountId = AccountId.fromString(networkNodeServiceMap.get(nodeAlias).accountId); + + const rosterDataWrapper = new GenesisNetworkRosterEntryDataWrapper(+networkNodeServiceMap.get(nodeAlias).nodeId); + this.rosters[nodeAlias] = rosterDataWrapper; + rosterDataWrapper.weight = this.nodes[nodeAlias].weight = constants.HEDERA_NODE_DEFAULT_STAKE_AMOUNT; + + const externalPort = +constants.HEDERA_NODE_EXTERNAL_GOSSIP_PORT; + const namespace = networkNodeServiceMap.get(nodeAlias).namespace; + const externalIP = Templates.renderFullyQualifiedNetworkSvcName(namespace, nodeAlias); + // Add gossip endpoints + nodeDataWrapper.addGossipEndpoint(externalIP, externalPort); + rosterDataWrapper.addGossipEndpoint(externalIP, externalPort); + + const haProxyFqdn = Templates.renderFullyQualifiedHaProxyName(nodeAlias, namespace); + + // Add service endpoints + nodeDataWrapper.addServiceEndpoint(haProxyFqdn, constants.GRPC_PORT); }); } @@ -49,8 +73,9 @@ export class GenesisNetworkDataConstructor implements ToJSON { nodeAliases: NodeAliases, keyManager: KeyManager, keysDir: string, + networkNodeServiceMap: Map, ): Promise { - const instance = new GenesisNetworkDataConstructor(nodeAliases, keyManager, keysDir); + const instance = new GenesisNetworkDataConstructor(nodeAliases, keyManager, keysDir, networkNodeServiceMap); await 
instance.load(); @@ -63,24 +88,29 @@ export class GenesisNetworkDataConstructor implements ToJSON { private async load() { await Promise.all( this.nodeAliases.map(async nodeAlias => { - const nodeKeys = await this.keyManager.loadSigningKey(nodeAlias, this.keysDir); - - //* Convert the certificate to PEM format - const certPem = nodeKeys.certificate.toString(); - - //* Assign the PEM certificate - this.nodes[nodeAlias].gossipCaCertificate = nodeKeys.certificate.toString('base64'); + const signingCertFile = Templates.renderGossipPemPublicKeyFile(nodeAlias); + const signingCertFullPath = path.join(this.keysDir, signingCertFile); + const derCertificate = this.keyManager.getDerFromPemCertificate(signingCertFullPath); - //* Decode the PEM to DER format - const tlsCertDer = new Uint8Array(x509.PemConverter.decode(certPem)[0]); + //* Assign the DER formatted certificate + this.rosters[nodeAlias].gossipCaCertificate = this.nodes[nodeAlias].gossipCaCertificate = + Buffer.from(derCertificate).toString('base64'); //* Generate the SHA-384 hash - this.nodes[nodeAlias].grpcCertificateHash = crypto.createHash('sha384').update(tlsCertDer).digest('base64'); + this.nodes[nodeAlias].grpcCertificateHash = ''; }), ); } public toJSON(): JsonString { - return JSON.stringify({nodeMetadata: Object.values(this.nodes).map(node => node.toObject())}); + const nodeMetadata = []; + Object.keys(this.nodes).forEach(nodeAlias => { + nodeMetadata.push({ + node: this.nodes[nodeAlias].toObject(), + rosterEntry: this.rosters[nodeAlias].toObject(), + }); + }); + + return JSON.stringify({nodeMetadata: nodeMetadata}); } } diff --git a/src/commands/context/configs.ts b/src/core/genesis_network_models/genesis_network_data_wrapper.ts similarity index 50% rename from src/commands/context/configs.ts rename to src/core/genesis_network_models/genesis_network_data_wrapper.ts index 6a60f41fe..9f6786d77 100644 --- a/src/commands/context/configs.ts +++ b/src/core/genesis_network_models/genesis_network_data_wrapper.ts 
@@ -14,28 +14,21 @@ * limitations under the License. * */ +import type {NodeId} from '../../types/aliases.js'; +import type {ServiceEndpoint} from '../../types/index.js'; -import {type NodeAlias} from '../../types/aliases.js'; +export abstract class GenesisNetworkDataWrapper { + public gossipEndpoint: ServiceEndpoint[] = []; + public weight: number; + public gossipCaCertificate: string; -export const CONNECT_CONFIGS_NAME = 'connectConfig'; + protected constructor(public readonly nodeId: NodeId) {} -export const connectConfigBuilder = async function (argv, ctx, task) { - const config = this.getConfig(CONNECT_CONFIGS_NAME, argv.flags, [ - 'currentDeploymentName', - ]) as ContextConnectConfigClass; - - // set config in the context for later tasks to use - ctx.config = config; - - return ctx.config; -}; - -export interface ContextConnectConfigClass { - app: string; - cacheDir: string; - devMode: boolean; - namespace: string; - nodeAlias: NodeAlias; - context: string; - clusterName: string; + /** + * @param domainName - a fully qualified domain name + * @param port + */ + public addGossipEndpoint(domainName: string, port: number): void { + this.gossipEndpoint.push({domainName, port, ipAddressV4: ''}); + } } diff --git a/src/core/genesis_network_models/genesis_network_node_data_wrapper.ts b/src/core/genesis_network_models/genesis_network_node_data_wrapper.ts index 20183c393..04e593225 100644 --- a/src/core/genesis_network_models/genesis_network_node_data_wrapper.ts +++ b/src/core/genesis_network_models/genesis_network_node_data_wrapper.ts @@ -15,24 +15,25 @@ * */ import type {AccountId, PublicKey} from '@hashgraph/sdk'; -import type {GenesisNetworkNodeStructure, ServiceEndpoint, ToObject} from '../../types/index.js'; +import type {GenesisNetworkNodeStructure, NodeAccountId, ServiceEndpoint, ToObject} from '../../types/index.js'; +import {GenesisNetworkDataWrapper} from './genesis_network_data_wrapper.js'; export class GenesisNetworkNodeDataWrapper - implements 
GenesisNetworkNodeStructure, ToObject<{node: GenesisNetworkNodeStructure}> + extends GenesisNetworkDataWrapper + implements ToObject { public accountId: AccountId; - public gossipEndpoint: ServiceEndpoint[] = []; public serviceEndpoint: ServiceEndpoint[] = []; - public gossipCaCertificate: string; public grpcCertificateHash: string; - public weight: number; public readonly deleted = false; constructor( public readonly nodeId: number, public readonly adminKey: PublicKey, public readonly description: string, - ) {} + ) { + super(nodeId); + } /** * @param domainName - a fully qualified domain name @@ -42,28 +43,18 @@ export class GenesisNetworkNodeDataWrapper this.serviceEndpoint.push({domainName, port, ipAddressV4: ''}); } - /** - * @param domainName - a fully qualified domain name - * @param port - */ - public addGossipEndpoint(domainName: string, port: number): void { - this.gossipEndpoint.push({domainName, port, ipAddressV4: ''}); - } - public toObject() { return { - node: { - nodeId: this.nodeId, - accountId: this.accountId, - description: this.description, - gossipEndpoint: this.gossipEndpoint, - serviceEndpoint: this.serviceEndpoint, - gossipCaCertificate: this.gossipCaCertificate, - grpcCertificateHash: this.grpcCertificateHash, - weight: this.weight, - deleted: this.deleted, - adminKey: this.adminKey, - }, + nodeId: this.nodeId, + accountId: {accountNum: `${this.accountId.num}`} as unknown as NodeAccountId, + description: this.description, + gossipEndpoint: this.gossipEndpoint, + serviceEndpoint: this.serviceEndpoint, + gossipCaCertificate: this.gossipCaCertificate, + grpcCertificateHash: this.grpcCertificateHash, + weight: this.weight, + deleted: this.deleted, + adminKey: this.adminKey, }; } } diff --git a/src/core/genesis_network_models/genesis_network_roster_entry_data_wrapper.ts b/src/core/genesis_network_models/genesis_network_roster_entry_data_wrapper.ts new file mode 100644 index 000000000..9f5a4ee73 --- /dev/null +++ 
b/src/core/genesis_network_models/genesis_network_roster_entry_data_wrapper.ts @@ -0,0 +1,37 @@ +/** + * Copyright (C) 2024 Hedera Hashgraph, LLC + * + * Licensed under the Apache License, Version 2.0 (the ""License""); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an ""AS IS"" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ +import {GenesisNetworkDataWrapper} from './genesis_network_data_wrapper.js'; +import type {NodeId} from '../../types/aliases.js'; +import type {GenesisNetworkRosterStructure, ToObject} from '../../types/index.js'; + +export class GenesisNetworkRosterEntryDataWrapper + extends GenesisNetworkDataWrapper + implements GenesisNetworkRosterStructure, ToObject +{ + constructor(public readonly nodeId: NodeId) { + super(nodeId); + } + + public toObject() { + return { + nodeId: this.nodeId, + gossipEndpoint: this.gossipEndpoint, + gossipCaCertificate: this.gossipCaCertificate, + weight: this.weight, + }; + } +} diff --git a/src/core/helpers.ts b/src/core/helpers.ts index c850a4a7e..719b31921 100644 --- a/src/core/helpers.ts +++ b/src/core/helpers.ts @@ -226,7 +226,7 @@ export function renameAndCopyFile(srcFilePath: string, expectedBaseName: string, */ export function addDebugOptions(valuesArg: string, debugNodeAlias: NodeAlias, index = 0) { if (debugNodeAlias) { - const nodeId = Templates.nodeIdFromNodeAlias(debugNodeAlias) - 1; + const nodeId = Templates.nodeIdFromNodeAlias(debugNodeAlias); valuesArg += ` --set "hedera.nodes[${nodeId}].root.extraEnv[${index}].name=JAVA_OPTS"`; valuesArg += ` --set 
"hedera.nodes[${nodeId}].root.extraEnv[${index}].value=-agentlib:jdwp=transport=dt_socket\\,server=y\\,suspend=y\\,address=*:${constants.JVM_DEBUG_PORT}"`; } diff --git a/src/core/k8.ts b/src/core/k8.ts index 214058d2b..d2ee3694a 100644 --- a/src/core/k8.ts +++ b/src/core/k8.ts @@ -33,11 +33,12 @@ import * as constants from './constants.js'; import {ConfigManager} from './config_manager.js'; import {SoloLogger} from './logging.js'; import {type PodName, type TarCreateFilter} from '../types/aliases.js'; -import type {ExtendedNetServer, LocalContextObject} from '../types/index.js'; +import type {ExtendedNetServer, LocalContextObject, Optional} from '../types/index.js'; import {HEDERA_HAPI_PATH, ROOT_CONTAINER, SOLO_LOGS_DIR} from './constants.js'; import {Duration} from './time/duration.js'; import {inject, injectable} from 'tsyringe-neo'; import {patchInject} from './container_helper.js'; +import type {Namespace} from './config/remote/types.js'; interface TDirectoryData { directory: boolean; @@ -65,6 +66,7 @@ export class K8 { private kubeConfig!: k8s.KubeConfig; kubeClient!: k8s.CoreV1Api; private coordinationApiClient: k8s.CoordinationV1Api; + private networkingApi: k8s.NetworkingV1Api; constructor( @inject(ConfigManager) private readonly configManager?: ConfigManager, @@ -93,6 +95,7 @@ export class K8 { } this.kubeClient = this.kubeConfig.makeApiClient(k8s.CoreV1Api); + this.networkingApi = this.kubeConfig.makeApiClient(k8s.NetworkingV1Api); this.coordinationApiClient = this.kubeConfig.makeApiClient(k8s.CoordinationV1Api); return this; // to enable chaining @@ -104,7 +107,7 @@ export class K8 { * @param [filters] - an object with metadata fields and value * @returns a list of items that match the filters */ - applyMetadataFilter(items: (object | any)[], filters = {}) { + applyMetadataFilter(items: (object | any)[], filters: Record = {}) { if (!filters) throw new MissingArgumentError('filters are required'); const matched = []; @@ -135,7 +138,7 @@ export class K8 
{ * @param items - list of items * @param [filters] - an object with metadata fields and value */ - filterItem(items: (object | any)[], filters = {}) { + filterItem(items: (object | any)[], filters: Record = {}) { const filtered = this.applyMetadataFilter(items, filters); if (filtered.length > 1) throw new SoloError('multiple items found with filters', {filters}); return filtered[0]; @@ -171,8 +174,7 @@ export class K8 { if (resp.body && resp.body.items) { const namespaces: string[] = []; resp.body.items.forEach(item => { - // @ts-ignore - namespaces.push(item.metadata.name as string); + namespaces.push(item.metadata!.name); }); return namespaces; @@ -400,7 +402,7 @@ export class K8 { } return items; - } catch (e: Error | any) { + } catch (e) { throw new SoloError(`unable to check path in '${podName}':${containerName}' - ${destPath}: ${e.message}`, e); } } @@ -427,12 +429,10 @@ export class K8 { for (const entry of filterMap.entries()) { const field = entry[0]; const value = entry[1]; - // @ts-ignore this.logger.debug( `Checking file ${podName}:${containerName} ${destPath}; ${field} expected ${value}, found ${item[field]}`, {filters}, ); - // @ts-ignore if (`${value}` !== `${item[field]}`) { found = false; break; @@ -445,7 +445,7 @@ export class K8 { } } } - } catch (e: Error | any) { + } catch (e) { const error = new SoloError( `unable to check file in '${podName}':${containerName}' - ${destPath}: ${e.message}`, e, @@ -644,7 +644,7 @@ export class K8 { self.registerErrorStreamOnError(localContext, messagePrefix, inputPassthroughStream); }); - } catch (e: Error | any) { + } catch (e) { const errorMessage = `${messagePrefix} failed to upload file: ${e.message}`; self.logger.error(errorMessage, e); throw new SoloError(errorMessage, e); @@ -778,7 +778,7 @@ export class K8 { localContext, `${messagePrefix} files did not match, srcFileSize=${srcFileSize}, stat.size=${stat?.size}`, ); - } catch (e: Error | any) { + } catch { return self.exitWithError(localContext, 
`${messagePrefix} failed to complete download`); } }); @@ -789,7 +789,7 @@ export class K8 { self.registerErrorStreamOnError(localContext, messagePrefix, outputFileStream); }); - } catch (e: Error | any) { + } catch (e) { const errorMessage = `${messagePrefix}failed to download file: ${e.message}`; self.logger.error(errorMessage, e); throw new SoloError(errorMessage, e); @@ -991,7 +991,7 @@ export class K8 { if (isPortOpen) { return; } - } catch (e: Error | any) { + } catch { return; } await sleep(Duration.ofMillis(timeout)); @@ -1007,7 +1007,7 @@ export class K8 { podCount = 1, maxAttempts = constants.PODS_RUNNING_MAX_ATTEMPTS, delay = constants.PODS_RUNNING_DELAY, - podItemPredicate?: (items: k8s.V1Pod) => any, + podItemPredicate?: (items: k8s.V1Pod) => boolean, ): Promise { const ns = this._getNamespace(); const labelSelector = labels.join(','); @@ -1017,7 +1017,7 @@ export class K8 { return new Promise((resolve, reject) => { let attempts = 0; - const check = async (resolve: (items: k8s.V1Pod[]) => void, reject: (reason?: any) => void) => { + const check = async (resolve: (items: k8s.V1Pod[]) => void, reject: (reason?: Error) => void) => { // wait for the pod to be available with the given status and labels try { const resp = await this.kubeClient.listNamespacedPod( @@ -1055,7 +1055,7 @@ export class K8 { return resolve(resp.body.items); } } - } catch (e: Error | any) { + } catch (e) { this.logger.info('Error occurred while waiting for pods, retrying', e); } @@ -1084,7 +1084,7 @@ export class K8 { async waitForPodReady(labels: string[] = [], podCount = 1, maxAttempts = 10, delay = 500) { try { return await this.waitForPodConditions(K8.PodReadyCondition, labels, podCount, maxAttempts, delay); - } catch (e: Error | any) { + } catch (e: Error | unknown) { throw new SoloError(`Pod not ready [maxAttempts = ${maxAttempts}]`, e); } } @@ -1263,7 +1263,7 @@ export class K8 { namespace: string, secretType: string, data: Record, - labels: any, + labels: Optional>, 
recreate: boolean, ) { if (recreate) { @@ -1287,7 +1287,7 @@ export class K8 { const resp = await this.kubeClient.createNamespacedSecret(namespace, v1Secret); return resp.response.statusCode === StatusCodes.CREATED; - } catch (e: Error | any) { + } catch (e) { throw new SoloError( `failed to create secret ${name} in namespace ${namespace}: ${e.message}, ${e?.body?.message}`, e, @@ -1297,7 +1297,7 @@ export class K8 { /** * Delete a secret from the namespace - * @param name - the name of the new secret + * @param name - the name of the existing secret * @param namespace - the namespace to store the secret * @returns whether the secret was deleted successfully */ @@ -1345,7 +1345,7 @@ export class K8 { const resp = await this.kubeClient.createNamespacedConfigMap(namespace, configMap); return resp.response.statusCode === StatusCodes.CREATED; - } catch (e: Error | any) { + } catch (e) { throw new SoloError( `failed to create configmap ${name} in namespace ${namespace}: ${e.message}, ${e?.body?.message}`, e, @@ -1377,7 +1377,7 @@ export class K8 { const resp = await this.kubeClient.replaceNamespacedConfigMap(name, namespace, configMap); return resp.response.statusCode === StatusCodes.CREATED; - } catch (e: Error | any) { + } catch (e) { throw new SoloError( `failed to create configmap ${name} in namespace ${namespace}: ${e.message}, ${e?.body?.message}`, e, @@ -1390,7 +1390,7 @@ export class K8 { const resp = await this.kubeClient.deleteNamespacedConfigMap(name, namespace); return resp.response.statusCode === StatusCodes.CREATED; - } catch (e: Error | any) { + } catch (e) { throw new SoloError( `failed to create configmap ${name} in namespace ${namespace}: ${e.message}, ${e?.body?.message}`, e, @@ -1399,6 +1399,7 @@ export class K8 { } // --------------------------------------- LEASES --------------------------------------- // + async createNamespacedLease(namespace: string, leaseName: string, holderName: string, durationSeconds = 20) { const lease = new k8s.V1Lease(); 
@@ -1471,6 +1472,100 @@ export class K8 { return body as k8s.V1Status; } + // --------------------------------------- Pod Identifiers --------------------------------------- // + + /** + * Check if cert-manager is installed inside any namespace. + * @returns if cert-manager is found + */ + public async isCertManagerInstalled(): Promise { + try { + const pods = await this.kubeClient.listPodForAllNamespaces(undefined, undefined, undefined, 'app=cert-manager'); + + return pods.body.items.length > 0; + } catch (e) { + this.logger.error('Failed to find cert-manager:', e); + + return false; + } + } + + /** + * Check if minio is installed inside the namespace. + * @returns if minio is found + */ + public async isMinioInstalled(namespace: Namespace): Promise { + try { + // TODO DETECT THE OPERATOR + const pods = await this.kubeClient.listNamespacedPod( + namespace, + undefined, + undefined, + undefined, + undefined, + 'app=minio', + ); + + return pods.body.items.length > 0; + } catch (e) { + this.logger.error('Failed to find cert-manager:', e); + + return false; + } + } + + /** + * Check if the ingress controller is installed inside any namespace. 
+ * @returns if ingress controller is found + */ + public async isIngressControllerInstalled(): Promise { + try { + const response = await this.networkingApi.listIngressClass(); + + return response.body.items.length > 0; + } catch (e) { + this.logger.error('Failed to find cert-manager:', e); + + return false; + } + } + + public async isRemoteConfigPresentInAnyNamespace() { + try { + const configmaps = await this.kubeClient.listConfigMapForAllNamespaces( + undefined, + undefined, + undefined, + constants.SOLO_REMOTE_CONFIGMAP_LABEL_SELECTOR, + ); + + return configmaps.body.items.length > 0; + } catch (e) { + this.logger.error('Failed to find cert-manager:', e); + + return false; + } + } + + public async isPrometheusInstalled(namespace: Namespace) { + try { + const pods = await this.kubeClient.listNamespacedPod( + namespace, + undefined, + undefined, + undefined, + undefined, + 'app.kubernetes.io/name=prometheus', + ); + + return pods.body.items.length > 0; + } catch (e) { + this.logger.error('Failed to find cert-manager:', e); + + return false; + } + } + /* ------------- Utilities ------------- */ /** @@ -1480,7 +1575,11 @@ export class K8 { * * @throws SoloError - if the status code is not OK */ - private handleKubernetesClientError(response: http.IncomingMessage, error: Error | any, errorMessage: string): void { + private handleKubernetesClientError( + response: http.IncomingMessage, + error: Error | unknown, + errorMessage: string, + ): void { const statusCode = +response?.statusCode || StatusCodes.INTERNAL_SERVER_ERROR; if (statusCode <= StatusCodes.ACCEPTED) return; @@ -1490,7 +1589,7 @@ export class K8 { throw new SoloError(errorMessage, errorMessage, {statusCode: statusCode}); } - private _getNamespace() { + private _getNamespace(): Namespace { const ns = this.configManager.getFlag(flags.namespace); if (!ns) throw new MissingArgumentError('namespace is not set'); return ns; @@ -1582,7 +1681,7 @@ export class K8 { ]); await this.execContainer(podName, 
ROOT_CONTAINER, `${HEDERA_HAPI_PATH}/${scriptName}`); await this.copyFrom(podName, ROOT_CONTAINER, `${HEDERA_HAPI_PATH}/data/${podName}.zip`, targetDir); - } catch (e: Error | any) { + } catch (e: Error | unknown) { // not throw error here, so we can continue to finish downloading logs from other pods // and also delete namespace in the end this.logger.error(`${constants.NODE_LOG_FAILURE_MSG} ${podName}`, e); @@ -1592,13 +1691,16 @@ export class K8 { /** * Download state files from a pod - * @param k8 - an instance of core/K8 * @param namespace - the namespace of the network * @param nodeAlias - the pod name * @returns a promise that resolves when the state files are downloaded */ async getNodeStatesFromPod(namespace: string, nodeAlias: string) { - const pods = await this.getPodsByLabel([`solo.hedera.com/node-name=${nodeAlias}`]); + const pods = await this.getPodsByLabel([ + `solo.hedera.com/node-name=${nodeAlias}`, + 'solo.hedera.com/type=network-node', + ]); + // get length of pods const promises = []; for (const pod of pods) { @@ -1618,7 +1720,7 @@ export class K8 { const zipCommand = `tar -czf ${HEDERA_HAPI_PATH}/${podName}-state.zip -C ${HEDERA_HAPI_PATH}/data/saved .`; await this.execContainer(podName, ROOT_CONTAINER, zipCommand); await this.copyFrom(podName, ROOT_CONTAINER, `${HEDERA_HAPI_PATH}/${podName}-state.zip`, targetDir); - } catch (e: Error | any) { + } catch (e: Error | unknown) { this.logger.error(`failed to download state from pod ${podName}`, e); this.logger.showUser(`Failed to download state from pod ${podName}` + e); } diff --git a/src/core/key_manager.ts b/src/core/key_manager.ts index 98ac6759f..e7f6b216a 100644 --- a/src/core/key_manager.ts +++ b/src/core/key_manager.ts @@ -509,4 +509,17 @@ export class KeyManager { return subTasks; } + + /** + * Given the path to the PEM certificate (Base64 ASCII), will return the DER (raw binary) bytes + * @param pemCertFullPath + */ + getDerFromPemCertificate(pemCertFullPath: string) { + const certPem = 
fs.readFileSync(pemCertFullPath).toString(); + const decodedDers = x509.PemConverter.decode(certPem); + if (!decodedDers || decodedDers.length === 0) { + throw new SoloError('unable to load perm key: ' + pemCertFullPath); + } + return new Uint8Array(decodedDers[0]); + } } diff --git a/src/core/network_node_services.ts b/src/core/network_node_services.ts index fccdf8349..722c4c482 100644 --- a/src/core/network_node_services.ts +++ b/src/core/network_node_services.ts @@ -19,6 +19,8 @@ import type {NodeAlias, PodName} from '../types/aliases.js'; export class NetworkNodeServices { public readonly nodeAlias: NodeAlias; + public readonly namespace: string; + public readonly nodeId: string | number; public readonly nodePodName?: PodName; public readonly haProxyName?: string; public readonly haProxyLoadBalancerIp?: string; @@ -41,6 +43,8 @@ export class NetworkNodeServices { constructor(builder: NetworkNodeServicesBuilder) { this.nodeAlias = builder.nodeAlias; + this.namespace = builder.namespace; + this.nodeId = builder.nodeId; this.nodePodName = builder.nodePodName; this.haProxyName = builder.haProxyName; this.haProxyLoadBalancerIp = builder.haProxyLoadBalancerIp; @@ -68,6 +72,8 @@ export class NetworkNodeServices { } export class NetworkNodeServicesBuilder { + public namespace?: string; + public nodeId?: string | number; public haProxyName?: string; public accountId?: string; public haProxyClusterIp!: string; @@ -91,6 +97,16 @@ export class NetworkNodeServicesBuilder { constructor(public readonly nodeAlias: NodeAlias) {} + withNamespace(namespace: string) { + this.namespace = namespace; + return this; + } + + withNodeId(nodeId: string | number) { + this.nodeId = nodeId; + return this; + } + withAccountId(accountId: string) { this.accountId = accountId; return this; diff --git a/src/core/platform_installer.ts b/src/core/platform_installer.ts index b916c118e..6d20cc2b6 100644 --- a/src/core/platform_installer.ts +++ b/src/core/platform_installer.ts @@ -278,10 +278,14 @@ 
export class PlatformInstaller { } /** Return a list of task to perform node directory setup */ - taskSetup(podName: PodName) { + taskSetup(podName: PodName, stagingDir: string, isGenesis: boolean) { const self = this; return new Listr( [ + { + title: 'Copy configuration files', + task: async () => await self.copyConfigurationFiles(stagingDir, podName, isGenesis), + }, { title: 'Set file permissions', task: async () => await self.setPlatformDirPermissions(podName), @@ -296,6 +300,20 @@ export class PlatformInstaller { ); } + /** + * Copy configuration files to the network consensus node pod + * @param stagingDir - staging directory path + * @param podName - network consensus node pod name + * @param isGenesis - true if this is `solo node setup` and we are at genesis + * @private + */ + private async copyConfigurationFiles(stagingDir: string, podName: `network-node${number}-0`, isGenesis: boolean) { + if (isGenesis) { + const genesisNetworkJson = [path.join(stagingDir, 'genesis-network.json')]; + await this.copyFiles(podName, genesisNetworkJson, `${constants.HEDERA_HAPI_PATH}/data/config`); + } + } + /** * Return a list of task to copy the node keys to the staging directory * diff --git a/src/core/profile_manager.ts b/src/core/profile_manager.ts index 3829241b3..29ce21e84 100644 --- a/src/core/profile_manager.ts +++ b/src/core/profile_manager.ts @@ -28,11 +28,9 @@ import * as constants from './constants.js'; import {ConfigManager} from './config_manager.js'; import * as helpers from './helpers.js'; import {getNodeAccountMap} from './helpers.js'; -import {AccountId} from '@hashgraph/sdk'; import type {SemVer} from 'semver'; import {SoloLogger} from './logging.js'; import type {AnyObject, DirPath, NodeAlias, NodeAliases, Path} from '../types/aliases.js'; -import type {GenesisNetworkDataConstructor} from './genesis_network_models/genesis_network_data_constructor.js'; import type {Optional} from '../types/index.js'; import {inject, injectable} from 'tsyringe-neo'; 
import {patchInject} from './container_helper.js'; @@ -186,12 +184,7 @@ export class ProfileManager { } } - resourcesForConsensusPod( - profile: AnyObject, - nodeAliases: NodeAliases, - yamlRoot: AnyObject, - genesisNetworkData?: GenesisNetworkDataConstructor, - ): AnyObject { + resourcesForConsensusPod(profile: AnyObject, nodeAliases: NodeAliases, yamlRoot: AnyObject): AnyObject { if (!profile) throw new MissingArgumentError('profile is required'); const accountMap = getNodeAccountMap(nodeAliases); @@ -199,6 +192,7 @@ export class ProfileManager { // set consensus pod level resources for (let nodeIndex = 0; nodeIndex < nodeAliases.length; nodeIndex++) { this._setValue(`hedera.nodes.${nodeIndex}.name`, nodeAliases[nodeIndex], yamlRoot); + this._setValue(`hedera.nodes.${nodeIndex}.nodeId`, `${nodeIndex}`, yamlRoot); this._setValue(`hedera.nodes.${nodeIndex}.accountId`, accountMap.get(nodeAliases[nodeIndex]), yamlRoot); } @@ -218,7 +212,6 @@ export class ProfileManager { this.configManager.getFlag(flags.releaseTag), this.configManager.getFlag(flags.app), this.configManager.getFlag(flags.chainId), - genesisNetworkData, ); for (const flag of flags.nodeConfigFileFlags.values()) { @@ -261,14 +254,6 @@ export class ProfileManager { yamlRoot, ); - if (genesisNetworkData) { - const genesisNetworkJson = path.join(stagingDir, 'genesis-network.json'); - - fs.writeFileSync(genesisNetworkJson, genesisNetworkData.toJSON()); - - this._setFileContentsAsValue('hedera.configMaps.genesisNetworkJson', genesisNetworkJson, yamlRoot); - } - if (this.configManager.getFlag(flags.applicationEnv)) { this._setFileContentsAsValue( 'hedera.configMaps.applicationEnv', @@ -334,7 +319,7 @@ export class ProfileManager { * @param genesisNetworkData - reference to the constructor * @returns return the full path to the values file */ - public async prepareValuesForSoloChart(profileName: string, genesisNetworkData?: GenesisNetworkDataConstructor) { + public async prepareValuesForSoloChart(profileName: 
string) { if (!profileName) throw new MissingArgumentError('profileName is required'); const profile = this.getProfile(profileName); @@ -343,7 +328,7 @@ export class ProfileManager { // generate the YAML const yamlRoot = {}; - this.resourcesForConsensusPod(profile, nodeAliases, yamlRoot, genesisNetworkData); + this.resourcesForConsensusPod(profile, nodeAliases, yamlRoot); this.resourcesForHaProxyPod(profile, yamlRoot); this.resourcesForEnvoyProxyPod(profile, yamlRoot); this.resourcesForMinioTenantPod(profile, yamlRoot); @@ -469,7 +454,7 @@ export class ProfileManager { * @param namespace - namespace where the network is deployed * @param nodeAccountMap - the map of node aliases to account IDs * @param destPath - path to the destination directory to write the config.txt file - * @param releaseTag - release tag e.g. v0.42.0 + * @param releaseTagOverride - release tag override * @param [appName] - the app name (default: HederaNode.jar) * @param [chainId] - chain ID (298 for local network) * @param genesisNetworkData @@ -482,7 +467,6 @@ export class ProfileManager { releaseTagOverride: string, appName = constants.HEDERA_APP_NAME, chainId = constants.HEDERA_CHAIN_ID, - genesisNetworkData?: GenesisNetworkDataConstructor, ) { let releaseTag = releaseTagOverride; if (!nodeAccountMap || nodeAccountMap.size === 0) { @@ -500,7 +484,7 @@ export class ProfileManager { const externalPort = +constants.HEDERA_NODE_EXTERNAL_GOSSIP_PORT; const nodeStakeAmount = constants.HEDERA_NODE_DEFAULT_STAKE_AMOUNT; - // @ts-ignore + // @ts-expect-error - TS2353: Object literal may only specify known properties, and includePrerelease does not exist in type Options const releaseVersion = semver.parse(releaseTag, {includePrerelease: true}) as SemVer; try { @@ -514,23 +498,6 @@ export class ProfileManager { const externalIP = Templates.renderFullyQualifiedNetworkSvcName(namespace, nodeAlias); const account = nodeAccountMap.get(nodeAlias); - if (genesisNetworkData) { - // TODO: Use the "nodeSeq" - 
- const nodeDataWrapper = genesisNetworkData.nodes[nodeAlias]; - - nodeDataWrapper.weight = nodeStakeAmount; - nodeDataWrapper.accountId = AccountId.fromString(account); - - //? Add gossip endpoints - nodeDataWrapper.addGossipEndpoint(externalIP, externalPort); - - const haProxyFqdn = Templates.renderFullyQualifiedHaProxyName(nodeAlias, namespace); - - //? Add service endpoints - nodeDataWrapper.addServiceEndpoint(haProxyFqdn, constants.GRPC_PORT); - } - configLines.push( `address, ${nodeSeq}, ${nodeSeq}, ${nodeAlias}, ${nodeStakeAmount}, ${internalIP}, ${internalPort}, ${externalIP}, ${externalPort}, ${account}`, ); diff --git a/src/core/templates.ts b/src/core/templates.ts index 06f91be0b..a8bdf4a35 100644 --- a/src/core/templates.ts +++ b/src/core/templates.ts @@ -173,7 +173,7 @@ export class Templates { for (let i = nodeAlias.length - 1; i > 0; i--) { // @ts-ignore if (isNaN(nodeAlias[i])) { - return parseInt(nodeAlias.substring(i + 1, nodeAlias.length)); + return parseInt(nodeAlias.substring(i + 1, nodeAlias.length)) - 1; } } diff --git a/src/types/index.ts b/src/types/index.ts index be22a2a13..140aff925 100644 --- a/src/types/index.ts +++ b/src/types/index.ts @@ -103,9 +103,15 @@ export interface ServiceEndpoint { domainName: string; } +export interface NodeAccountId { + accountId: { + accountNum: string; + }; +} + export interface GenesisNetworkNodeStructure { nodeId: number; - accountId: AccountId; + accountId: NodeAccountId; description: string; gossipEndpoint: ServiceEndpoint[]; serviceEndpoint: ServiceEndpoint[]; @@ -115,3 +121,10 @@ export interface GenesisNetworkNodeStructure { deleted: boolean; adminKey: PublicKey; } + +export interface GenesisNetworkRosterStructure { + nodeId: number; + weight: number; + gossipEndpoint: ServiceEndpoint[]; + gossipCaCertificate: string; +} diff --git a/test/e2e/commands/cluster.test.ts b/test/e2e/commands/cluster.test.ts index b734ab8d6..d0bd6e6b3 100644 --- a/test/e2e/commands/cluster.test.ts +++ 
b/test/e2e/commands/cluster.test.ts @@ -67,7 +67,7 @@ describe('ClusterCommand', () => { await k8.deleteNamespace(namespace); argv[flags.clusterSetupNamespace.name] = constants.SOLO_SETUP_NAMESPACE; configManager.update(argv); - await clusterCmd.setup(argv); // restore solo-cluster-setup for other e2e tests to leverage + await clusterCmd.handlers.setup(argv); // restore solo-cluster-setup for other e2e tests to leverage do { await sleep(Duration.ofSeconds(5)); } while ( @@ -85,33 +85,33 @@ describe('ClusterCommand', () => { it('should cleanup existing deployment', async () => { if (await chartManager.isChartInstalled(constants.SOLO_SETUP_NAMESPACE, constants.SOLO_CLUSTER_SETUP_CHART)) { - expect(await clusterCmd.reset(argv)).to.be.true; + expect(await clusterCmd.handlers.reset(argv)).to.be.true; } }).timeout(Duration.ofMinutes(1).toMillis()); it('solo cluster setup should fail with invalid cluster name', async () => { argv[flags.clusterSetupNamespace.name] = 'INVALID'; configManager.update(argv); - await expect(clusterCmd.setup(argv)).to.be.rejectedWith('Error on cluster setup'); + await expect(clusterCmd.handlers.setup(argv)).to.be.rejectedWith('Error on cluster setup'); }).timeout(Duration.ofMinutes(1).toMillis()); it('solo cluster setup should work with valid args', async () => { argv[flags.clusterSetupNamespace.name] = namespace; configManager.update(argv); - expect(await clusterCmd.setup(argv)).to.be.true; + expect(await clusterCmd.handlers.setup(argv)).to.be.true; }).timeout(Duration.ofMinutes(1).toMillis()); - it('function getClusterInfo should return true', () => { - expect(clusterCmd.getClusterInfo()).to.be.ok; + it('solo cluster info should work', () => { + expect(clusterCmd.handlers.info(argv)).to.be.ok; }).timeout(Duration.ofMinutes(1).toMillis()); - it('function showClusterList should return right true', async () => { - expect(clusterCmd.showClusterList()).to.be.ok; + it('solo cluster list', async () => { + 
expect(clusterCmd.handlers.list(argv)).to.be.ok; }).timeout(Duration.ofMinutes(1).toMillis()); it('function showInstalledChartList should return right true', async () => { // @ts-ignore - await expect(clusterCmd.showInstalledChartList()).to.eventually.be.undefined; + await expect(clusterCmd.handlers.tasks.showInstalledChartList()).to.eventually.be.undefined; }).timeout(Duration.ofMinutes(1).toMillis()); // helm list would return an empty list if given invalid namespace @@ -120,7 +120,7 @@ describe('ClusterCommand', () => { configManager.update(argv); try { - await expect(clusterCmd.reset(argv)).to.be.rejectedWith('Error on cluster reset'); + await expect(clusterCmd.handlers.reset(argv)).to.be.rejectedWith('Error on cluster reset'); } catch (e) { clusterCmd.logger.showUserError(e); expect.fail(); @@ -130,6 +130,6 @@ describe('ClusterCommand', () => { it('solo cluster reset should work with valid args', async () => { argv[flags.clusterSetupNamespace.name] = namespace; configManager.update(argv); - expect(await clusterCmd.reset(argv)).to.be.true; + expect(await clusterCmd.handlers.reset(argv)).to.be.true; }).timeout(Duration.ofMinutes(1).toMillis()); }); diff --git a/test/e2e/commands/mirror_node.test.ts b/test/e2e/commands/mirror_node.test.ts index bca6b4c58..b6b050310 100644 --- a/test/e2e/commands/mirror_node.test.ts +++ b/test/e2e/commands/mirror_node.test.ts @@ -101,6 +101,8 @@ e2eTestSuite(testName, argv, undefined, undefined, undefined, undefined, undefin flags.tlsClusterIssuerType.constName, flags.clusterSetupNamespace.constName, flags.soloChartVersion.constName, + flags.storageSecrets.constName, + flags.storageEndpoint.constName, ]); }).timeout(Duration.ofMinutes(10).toMillis()); diff --git a/test/e2e/commands/network.test.ts b/test/e2e/commands/network.test.ts index f0a5bc974..4c5acba64 100644 --- a/test/e2e/commands/network.test.ts +++ b/test/e2e/commands/network.test.ts @@ -67,7 +67,7 @@ describe('NetworkCommand', () => { before(async () => { await 
initCmd.init(argv); - await clusterCmd.setup(argv); + await clusterCmd.handlers.setup(argv); fs.mkdirSync(applicationEnvParentDirectory, {recursive: true}); fs.writeFileSync(applicationEnvFilePath, applicationEnvFileContents); }); @@ -102,6 +102,9 @@ describe('NetworkCommand', () => { flags.settingTxt.constName, flags.grpcTlsKeyPath.constName, flags.grpcWebTlsKeyPath.constName, + flags.storageAccessKey.constName, + flags.storageSecrets.constName, + flags.storageEndpoint.constName, ]); } catch (e) { networkCmd.logger.showUserError(e); diff --git a/test/e2e/commands/node_delete.test.ts b/test/e2e/commands/node_delete.test.ts index 8e0e035f2..9b8b996e0 100644 --- a/test/e2e/commands/node_delete.test.ts +++ b/test/e2e/commands/node_delete.test.ts @@ -33,10 +33,10 @@ import * as NodeCommandConfigs from '../../../src/commands/node/configs.js'; import {Duration} from '../../../src/core/time/duration.js'; const namespace = 'node-delete'; -const nodeAlias = 'node1'; +const deleteNodeAlias = 'node1'; const argv = getDefaultArgv(); argv[flags.nodeAliasesUnparsed.name] = 'node1,node2'; -argv[flags.nodeAlias.name] = nodeAlias; +argv[flags.nodeAlias.name] = deleteNodeAlias; argv[flags.stakeAmounts.name] = '1,1000'; argv[flags.generateGossipKeys.name] = true; argv[flags.generateTlsKeys.name] = true; @@ -74,9 +74,9 @@ e2eTestSuite(namespace, argv, undefined, undefined, undefined, undefined, undefi await bootstrapResp.opts.accountManager.close(); }).timeout(Duration.ofMinutes(10).toMillis()); - balanceQueryShouldSucceed(bootstrapResp.opts.accountManager, nodeCmd, namespace); + balanceQueryShouldSucceed(bootstrapResp.opts.accountManager, nodeCmd, namespace, deleteNodeAlias); - accountCreationShouldSucceed(bootstrapResp.opts.accountManager, nodeCmd, namespace); + accountCreationShouldSucceed(bootstrapResp.opts.accountManager, nodeCmd, namespace, deleteNodeAlias); it('config.txt should no longer contain removed node alias name', async () => { // read config.txt file from first node, 
read config.txt line by line, it should not contain value of nodeAlias @@ -86,7 +86,7 @@ e2eTestSuite(namespace, argv, undefined, undefined, undefined, undefined, undefi await k8.copyFrom(podName, ROOT_CONTAINER, `${HEDERA_HAPI_PATH}/config.txt`, tmpDir); const configTxt = fs.readFileSync(`${tmpDir}/config.txt`, 'utf8'); console.log('config.txt:', configTxt); - expect(configTxt).not.to.contain(nodeAlias); + expect(configTxt).not.to.contain(deleteNodeAlias); }).timeout(Duration.ofMinutes(10).toMillis()); }); }); diff --git a/test/e2e/commands/node_update.test.ts b/test/e2e/commands/node_update.test.ts index c1c88c143..ac00bfa31 100644 --- a/test/e2e/commands/node_update.test.ts +++ b/test/e2e/commands/node_update.test.ts @@ -115,9 +115,9 @@ e2eTestSuite(namespace, argv, undefined, undefined, undefined, undefined, undefi await bootstrapResp.opts.accountManager.close(); }).timeout(Duration.ofMinutes(30).toMillis()); - balanceQueryShouldSucceed(bootstrapResp.opts.accountManager, nodeCmd, namespace); + balanceQueryShouldSucceed(bootstrapResp.opts.accountManager, nodeCmd, namespace, updateNodeId); - accountCreationShouldSucceed(bootstrapResp.opts.accountManager, nodeCmd, namespace); + accountCreationShouldSucceed(bootstrapResp.opts.accountManager, nodeCmd, namespace, updateNodeId); it('signing key and tls key should not match previous one', async () => { const currentNodeIdsPrivateKeysHash = await getNodeAliasesPrivateKeysHash( diff --git a/test/e2e/commands/separate_node_delete.test.ts b/test/e2e/commands/separate_node_delete.test.ts index a577c59dc..3aadd81a0 100644 --- a/test/e2e/commands/separate_node_delete.test.ts +++ b/test/e2e/commands/separate_node_delete.test.ts @@ -86,9 +86,9 @@ e2eTestSuite(namespace, argv, undefined, undefined, undefined, undefined, undefi await bootstrapResp.opts.accountManager.close(); }).timeout(Duration.ofMinutes(10).toMillis()); - balanceQueryShouldSucceed(bootstrapResp.opts.accountManager, nodeCmd, namespace); + 
balanceQueryShouldSucceed(bootstrapResp.opts.accountManager, nodeCmd, namespace, nodeAlias); - accountCreationShouldSucceed(bootstrapResp.opts.accountManager, nodeCmd, namespace); + accountCreationShouldSucceed(bootstrapResp.opts.accountManager, nodeCmd, namespace, nodeAlias); it('config.txt should no longer contain removed nodeAlias', async () => { // read config.txt file from first node, read config.txt line by line, it should not contain value of nodeAlias diff --git a/test/e2e/commands/separate_node_update.test.ts b/test/e2e/commands/separate_node_update.test.ts index 3440d9762..c276321f8 100644 --- a/test/e2e/commands/separate_node_update.test.ts +++ b/test/e2e/commands/separate_node_update.test.ts @@ -126,9 +126,9 @@ e2eTestSuite(namespace, argv, undefined, undefined, undefined, undefined, undefi await bootstrapResp.opts.accountManager.close(); }).timeout(Duration.ofMinutes(30).toMillis()); - balanceQueryShouldSucceed(bootstrapResp.opts.accountManager, nodeCmd, namespace); + balanceQueryShouldSucceed(bootstrapResp.opts.accountManager, nodeCmd, namespace, updateNodeId); - accountCreationShouldSucceed(bootstrapResp.opts.accountManager, nodeCmd, namespace); + accountCreationShouldSucceed(bootstrapResp.opts.accountManager, nodeCmd, namespace, updateNodeId); it('signing key and tls key should not match previous one', async () => { const currentNodeIdsPrivateKeysHash = await getNodeAliasesPrivateKeysHash( diff --git a/test/test_util.ts b/test/test_util.ts index 83a02ef47..4107cdaf2 100644 --- a/test/test_util.ts +++ b/test/test_util.ts @@ -23,7 +23,7 @@ import fs from 'fs'; import os from 'os'; import path from 'path'; import {Flags as flags} from '../src/commands/flags.js'; -import {ClusterCommand} from '../src/commands/cluster.js'; +import {ClusterCommand} from '../src/commands/cluster/index.js'; import {InitCommand} from '../src/commands/init.js'; import {NetworkCommand} from '../src/commands/network.js'; import {NodeCommand} from 
'../src/commands/node/index.js'; @@ -261,7 +261,7 @@ export function e2eTestSuite( if ( !(await chartManager.isChartInstalled(constants.SOLO_SETUP_NAMESPACE, constants.SOLO_CLUSTER_SETUP_CHART)) ) { - await clusterCmd.setup(argv); + await clusterCmd.handlers.setup(argv); } }).timeout(Duration.ofMinutes(2).toMillis()); @@ -289,6 +289,9 @@ export function e2eTestSuite( flags.settingTxt.constName, flags.grpcTlsKeyPath.constName, flags.grpcWebTlsKeyPath.constName, + flags.storageAccessKey.constName, + flags.storageSecrets.constName, + flags.storageEndpoint.constName, ]); }).timeout(Duration.ofMinutes(5).toMillis()); @@ -333,11 +336,16 @@ export function e2eTestSuite( }); } -export function balanceQueryShouldSucceed(accountManager: AccountManager, cmd: BaseCommand, namespace: string) { +export function balanceQueryShouldSucceed( + accountManager: AccountManager, + cmd: BaseCommand, + namespace: string, + skipNodeAlias?: NodeAlias, +) { it('Balance query should succeed', async () => { try { expect(accountManager._nodeClient).to.be.null; - await accountManager.loadNodeClient(namespace); + await accountManager.refreshNodeClient(namespace, skipNodeAlias); expect(accountManager._nodeClient).not.to.be.null; const balance = await new AccountBalanceQuery() @@ -353,10 +361,15 @@ export function balanceQueryShouldSucceed(accountManager: AccountManager, cmd: B }).timeout(Duration.ofMinutes(2).toMillis()); } -export function accountCreationShouldSucceed(accountManager: AccountManager, nodeCmd: BaseCommand, namespace: string) { +export function accountCreationShouldSucceed( + accountManager: AccountManager, + nodeCmd: BaseCommand, + namespace: string, + skipNodeAlias?: NodeAlias, +) { it('Account creation should succeed', async () => { try { - await accountManager.loadNodeClient(namespace); + await accountManager.refreshNodeClient(namespace, skipNodeAlias); expect(accountManager._nodeClient).not.to.be.null; const privateKey = PrivateKey.generate(); const amount = 100; diff --git 
a/test/unit/commands/cluster.test.ts b/test/unit/commands/cluster.test.ts index 6ff3afd2d..a8a8dffef 100644 --- a/test/unit/commands/cluster.test.ts +++ b/test/unit/commands/cluster.test.ts @@ -18,8 +18,14 @@ import sinon from 'sinon'; import {describe, it, beforeEach} from 'mocha'; import {expect} from 'chai'; -import {ClusterCommand} from '../../../src/commands/cluster.js'; -import {getDefaultArgv, HEDERA_PLATFORM_VERSION_TAG, TEST_CLUSTER} from '../../test_util.js'; +import {ClusterCommand} from '../../../src/commands/cluster/index.js'; +import { + getDefaultArgv, + getTestCacheDir, + HEDERA_PLATFORM_VERSION_TAG, + TEST_CLUSTER, + testLocalConfigData, +} from '../../test_util.js'; import {Flags as flags} from '../../../src/commands/flags.js'; import * as version from '../../../version.js'; import * as constants from '../../../src/core/constants.js'; @@ -32,11 +38,34 @@ import path from 'path'; import {container} from 'tsyringe-neo'; import {resetTestContainer} from '../../test_container.js'; import * as test from 'node:test'; +import {ClusterCommandTasks} from '../../../src/commands/cluster/tasks.js'; +import type {BaseCommand} from '../../../src/commands/base.js'; +import {LocalConfig} from '../../../src/core/config/local_config.js'; +import type {CommandFlag} from '../../../src/types/flag_types.js'; +import {K8} from '../../../src/core/k8.js'; +import {type Cluster, KubeConfig} from '@kubernetes/client-node'; +import {RemoteConfigManager} from '../../../src/core/config/remote/remote_config_manager.js'; +import {DependencyManager} from '../../../src/core/dependency_managers/index.js'; +import {PackageDownloader} from '../../../src/core/package_downloader.js'; +import {KeyManager} from '../../../src/core/key_manager.js'; +import {AccountManager} from '../../../src/core/account_manager.js'; +import {PlatformInstaller} from '../../../src/core/platform_installer.js'; +import {ProfileManager} from '../../../src/core/profile_manager.js'; +import {LeaseManager} from 
'../../../src/core/lease/lease_manager.js'; +import {CertificateManager} from '../../../src/core/certificate_manager.js'; +import type {Opts} from '../../../src/types/command_types.js'; +import type {ListrTaskWrapper} from 'listr2'; +import fs from 'fs'; +import {stringify} from 'yaml'; const getBaseCommandOpts = () => ({ logger: sinon.stub(), helm: sinon.stub(), - k8: sinon.stub(), + k8: { + isMinioInstalled: sinon.stub().returns(false), + isPrometheusInstalled: sinon.stub().returns(false), + isCertManagerInstalled: sinon.stub().returns(false), + }, chartManager: sinon.stub(), configManager: sinon.stub(), depManager: sinon.stub(), @@ -81,7 +110,7 @@ describe('ClusterCommand unit tests', () => { it('Install function is called with expected parameters', async () => { const clusterCommand = new ClusterCommand(opts); - await clusterCommand.setup(argv); + await clusterCommand.handlers.setup(argv); expect(opts.chartManager.install.args[0][0]).to.equal(constants.SOLO_SETUP_NAMESPACE); expect(opts.chartManager.install.args[0][1]).to.equal(constants.SOLO_CLUSTER_SETUP_CHART); @@ -96,11 +125,329 @@ describe('ClusterCommand unit tests', () => { argv[flags.force.name] = true; const clusterCommand = new ClusterCommand(opts); - await clusterCommand.setup(argv); + await clusterCommand.handlers.setup(argv); expect(opts.chartManager.install.args[0][2]).to.equal( path.join(ROOT_DIR, 'test-directory', constants.SOLO_CLUSTER_SETUP_CHART), ); }); }); + + describe('cluster connect', () => { + const filePath = `${getTestCacheDir('ClusterCommandTasks')}/localConfig.yaml`; + const sandbox = sinon.createSandbox(); + let namespacePromptStub: sinon.SinonStub; + let clusterNamePromptStub: sinon.SinonStub; + let contextPromptStub: sinon.SinonStub; + let tasks: ClusterCommandTasks; + let command: BaseCommand; + let loggerStub: sinon.SinonStubbedInstance; + let localConfig: LocalConfig; + + const getBaseCommandOpts = ( + sandbox: sinon.SinonSandbox, + remoteConfig: any = {}, + // @ts-ignore + 
stubbedFlags: Record[] = [], + ) => { + const loggerStub = sandbox.createStubInstance(SoloLogger); + const k8Stub = sandbox.createStubInstance(K8); + k8Stub.getContexts.returns([ + {cluster: 'cluster-1', user: 'user-1', name: 'context-1', namespace: 'deployment-1'}, + {cluster: 'cluster-2', user: 'user-2', name: 'context-2', namespace: 'deployment-2'}, + {cluster: 'cluster-3', user: 'user-3', name: 'context-3', namespace: 'deployment-3'}, + ]); + k8Stub.isMinioInstalled.returns(new Promise(() => true)); + k8Stub.isPrometheusInstalled.returns(new Promise(() => true)); + k8Stub.isCertManagerInstalled.returns(new Promise(() => true)); + const kubeConfigStub = sandbox.createStubInstance(KubeConfig); + kubeConfigStub.getCurrentContext.returns('context-from-kubeConfig'); + kubeConfigStub.getCurrentCluster.returns({ + name: 'cluster-3', + caData: 'caData', + caFile: 'caFile', + server: 'server-3', + skipTLSVerify: true, + tlsServerName: 'tls-3', + } as Cluster); + + const remoteConfigManagerStub = sandbox.createStubInstance(RemoteConfigManager); + remoteConfigManagerStub.modify.callsFake(async callback => { + await callback(remoteConfig); + }); + + k8Stub.getKubeConfig.returns(kubeConfigStub); + + const configManager = sandbox.createStubInstance(ConfigManager); + + for (let i = 0; i < stubbedFlags.length; i++) { + configManager.getFlag.withArgs(stubbedFlags[i][0]).returns(stubbedFlags[i][1]); + } + + return { + logger: loggerStub, + helm: sandbox.createStubInstance(Helm), + k8: k8Stub, + chartManager: sandbox.createStubInstance(ChartManager), + configManager, + depManager: sandbox.createStubInstance(DependencyManager), + localConfig: new LocalConfig(filePath), + downloader: sandbox.createStubInstance(PackageDownloader), + keyManager: sandbox.createStubInstance(KeyManager), + accountManager: sandbox.createStubInstance(AccountManager), + platformInstaller: sandbox.createStubInstance(PlatformInstaller), + profileManager: sandbox.createStubInstance(ProfileManager), + 
leaseManager: sandbox.createStubInstance(LeaseManager), + certificateManager: sandbox.createStubInstance(CertificateManager), + remoteConfigManager: remoteConfigManagerStub, + } as Opts; + }; + + describe('updateLocalConfig', () => { + async function runUpdateLocalConfigTask(opts) { + command = new ClusterCommand(opts); + tasks = new ClusterCommandTasks(command, opts.k8); + const taskObj = tasks.updateLocalConfig({}); + await taskObj.task({config: {}}, sandbox.stub() as unknown as ListrTaskWrapper); + return command; + } + + afterEach(async () => { + await fs.promises.unlink(filePath); + sandbox.restore(); + }); + + after(() => {}); + + beforeEach(async () => { + namespacePromptStub = sandbox.stub(flags.namespace, 'prompt').callsFake(() => { + return new Promise(resolve => { + resolve('deployment-3'); + }); + }); + clusterNamePromptStub = sandbox.stub(flags.clusterName, 'prompt').callsFake(() => { + return new Promise(resolve => { + resolve('cluster-3'); + }); + }); + contextPromptStub = sandbox.stub(flags.context, 'prompt').callsFake(() => { + return new Promise(resolve => { + resolve('context-3'); + }); + }); + loggerStub = sandbox.createStubInstance(SoloLogger); + await fs.promises.writeFile(filePath, stringify(testLocalConfigData)); + }); + + it('should update currentDeployment with clusters from remoteConfig', async () => { + const remoteConfig = { + clusters: { + 'cluster-2': 'deployment', + }, + }; + const opts = getBaseCommandOpts(sandbox, remoteConfig, []); + command = await runUpdateLocalConfigTask(opts); // @ts-ignore + localConfig = new LocalConfig(filePath); + + expect(localConfig.currentDeploymentName).to.equal('deployment'); + expect(localConfig.getCurrentDeployment().clusters).to.deep.equal(['cluster-2']); + expect(localConfig.clusterContextMapping).to.deep.equal({ + 'cluster-1': 'context-1', + 'cluster-2': 'context-2', + }); + }); + + it('should update clusterContextMapping with provided context', async () => { + const remoteConfig = { + clusters: 
{ + 'cluster-2': 'deployment', + }, + }; + const opts = getBaseCommandOpts(sandbox, remoteConfig, [[flags.context, 'provided-context']]); + command = await runUpdateLocalConfigTask(opts); // @ts-ignore + localConfig = new LocalConfig(filePath); + + expect(localConfig.currentDeploymentName).to.equal('deployment'); + expect(localConfig.getCurrentDeployment().clusters).to.deep.equal(['cluster-2']); + expect(localConfig.clusterContextMapping).to.deep.equal({ + 'cluster-1': 'context-1', + 'cluster-2': 'provided-context', + }); + }); + + it('should update multiple clusterContextMappings with provided contexts', async () => { + const remoteConfig = { + clusters: { + 'cluster-2': 'deployment', + 'cluster-3': 'deployment', + 'cluster-4': 'deployment', + }, + }; + const opts = getBaseCommandOpts(sandbox, remoteConfig, [ + [flags.context, 'provided-context-2,provided-context-3,provided-context-4'], + ]); + command = await runUpdateLocalConfigTask(opts); // @ts-ignore + localConfig = new LocalConfig(filePath); + + expect(localConfig.currentDeploymentName).to.equal('deployment'); + expect(localConfig.getCurrentDeployment().clusters).to.deep.equal(['cluster-2', 'cluster-3', 'cluster-4']); + expect(localConfig.clusterContextMapping).to.deep.equal({ + 'cluster-1': 'context-1', + 'cluster-2': 'provided-context-2', + 'cluster-3': 'provided-context-3', + 'cluster-4': 'provided-context-4', + }); + }); + + it('should update multiple clusterContextMappings with default KubeConfig context if quiet=true', async () => { + const remoteConfig = { + clusters: { + 'cluster-2': 'deployment', + 'cluster-3': 'deployment', + }, + }; + const opts = getBaseCommandOpts(sandbox, remoteConfig, [[flags.quiet, true]]); + command = await runUpdateLocalConfigTask(opts); // @ts-ignore + localConfig = new LocalConfig(filePath); + + expect(localConfig.currentDeploymentName).to.equal('deployment'); + expect(localConfig.getCurrentDeployment().clusters).to.deep.equal(['cluster-2', 'cluster-3']); + 
expect(localConfig.clusterContextMapping).to.deep.equal({ + 'cluster-1': 'context-1', + 'cluster-2': 'context-2', + 'cluster-3': 'context-from-kubeConfig', + }); + }); + + it('should update multiple clusterContextMappings with prompted context no value was provided', async () => { + const remoteConfig = { + clusters: { + 'cluster-2': 'deployment', + 'new-cluster': 'deployment', + }, + }; + const opts = getBaseCommandOpts(sandbox, remoteConfig, []); + + command = await runUpdateLocalConfigTask(opts); // @ts-ignore + localConfig = new LocalConfig(filePath); + + expect(localConfig.currentDeploymentName).to.equal('deployment'); + expect(localConfig.getCurrentDeployment().clusters).to.deep.equal(['cluster-2', 'new-cluster']); + expect(localConfig.clusterContextMapping).to.deep.equal({ + 'cluster-1': 'context-1', + 'cluster-2': 'context-2', + 'new-cluster': 'context-3', // prompted value + }); + }); + }); + + describe('selectContext', () => { + async function runSelectContextTask(opts) { + command = new ClusterCommand(opts); + tasks = new ClusterCommandTasks(command, opts.k8); + const taskObj = tasks.selectContext({}); + await taskObj.task({config: {}}, sandbox.stub() as unknown as ListrTaskWrapper); + return command; + } + + afterEach(async () => { + await fs.promises.unlink(filePath); + sandbox.restore(); + }); + + beforeEach(async () => { + namespacePromptStub = sandbox.stub(flags.namespace, 'prompt').callsFake(() => { + return new Promise(resolve => { + resolve('deployment-3'); + }); + }); + clusterNamePromptStub = sandbox.stub(flags.clusterName, 'prompt').callsFake(() => { + return new Promise(resolve => { + resolve('cluster-3'); + }); + }); + contextPromptStub = sandbox.stub(flags.context, 'prompt').callsFake(() => { + return new Promise(resolve => { + resolve('context-3'); + }); + }); + loggerStub = sandbox.createStubInstance(SoloLogger); + await fs.promises.writeFile(filePath, stringify(testLocalConfigData)); + }); + + it('should use first provided context', 
async () => { + const opts = getBaseCommandOpts(sandbox, {}, [ + [flags.context, 'provided-context-1,provided-context-2,provided-context-3'], + ]); + + command = await runSelectContextTask(opts); // @ts-ignore + expect(command.getK8().getKubeConfig().setCurrentContext).to.have.been.calledWith('provided-context-1'); + }); + + it('should use local config mapping to connect to first provided cluster', async () => { + const opts = getBaseCommandOpts(sandbox, {}, [[flags.clusterName, 'cluster-2,cluster-3']]); + + command = await runSelectContextTask(opts); // @ts-ignore + expect(command.getK8().getKubeConfig().setCurrentContext).to.have.been.calledWith('context-2'); + }); + + it('should prompt for context if selected cluster is not found in local config mapping', async () => { + const opts = getBaseCommandOpts(sandbox, {}, [[flags.clusterName, 'cluster-3']]); + + command = await runSelectContextTask(opts); // @ts-ignore + expect(command.getK8().getKubeConfig().setCurrentContext).to.have.been.calledWith('context-3'); + }); + + it('should use default kubeConfig context if selected cluster is not found in local config mapping and quiet=true', async () => { + const opts = getBaseCommandOpts(sandbox, {}, [ + [flags.clusterName, 'unknown-cluster'], + [flags.quiet, true], + ]); + + command = await runSelectContextTask(opts); // @ts-ignore + expect(command.getK8().getKubeConfig().setCurrentContext).to.have.been.calledWith('context-from-kubeConfig'); + }); + + it('should use context from local config mapping for the first cluster from the selected deployment', async () => { + const opts = getBaseCommandOpts(sandbox, {}, [[flags.namespace, 'deployment-2']]); + + command = await runSelectContextTask(opts); // @ts-ignore + expect(command.getK8().getKubeConfig().setCurrentContext).to.have.been.calledWith('context-2'); + }); + + it('should prompt for context if selected deployment is found in local config but the context is not', async () => { + const opts = 
getBaseCommandOpts(sandbox, {}, [[flags.namespace, 'deployment-3']]); + + command = await runSelectContextTask(opts); // @ts-ignore + expect(command.getK8().getKubeConfig().setCurrentContext).to.have.been.calledWith('context-3'); + }); + + it('should use default context if selected deployment is found in local config but the context is not and quiet=true', async () => { + const opts = getBaseCommandOpts(sandbox, {}, [ + [flags.namespace, 'deployment-3'], + [flags.quiet, true], + ]); + + command = await runSelectContextTask(opts); // @ts-ignore + expect(command.getK8().getKubeConfig().setCurrentContext).to.have.been.calledWith('context-from-kubeConfig'); + }); + + it('should prompt for clusters and contexts if selected deployment is not found in local config', async () => { + const opts = getBaseCommandOpts(sandbox, {}, [[flags.namespace, 'deployment-4']]); + + command = await runSelectContextTask(opts); // @ts-ignore + expect(command.getK8().getKubeConfig().setCurrentContext).to.have.been.calledWith('context-3'); + }); + + it('should use clusters and contexts from kubeConfig if selected deployment is not found in local config and quiet=true', async () => { + const opts = getBaseCommandOpts(sandbox, {}, [ + [flags.namespace, 'deployment-4'], + [flags.quiet, true], + ]); + + command = await runSelectContextTask(opts); // @ts-ignore + expect(command.getK8().getKubeConfig().setCurrentContext).to.have.been.calledWith('context-from-kubeConfig'); + }); + }); + }); }); diff --git a/test/unit/commands/context.test.ts b/test/unit/commands/context.test.ts deleted file mode 100644 index ceea764dc..000000000 --- a/test/unit/commands/context.test.ts +++ /dev/null @@ -1,361 +0,0 @@ -/** - * Copyright (C) 2024 Hedera Hashgraph, LLC - * - * Licensed under the Apache License, Version 2.0 (the ""License""); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an ""AS IS"" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -import sinon from 'sinon'; -import {describe, it, beforeEach} from 'mocha'; -import {expect} from 'chai'; - -import {ContextCommandTasks} from '../../../src/commands/context/tasks.js'; -import {DependencyManager} from '../../../src/core/dependency_managers/index.js'; -import {LocalConfig} from '../../../src/core/config/local_config.js'; -import {PackageDownloader} from '../../../src/core/package_downloader.js'; -import {KeyManager} from '../../../src/core/key_manager.js'; -import {AccountManager} from '../../../src/core/account_manager.js'; -import {PlatformInstaller} from '../../../src/core/platform_installer.js'; -import {ProfileManager} from '../../../src/core/profile_manager.js'; -import {LeaseManager} from '../../../src/core/lease/lease_manager.js'; -import {CertificateManager} from '../../../src/core/certificate_manager.js'; -import {RemoteConfigManager} from '../../../src/core/config/remote/remote_config_manager.js'; -import {K8} from '../../../src/core/k8.js'; -import {ConfigManager} from '../../../src/core/config_manager.js'; -import {Helm} from '../../../src/core/helm.js'; -import {ChartManager} from '../../../src/core/chart_manager.js'; -import {getTestCacheDir, testLocalConfigData} from '../../test_util.js'; -import {type BaseCommand} from '../../../src/commands/base.js'; -import {Flags as flags} from '../../../src/commands/flags.js'; -import {SoloLogger} from '../../../src/core/logging.js'; -import {type Opts} from '../../../src/types/command_types.js'; -import fs from 'fs'; -import {stringify} from 'yaml'; -import {type 
Cluster, KubeConfig} from '@kubernetes/client-node'; -import {type ListrTaskWrapper} from 'listr2'; -import {ContextCommand} from '../../../src/commands/context/index.js'; -import {type CommandFlag} from '../../../src/types/flag_types.js'; - -describe('ContextCommandTasks unit tests', () => { - const filePath = `${getTestCacheDir('ContextCommandTasks')}/localConfig.yaml`; - const sandbox = sinon.createSandbox(); - let namespacePromptStub: sinon.SinonStub; - let clusterNamePromptStub: sinon.SinonStub; - let contextPromptStub: sinon.SinonStub; - let tasks: ContextCommandTasks; - let command: BaseCommand; - let loggerStub: sinon.SinonStubbedInstance; - let localConfig: LocalConfig; - - const getBaseCommandOpts = ( - sandbox: sinon.SinonSandbox, - remoteConfig: any = {}, - // @ts-ignore - stubbedFlags: Record[] = [], - ) => { - const loggerStub = sandbox.createStubInstance(SoloLogger); - const k8Stub = sandbox.createStubInstance(K8); - k8Stub.getContexts.returns([ - {cluster: 'cluster-1', user: 'user-1', name: 'context-1', namespace: 'deployment-1'}, - {cluster: 'cluster-2', user: 'user-2', name: 'context-2', namespace: 'deployment-2'}, - {cluster: 'cluster-3', user: 'user-3', name: 'context-3', namespace: 'deployment-3'}, - ]); - const kubeConfigStub = sandbox.createStubInstance(KubeConfig); - kubeConfigStub.getCurrentContext.returns('context-from-kubeConfig'); - kubeConfigStub.getCurrentCluster.returns({ - name: 'cluster-3', - caData: 'caData', - caFile: 'caFile', - server: 'server-3', - skipTLSVerify: true, - tlsServerName: 'tls-3', - } as Cluster); - - const remoteConfigManagerStub = sandbox.createStubInstance(RemoteConfigManager); - remoteConfigManagerStub.modify.callsFake(async callback => { - await callback(remoteConfig); - }); - - k8Stub.getKubeConfig.returns(kubeConfigStub); - - const configManager = sandbox.createStubInstance(ConfigManager); - - for (let i = 0; i < stubbedFlags.length; i++) { - 
configManager.getFlag.withArgs(stubbedFlags[i][0]).returns(stubbedFlags[i][1]); - } - - return { - logger: loggerStub, - helm: sandbox.createStubInstance(Helm), - k8: k8Stub, - chartManager: sandbox.createStubInstance(ChartManager), - configManager, - depManager: sandbox.createStubInstance(DependencyManager), - localConfig: new LocalConfig(filePath), - downloader: sandbox.createStubInstance(PackageDownloader), - keyManager: sandbox.createStubInstance(KeyManager), - accountManager: sandbox.createStubInstance(AccountManager), - platformInstaller: sandbox.createStubInstance(PlatformInstaller), - profileManager: sandbox.createStubInstance(ProfileManager), - leaseManager: sandbox.createStubInstance(LeaseManager), - certificateManager: sandbox.createStubInstance(CertificateManager), - remoteConfigManager: remoteConfigManagerStub, - } as Opts; - }; - - describe('updateLocalConfig', () => { - async function runUpdateLocalConfigTask(opts) { - command = new ContextCommand(opts); - tasks = new ContextCommandTasks(command); - const taskObj = tasks.updateLocalConfig({}); - await taskObj.task({config: {}}, sandbox.stub() as unknown as ListrTaskWrapper); - return command; - } - - afterEach(async () => { - await fs.promises.unlink(filePath); - sandbox.restore(); - }); - - after(() => {}); - - beforeEach(async () => { - namespacePromptStub = sandbox.stub(flags.namespace, 'prompt').callsFake(() => { - return new Promise(resolve => { - resolve('deployment-3'); - }); - }); - clusterNamePromptStub = sandbox.stub(flags.clusterName, 'prompt').callsFake(() => { - return new Promise(resolve => { - resolve('cluster-3'); - }); - }); - contextPromptStub = sandbox.stub(flags.context, 'prompt').callsFake(() => { - return new Promise(resolve => { - resolve('context-3'); - }); - }); - loggerStub = sandbox.createStubInstance(SoloLogger); - await fs.promises.writeFile(filePath, stringify(testLocalConfigData)); - }); - - it('should update currentDeployment with clusters from remoteConfig', async () 
=> { - const remoteConfig = { - clusters: { - 'cluster-2': 'deployment', - }, - }; - const opts = getBaseCommandOpts(sandbox, remoteConfig, []); - command = await runUpdateLocalConfigTask(opts); // @ts-ignore - localConfig = new LocalConfig(filePath); - - expect(localConfig.currentDeploymentName).to.equal('deployment'); - expect(localConfig.getCurrentDeployment().clusters).to.deep.equal(['cluster-2']); - expect(localConfig.clusterContextMapping).to.deep.equal({ - 'cluster-1': 'context-1', - 'cluster-2': 'context-2', - }); - }); - - it('should update clusterContextMapping with provided context', async () => { - const remoteConfig = { - clusters: { - 'cluster-2': 'deployment', - }, - }; - const opts = getBaseCommandOpts(sandbox, remoteConfig, [[flags.context, 'provided-context']]); - command = await runUpdateLocalConfigTask(opts); // @ts-ignore - localConfig = new LocalConfig(filePath); - - expect(localConfig.currentDeploymentName).to.equal('deployment'); - expect(localConfig.getCurrentDeployment().clusters).to.deep.equal(['cluster-2']); - expect(localConfig.clusterContextMapping).to.deep.equal({ - 'cluster-1': 'context-1', - 'cluster-2': 'provided-context', - }); - }); - - it('should update multiple clusterContextMappings with provided contexts', async () => { - const remoteConfig = { - clusters: { - 'cluster-2': 'deployment', - 'cluster-3': 'deployment', - 'cluster-4': 'deployment', - }, - }; - const opts = getBaseCommandOpts(sandbox, remoteConfig, [ - [flags.context, 'provided-context-2,provided-context-3,provided-context-4'], - ]); - command = await runUpdateLocalConfigTask(opts); // @ts-ignore - localConfig = new LocalConfig(filePath); - - expect(localConfig.currentDeploymentName).to.equal('deployment'); - expect(localConfig.getCurrentDeployment().clusters).to.deep.equal(['cluster-2', 'cluster-3', 'cluster-4']); - expect(localConfig.clusterContextMapping).to.deep.equal({ - 'cluster-1': 'context-1', - 'cluster-2': 'provided-context-2', - 'cluster-3': 
'provided-context-3', - 'cluster-4': 'provided-context-4', - }); - }); - - it('should update multiple clusterContextMappings with default KubeConfig context if quiet=true', async () => { - const remoteConfig = { - clusters: { - 'cluster-2': 'deployment', - 'cluster-3': 'deployment', - }, - }; - const opts = getBaseCommandOpts(sandbox, remoteConfig, [[flags.quiet, true]]); - command = await runUpdateLocalConfigTask(opts); // @ts-ignore - localConfig = new LocalConfig(filePath); - - expect(localConfig.currentDeploymentName).to.equal('deployment'); - expect(localConfig.getCurrentDeployment().clusters).to.deep.equal(['cluster-2', 'cluster-3']); - expect(localConfig.clusterContextMapping).to.deep.equal({ - 'cluster-1': 'context-1', - 'cluster-2': 'context-2', - 'cluster-3': 'context-from-kubeConfig', - }); - }); - - it('should update multiple clusterContextMappings with prompted context no value was provided', async () => { - const remoteConfig = { - clusters: { - 'cluster-2': 'deployment', - 'new-cluster': 'deployment', - }, - }; - const opts = getBaseCommandOpts(sandbox, remoteConfig, []); - - command = await runUpdateLocalConfigTask(opts); // @ts-ignore - localConfig = new LocalConfig(filePath); - - expect(localConfig.currentDeploymentName).to.equal('deployment'); - expect(localConfig.getCurrentDeployment().clusters).to.deep.equal(['cluster-2', 'new-cluster']); - expect(localConfig.clusterContextMapping).to.deep.equal({ - 'cluster-1': 'context-1', - 'cluster-2': 'context-2', - 'new-cluster': 'context-3', // prompted value - }); - }); - }); - - describe('selectContext', () => { - async function runSelectContextTask(opts) { - command = new ContextCommand(opts); - tasks = new ContextCommandTasks(command); - const taskObj = tasks.selectContext({}); - await taskObj.task({config: {}}, sandbox.stub() as unknown as ListrTaskWrapper); - return command; - } - - afterEach(async () => { - await fs.promises.unlink(filePath); - sandbox.restore(); - }); - - beforeEach(async () => { 
- namespacePromptStub = sandbox.stub(flags.namespace, 'prompt').callsFake(() => { - return new Promise(resolve => { - resolve('deployment-3'); - }); - }); - clusterNamePromptStub = sandbox.stub(flags.clusterName, 'prompt').callsFake(() => { - return new Promise(resolve => { - resolve('cluster-3'); - }); - }); - contextPromptStub = sandbox.stub(flags.context, 'prompt').callsFake(() => { - return new Promise(resolve => { - resolve('context-3'); - }); - }); - loggerStub = sandbox.createStubInstance(SoloLogger); - await fs.promises.writeFile(filePath, stringify(testLocalConfigData)); - }); - - it('should use first provided context', async () => { - const opts = getBaseCommandOpts(sandbox, {}, [ - [flags.context, 'provided-context-1,provided-context-2,provided-context-3'], - ]); - - command = await runSelectContextTask(opts); // @ts-ignore - expect(command.getK8().getKubeConfig().setCurrentContext).to.have.been.calledWith('provided-context-1'); - }); - - it('should use local config mapping to connect to first provided cluster', async () => { - const opts = getBaseCommandOpts(sandbox, {}, [[flags.clusterName, 'cluster-2,cluster-3']]); - - command = await runSelectContextTask(opts); // @ts-ignore - expect(command.getK8().getKubeConfig().setCurrentContext).to.have.been.calledWith('context-2'); - }); - - it('should prompt for context if selected cluster is not found in local config mapping', async () => { - const opts = getBaseCommandOpts(sandbox, {}, [[flags.clusterName, 'cluster-3']]); - - command = await runSelectContextTask(opts); // @ts-ignore - expect(command.getK8().getKubeConfig().setCurrentContext).to.have.been.calledWith('context-3'); - }); - - it('should use default kubeConfig context if selected cluster is not found in local config mapping and quiet=true', async () => { - const opts = getBaseCommandOpts(sandbox, {}, [ - [flags.clusterName, 'unknown-cluster'], - [flags.quiet, true], - ]); - - command = await runSelectContextTask(opts); // @ts-ignore - 
expect(command.getK8().getKubeConfig().setCurrentContext).to.have.been.calledWith('context-from-kubeConfig'); - }); - - it('should use context from local config mapping for the first cluster from the selected deployment', async () => { - const opts = getBaseCommandOpts(sandbox, {}, [[flags.namespace, 'deployment-2']]); - - command = await runSelectContextTask(opts); // @ts-ignore - expect(command.getK8().getKubeConfig().setCurrentContext).to.have.been.calledWith('context-2'); - }); - - it('should prompt for context if selected deployment is found in local config but the context is not', async () => { - const opts = getBaseCommandOpts(sandbox, {}, [[flags.namespace, 'deployment-3']]); - - command = await runSelectContextTask(opts); // @ts-ignore - expect(command.getK8().getKubeConfig().setCurrentContext).to.have.been.calledWith('context-3'); - }); - - it('should use default context if selected deployment is found in local config but the context is not and quiet=true', async () => { - const opts = getBaseCommandOpts(sandbox, {}, [ - [flags.namespace, 'deployment-3'], - [flags.quiet, true], - ]); - - command = await runSelectContextTask(opts); // @ts-ignore - expect(command.getK8().getKubeConfig().setCurrentContext).to.have.been.calledWith('context-from-kubeConfig'); - }); - - it('should prompt for clusters and contexts if selected deployment is not found in local config', async () => { - const opts = getBaseCommandOpts(sandbox, {}, [[flags.namespace, 'deployment-4']]); - - command = await runSelectContextTask(opts); // @ts-ignore - expect(command.getK8().getKubeConfig().setCurrentContext).to.have.been.calledWith('context-3'); - }); - - it('should use clusters and contexts from kubeConfig if selected deployment is not found in local config and quiet=true', async () => { - const opts = getBaseCommandOpts(sandbox, {}, [ - [flags.namespace, 'deployment-4'], - [flags.quiet, true], - ]); - - command = await runSelectContextTask(opts); // @ts-ignore - 
expect(command.getK8().getKubeConfig().setCurrentContext).to.have.been.calledWith('context-from-kubeConfig'); - }); - }); -}); diff --git a/test/unit/commands/network.test.ts b/test/unit/commands/network.test.ts index cf11457cc..e57f71fbf 100644 --- a/test/unit/commands/network.test.ts +++ b/test/unit/commands/network.test.ts @@ -77,6 +77,10 @@ describe('NetworkCommand unit tests', () => { opts.k8.waitForPodReady = sinon.stub(); opts.k8.waitForPods = sinon.stub(); opts.k8.readNamespacedLease = sinon.stub(); + opts.k8.isMinioInstalled = sinon.stub(); + opts.k8.isPrometheusInstalled = sinon.stub(); + opts.k8.isCertManagerInstalled = sinon.stub(); + opts.k8.logger = opts.logger; container.registerInstance(K8, opts.k8); diff --git a/version.ts b/version.ts index 0a3e1eef4..6f63efbcc 100644 --- a/version.ts +++ b/version.ts @@ -20,7 +20,7 @@ */ export const HELM_VERSION = 'v3.14.2'; -export const SOLO_CHART_VERSION = '0.39.0'; +export const SOLO_CHART_VERSION = '0.41.0'; export const HEDERA_PLATFORM_VERSION = 'v0.58.1'; export const MIRROR_NODE_VERSION = '0.118.1'; export const HEDERA_EXPLORER_VERSION = '0.2.1';