diff --git a/examples/hashsphere-0001/Taskfile.yml b/examples/hashsphere-0001/Taskfile.yml
new file mode 100644
index 000000000..626fc510a
--- /dev/null
+++ b/examples/hashsphere-0001/Taskfile.yml
@@ -0,0 +1,113 @@
+version: 3
+includes:
+  helper: ../../HelperTasks.yml
+dotenv:
+  - .env
+
+env:
+  SOLO_CHART_VERSION: 0.36.0
+  CONSENSUS_NODE_VERSION: v0.57.1
+  SOLO_NAMESPACE: solo-{{ env "USER" | replace "." "-" | trunc 63 | default "test" }}
+  SOLO_CLUSTER_SETUP_NAMESPACE: solo-setup
+  SOLO_CLUSTER_RELEASE_NAME: solo-cluster-setup
+  SOLO_NETWORK_SIZE: 5
+  SOLO_CLUSTER_NAME: solo-cluster
+  MIRROR_RELEASE_NAME: mirror
+
+vars:
+  solo_settings_file: "{{.ROOT_DIR}}/settings.txt"
+  solo_values_file: "{{.ROOT_DIR}}/init-containers-values.yaml"
+  ip_list_template_file: "{{.ROOT_DIR}}/list-external-ips.gotemplate"
+  nodes:
+    ref: until (env "SOLO_NETWORK_SIZE" | default .SOLO_NETWORK_SIZE | int)
+  node_list_internal: "{{range $idx, $n := .nodes }}node{{add $n 1}},{{end}}"
+  node_identifiers: "{{ .node_list_internal | trimSuffix \",\" }}"
+  solo_user_dir: "{{ env \"HOME\" }}/.solo"
+  solo_cache_dir: "{{ .solo_user_dir }}/cache"
+  solo_logs_dir: "{{ .solo_user_dir }}/logs"
+  solo_keys_dir: "{{ .solo_cache_dir }}/keys"
+  solo_bin_dir: "{{ .solo_user_dir }}/bin"
+  minio_installed: false
+
+tasks:
+  default:
+    cmds:
+      - task: "helper:install:kubectl:darwin"
+      - task: "helper:install:kubectl:linux"
+      - task: "helper:install:solo"
+      - task: "install"
+      - task: "start"
+
+  install:
+    cmds:
+      - task: "helper:solo:init"
+      - task: "helper:solo:keys"
+      - task: "solo:cluster:setup"
+      - task: "solo:network:deploy"
+
+  start:
+    cmds:
+      - task: "helper:solo:node:start"
+
+  stop:
+    cmds:
+      - task: "helper:solo:node:stop"
+
+  show:ips:
+    cmds:
+      - task: "solo:node:addresses"
+
+  destroy:
+    cmds:
+      - task: "helper:solo:node:stop"
+      - task: "helper:solo:network:destroy"
+
+  clean:
+    cmds:
+      - task: "destroy"
+      - task: "clean:cache"
+      - task: "clean:logs"
+
+  clean:cache:
+    cmds:
+      - task: "helper:solo:cache:remove"
+
+  clean:logs:
+    cmds:
+      - task: "helper:solo:logs:remove"
+
+  # Do not use network:deploy from HelperTasks.yml, since a custom network needs extra settings and values files
+  solo:network:deploy:
+    internal: true
+    cmds:
+      - npm run build
+      - solo network deploy --namespace "${SOLO_NAMESPACE}" --node-aliases-unparsed {{.node_identifiers}} --release-tag "${CONSENSUS_NODE_VERSION}" --solo-chart-version "${SOLO_CHART_VERSION}" --values-file {{.solo_values_file}} --settings-txt {{.solo_settings_file}}
+      - solo node setup --namespace "${SOLO_NAMESPACE}" --node-aliases-unparsed {{.node_identifiers}} --release-tag "${CONSENSUS_NODE_VERSION}"
+
+  # Do not use node:addresses from HelperTasks.yml, since we need a Go template file to output the list of IPs
+  solo:node:addresses:
+    internal: true
+    cmds:
+      - kubectl get svc -n "${SOLO_NAMESPACE}" -l "solo.hedera.com/type=network-node-svc" --output=go-template-file={{ .ip_list_template_file }}
+
+  solo:cluster:minio:
+    cmds:
+      - |
+        if ! kubectl get svc -l app.kubernetes.io/name=minio-operator --all-namespaces --no-headers | grep -q .; then
+          echo "No services found with label app.kubernetes.io/name=minio-operator"
+          echo "--minio" > /tmp/.minio_flag
+        else
+          echo "--no-minio" > /tmp/.minio_flag
+        fi
+
+  solo:cluster:setup:
+    deps:
+      - task: "solo:cluster:minio"
+    status:
+      - helm list --all-namespaces | grep -qz "${SOLO_CLUSTER_RELEASE_NAME}"
+    cmds:
+      - npm run build
+      # Each Task cmd runs in its own shell, so the flag must be read in the same command that uses it
+      - |
+        MINIO_FLAG="$(cat /tmp/.minio_flag)"
+        solo cluster setup --cluster-setup-namespace "${SOLO_CLUSTER_SETUP_NAMESPACE}" "${MINIO_FLAG}"
diff --git a/examples/hashsphere-0001/init-containers-values.yaml b/examples/hashsphere-0001/init-containers-values.yaml
new file mode 100644
index 000000000..76e8437a8
--- /dev/null
+++ b/examples/hashsphere-0001/init-containers-values.yaml
@@ -0,0 +1,168 @@
+# Hedera node configuration
+hedera:
+  initContainers:
+    - name: init-hedera-node
+      image: busybox:stable-musl
+      command: ["sh", "-c", "cp -r /etc /data-saved"]
+      volumeMounts:
+        - name: hgcapp-data-saved
+          mountPath: /data-saved
+  nodes:
+    - name: node1
+      accountId: 0.0.3
+      root:
+        resources:
+          requests:
+            cpu: 2
+            memory: 16Gi
+          limits:
+            cpu: 4
+            memory: 31Gi
+    - name: node2
+      accountId: 0.0.4
+      root:
+        resources:
+          requests:
+            cpu: 2
+            memory: 16Gi
+          limits:
+            cpu: 4
+            memory: 31Gi
+    - name: node3
+      accountId: 0.0.5
+      root:
+        resources:
+          requests:
+            cpu: 2
+            memory: 16Gi
+          limits:
+            cpu: 4
+            memory: 31Gi
+    - name: node4
+      accountId: 0.0.6
+      root:
+        resources:
+          requests:
+            cpu: 2
+            memory: 16Gi
+          limits:
+            cpu: 4
+            memory: 31Gi
+    - name: node5
+      accountId: 0.0.7
+      root:
+        resources:
+          requests:
+            cpu: 2
+            memory: 16Gi
+          limits:
+            cpu: 4
+            memory: 31Gi
+defaults:
+  envoyProxy:
+    loadBalancerEnabled: true
+  sidecars:
+    recordStreamUploader:
+      resources:
+        requests:
+          cpu: 100m
+          memory: 100Mi
+        limits:
+          cpu: 150m
+          memory: 200Mi
+    eventStreamUploader:
+      resources:
+        requests:
+          cpu: 100m
+          memory: 100Mi
+        limits:
+          cpu: 150m
+          memory: 200Mi
+    recordStreamSidecarUploader:
+      resources:
+        requests:
+          cpu: 100m
+          memory: 100Mi
+        limits:
+          cpu: 150m
+          memory: 200Mi
+  root:
+    resources:
+      requests:
+        cpu: 2
+        memory: 16Gi
+      limits:
+        cpu: 4
+        memory: 31Gi
+    extraEnv:
+      - name: JAVA_OPTS
+        value: "-XX:+UnlockExperimentalVMOptions -XX:+UseZGC -XX:ZAllocationSpikeTolerance=2 -XX:ConcGCThreads=4 -XX:MaxDirectMemorySize=4g -XX:MetaspaceSize=100M -XX:+ZGenerational -Xlog:gc*:gc.log --add-opens java.base/jdk.internal.misc=ALL-UNNAMED --add-opens java.base/java.nio=ALL-UNNAMED -Dio.netty.tryReflectionSetAccessible=true"
+      - name: JAVA_HEAP_MIN
+        value: "16g"
+      - name: JAVA_HEAP_MAX
+        value: "19g"
+deployment:
+  podAnnotations: {}
+  podLabels: {}
+  nodeSelector:
+    solo.hashgraph.io/role: "consensus-node"
+  tolerations:
+    - key: "solo.hashgraph.io/role"
+      operator: "Equal"
+      value: "consensus-node"
+      effect: "NoSchedule"
+minio-server:
+  secrets:
+    # This secret has [accessKey, secretKey] and will be randomly generated by helm
+    existingSecret: minio-secrets
+  tenant:
+    buckets:
+      - name: solo-streams
+      - name: solo-backups
+    name: minio
+    pools:
+      - servers: 1
+        name: pool-1
+        volumesPerServer: 1
+        size: 512Gi
+        storageClassName: standard-rwo
+        nodeSelector: {}
+    configuration:
+      name: minio-secrets
+    certificate:
+      requestAutoCert: false
+    environment:
+      MINIO_BROWSER_LOGIN_ANIMATION: off # https://github.com/minio/console/issues/2539#issuecomment-1619211962
+haproxyDeployment:
+  affinity:
+    podAntiAffinity:
+      requiredDuringSchedulingIgnoredDuringExecution:
+        - labelSelector:
+            matchExpressions:
+              - key: solo.hedera.com/type
+                operator: In
+                values:
+                  - network-node
+          topologyKey: kubernetes.io/hostname
+envoyDeployment:
+  affinity:
+    podAntiAffinity:
+      requiredDuringSchedulingIgnoredDuringExecution:
+        - labelSelector:
+            matchExpressions:
+              - key: solo.hedera.com/type
+                operator: In
+                values:
+                  - network-node
+          topologyKey: kubernetes.io/hostname
+minioDeployment:
+  affinity:
+    podAntiAffinity:
+      requiredDuringSchedulingIgnoredDuringExecution:
+        - labelSelector:
+            matchExpressions:
+              - key: solo.hedera.com/type
+                operator: In
+                values:
+                  - network-node
+          topologyKey: kubernetes.io/hostname
diff --git a/examples/hashsphere-0001/list-external-ips.gotemplate b/examples/hashsphere-0001/list-external-ips.gotemplate
new file mode 100644
index 000000000..247d492c6
--- /dev/null
+++ b/examples/hashsphere-0001/list-external-ips.gotemplate
@@ -0,0 +1,6 @@
+{{- range .items -}}
+  {{ $name := .metadata.name }}
+  {{- range .status.loadBalancer.ingress -}}
+    {{$name}} {{": "}} {{ .ip }} {{"\n"}}
+  {{- end -}}
+{{- end -}}
diff --git a/examples/hashsphere-0001/nlg-values.yaml b/examples/hashsphere-0001/nlg-values.yaml
new file mode 100644
index 000000000..70b0fa9ba
--- /dev/null
+++ b/examples/hashsphere-0001/nlg-values.yaml
@@ -0,0 +1,47 @@
+replicas: 1
+
+resources:
+  limits:
+    memory: 32Gi
+    cpu: '32'
+  requests:
+    memory: 16Gi
+    cpu: '16'
+
+nodeSelector:
+  solo.hashgraph.io/role: "test-clients"
+tolerations:
+  - key: "solo.hashgraph.io/role"
+    operator: "Equal"
+    value: "test-clients"
+    effect: "NoSchedule"
+affinity: {}
+
+loadGenerator:
+  java:
+    maxMemory: '48g'
+  test:
+    className: com.hedera.benchmark.NftTransferLoadTest
+    args:
+      - -c
+      - "7"
+      - -a
+      - "1000"
+      - -T
+      - "10"
+      - -n
+      - "10"
+      - -S
+      - "hot"
+      - -p
+      - "50"
+      - -t
+      - "1m"
+    properties:
+      - '34.118.231.223\:50211=0.0.3'
+      - '34.118.238.41\:50211=0.0.4'
+      - '34.118.235.163\:50211=0.0.5'
+      - '34.118.233.134\:50211=0.0.6'
+      - '34.118.238.65\:50211=0.0.7'
+      - '34.118.230.205\:50211=0.0.8'
+      - '34.118.225.213\:50211=0.0.9'
diff --git a/examples/hashsphere-0001/settings.txt b/examples/hashsphere-0001/settings.txt
new file mode 100644
index 000000000..6f587e119
--- /dev/null
+++ b/examples/hashsphere-0001/settings.txt
@@ -0,0 +1,16 @@
+checkSignedStateFromDisk, 1
+csvFileName, MainNetStats
+doUpnp, false
+loadKeysFromPfxFiles, 0
+maxOutgoingSyncs, 1
+reconnect.active, 1
+reconnect.reconnectWindowSeconds, -1
+showInternalStats, 1
+state.saveStatePeriod, 300
+useLoopbackIp, false
+waitAtStartup, false
+state.mainClassNameOverride, com.hedera.services.ServicesMain
+maxEventQueueForCons, 1000
+merkleDb.hashesRamToDiskThreshold, 8388608
+event.creation.maxCreationRate, 20
+virtualMap.familyThrottleThreshold, 6000000000
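
A minimal usage sketch for the example above, run from examples/hashsphere-0001 and assuming task, npm, helm, and a kubeconfig pointing at the target cluster are already in place (the helper tasks install kubectl and solo themselves); the task names come straight from the Taskfile in this patch:

    task            # default task: install kubectl/solo via helpers, then deploy the network and start the nodes
    task show:ips   # print the external IPs of the network-node services via list-external-ips.gotemplate
    task clean      # stop the nodes, destroy the network, and remove the local Solo cache and logs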