From d1a4e7fe839e324b53ab854008948b28d490643a Mon Sep 17 00:00:00 2001 From: dharamveergit Date: Tue, 19 Mar 2024 21:25:45 +0530 Subject: [PATCH 1/2] Refactor hero section and add advert section --- src/components/home/hero/hero.astro | 67 +++++++++++++---------------- src/content/Homepage/index.md | 38 +++++++++++----- src/content/config.ts | 39 ++++++++++++----- src/layouts/layout.astro | 24 ++++++++++- src/pages/index.astro | 4 +- 5 files changed, 112 insertions(+), 60 deletions(-) diff --git a/src/components/home/hero/hero.astro b/src/components/home/hero/hero.astro index 7df7c168..125ccaa6 100644 --- a/src/components/home/hero/hero.astro +++ b/src/components/home/hero/hero.astro @@ -1,5 +1,11 @@ --- import Button from "@/components/ui/button-link.astro"; +import type { CollectionEntry } from "astro:content"; + +interface Props { + heroSection: CollectionEntry<"Homepage">["data"]["heroSection"]; +} + const { heroSection } = Astro.props; --- @@ -20,44 +26,29 @@ const { heroSection } = Astro.props;

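The substance of the hero refactor is the prop typing: instead of accepting untyped frontmatter, the component indexes into the content collection's inferred data type. A minimal sketch of the pattern (the `HeroCard`/`HeroButton` aliases and the `buttonVariant` helper are illustrative, not part of this patch):

```ts
import type { CollectionEntry } from "astro:content";

// Index into the collection's inferred data type so the component stays
// in lockstep with the zod schema in src/content/config.ts.
type HeroSection = CollectionEntry<"Homepage">["data"]["heroSection"];

interface Props {
  heroSection: HeroSection;
}

// Each card carries its own button list; `type` is already narrowed to
// "primary" | "secondary" by the schema, so no runtime check is needed.
type HeroCard = HeroSection["cards"][number];
type HeroButton = HeroCard["buttons"][number];

function buttonVariant(btn: HeroButton): "primary" | "secondary" {
  return btn.type;
}
```

Because the type is derived from the schema, a later schema change such as renaming `cards` surfaces as a compile error in the component rather than a silent runtime break.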
- - diff --git a/src/content/Homepage/index.md b/src/content/Homepage/index.md index 4c764346..2f0665f0 100644 --- a/src/content/Homepage/index.md +++ b/src/content/Homepage/index.md @@ -1,18 +1,36 @@ --- +advert: + title: Access NVIDIA H100, A100, and A6000 at industry-leading pricing → + link: "#" + # hero section content heroSection: title: The World's
Premier Decentralized
Compute Marketplace description: "Akash is an open network that lets users buy and sell computing resources securely and efficiently. Purpose-built for public utility." - - primaryButton: - label: Get Started - link: "#getting-started" - enable: true - - secondaryButton: - label: Deploy Now - link: "https://console.akash.network" - enable: true + cards: + - title: "Explore network resources and pricing" + description: "Browse a wide range of cloud resources and live network pricing" + buttons: + - label: "Explore pricing" + link: "https://akash.network/about/pricing/custom/" + type: secondary + - label: "View GPU availability" + link: "https://stats.akash.network/" + type: primary + + - title: "Provide compute and earn" + description: "Become an Akash Provider by offering your hardware on the network and earn when users deploy" + buttons: + - label: "Become a Provider" + link: "https://akash.network/providers" + type: secondary + + - title: "Deploy with Akash Console" + description: "Get started with the network's user-friendly deployment console" + buttons: + - label: "Deploy now" + link: "https://console.akash.network/" + type: secondary # infrastructure section content infrastructureSection: diff --git a/src/content/config.ts b/src/content/config.ts index b664944d..45869136 100644 --- a/src/content/config.ts +++ b/src/content/config.ts @@ -25,19 +25,38 @@ const homePage = defineCollection({ // Type-check frontmatter using a schema schema: ({ image }) => { return z.object({ + advert: z.object({ + title: z.string(), + + link: z.string(), + }), + heroSection: z.object({ title: z.string(), description: z.string(), - primaryButton: z.object({ - label: z.string(), - link: z.string(), - enable: z.boolean(), - }), - secondaryButton: z.object({ - label: z.string(), - link: z.string(), - enable: z.boolean(), - }), + cards: z.array( + z.object({ + title: z.string(), + description: z.string(), + buttons: z.array( + z.object({ + label: z.string(), + link: z.string(), + type: z.union([z.literal("primary"), z.literal("secondary")]), + }), + ), + }), + ), + // primaryButton: z.object({ + // label: z.string(), + // link: z.string(), + // enable: z.boolean(), + // }), + // secondaryButton: z.object({ + // label: z.string(), + // link: z.string(), + // enable: z.boolean(), + // }), }), infrastructureSection: z.object({ title: z.string(), diff --git a/src/layouts/layout.astro b/src/layouts/layout.astro index 74495829..ab722cb0 100644 --- a/src/layouts/layout.astro +++ b/src/layouts/layout.astro @@ -15,12 +15,34 @@ interface Props { title: string; description?: string; image?: string; + advert?: { + title: string; + link: string; + }; } -const { title, description, image } = Astro.props; +const { title, description, image, advert } = Astro.props; --- + { + advert && ( + + ) + }
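In `src/content/config.ts` above, the two fixed button objects give way to a `cards` array whose buttons constrain `type` to a union of literals. A small standalone sketch of how that constraint behaves at build time (the sample values are illustrative):

```ts
import { z } from "astro:content";

// Mirrors the button shape added to the homepage schema. The union of
// literals behaves like an enum: any other string fails validation,
// and a failing content collection fails the build.
const buttonSchema = z.object({
  label: z.string(),
  link: z.string(),
  type: z.union([z.literal("primary"), z.literal("secondary")]),
});

const bad = buttonSchema.safeParse({
  label: "Deploy now",
  link: "/deploy",
  type: "tertiary", // not an allowed literal
});

console.log(bad.success); // false
```

`z.enum(["primary", "secondary"])` is an equivalent, slightly terser spelling of the same constraint.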
diff --git a/src/pages/index.astro b/src/pages/index.astro index a36d8643..43fe0030 100644 --- a/src/pages/index.astro +++ b/src/pages/index.astro @@ -34,6 +34,7 @@ const posts = (await getCollection("Blog")) return dateB.getTime() - dateA.getTime(); }); const { + advert, heroSection, infrastructureSection, featureSection, @@ -46,6 +47,7 @@ console.log(homepage); --- @@ -80,7 +82,7 @@ console.log(homepage); Date: Tue, 19 Mar 2024 23:04:39 +0530 Subject: [PATCH 2/2] Update index.astro, index.md, and hamburger-menu.tsx --- src/components/header/hamburger-menu.tsx | 4 +- .../gpu-resource-enablement/index.md | 2 +- .../ip-leases-provider-enablement/index.md | 6 +- .../additional-k8s-resources/index.md | 60 ++++--- .../index.md | 162 ++++++++++++++---- .../providers/provider-faq-and-guide/index.md | 2 +- src/content/Homepage/index.md | 6 +- src/pages/index.astro | 4 +- 8 files changed, 179 insertions(+), 67 deletions(-) diff --git a/src/components/header/hamburger-menu.tsx b/src/components/header/hamburger-menu.tsx index 49a2c8a3..6d2d85ac 100644 --- a/src/components/header/hamburger-menu.tsx +++ b/src/components/header/hamburger-menu.tsx @@ -90,13 +90,13 @@ const Panel = ({ currentPath, open }: { currentPath: string; open: any }) => {
- Get Started - + */} Open main menu diff --git a/src/content/Docs/providers/build-a-cloud-provider/gpu-resource-enablement/index.md b/src/content/Docs/providers/build-a-cloud-provider/gpu-resource-enablement/index.md index 70472ab0..87617042 100644 --- a/src/content/Docs/providers/build-a-cloud-provider/gpu-resource-enablement/index.md +++ b/src/content/Docs/providers/build-a-cloud-provider/gpu-resource-enablement/index.md @@ -13,7 +13,7 @@ The steps involved in enabling your Akash Provider to host GPU resources are cov - [Apply NVIDIA Runtime Engine](#apply-nvidia-runtime-engine) - [Update Akash Provider](#update-akash-provider) - [GPU Test Deployments](#gpu-test-deployments) -- [GPU Provider Troubleshooting](../../../../providers/akash-provider-troubleshooting/gpu-provider-troubleshooting) +- [GPU Provider Troubleshooting](/docs/providers/provider-faq-and-guide/#gpu-provider-troubleshooting) ## GPU Provider Configuration diff --git a/src/content/Docs/providers/build-a-cloud-provider/ip-leases-provider-enablement/index.md b/src/content/Docs/providers/build-a-cloud-provider/ip-leases-provider-enablement/index.md index 81fa48c7..c6a149eb 100644 --- a/src/content/Docs/providers/build-a-cloud-provider/ip-leases-provider-enablement/index.md +++ b/src/content/Docs/providers/build-a-cloud-provider/ip-leases-provider-enablement/index.md @@ -41,9 +41,9 @@ In this guide we present paths to install MetalLB both via Helm Charts and Kubes Sections within this guide: -- [New MetalLB Deployment via Helm](metallb-install.md#option-1-deploy-metallb-with-helm) -- [New MetalLB Deployment via Kubespray](metallb-install.md#option-2-deploy-metallb-using-kubespray) -- [Migration of MetalLB Version 0.12.X to 0.13.x](metallb-install.md#migrating-metallb-0.12.x-to-0.13.x) +- [New MetalLB Deployment via Helm](#option-1-deploy-metallb-with-helm) +- [New MetalLB Deployment via Kubespray](#option-2-deploy-metallb-using-kubespray) +- [Migration of MetalLB Version 0.12.X to 0.13.x](#migrating-metallb-012x-to-013x) ### Option 1: Deploy MetalLB with Helm diff --git a/src/content/Docs/providers/build-a-cloud-provider/kubernetes-cluster-for-akash-providers/additional-k8s-resources/index.md b/src/content/Docs/providers/build-a-cloud-provider/kubernetes-cluster-for-akash-providers/additional-k8s-resources/index.md index e1339a1c..8e2f4f02 100644 --- a/src/content/Docs/providers/build-a-cloud-provider/kubernetes-cluster-for-akash-providers/additional-k8s-resources/index.md +++ b/src/content/Docs/providers/build-a-cloud-provider/kubernetes-cluster-for-akash-providers/additional-k8s-resources/index.md @@ -12,31 +12,45 @@ linkTitle: "Additional K8s Resources" The Kubespray hosts.yaml inventory file is composed of 3 groups: -* **kube\_node**: list of Kubernetes nodes where the pods will run. -* **kube\_control\_plane**: list of servers where Kubernetes control plane components (apiserver, scheduler, controller) will run. -* **etcd**: list of servers to compose the etcd server. You should have at least 3 servers for failover purpose. +- **kube_node**: list of Kubernetes nodes where the pods will run. +- **kube_control_plane**: list of servers where Kubernetes control plane components (apiserver, scheduler, controller) will run. +- **etcd**: list of servers to compose the etcd server. You should have at least 3 servers for failover purpose. 
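The etcd sizing guidance above follows from quorum arithmetic: etcd commits writes only while a majority of members is reachable, so even member counts add failure modes without adding tolerance. A quick illustration (the function name is illustrative):

```ts
// etcd stays available while a majority of members is healthy, so an
// n-member cluster tolerates floor((n - 1) / 2) failed members.
function etcdFaultTolerance(members: number): number {
  return Math.floor((members - 1) / 2);
}

for (const n of [1, 2, 3, 4, 5]) {
  console.log(`${n} member(s): tolerates ${etcdFaultTolerance(n)} failure(s)`);
}
// 1 → 0, 2 → 0, 3 → 1, 4 → 1, 5 → 2
```

This is also why the inventory guidance later in this patch insists on one or three control plane nodes under `kube_control_plane`, never two.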
 Please follow these links for YAML examples, depending on your preferred topology:
 
-* [All-In-One Node](kubespray-hosts.yaml-examples.md#all-in-one-node)
-* [One Control Plane Node with Multiple Worker Nodes](kubespray-hosts.yaml-examples.md#one-control-plane-node-with-multiple-worker-nodes)
-* [Multiple Control Plane Nodes with Multiple Work Nodes](kubespray-hosts.yaml-examples.md#multiple-control-plane-nodes-with-multiple-work-nodes)
+- [Kubespray Hosts.Yaml Examples](#kubespray-hostsyaml-examples)
+  - [Hosts.Yaml Overview](#hostsyaml-overview)
+- [All-In-One Node](#all-in-one-node)
+  - [Topology](#topology)
+  - [ Pros](#-pros)
+  - [Cons](#cons)
+  - [Example Hosts.yaml File](#example-hostsyaml-file)
+- [One Control Plane Node with Multiple Worker Nodes](#one-control-plane-node-with-multiple-worker-nodes)
+  - [Topology](#topology-1)
+  - [Pros](#pros)
+  - [Cons](#cons-1)
+  - [Example Hosts.yaml File](#example-hostsyaml-file-1)
+- [Multiple Control Plane Nodes with Multiple Work Nodes](#multiple-control-plane-nodes-with-multiple-work-nodes)
+  - [Topology](#topology-2)
+  - [Pros](#pros-1)
+  - [Cons](#cons-2)
+  - [Example Hosts.yaml File](#example-hostsyaml-file-2)
 
 ## All-In-One Node
 
 ### Topology
 
-* node1 - is a single control plane + etcd node
-* node1 - is also running the pods
+- node1 - is a single control plane + etcd node
+- node1 - is also running the pods
 
 ### Pros
 
-* Easy to manage
+- Easy to manage
 
 ### Cons
 
-* Single point of failure for K8s/etcd/pods;
-* Thinner security barrier since pods are running on control plane / etcd nodes;
+- Single point of failure for K8s/etcd/pods;
+- Thinner security barrier since pods are running on control plane / etcd nodes;
 
 ### Example Hosts.yaml File
 
@@ -63,17 +77,17 @@ Please following these links for YAML examples and depending on your preferred t
 
 ### Topology
 
-* node1 - single control plane + etcd node
-* node2..N - kube nodes where the pods will run
+- node1 - single control plane + etcd node
+- node2..N - kube nodes where the pods will run
 
 ### Pros
 
-* Better security barrier since pods aren't running on control plane / etcd nodes
-* Can scale by adding either more control plane nodes or worker nodes
+- Better security barrier since pods aren't running on control plane / etcd nodes
+- Can scale by adding either more control plane nodes or worker nodes
 
 ### Cons
 
-* Single point of failure only for K8s/etcd but not the pods
+- Single point of failure only for K8s/etcd but not the pods
 
 ### Example Hosts.yaml File
 
@@ -101,18 +115,18 @@ Please following these links for YAML examples and depending on your preferred t
 
 ### Topology
 
-* Nodes 1.-3 - the control plane + etcd nodes; (This makes K8s High Available)
-* Node 4.-N - the kube nodes on which the Pods will run
+- Nodes 1.-3 - the control plane + etcd nodes; (This makes K8s Highly Available)
+- Node 4.-N - the kube nodes on which the Pods will run
 
 ### Pros
 
-* Highly available control plane / etcd
-* Better security barrier since pods aren't running on control plane / etcd nodes
-* Can scale by adding either more control plane nodes or worker nodes
+- Highly available control plane / etcd
+- Better security barrier since pods aren't running on control plane / etcd nodes
+- Can scale by adding either more control plane nodes or worker nodes
 
 ### Cons
 
-* More complex environment makes its configuration & management more difficult
+- More complex environment makes its configuration & management more difficult
 
 ### Example Hosts.yaml File
 
@@ -139,4 +153,4 @@ Please following these links for YAML
examples and depending on your preferred t kube_node: calico_rr: hosts: {} -``` \ No newline at end of file +``` diff --git a/src/content/Docs/providers/build-a-cloud-provider/kubernetes-cluster-for-akash-providers/kubernetes-cluster-for-akash-providers/index.md b/src/content/Docs/providers/build-a-cloud-provider/kubernetes-cluster-for-akash-providers/kubernetes-cluster-for-akash-providers/index.md index 5751896d..9c475088 100644 --- a/src/content/Docs/providers/build-a-cloud-provider/kubernetes-cluster-for-akash-providers/kubernetes-cluster-for-akash-providers/index.md +++ b/src/content/Docs/providers/build-a-cloud-provider/kubernetes-cluster-for-akash-providers/kubernetes-cluster-for-akash-providers/index.md @@ -19,21 +19,21 @@ The Kubernetes instructions in this guide are intended for audiences that have t - **Server Administration Skills** - necessary for setting up servers/network making up the Kubernetes cluster - **Kubernetes Experience** - a base level of Kubernetes administration is highly recommended -> Please consider using the [Praetor](../../community-solutions/praetor.md) application to build an Akash Provider for small and medium sized environments which require little customization. +> Please consider using the [Praetor](/providers) application to build an Akash Provider for small and medium sized environments which require little customization. ## Guide Sections -- [Clone the Kubespray Project](#step-1---clone-the-kubespray-project) +- [Clone the Kubespray Project](#clone-the-kubespray-project) - [Install Ansible](#step-2---install-ansible) - [Ansible Access to Kubernetes Cluster](#step-3---ansible-access-to-kubernetes-cluster) - [Ansible Inventory](#step-4---ansible-inventory) -- [Additional Verifications](#step-5---enable-gvisor) +- [Additional Verifications](#step-5---additional-verificationsconfig) - [DNS Configuration](#step-6---dns-configuration) -- [Provider Ephemeral Storage Config](#step-6---provider-ephemeral-storage-config) -- [Create Kubernetes Cluster](#step-6---create-kubernetes-cluster) -- [Confirm Kubernetes Cluster](#step-7---confirm-kubernetes-cluster) -- [Custom Kernel Parameters](#step-10---custom-kernel-parameters) -- [Review Firewall Policies](#step-9---review-firewall-policies) +- [Provider Ephemeral Storage Config](#step-7---provider-ephemeral-storage-config-optional) +- [Create Kubernetes Cluster](#step-8---create-kubernetes-cluster) +- [Confirm Kubernetes Cluster](#step-9---confirm-kubernetes-cluster) +- [Custom Kernel Parameters](#step-9---custom-kernel-parameters) +- [Review Firewall Policies](#step-10---review-firewall-policies) ## STEP 1 - Clone the Kubespray Project @@ -54,7 +54,7 @@ The recommended minimum number of hosts is four for a production Provider Kubern - We recommend running a single worker node per physical server as CPU is typically the largest resource bottleneck. The use of a single worker node allows larger workloads to be deployed on your provider. -- If you intended to build a provider with persistent storage please refer to host storage requirements detailed [here](./docs/providers/build-a-cloud-provider/helm-based-provider-persistent-storage-enablement/). +- If you intended to build a provider with persistent storage please refer to host storage requirements detailed [here](/docs/providers/build-a-cloud-provider/helm-based-provider-persistent-storage-enablement/). 
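Most of the cross-reference fixes in this patch rewrite file-relative links into in-page fragments, and the fragment spellings follow the usual markdown heading-slug rules: lowercase, punctuation such as `.`, `/`, and parentheses stripped, whitespace runs collapsed to hyphens. A rough sketch of that algorithm, an approximation rather than the site's exact implementation:

```ts
// Approximates common markdown heading-slug rules: lowercase, strip
// punctuation (".", "/", "(", ")"), collapse whitespace runs to "-".
function headingToAnchor(heading: string): string {
  return heading
    .trim()
    .toLowerCase()
    .replace(/[^\w\s-]/g, "")
    .replace(/\s+/g, "-");
}

console.log(headingToAnchor("STEP 7 - Provider Ephemeral Storage Config (Optional)"));
// "step-7---provider-ephemeral-storage-config-optional"
console.log(headingToAnchor("Migration of MetalLB Version 0.12.X to 0.13.x"));
// "migration-of-metallb-version-012x-to-013x"
```

The doubled and tripled hyphens in anchors such as `#step-7---provider-ephemeral-storage-config-optional` fall out of ` - ` collapsing to `---`, so they are correct as written.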
### Kubernetes Cluster Software/Hardware Requirements and Recommendations @@ -105,7 +105,6 @@ cd ~ git clone -b v2.24.1 --depth=1 https://github.com/kubernetes-sigs/kubespray.git cd kubespray - ``` ### Cluster Updates @@ -268,15 +267,15 @@ DEBUG: adding host node4 to group kube_node - Update the kube_control_plane category if needed with full list of hosts that should be master nodes - Ensure you have either 1 or 3 Kubernetes control plane nodes under `kube_control_plane`. If 2 are listed, change that to 1 or 3, depending on whether you want Kubernetes be Highly Available. - Ensure you have only control plane nodes listed under `etcd`. If you would like to review additional best practices for etcd, please review this [guide](https://rafay.co/the-kubernetes-current/etcd-kubernetes-what-you-should-know/). -- For additional details regarding `hosts.yaml` best practices and example configurations, review this [guide](additional-k8s-resources/kubespray-hosts.yaml-examples.md). +- For additional details regarding `hosts.yaml` best practices and example configurations, review this [guide](/docs/providers/build-a-cloud-provider/kubernetes-cluster-for-akash-providers/additional-k8s-resources/#kubespray-hostsyaml-examples). ``` vi ~/kubespray/inventory/akash/hosts.yaml ``` -#### **Example hosts.yaml File** +##### **Example hosts.yaml File** -- Additional hosts.yaml examples, based on different Kubernetes cluster topologies, may be found [here](additional-k8s-resources/kubespray-hosts.yaml-examples.md) +- Additional hosts.yaml examples, based on different Kubernetes cluster topologies, may be found [here](/docs/providers/build-a-cloud-provider/kubernetes-cluster-for-akash-providers/additional-k8s-resources/#kubespray-hostsyaml-examples) ``` all: @@ -420,7 +419,7 @@ container_manager: containerd > Skip if you are not using gVisor -If you are using a newer systemd version, your container will get stuck in ContainerCreating state on your provider with gVisor enabled. Please reference [this document](/docs/providers/build-a-cloud-provider/gvisor-issue---no-system-cgroup-v2-support/) for details regarding this issue and the recommended workaround. +If you are using a newer systemd version, your container will get stuck in ContainerCreating state on your provider with gVisor enabled. Please reference [this document](/docs/providers/build-a-cloud-provider/gvisor-issue-no-system-cgroup-v2-support/) for details regarding this issue and the recommended workaround. ## STEP 6 - DNS Configuration @@ -680,7 +679,7 @@ systemctl start kubelet systemctl enable kubelet ``` -#### Verify +##### Verify ``` journalctl -u kubelet -f @@ -721,7 +720,7 @@ With inventory in place we are ready to build the Kubernetes cluster via Ansible - Note - the cluster creation may take several minutes to complete - If the Kubespray process fails or is interpreted, run the Ansible playbook again and it will complete any incomplete steps on the subsequent run -> _**NOTE**_ - if you intend to enable GPU resources on your provider - consider completing this [step](/docs/other-resources/experimental/) now to avoid having to run Kubespray on multiple occasions. Only the `NVIDIA Runtime Configuration` section of the `GPU Resource Enablement` guide should be completed at this time and then return to this guide/step. 
+> _**NOTE**_ - if you intend to enable GPU resources on your provider - consider completing this [step](/docs/providers/build-a-cloud-provider/gpu-resource-enablement/#gpu-provider-configuration) now to avoid having to run Kubespray on multiple occasions. Only the `NVIDIA Runtime Configuration` section of the `GPU Resource Enablement` guide should be completed at this time and then return to this guide/step. ``` cd ~/kubespray @@ -729,39 +728,105 @@ cd ~/kubespray ansible-playbook -i inventory/akash/hosts.yaml -b -v --private-key=~/.ssh/id_rsa cluster.yml ``` -## STEP 8 - Review Firewall Policies +## STEP 9 - Confirm Kubernetes Cluster -If local firewall instances are running on Kubernetes control-plane and worker nodes, add the following policies. +A couple of quick Kubernetes cluster checks are in order before moving into next steps. -### Kubernetes Port List +### SSH into Kubernetes Master Node -In this step we will cover common Kubernetes ports that need to be opened for cross server communications. For an exhaustive and constantly updated reference, please use the following list published by the Kubernetes developers. +- The verifications in this section must be completed on a master node with kubectl access to the cluster. -- [Exhaustive list of Kubernetes Ports](https://kubernetes.io/docs/reference/ports-and-protocols/) +### Confirm Kubernetes Nodes -### **Etcd Key Value Store Policies** +``` +kubectl get nodes +``` -Ensure the following ports are open in between all Kubernetes etcd instances: +#### **Example output from a healthy Kubernetes cluster** ``` -- 2379/tcp for client requests; (Kubernetes control plane to etcd) -- 2380/tcp for peer communication; (etcd to etcd communication) +root@node1:/home/ubuntu# kubectl get nodes + +NAME STATUS ROLES AGE VERSION +node1 Ready control-plane,master 5m48s v1.22.5 +node2 Ready control-plane,master 5m22s v1.22.5 +node3 Ready control-plane,master 5m12s v1.22.5 +node4 Ready 4m7s v1.22.5 ``` -### **API Server Policies** +### **Confirm Kubernetes Pods** -Ensure the following ports are open in between all Kubernetes API server instances: +``` +kubectl get pods -n kube-system +``` + +#### Example output of the pods that are the brains of the cluster ``` -- 6443/tcp - Kubernetes API server +root@node1:/home/ubuntu# kubectl get pods -n kube-system + +NAME READY STATUS RESTARTS AGE +calico-kube-controllers-5788f6558-mzm64 1/1 Running 1 (4m53s ago) 4m54s +calico-node-2g4pr 1/1 Running 0 5m29s +calico-node-6hrj4 1/1 Running 0 5m29s +calico-node-9dqc4 1/1 Running 0 5m29s +calico-node-zt8ls 1/1 Running 0 5m29s +coredns-8474476ff8-9sgm5 1/1 Running 0 4m32s +coredns-8474476ff8-x67xd 1/1 Running 0 4m27s +dns-autoscaler-5ffdc7f89d-lnpmm 1/1 Running 0 4m28s +kube-apiserver-node1 1/1 Running 1 7m30s +kube-apiserver-node2 1/1 Running 1 7m13s +kube-apiserver-node3 1/1 Running 1 7m3s +kube-controller-manager-node1 1/1 Running 1 7m30s +kube-controller-manager-node2 1/1 Running 1 7m13s +kube-controller-manager-node3 1/1 Running 1 7m3s +kube-proxy-75s7d 1/1 Running 0 5m56s +kube-proxy-kpxtm 1/1 Running 0 5m56s +kube-proxy-stgwd 1/1 Running 0 5m56s +kube-proxy-vndvs 1/1 Running 0 5m56s +kube-scheduler-node1 1/1 Running 1 7m37s +kube-scheduler-node2 1/1 Running 1 7m13s +kube-scheduler-node3 1/1 Running 1 7m3s +nginx-proxy-node4 1/1 Running 0 5m58s +nodelocaldns-7znkj 1/1 Running 0 4m28s +nodelocaldns-g8dqm 1/1 Running 0 4m27s +nodelocaldns-gf58m 1/1 Running 0 4m28s +nodelocaldns-n88fj 1/1 Running 0 4m28s ``` -### Worker Node Policies +### Confirm DNS -Ensure the 
following ports are open in between all Kubernetes worker nodes: +#### Verify CoreDNS Config + +> This is to verify that Kubespray properly set the expected upstream servers in the DNS Configuration previous step ``` -- 10250/tcp - Kubelet API server; (Kubernetes control plane to kubelet) +kubectl -n kube-system get cm coredns -o yaml | grep forward +``` + +#### Verify All DNS Related Pods Are in a Running State + +``` +kubectl -n kube-system get pods -l k8s-app=kube-dns +kubectl -n kube-system get pods -l k8s-app=nodelocaldns +``` + +With kubespray version >= `2.22.x`: + +``` +kubectl -n kube-system get pods -l k8s-app=node-local-dns +``` + +### Verify etcd Status and Health + +> Commands should be run on the control plane node to ensure health of the Kubernetes `etcd` database + +``` +export $(grep -v '^#' /etc/etcd.env | xargs -d '\n') +etcdctl -w table member list +etcdctl endpoint health --cluster -w table +etcdctl endpoint status --cluster -w table +etcdctl check perf ``` ## STEP 9 - Custom Kernel Parameters @@ -789,3 +854,38 @@ EOF ``` sysctl -p /etc/sysctl.d/90-akash.conf ``` + +## STEP 10 - Review Firewall Policies + +If local firewall instances are running on Kubernetes control-plane and worker nodes, add the following policies. + +### Kubernetes Port List + +In this step we will cover common Kubernetes ports that need to be opened for cross server communications. For an exhaustive and constantly updated reference, please use the following list published by the Kubernetes developers. + +- [Exhaustive list of Kubernetes Ports](https://kubernetes.io/docs/reference/ports-and-protocols/) + +### **Etcd Key Value Store Policies** + +Ensure the following ports are open in between all Kubernetes etcd instances: + +``` +- 2379/tcp for client requests; (Kubernetes control plane to etcd) +- 2380/tcp for peer communication; (etcd to etcd communication) +``` + +### **API Server Policies** + +Ensure the following ports are open in between all Kubernetes API server instances: + +``` +- 6443/tcp - Kubernetes API server +``` + +### Worker Node Policies + +Ensure the following ports are open in between all Kubernetes worker nodes: + +``` +- 10250/tcp - Kubelet API server; (Kubernetes control plane to kubelet) +``` diff --git a/src/content/Docs/providers/provider-faq-and-guide/index.md b/src/content/Docs/providers/provider-faq-and-guide/index.md index 28da5670..b99b1220 100644 --- a/src/content/Docs/providers/provider-faq-and-guide/index.md +++ b/src/content/Docs/providers/provider-faq-and-guide/index.md @@ -1443,7 +1443,7 @@ Should your Akash Provider encounter issues during the installation process or i - [Basic GPU Resource Verifications](#basic-gpu-resource-verifications) - [Examine Linux Kernel Logs for GPU Resource Errors and Mismatches](#examine-linux-kernel-logs-for-gpu-resource-errors-and-mismatches) -- [Ensure Correct Version/Presence of NVIDIA Device Plugin](#ensure-correct-version-presence-of-nvidia-device-plugin) +- [Ensure Correct Version/Presence of NVIDIA Device Plugin](#ensure-correct-versionpresence-of-nvidia-device-plugin) - [CUDA Drivers Fabric Manager](#cuda-drivers-fabric-manager) ### Basic GPU Resource Verifications diff --git a/src/content/Homepage/index.md b/src/content/Homepage/index.md index 2f0665f0..4ff09af2 100644 --- a/src/content/Homepage/index.md +++ b/src/content/Homepage/index.md @@ -12,7 +12,7 @@ heroSection: description: "Browse a wide range of cloud resources and live network pricing" buttons: - label: "Explore pricing" - link: 
"https://akash.network/about/pricing/custom/" + link: "/about/pricing/custom/" type: secondary - label: "View GPU availability" link: "https://stats.akash.network/" @@ -22,14 +22,14 @@ heroSection: description: "Become an Akash Provider by offering your hardware on the network and earn when users deploy" buttons: - label: "Become a Provider" - link: "https://akash.network/providers" + link: "/providers" type: secondary - title: "Deploy with Akash Console" description: "Get started with the network's user-friendly deployment console" buttons: - label: "Deploy now" - link: "https://console.akash.network/" + link: "/deploy" type: secondary # infrastructure section content diff --git a/src/pages/index.astro b/src/pages/index.astro index 43fe0030..e6a53feb 100644 --- a/src/pages/index.astro +++ b/src/pages/index.astro @@ -42,8 +42,6 @@ const { getStartedSection, testimonialsSection, } = homepage.data; - -console.log(homepage); --- - +