diff --git a/docs/src/assets/arch.dsl b/docs/src/assets/arch.dsl new file mode 100644 index 000000000..97ec4aa45 --- /dev/null +++ b/docs/src/assets/arch.dsl @@ -0,0 +1,173 @@ +!constant c4 "c4.dsl" + +workspace "Canonical K8s Workspace" { + model { + + admin = person "K8s Admin" "Responsible for the K8s cluster, has elevated permissions" + user = person "K8s User" "Interacts with the workloads hosted in K8s" + charm = softwareSystem "Charm K8s" "Orchestrating the lifecycle management of K8s" + + external_lb = softwareSystem "Load Balancer" "External LB, offered by the substrate (cloud)" "Extern" + storage = softwareSystem "Storage" "External storage, offered by the substrate (cloud)" "Extern" + iam = softwareSystem "Identity management system" "External identity system, offered by the substrate (cloud)" "Extern" + external_datastore = softwareSystem "External datastore" "postgres or etcd" "Extern" + + k8s_snap = softwareSystem "K8s Snap Distribution" "The Kubernetes distribution in a snap" { + + kubectl = container "Kubectl" "kubectl client for accessing the cluster" + + kubernetes = container "Kubernetes Services" "API server, kubelet, kube-proxy, scheduler, kube-controller" { + systemd = component "systemd daemons" "Daemons holding the k8s services" + apiserver = component "API server" + kubelet = component "kubelet" + kube_proxy = component "kube-proxy" + scheduler = component "scheduler" + kube_controller = component "kube-controller" + network = component "Network CNI" "The network implementation of K8s (from Cilium)" + storage_provider = component "Local storage provider" "Simple storage for workloads" + ingress = component "Ingress" "Ingress for workloads (from Cilium)" + gw = component "Gateway" "Gateway API for workloads (from Cilium)" + dns = component "DNS" "Internal DNS" + metrics_server = component "Metrics server" "Keep track of cluster metrics" + loadbalancer = component "Load-balancer" "The load balancer (from Cilium)" + } + + rt = container 
"Runtime" "Containerd and runc" + + k8sd = container "K8sd" "Deamon implementing the functionality available in the k8s snap" { + cli = component "CLI" "The CLI the offered" "CLI" + api = component "API via HTTP" "The API interface offered" "REST" + cluster_manager = component "CLuster management" "Management of the cluster with the help of MicroCluster" + } + + state = container "State" "Datastores holding the cluster state" { + k8sd_db = component "k8sd-dqlite" "MicroCluster DB" + k8s_dqlite = component "k8s-dqlite" "Datastore holding the K8s cluster state" + } + } + + admin -> cli "Administers the cluster" + admin -> kubectl "Uses to manage the cluster" + user -> loadbalancer "Interact with workloads hosted in K8s" + charm -> api "Orchestrates the lifecycle management of K8s" + + k8s_snap -> storage "Hosted workloads use storage" + k8s_snap -> iam "Users identity is retrieved" + + k8s_dqlite -> external_datastore "May be replaced by" "Any" "Runtime" + loadbalancer -> external_lb "May be replaced by" "Any" "Runtime" + + cluster_manager -> systemd "Configures" + + systemd -> apiserver "Is a service" + systemd -> kubelet "Is a service" + systemd -> kube_proxy "Is a service" + systemd -> kube_controller "Is a service" + systemd -> scheduler "Is a service" + + network -> apiserver "Keeps state in" + dns -> apiserver "Keeps state in" + apiserver -> k8s_dqlite "Uses by default" + + network -> ingress "May provide" "HTTP/HTTPS" "Runtime" + network -> gw "May provide" "HTTP/HTTPS" "Runtime" + network -> loadbalancer "May provide" "HTTP/HTTPS" "Runtime" + + cluster_manager -> k8sd_db "Keeps state in" + + kubectl -> apiserver "Interacts" + api -> systemd "Configures" + api -> rt "Configures" + api -> cluster_manager "Uses" + + cli -> api "CLI is based on the API primitives" + + } + views { + + systemLandscape Overview "K8s Snap Overview" { + include * + autoLayout + } + + container k8s_snap { + include * + autoLayout + title "K8s Snap Context View" + } + + component state 
{ + include * + autoLayout + title "Datastores" + } + + component k8sd { + include * + autoLayout + title "k8sd" + } + + component kubernetes { + include * + autoLayout + title "Kubernetes services" + } + + styles { + element "Person" { + background #08427b + color #ffffff + fontSize 22 + shape Person + } + + element "Software System" { + background #1168bd + color #ffffff + } + element "Structurizr" { + background #77FF44 + color #000000 + } + element "Container" { + background #438dd5 + color #ffffff + } + element "Component" { + background #85bbf0 + color #000000 + } + element "BuiltIn" { + background #1988f6 + color #FFFFFF + } + element "Extern" { + background #dddddd + color #000000 + } + + element "Extension" { + background #FFdd88 + color #000000 + } + + element "File" { + shape Folder + background #448704 + color #ffffff + } + + relationship "Relationship" { + dashed false + } + + relationship "Runtime" { + dashed true + color #0000FF + } + + } + } + +} \ No newline at end of file diff --git a/docs/src/assets/k8s-container.puml b/docs/src/assets/k8s-container.puml index 1f287a5bd..417d29342 100644 --- a/docs/src/assets/k8s-container.puml +++ b/docs/src/assets/k8s-container.puml @@ -1,6 +1,6 @@ @startuml set separator none -title K8s Snap Container View +title K8s Snap Context View top to bottom direction @@ -9,33 +9,30 @@ top to bottom direction !include Person(K8sAdmin, "K8s Admin", $descr="Responsible for the K8s cluster, has elevated permissions", $tags="", $link="") -Person(K8sUser, "K8s User", $descr="Interacts with the workloads hosted in K8s", $tags="", $link="") -System(K8scharm, "K8s charm", $descr="Orchestrating the lifecycle management of K8s", $tags="", $link="") +Person(K8sUser, "K8s User", $descr="Interact with the workloads hosted in K8s", $tags="", $link="") +System(CharmK8s, "Charm K8s", $descr="Orchestrating the lifecycle management of K8s", $tags="", $link="") System(LoadBalancer, "Load Balancer", $descr="External LB, offered by the 
substrate (cloud)", $tags="", $link="") System(Externaldatastore, "External datastore", $descr="postgress or etcd", $tags="", $link="") System_Boundary("K8sSnapDistribution_boundary", "K8s Snap Distribution", $tags="") { Container(K8sSnapDistribution.KubernetesServices, "Kubernetes Services", $techn="", $descr="API server, kubelet, kube-proxy, scheduler, kube-controller", $tags="", $link="") Container(K8sSnapDistribution.Runtime, "Runtime", $techn="", $descr="Containerd and runc", $tags="", $link="") - Container(K8sSnapDistribution.Components, "Components", $techn="", $descr="Core components for the k8s distribution", $tags="", $link="") - Container(K8sSnapDistribution.K8sd, "K8sd", $techn="", $descr="Daemon implementing the functionality available in the k8s snap", $tags="", $link="") + Container(K8sSnapDistribution.K8sd, "K8sd", $techn="", $descr="Deamon implementing the functionality available in the k8s snap", $tags="", $link="") Container(K8sSnapDistribution.State, "State", $techn="", $descr="Datastores holding the cluster state", $tags="", $link="") Container(K8sSnapDistribution.Kubectl, "Kubectl", $techn="", $descr="kubectl client for accessing the cluster", $tags="", $link="") } -Rel(K8sAdmin, K8sSnapDistribution.K8sd, "Creates and administers the cluster", $techn="", $tags="", $link="") +Rel(K8sAdmin, K8sSnapDistribution.K8sd, "Sets up and configured the cluster", $techn="", $tags="", $link="") Rel(K8sAdmin, K8sSnapDistribution.Kubectl, "Uses to manage the cluster", $techn="", $tags="", $link="") -Rel(K8sUser, K8sSnapDistribution.Components, "Interacts with workloads hosted in K8s", $techn="", $tags="", $link="") -Rel(K8scharm, K8sSnapDistribution.K8sd, "Orchestrates the lifecycle management of K8s", $techn="", $tags="", $link="") +Rel(K8sUser, K8sSnapDistribution.KubernetesServices, "Interacts with workloads hosted in K8s", $techn="", $tags="", $link="") +Rel(CharmK8s, K8sSnapDistribution.K8sd, "Orchestrates the lifecycle management of K8s", $techn="", 
$tags="", $link="") Rel(K8sSnapDistribution.State, Externaldatastore, "May be replaced by", $techn="Any", $tags="", $link="") -Rel(K8sSnapDistribution.Components, LoadBalancer, "May be replaced by", $techn="Any", $tags="", $link="") -Rel(K8sSnapDistribution.K8sd, K8sSnapDistribution.Components, "Handles", $techn="", $tags="", $link="") +Rel(K8sSnapDistribution.KubernetesServices, LoadBalancer, "May be replaced by", $techn="Any", $tags="", $link="") Rel(K8sSnapDistribution.K8sd, K8sSnapDistribution.KubernetesServices, "Configures", $techn="", $tags="", $link="") -Rel(K8sSnapDistribution.Components, K8sSnapDistribution.KubernetesServices, "Keeps state in", $techn="", $tags="", $link="") Rel(K8sSnapDistribution.KubernetesServices, K8sSnapDistribution.State, "Uses by default", $techn="", $tags="", $link="") Rel(K8sSnapDistribution.K8sd, K8sSnapDistribution.State, "Keeps state in", $techn="", $tags="", $link="") Rel(K8sSnapDistribution.Kubectl, K8sSnapDistribution.KubernetesServices, "Interacts", $techn="", $tags="", $link="") Rel(K8sSnapDistribution.K8sd, K8sSnapDistribution.Runtime, "Configures", $techn="", $tags="", $link="") SHOW_LEGEND(true) -@enduml +@enduml \ No newline at end of file diff --git a/docs/src/assets/k8s-services.puml b/docs/src/assets/k8s-services.puml index 598fcdff2..4086dc608 100644 --- a/docs/src/assets/k8s-services.puml +++ b/docs/src/assets/k8s-services.puml @@ -9,31 +9,44 @@ top to bottom direction !include !include -Container(K8sSnapDistribution.K8sd, "K8sd", $techn="", $descr="Daemon implementing the functionality available in the k8s snap", $tags="", $link="") -Container(K8sSnapDistribution.Components, "Components", $techn="", $descr="Core components for the k8s distribution", $tags="", $link="") -Container(K8sSnapDistribution.Kubectl, "Kubectl", $techn="", $descr="kubectl client for accessing the cluster", $tags="", $link="") +Container(K8sSnapDistribution.K8sd, "K8sd", $techn="", $descr="Deamon implementing the functionality available in 
the k8s snap", $tags="", $link="") Container(K8sSnapDistribution.State, "State", $techn="", $descr="Datastores holding the cluster state", $tags="", $link="") +Person(K8sUser, "K8s User", $descr="Interact with the workloads hosted in K8s", $tags="", $link="") +System(LoadBalancer, "Load Balancer", $descr="External LB, offered by the substrate (cloud)", $tags="", $link="") +Container(K8sSnapDistribution.Kubectl, "Kubectl", $techn="", $descr="kubectl client for accessing the cluster", $tags="", $link="") Container_Boundary("K8sSnapDistribution.KubernetesServices_boundary", "Kubernetes Services", $tags="") { - Component(K8sSnapDistribution.KubernetesServices.systemddaemons, "systemd daemons", $techn="", $descr="Daemons running the k8s services", $tags="", $link="") + Component(K8sSnapDistribution.KubernetesServices.systemddaemons, "systemd daemons", $techn="", $descr="Daemons holding the k8s services", $tags="", $link="") Component(K8sSnapDistribution.KubernetesServices.APIserver, "API server", $techn="", $descr="", $tags="", $link="") Component(K8sSnapDistribution.KubernetesServices.kubelet, "kubelet", $techn="", $descr="", $tags="", $link="") Component(K8sSnapDistribution.KubernetesServices.kubeproxy, "kube-proxy", $techn="", $descr="", $tags="", $link="") Component(K8sSnapDistribution.KubernetesServices.scheduler, "scheduler", $techn="", $descr="", $tags="", $link="") Component(K8sSnapDistribution.KubernetesServices.kubecontroller, "kube-controller", $techn="", $descr="", $tags="", $link="") + Component(K8sSnapDistribution.KubernetesServices.NetworkCNI, "Network CNI", $techn="", $descr="The network implementation of K8s (from Cilium)", $tags="", $link="") + Component(K8sSnapDistribution.KubernetesServices.Localstorageprovider, "Local storage provider", $techn="", $descr="Simple storage for workloads", $tags="", $link="") + Component(K8sSnapDistribution.KubernetesServices.Ingress, "Ingress", $techn="", $descr="Ingress for workloads (from Cilium)", $tags="", 
$link="") + Component(K8sSnapDistribution.KubernetesServices.Gateway, "Gateway", $techn="", $descr="Gateway API for workloads (from Cilium)", $tags="", $link="") + Component(K8sSnapDistribution.KubernetesServices.DNS, "DNS", $techn="", $descr="Internal DNS", $tags="", $link="") + Component(K8sSnapDistribution.KubernetesServices.Metricsserver, "Metrics server", $techn="", $descr="Keep track of cluster metrics", $tags="", $link="") + Component(K8sSnapDistribution.KubernetesServices.Loadbalancer, "Load-balancer", $techn="", $descr="The load balancer (from Cilium)", $tags="", $link="") } -Rel(K8sSnapDistribution.K8sd, K8sSnapDistribution.Components, "Handles", $techn="", $tags="", $link="") +Rel(K8sUser, K8sSnapDistribution.KubernetesServices.Loadbalancer, "Interacts with workloads hosted in K8s", $techn="", $tags="", $link="") +Rel(K8sSnapDistribution.KubernetesServices.Loadbalancer, LoadBalancer, "May be replaced by", $techn="Any", $tags="", $link="") Rel(K8sSnapDistribution.K8sd, K8sSnapDistribution.KubernetesServices.systemddaemons, "Configures", $techn="", $tags="", $link="") Rel(K8sSnapDistribution.KubernetesServices.systemddaemons, K8sSnapDistribution.KubernetesServices.APIserver, "Is a service", $techn="", $tags="", $link="") Rel(K8sSnapDistribution.KubernetesServices.systemddaemons, K8sSnapDistribution.KubernetesServices.kubelet, "Is a service", $techn="", $tags="", $link="") Rel(K8sSnapDistribution.KubernetesServices.systemddaemons, K8sSnapDistribution.KubernetesServices.kubeproxy, "Is a service", $techn="", $tags="", $link="") Rel(K8sSnapDistribution.KubernetesServices.systemddaemons, K8sSnapDistribution.KubernetesServices.kubecontroller, "Is a service", $techn="", $tags="", $link="") Rel(K8sSnapDistribution.KubernetesServices.systemddaemons, K8sSnapDistribution.KubernetesServices.scheduler, "Is a service", $techn="", $tags="", $link="") -Rel(K8sSnapDistribution.Components, K8sSnapDistribution.KubernetesServices.APIserver, "Keeps state in", $techn="", 
$tags="", $link="") +Rel(K8sSnapDistribution.KubernetesServices.NetworkCNI, K8sSnapDistribution.KubernetesServices.APIserver, "Keeps state in", $techn="", $tags="", $link="") +Rel(K8sSnapDistribution.KubernetesServices.DNS, K8sSnapDistribution.KubernetesServices.APIserver, "Keeps state in", $techn="", $tags="", $link="") Rel(K8sSnapDistribution.KubernetesServices.APIserver, K8sSnapDistribution.State, "Uses by default", $techn="", $tags="", $link="") +Rel(K8sSnapDistribution.KubernetesServices.NetworkCNI, K8sSnapDistribution.KubernetesServices.Ingress, "May provide", $techn="HTTP/HTTPS", $tags="", $link="") +Rel(K8sSnapDistribution.KubernetesServices.NetworkCNI, K8sSnapDistribution.KubernetesServices.Gateway, "May provide", $techn="HTTP/HTTPS", $tags="", $link="") +Rel(K8sSnapDistribution.KubernetesServices.NetworkCNI, K8sSnapDistribution.KubernetesServices.Loadbalancer, "May provide", $techn="HTTP/HTTPS", $tags="", $link="") Rel(K8sSnapDistribution.K8sd, K8sSnapDistribution.State, "Keeps state in", $techn="", $tags="", $link="") Rel(K8sSnapDistribution.Kubectl, K8sSnapDistribution.KubernetesServices.APIserver, "Interacts", $techn="", $tags="", $link="") SHOW_LEGEND(true) -@enduml +@enduml \ No newline at end of file diff --git a/docs/src/assets/k8sd-component.puml b/docs/src/assets/k8sd-component.puml index 010d75a29..f95cd278c 100644 --- a/docs/src/assets/k8sd-component.puml +++ b/docs/src/assets/k8sd-component.puml @@ -10,31 +10,26 @@ top to bottom direction !include Person(K8sAdmin, "K8s Admin", $descr="Responsible for the K8s cluster, has elevated permissions", $tags="", $link="") -System(K8scharm, "K8s charm", $descr="Orchestrating the lifecycle management of K8s", $tags="", $link="") Container(K8sSnapDistribution.Runtime, "Runtime", $techn="", $descr="Containerd and runc", $tags="", $link="") -Container(K8sSnapDistribution.Components, "Components", $techn="", $descr="Core components for the k8s distribution", $tags="", $link="") +System(CharmK8s, "Charm K8s", 
$descr="Orchestrating the lifecycle management of K8s", $tags="", $link="") Container(K8sSnapDistribution.State, "State", $techn="", $descr="Datastores holding the cluster state", $tags="", $link="") Container(K8sSnapDistribution.KubernetesServices, "Kubernetes Services", $techn="", $descr="API server, kubelet, kube-proxy, scheduler, kube-controller", $tags="", $link="") Container_Boundary("K8sSnapDistribution.K8sd_boundary", "K8sd", $tags="") { - Component(K8sSnapDistribution.K8sd.CLI, "CLI", $techn="CLI", $descr="The CLI offered", $tags="", $link="") - Component(K8sSnapDistribution.K8sd.RESTAPI, "REST API", $techn="REST", $descr="The REST interface offered", $tags="", $link="") - Component(K8sSnapDistribution.K8sd.Clustermanagement, "Cluster management", $techn="", $descr="Management of the cluster with the help of MicroCluster", $tags="", $link="") - Component(K8sSnapDistribution.K8sd.Componentmanagement, "Component management", $techn="", $descr="Management of the CNI, DNS, storage, ingress, metrics", $tags="", $link="") + Component(K8sSnapDistribution.K8sd.CLI, "CLI", $techn="CLI", $descr="The CLI the offered", $tags="", $link="") + Component(K8sSnapDistribution.K8sd.APIviaHTTP, "API via HTTP", $techn="REST", $descr="The API interface offered", $tags="", $link="") + Component(K8sSnapDistribution.K8sd.CLustermanagement, "CLuster management", $techn="", $descr="Management of the cluster with the help of MicroCluster", $tags="", $link="") } -Rel(K8sAdmin, K8sSnapDistribution.K8sd.CLI, "Sets up and configures the cluster", $techn="", $tags="", $link="") -Rel(K8scharm, K8sSnapDistribution.K8sd.RESTAPI, "Orchestrates the lifecycle management of K8s", $techn="", $tags="", $link="") -Rel(K8sSnapDistribution.K8sd.Componentmanagement, K8sSnapDistribution.Components, "Handles", $techn="", $tags="", $link="") -Rel(K8sSnapDistribution.K8sd.Clustermanagement, K8sSnapDistribution.KubernetesServices, "Configures", $techn="", $tags="", $link="") 
-Rel(K8sSnapDistribution.Components, K8sSnapDistribution.KubernetesServices, "Keeps state in", $techn="", $tags="", $link="") +Rel(K8sAdmin, K8sSnapDistribution.K8sd.CLI, "Sets up and configures the cluster", $techn="", $tags="", $link="") +Rel(CharmK8s, K8sSnapDistribution.K8sd.APIviaHTTP, "Orchestrates the lifecycle management of K8s", $techn="", $tags="", $link="") +Rel(K8sSnapDistribution.K8sd.CLustermanagement, K8sSnapDistribution.KubernetesServices, "Configures", $techn="", $tags="", $link="") Rel(K8sSnapDistribution.KubernetesServices, K8sSnapDistribution.State, "Uses by default", $techn="", $tags="", $link="") -Rel(K8sSnapDistribution.K8sd.Clustermanagement, K8sSnapDistribution.State, "Keeps state in", $techn="", $tags="", $link="") -Rel(K8sSnapDistribution.K8sd.RESTAPI, K8sSnapDistribution.KubernetesServices, "Configures", $techn="", $tags="", $link="") -Rel(K8sSnapDistribution.K8sd.RESTAPI, K8sSnapDistribution.Runtime, "Configures", $techn="", $tags="", $link="") -Rel(K8sSnapDistribution.K8sd.RESTAPI, K8sSnapDistribution.K8sd.Componentmanagement, "Uses", $techn="", $tags="", $link="") -Rel(K8sSnapDistribution.K8sd.RESTAPI, K8sSnapDistribution.K8sd.Clustermanagement, "Uses", $techn="", $tags="", $link="") -Rel(K8sSnapDistribution.K8sd.CLI, K8sSnapDistribution.K8sd.RESTAPI, "CLI is based on the API primitives", $techn="", $tags="", $link="") +Rel(K8sSnapDistribution.K8sd.CLustermanagement, K8sSnapDistribution.State, "Keeps state in", $techn="", $tags="", $link="") +Rel(K8sSnapDistribution.K8sd.APIviaHTTP, K8sSnapDistribution.KubernetesServices, "Configures", $techn="", $tags="", $link="") +Rel(K8sSnapDistribution.K8sd.APIviaHTTP, K8sSnapDistribution.Runtime, "Configures", $techn="", $tags="", $link="") +Rel(K8sSnapDistribution.K8sd.APIviaHTTP, K8sSnapDistribution.K8sd.CLustermanagement, "Uses", $techn="", $tags="", $link="") +Rel(K8sSnapDistribution.K8sd.CLI, K8sSnapDistribution.K8sd.APIviaHTTP, "CLI is based on the API primitives", $techn="", $tags="", 
$link="") SHOW_LEGEND(true) -@enduml +@enduml \ No newline at end of file diff --git a/docs/src/assets/overview.puml b/docs/src/assets/overview.puml index 5e13f0719..120bfb12e 100644 --- a/docs/src/assets/overview.puml +++ b/docs/src/assets/overview.puml @@ -1,6 +1,6 @@ @startuml set separator none -title Canonical Kubernetes system context +title System Landscape top to bottom direction @@ -8,21 +8,21 @@ top to bottom direction !include Person(K8sAdmin, "K8s Admin", $descr="Responsible for the K8s cluster, has elevated permissions", $tags="", $link="") -Person(K8sUser, "K8s User", $descr="Interacts with the workloads hosted in K8s", $tags="", $link="") -System(K8scharm, "K8s charm", $descr="Orchestrating the lifecycle management of K8s", $tags="", $link="") +Person(K8sUser, "K8s User", $descr="Interact with the workloads hosted in K8s", $tags="", $link="") +System(CharmK8s, "Charm K8s", $descr="Orchestrating the lifecycle management of K8s", $tags="", $link="") System(LoadBalancer, "Load Balancer", $descr="External LB, offered by the substrate (cloud)", $tags="", $link="") System(Storage, "Storage", $descr="External storage, offered by the substrate (cloud)", $tags="", $link="") System(Identitymanagementsystem, "Identity management system", $descr="External identity system, offered by the substrate (cloud)", $tags="", $link="") System(Externaldatastore, "External datastore", $descr="postgress or etcd", $tags="", $link="") System(K8sSnapDistribution, "K8s Snap Distribution", $descr="The Kubernetes distribution in a snap", $tags="", $link="") -Rel(K8sAdmin, K8sSnapDistribution, "Administers the cluster", $techn="", $tags="", $link="") +Rel(K8sAdmin, K8sSnapDistribution, "Sets up and configured the cluster", $techn="", $tags="", $link="") Rel(K8sUser, K8sSnapDistribution, "Interacts with workloads hosted in K8s", $techn="", $tags="", $link="") -Rel(K8scharm, K8sSnapDistribution, "Orchestrates the lifecycle management of K8s", $techn="", $tags="", $link="") 
+Rel(CharmK8s, K8sSnapDistribution, "Orchestrates the lifecycle management of K8s", $techn="", $tags="", $link="") Rel(K8sSnapDistribution, Storage, "Hosted workloads use storage", $techn="", $tags="", $link="") -Rel(K8sSnapDistribution, Identitymanagementsystem, "User's identity is retrieved", $techn="", $tags="", $link="") +Rel(K8sSnapDistribution, Identitymanagementsystem, "User's identity is retrieved", $techn="", $tags="", $link="") Rel(K8sSnapDistribution, Externaldatastore, "May be replaced by", $techn="Any", $tags="", $link="") Rel(K8sSnapDistribution, LoadBalancer, "May be replaced by", $techn="Any", $tags="", $link="") SHOW_LEGEND(true) -@enduml +@enduml \ No newline at end of file diff --git a/docs/src/explanation/about.md b/docs/src/explanation/about.md index 74dc93f07..938cff9ad 100644 --- a/docs/src/explanation/about.md +++ b/docs/src/explanation/about.md @@ -24,7 +24,7 @@ In addition to the upstream Kubernetes services, Canonical Kubernetes also inclu - an ingress provider - a load-balancer - a gateway API controller -- a metrics server provided by the [Canonical Observability Stack(COS)][COS] +- a metrics server ## Where can I install it? @@ -35,7 +35,7 @@ the snap is also available. ## Can I use it to make a cluster? -Yes. Canonical Kubernetes is designed to be emminently scalable. You can start +Yes. Canonical Kubernetes is designed to be eminently scalable. You can start with a single node and add more as and when the need arises. Scale up or down at any time. Systems with more than three nodes will automatically become Highly Available. @@ -44,7 +44,7 @@ Highly Available. Each and every user will be supported by the community. For a more detailed look at what that entails, please see our [Community page]. If you need a -greater level of support, Cannonical provides [Ubuntu Pro], a comprehensive +greater level of support, Canonical provides [Ubuntu Pro], a comprehensive subscription for your open-source software stack. 
For more support options, visit the [Ubuntu support] page. diff --git a/docs/src/explanation/channels.md b/docs/src/explanation/channels.md index ca34cc1ab..693c97745 100644 --- a/docs/src/explanation/channels.md +++ b/docs/src/explanation/channels.md @@ -10,7 +10,7 @@ When installing or updating Canonical Kubernetes you can (and should in most cases) specify a channel. The channel specified is made up of two components; the **track** and the **risk level**. -The track will match the minor version of upstream Kubernetes. For example, +The track matches the minor version of upstream Kubernetes. For example, specifying the `1.30` track will match upstream releases of the same minor version ("1.30.0", "1.30.1", "1.30.x" etc.). Releases of Canonical Kubernetes closely follow the upstream releases and usually follow within 24 hours. @@ -18,14 +18,14 @@ closely follow the upstream releases and usually follow within 24 hours. The 'risk level' component of the channel is one of the following: - **`stable`**: Matches upstream stable releases -- **`candidate`**: Tracks upstream release candidate -- **`beta`**: Tracks upstream beta releases - expect bugs +- **`candidate`**: Holds the release candidates of the snap +- **`beta`**: Tracks the beta releases - expect bugs - **`edge`**: Experimental release including upstream alpha releases -Note that for each track, not all risk levels are guranteed to be available. -For example, there may be a new upstream version in devlopment which only has +Note that for each track, not all risk levels are guaranteed to be available. +For example, there may be a new upstream version in development which only has an `edge` level. For a mature release, there may no longer be any `beta` or -`edge`. In these cases, if you specify a risk level which has no releases for +`candidate`. In these cases, if you specify a risk level which has no releases for that track the snap system will choose the closest available release with a lower risk level. 
Whatever risk level specified is the **maximum** risk level of the snap that will be installed - if you choose `candidate` you will never @@ -42,8 +42,10 @@ snap info k8s Updates for upstream patch releases will happen automatically by default. For example, if you have selected the channel `1.30/stable`, your snap will refresh -itself on the usual snap [refresh schedule]. These updates should not effect -the operation of Canonical Kubernetes. +itself regularly keeping your cluster up-to-date with the latest patches. +For deployments where this behavior is undesirable you are given the option to +postpone, schedule or even block automatic updates. +The [Snap refreshes documentation] page outlines how to configure these options. To change the channel of an already installed snap, the `refresh` command can be used: @@ -91,3 +93,4 @@ Use `--channel=/candidate`. [Snapcraft documentation]: https://snapcraft.io/docs/channels +[Snap refreshes documentation]: https://microk8s.io/docs/snap-refreshes \ No newline at end of file diff --git a/docs/src/howto/index.md b/docs/src/howto/index.md index cd1dae0f3..8c5e7a4b9 100644 --- a/docs/src/howto/index.md +++ b/docs/src/howto/index.md @@ -32,7 +32,7 @@ Alternatively, the [Tutorials section] contains step-by-step tutorials to help guide you through exploring and using Canonical Kubernetes. For a better understanding of how Canonical Kubernetes works and related topics -such as security, our [Explanation section] helps you to expand your knowledge +such as security, our [Explanation section] helps you expand your knowledge and get the most out of Kubernetes. 
Finally, our [Reference section] is for when you need to check specific details diff --git a/docs/src/howto/install/snap.md b/docs/src/howto/install/snap.md index 3b566f2b7..c48d5b5e1 100644 --- a/docs/src/howto/install/snap.md +++ b/docs/src/howto/install/snap.md @@ -39,7 +39,7 @@ sudo snap install k8s --classic --channel=latest/edge ``` ```{note} -In the pre-release phase, `latest/edge` is the only channel available. +The `latest/edge` channel is always under active development. This is where you will find the latest features but you may also experience instability. ``` ## Bootstrap the cluster diff --git a/docs/src/howto/networking/default-dns.md b/docs/src/howto/networking/default-dns.md index db979d4bf..9d7f7428f 100644 --- a/docs/src/howto/networking/default-dns.md +++ b/docs/src/howto/networking/default-dns.md @@ -42,7 +42,7 @@ sudo k8s help enable Discover your configuration options by running: ```bash -sudo k8s set dns –help +sudo k8s set dns --help ``` You should see three options: @@ -76,10 +76,10 @@ desired values for your DNS configuration. Canonical Kubernetes also allows you to disable the built-in DNS, if you desire a custom solution: -``` {warning} Do not disable DNS unless you have a replacement configured. -Disabling DNS will disrupt internal cluster communication. Ensure a suitable +``` {warning} Disabling DNS will disrupt internal cluster communication. Ensure a suitable custom DNS solution is in place before disabling. 
You can re-enable DNS at any point, and your cluster will return to normal functionality.``` +``` ```bash sudo k8s disable dns diff --git a/docs/src/howto/networking/default-ingress.md b/docs/src/howto/networking/default-ingress.md index 3813ce74d..e665fea71 100644 --- a/docs/src/howto/networking/default-ingress.md +++ b/docs/src/howto/networking/default-ingress.md @@ -39,7 +39,7 @@ sudo k8s help enable Discover your configuration options by running: ```bash -sudo k8s set ingress –help +sudo k8s set ingress --help ``` You should see three options: diff --git a/docs/src/howto/storage.md b/docs/src/howto/storage.md index ba4680793..16f7cb069 100644 --- a/docs/src/howto/storage.md +++ b/docs/src/howto/storage.md @@ -1,6 +1,6 @@ # How to use default storage -Canonical Kubernetes offers a local storage option to quickly set up and run a +Canonical Kubernetes offers a local-storage option to quickly set up and run a cluster, especially for single-node support. This guide walks you through enabling and configuring this feature. @@ -13,55 +13,55 @@ This guide assumes the following: [getting-started-guide]) -## Enable Storage +## Enable Local Storage When bootstrapping the snap, the storage functionality is not enabled by default. To enable it, execute the following command: -```sh -sudo k8s enable storage +```bash +sudo k8s enable local-storage ``` -## Configure Storage +## Configure Local Storage While the storage option comes with sensible defaults, you can customise it to meet your requirements. Obtain the current configuration by running: -```sh -sudo k8s get storage +```bash +sudo k8s get local-storage ``` You can modify the configuration using the `set` command. 
For example, to change the local storage path: -```sh -sudo k8s set storage.local-path=/path/to/new/folder +```bash +sudo k8s set local-storage.local-path=/path/to/new/folder ``` -The storage functionality provides the following configuration options: +The local-storage functionality provides the following configuration options: -- **local-path**: path where the local files will be created. -- **reclaim-policy**: set the reclaim policy of the persistent volumes +- `local-path`: path where the local files will be created. +- `reclaim-policy`: set the reclaim policy of the persistent volumes provisioned. It should be one of "Retain", "Recycle", or "Delete". -- **set-default**: set the local-storage storage class to be the default. If +- `set-default`: set the local-storage storage class to be the default. If this flag is not set and the cluster has already a default storage class it is not changed. If this flag is not set and the cluster does not have a default class set then the class from the local-storage becomes the default one. -## Disable Storage +## Disable Local Storage The local storage option is suitable for single-node clusters and development -environments, but it has inherent limitations. For a production environment you -typically want a more sophisticated storage solution. To disable the storage -functionality, run: +environments as it has no multi node data replication. +For a production environment you may want a more sophisticated +storage solution. To disable local-storage, run: -``` -sudo k8s disable storage +```bash +sudo k8s disable local-storage ``` -Note that this will only remove the CSI driver. The persististent volume claim -will still be there and your data remain on disk. +Disabling storage only removes the CSI driver. The persistent volume claims +will still be available and your data will remain on disk. 
diff --git a/docs/src/index.md b/docs/src/index.md index 9c89b1c67..6f9897467 100644 --- a/docs/src/index.md +++ b/docs/src/index.md @@ -1,8 +1,8 @@ # Canonical Kubernetes documentation The Canonical K8s snap is a performant, lightweight, secure and opinionated distribution -of **Kubernetes** which includes all the components needed to create and manage -a scaleable cluster suitable for all use cases. +of **Kubernetes** which includes everything needed to create and manage +a scalable cluster suitable for all use cases. ![Illustration depicting working on components and clouds][logo] diff --git a/docs/src/reference/architecture.md b/docs/src/reference/architecture.md index 95988ab55..f13990b83 100644 --- a/docs/src/reference/architecture.md +++ b/docs/src/reference/architecture.md @@ -19,10 +19,10 @@ Two actors interact with the Kubernetes snap: Kubernetes API server. Out of the box our K8s distribution offers admin access to the cluster. That initial user is able to configure the cluster to match their needs and of course create other users that may or may not have - the same privileges. The K8s admin is also able to maintain workloads running + admin privileges. The K8s admin is also able to maintain workloads running in the cluster. -- **K8s user**: A user consuming the services hosted in the cluster. Users do +- **K8s user**: A user consuming the workloads hosted in the cluster. Users do not have access to the Kubernetes API server. They need to access the cluster through the options (nodeport, ingress, load-balancer) offered by the administrator who deployed the workload they are interested in. @@ -33,8 +33,8 @@ orchestrate the multi-node clustering operations. A set of external systems need to be easily integrated with our K8s distribution. 
We have identified the following: - - **Loadbalancer**: Although the K8s snap distribution comes with a - loadbalancer we expect the end customer environment to have a loadbalancer + - **Load Balancer**: Although the K8s snap distribution comes with a + load balancer we expect the end customer environment to have a load balancer and thus we need to integrate with it. - **Storage**: Kubernetes typically expects storage to be external to the cluster. The K8s snap comes with a local storage option but we still need to @@ -49,7 +49,7 @@ distribution. We have identified the following: ## The k8s snap -Looking more closely at what is conatined within the K8s snap istelf: +Looking more closely at what is contained within the K8s snap itself: ```{kroki} ../assets/k8s-container.puml ``` @@ -58,18 +58,15 @@ The `k8s` snap distribution includes the following: - **Kubectl**: through which users and other systems interact with Kubernetes and drive the cluster operations. -- **K8s upstream services**: These are Kubernetes binaries built from upstream - and shipped in the snap. -- **Components** are the workloads and features we deem important to be - available to our users and therefore are shipped in the snap and are enabled, - configured and disabled in a guided way. +- **K8s services**: These are all the Kubernetes services as well as core workloads + built from upstream and shipped in the snap. - State is backed up by **dqlite** by default, which keeps that state of the Kubernetes cluster as well as the state we maintain for the needs of the cluster operations. The cluster state may optionally be stored in a different, external datastore. - **Runtime**: `containerd` and `runc` are the shipped container runtimes. - **K8sd**: which implements the operations logic and exposes that - functionality via CLIs and REST APIs. + functionality via CLIs and APIs. ## K8sd @@ -79,19 +76,20 @@ needed for managing the Kubernetes cluster. 
```{kroki} ../assets/k8sd-component.puml ``` -At the core of the `k8sd` functionality we have the components and cluster -managers: The components manager is responsible for the workload features we +At the core of the `k8sd` functionality we have the cluster +manager that is responsible for configuring the services, workloads and features we deem important for a Kubernetes cluster. Namely: +- Kubernetes systemd services - DNS - CNI - ingress - gateway API - load-balancer -- local storage -- observability +- local-storage +- metrics-server -The cluster manager is responsible for implementing the formation of the +The cluster manager is also responsible for implementing the formation of the cluster. This includes operations such as joining/removing nodes into the cluster and reporting status. @@ -100,7 +98,7 @@ This functionality is exposed via the following interfaces: - The **CLI**: The CLI is available to only the root user on the K8s snap and all CLI commands are mapped to respective REST calls. -- The **API**: The JSON RPC API serves the CLI and is also used by the charm to +- The **API**: The API over HTTP serves the CLI and is also used to programmatically drive the Kubernetes cluster. diff --git a/docs/src/reference/community.md b/docs/src/reference/community.md index 9f2883511..6f943c4b5 100644 --- a/docs/src/reference/community.md +++ b/docs/src/reference/community.md @@ -25,8 +25,8 @@ but we promise to respond within three working days. ## Found a bug? -You can always track what is going on with development by tracking the -developments on GitHub. This is also the best place to file a bug if you find +You can always track what is going on with development by watching our GitHub +repository. This is also the best place to file a bug if you find one, or of course you are also welcome to contribute to the code. 
**Our commitment to you** - we monitor the issues on GitHub regularly and we diff --git a/docs/src/tutorial/add-remove-nodes.md b/docs/src/tutorial/add-remove-nodes.md index 3c6ef37c0..2339b94ad 100644 --- a/docs/src/tutorial/add-remove-nodes.md +++ b/docs/src/tutorial/add-remove-nodes.md @@ -21,19 +21,19 @@ In this article, "**control plane**" refers to the Multipass VM that operates th The first step is creating the VMs. -```sh -multipass launch 22.04 --name control-plane +```bash +multipass launch 22.04 --name control-plane -m 4G -d 8G ``` -```sh -multipass launch 22.04 --name worker +```bash +multipass launch 22.04 --name worker -m 4G -c 4 -d 8G ``` This step can take a few minutes as Multipass creates the new virtual machines. It's normal and expected. Install Canonical Kubernetes on both VMs with the following command: -```sh +```bash sudo snap install --classic --edge k8s ``` @@ -41,23 +41,16 @@ sudo snap install --classic --edge k8s Bootstrap the control plane node: -```sh +```bash sudo k8s bootstrap ``` -Then, enable two components necessary for nodes' communication. - -```sh -sudo k8s enable dns -sudo k8s enable network -``` - Canonical Kubernetes allows you to create two types of nodes: control plane and worker nodes. In this example, we're creating a worker node. Create the token for the worker node to join the cluster. -```sh +```bash sudo k8s add-node worker --worker ``` @@ -71,7 +64,7 @@ A base64 token will be printed to your terminal. Keep it handy as you will need To join the worker node to the cluster, run: -```sh +```bash sudo k8s join-cluster ``` @@ -83,13 +76,13 @@ To see what we've accomplished in this tutorial: If you created a control plane node, check that it joined successfully: -```sh +```bash sudo k8s status ``` If you created a worker node, verify with this command: -```sh +```bash sudo k8s kubectl get nodes ``` @@ -102,7 +95,7 @@ Congratulations! 
To delete the VMs from your system, two commands are needed: -```sh +```bash multipass remove control-plane multipass remove worker multipass purge diff --git a/docs/src/tutorial/getting-started.md b/docs/src/tutorial/getting-started.md index 3bbdf9f82..3c43360f1 100644 --- a/docs/src/tutorial/getting-started.md +++ b/docs/src/tutorial/getting-started.md @@ -10,15 +10,15 @@ ### 1. Install Canonical Kubernetes Install the Canonical Kubernetes snap with: -``` -sudo snap install --edge k8s --classic +```bash +sudo snap install k8s --edge --classic ``` ### 2. Bootstrap a Kubernetes Cluster Bootstrap a Kubernetes cluster with default configuration using: -``` +```bash sudo k8s bootstrap ``` @@ -26,7 +26,7 @@ This command initialises your cluster and configures your host system as a Kubernetes node. For custom configurations, you can explore additional options using: -``` +```bash sudo k8s bootstrap --help ``` @@ -35,53 +35,28 @@ sudo k8s bootstrap --help To confirm the installation was successful and your node is ready you should run: -``` +```bash sudo k8s status ``` -You should see `k8s is not ready` in the command output. This will -change once we've enabled the `network` and `dns` components. - -### 4. Enable Components (DNS, Network) - -With Canonical Kubernetes, you can enable and disable core components -such as DNS, gateway, ingress, network, and storage. For an overview -of components, see the [Components Overview](#TODO) - -DNS resolution is fundamental for communication between pods within -the cluster and is essential for any Kubernetes deployment. To enable -DNS resolution, run: - -``` -sudo k8s enable dns -``` - -To enable network connectivity execute: - -``` -sudo k8s enable network -``` - Run the following command to list all the pods in the `kube-system` namespace: -``` +```bash sudo k8s kubectl get pods -n kube-system ``` -You will observe three pods running: -- **Coredns**: Provides DNS resolution services. 
+You will observe at least three pods running: +- **CoreDNS**: Provides DNS resolution services. - **Network operator**: Manages the lifecycle of the networking solution. - **Network agent**: Facilitates network management. Confirm that Canonical Kubernetes has transitioned to the `k8s is ready` state by running: -``` +```bash sudo k8s status --wait-ready ``` -Note: To disable a component execute `sudo k8s disable ` - ### 5. Access Kubernetes The standard tool for deploying and managing workloads on Kuberenetes @@ -90,13 +65,13 @@ For convenience, Canonical Kubernetes bundles a version of kubectl for you to use with no extra setup or configuration. For example, to view your node you can run the command: -``` +```bash sudo k8s kubectl get nodes ``` …or to see the running services: -``` +```bash sudo k8s kubectl get services ``` @@ -108,7 +83,7 @@ command to do that as with any Kubernetes. Let's deploy a demo NGINX server: -``` +```bash sudo k8s kubectl create deployment nginx --image=nginx ``` This command launches a [pod](https://kubernetes.io/docs/concepts/workloads/pods/), @@ -117,7 +92,7 @@ running the nginx application within a container. You can check the status of your pods by running: -``` +```bash sudo k8s kubectl get pods ``` @@ -127,38 +102,39 @@ It may take a moment for the pod to be ready and running. ### 7. Remove an app To remove the NGINX workload, execute the following command: -``` + +```bash sudo k8s kubectl delete deployment nginx ``` To verify that the pod has been removed, you can check the status of pods by running: -``` +```bash sudo k8s kubectl get pods ``` -### 8. Enable Components (Storage) +### 8. Enable Local Storage In scenarios where you need to preserve application data beyond the lifecycle of the pod, Kubernetes provides persistent volumes. 
-With Canonical Kubernetes, you can enable storage to configure +With Canonical Kubernetes, you can enable local-storage to configure your storage solutions: -``` -sudo k8s enable storage +```bash +sudo k8s enable local-storage ``` -To verify that the storage component is enabled, execute: -``` +To verify that local-storage is enabled, execute: +```bash sudo k8s status ``` -You should see `storage enabled` in the command output. +You should see `local-storage enabled` in the command output. Let's create a `PersistentVolumeClaim` and use it in a `Pod`. For example, we can deploy the following manifest: -``` +```bash sudo k8s kubectl apply -f https://raw.githubusercontent.com/canonical/k8s-snap/main/docs/src/assets/tutorial-pod-with-pvc.yaml ``` This command deploys a pod based on the YAML configuration of a @@ -166,32 +142,30 @@ storage writer pod and a persistent volume claim with a capacity of 1G. To confirm that the persistent volume is up and running: -``` +```bash sudo k8s kubectl get pvc myclaim ``` You can inspect the storage-writer-pod with: -``` +```bash sudo k8s kubectl describe pod storage-writer-pod ``` -### 9. Disable Components (Storage) +### 9. Disable Local Storage Begin by removing the pod along with the persistent volume claim: -``` +```bash sudo k8s kubectl delete pvc myclaim sudo k8s kubectl delete pod storage-writer-pod ``` -Next, disable the storage component: +Next, disable local-storage: +```bash +sudo k8s disable local-storage ``` -sudo k8s disable storage -``` - -Note: To disable any component, execute `sudo k8s disable `. ### 10. Remove Canonical Kubernetes (Optional) diff --git a/docs/src/tutorial/kubectl.md b/docs/src/tutorial/kubectl.md index 10603a186..23e294bed 100644 --- a/docs/src/tutorial/kubectl.md +++ b/docs/src/tutorial/kubectl.md @@ -2,14 +2,14 @@ Kubernetes provides a command line tool for communicating with a Kubernetes cluster's control plane, using the Kubernetes API. 
This guide outlines how some of the everyday operations of your -Charmed Kubernetes cluster can be managed with this tool. +Kubernetes cluster can be managed with this tool. ## What you will need Before you begin, make sure you have the following: - A bootstrapped Canonical Kubernetes cluster (See - [Getting Started](https://github.com/canonical/k8s-snap/blob/main/docs/src/tutorial/getting-started.md)) + [Getting Started](getting-started.md)) - You are using the built-in `kubectl` command from the snap. ### 1. The Kubectl Command @@ -24,7 +24,7 @@ original upstream source into the `k8s` snap you have installed. To access `kubectl`, run the following command: -```sh +```bash sudo k8s kubectl ``` @@ -35,7 +35,7 @@ sudo k8s kubectl In Canonical Kubernetes, the `kubeconfig` file that is being read to display the configuration when you run `kubectl config view` lives at -`/snap/k8scurrent/k8s/config/kubeconfig`. You can change this by setting a +`/etc/kubernetes/admin.conf`. You can change this by setting a `KUBECONFIG` environment variable or passing the `--kubeconfig` flag to a command. @@ -50,7 +50,7 @@ guide. To see what pods were created when we enabled the `network` and `dns` components: -```sh +```bash sudo k8s kubectl get pods -o wide -n kube-system ``` @@ -59,7 +59,7 @@ You should be seeing the network operator, networking agent and coredns pods. > **Note**: If you see an error message here, it is likely that you forgot to > bootstrap your cluster. -```sh +```bash sudo k8s kubectl get services --all-namespace ``` @@ -71,20 +71,20 @@ will communicate. Let's deploy an NGINX server using this command: -```sh +```bash sudo k8s kubectl create deployment nginx --image=nginx:latest ``` To observe the NGINX pod running in the default namespace: -```sh +```bash sudo k8s kubectl get pods ``` Let's now scale this deployment, which means increasing the number of pods it manages. 
-```sh +```bash sudo k8s kubectl scale deployment nginx --replicas=3 ``` @@ -97,13 +97,13 @@ declared state of the cluster is maintained. First, open a new terminal so you can watch the changes as they happen. Run this command in a new terminal: -```sh +```bash sudo k8s kubectl get pods --all-namespace --watch ``` Now, go back to your original terminal and run: -```sh +```bash sudo k8s kubectl delete pods -l app=nginx ```