diff --git a/RELEASE b/RELEASE index f5bdd09a4e..f54adf16bf 100644 --- a/RELEASE +++ b/RELEASE @@ -1,6 +1,6 @@ # Generated by `make release` command. # DO NOT EDIT. -tag: v0.49.4 +tag: v0.50.0 releaseNoteGenerator: showCommitter: false diff --git a/docs/config.toml b/docs/config.toml index b78d7c0afe..b28b1a0368 100644 --- a/docs/config.toml +++ b/docs/config.toml @@ -167,6 +167,10 @@ no = 'Sorry to hear that. Please + This page describes several core concepts in PipeCD. +--- + +![](/images/architecture-overview.png) +

+Component Architecture +

+ +### Piped + +`piped` is a single binary component you run as an agent in your cluster or your local network to handle the deployment tasks. +It can be run inside a Kubernetes cluster by simply starting a Pod or a Deployment. +This component is designed to be stateless, so it can also be run in a single VM or even your local machine. + +### Control Plane + +A centralized component that manages deployment data and provides gRPC API for connecting `piped`s as well as all web-functionalities of PipeCD such as +authentication, showing deployment list/details, application list/details, delivery insights... + +### Project + +A project is a logical group of applications to be managed by a group of users. +Each project can have multiple `piped` instances from different clouds or environments. + +There are three types of project roles: + +- **Viewer** has only permissions of viewing to deployment and application in the project. +- **Editor** has all viewer permissions, plus permissions for actions that modify state such as manually trigger/cancel the deployment. +- **Admin** has all editor permissions, plus permissions for managing project data, managing project `piped`. + +### Application + +A collection of resources (containers, services, infrastructure components...) and configurations that are managed together. +PipeCD supports multiple kinds of applications such as `KUBERNETES`, `TERRAFORM`, `ECS`, `CLOUDRUN`, `LAMBDA`... + +### Application Configuration + +A YAML file that contains information to define and configure an application. +Each application requires one file at application directory stored in the Git repository. +The default file name is `app.pipecd.yaml`. + +### Application Directory + +A directory in Git repository containing application configuration file and application manifests. +Each application must have one application directory. 
+ +### Deployment + +A deployment is a process that transitions from the current state (running state) to the desired state (specified state in Git) of a specific application. +When the deployment is successful, it means the running state is being synced with the desired state specified in the target commit. + +### Sync Strategy + +There are 3 strategies that PipeCD supports while syncing your application state with its configuration stored in Git. Which are: +- Quick Sync: a fast way to make the running application state the same as its Git stored configuration. The generated pipeline contains only one predefined `SYNC` stage. +- Pipeline Sync: sync the running application state with its Git stored configuration through a pipeline defined in its application configuration. +- Auto Sync: depends on your defined application configuration, `piped` will decide the best way to sync your application state with its Git stored configuration. + +### Platform Provider + +Note: The previous name of this concept was Cloud Provider. + +PipeCD supports multiple platforms and multiple kinds of applications. +Platform Provider defines which platform, cloud and where the application should be deployed to. + +Currently, PipeCD is supporting these five platform providers: `KUBERNETES`, `ECS`, `TERRAFORM`, `CLOUDRUN`, `LAMBDA`. + +### Analysis Provider +An external product that provides metrics/logs to evaluate deployments, such as `Prometheus`, `Datadog`, `Stackdriver`, `CloudWatch` and so on. +It is mainly used in the [Automated deployment analysis](../user-guide/managing-application/customizing-deployment/automated-deployment-analysis/) context. 
diff --git a/docs/content/en/docs-v0.50.x/contribution-guidelines/_index.md b/docs/content/en/docs-v0.50.x/contribution-guidelines/_index.md new file mode 100755 index 0000000000..b47753d9aa --- /dev/null +++ b/docs/content/en/docs-v0.50.x/contribution-guidelines/_index.md @@ -0,0 +1,7 @@ +--- +title: "Contributor Guide" +linkTitle: "Contributor Guide" +weight: 6 +description: > + This guide is for anyone who want to contribute to PipeCD project. We are so excited to have you! +--- diff --git a/docs/content/en/docs-v0.50.x/contribution-guidelines/architectural-overview.md b/docs/content/en/docs-v0.50.x/contribution-guidelines/architectural-overview.md new file mode 100644 index 0000000000..c7569db0f4 --- /dev/null +++ b/docs/content/en/docs-v0.50.x/contribution-guidelines/architectural-overview.md @@ -0,0 +1,36 @@ +--- +title: "Architectural overview" +linkTitle: "Architectural overview" +weight: 3 +description: > + This page describes the architecture of PipeCD. +--- + +![](/images/architecture-overview.png) +

+Component Architecture +

+ +### Piped + +A single binary component runs in your cluster, your local network to handle the deployment tasks. +It can be run inside a Kubernetes cluster by simply starting a Pod or a Deployment. +This component is designed to be stateless, so it can also be run in a single VM or even your local machine. + +### Control Plane + +A centralized component manages deployment data and provides gRPC API for connecting `piped`s as well as all web-functionalities of PipeCD such as +authentication, showing deployment list/details, application list/details, delivery insights... + +Control Plane contains the following components: +- `server`: a service to provide api for piped, web and serve static assets for web. +- `ops`: a service to provide administrative features for Control Plane owner like adding/managing projects. +- `cache`: a redis cache service for caching internal data. +- `datastore`: data storage for storing deployment, application data + - this can be a fully-managed service such as `Firestore`, `Cloud SQL`... + - or a self-managed service such as `MySQL` +- `filestore`: file storage for storing logs, application states + - this can be a fully-managed service such as `GCS`, `S3`... + - or a self-managed service such as `Minio` + +For more information, see [Architecture overview of Control Plane](../../user-guide/managing-controlplane/architecture-overview/). diff --git a/docs/content/en/docs-v0.50.x/contribution-guidelines/contributing.md b/docs/content/en/docs-v0.50.x/contribution-guidelines/contributing.md new file mode 100644 index 0000000000..87eb1a51c0 --- /dev/null +++ b/docs/content/en/docs-v0.50.x/contribution-guidelines/contributing.md @@ -0,0 +1,9 @@ +--- +title: "Contributing" +linkTitle: "Contributing" +weight: 1 +description: > + This page describes how to contribute to PipeCD. +--- + +PipeCD is an open source project that anyone in the community can use, improve, and enjoy. We'd love you to join us! 
[Contributing to PipeCD](https://github.com/pipe-cd/pipecd/tree/master/CONTRIBUTING.md) is the best place to start with. \ No newline at end of file diff --git a/docs/content/en/docs-v0.50.x/examples/_index.md b/docs/content/en/docs-v0.50.x/examples/_index.md new file mode 100755 index 0000000000..23ee01ffba --- /dev/null +++ b/docs/content/en/docs-v0.50.x/examples/_index.md @@ -0,0 +1,93 @@ +--- +title: "Examples" +linkTitle: "Examples" +weight: 7 +description: > + Some examples of PipeCD in action! +--- + +One of the best ways to see what PipeCD can do, and learn how to deploy your applications with it, is to see some real examples. + +We have prepared some examples for each kind of application. +The examples can be found at the following repository: + +https://github.com/pipe-cd/examples + +### Kubernetes Applications + +| Name | Description | +|-----------------------------------------------------------------------------|-------------| +| [simple](https://github.com/pipe-cd/examples/tree/master/kubernetes/simple) | Deploy plain-yaml manifests in application directory without using pipeline. | +| [helm-local-chart](https://github.com/pipe-cd/examples/tree/master/kubernetes/helm-local-chart) | Deploy a helm chart sourced from the same Git repository. | +| [helm-remote-chart](https://github.com/pipe-cd/examples/tree/master/kubernetes/helm-remote-chart) | Deploy a helm chart sourced from a [Helm Chart Repository](https://helm.sh/docs/topics/chart_repository/). | +| [helm-remote-git-chart](https://github.com/pipe-cd/examples/tree/master/kubernetes/helm-remote-git-chart) | Deploy a helm chart sourced from another Git repository. | +| [kustomize-local-base](https://github.com/pipe-cd/examples/tree/master/kubernetes/kustomize-local-base) | Deploy a kustomize package that just uses the local bases from the same Git repository. 
| +| [kustomize-remote-base](https://github.com/pipe-cd/examples/tree/master/kubernetes/kustomize-remote-base) | Deploy a kustomize package that uses remote bases from other Git repositories. | +| [canary](https://github.com/pipe-cd/examples/tree/master/kubernetes/canary) | Deployment pipeline with canary strategy. | +| [canary-by-config-change](https://github.com/pipe-cd/examples/tree/master/kubernetes/canary-by-config-change) | Deployment pipeline with canary strategy when ConfigMap was changed. | +| [canary-patch](https://github.com/pipe-cd/examples/tree/master/kubernetes/canary-patch) | Demonstrate how to customize manifests for Canary variant using [patches](../user-guide/configuration-reference/#kubernetescanaryrolloutstageoptions) option. | +| [bluegreen](https://github.com/pipe-cd/examples/tree/master/kubernetes/bluegreen) | Deployment pipeline with bluegreen strategy. This also contains a manual approval stage. | +| [mesh-istio-canary](https://github.com/pipe-cd/examples/tree/master/kubernetes/mesh-istio-canary) | Deployment pipeline with canary strategy by using Istio for traffic routing. | +| [mesh-istio-bluegreen](https://github.com/pipe-cd/examples/tree/master/kubernetes/mesh-istio-bluegreen) | Deployment pipeline with bluegreen strategy by using Istio for traffic routing. | +| [mesh-smi-canary](https://github.com/pipe-cd/examples/tree/master/kubernetes/mesh-smi-canary) | Deployment pipeline with canary strategy by using SMI for traffic routing. | +| [mesh-smi-bluegreen](https://github.com/pipe-cd/examples/tree/master/kubernetes/mesh-smi-bluegreen) | Deployment pipeline with bluegreen strategy by using SMI for traffic routing. | +| [wait-approval](https://github.com/pipe-cd/examples/tree/master/kubernetes/wait-approval) | Deployment pipeline that contains a manual approval stage. | +| [multi-steps-canary](https://github.com/pipe-cd/examples/tree/master/kubernetes/multi-steps-canary) | Deployment pipeline with multiple canary steps. 
| +| [analysis-by-metrics](https://github.com/pipe-cd/examples/tree/master/kubernetes/analysis-by-metrics) | Deployment pipeline with analysis stage by metrics. | +| [analysis-by-http](https://github.com/pipe-cd/examples/tree/master/kubernetes/analysis-by-http) | Deployment pipeline with analysis stage by running http requests. | +| [analysis-by-log](https://github.com/pipe-cd/examples/tree/master/kubernetes/analysis-by-log) | Deployment pipeline with analysis stage by checking logs. | +| [analysis-with-baseline](https://github.com/pipe-cd/examples/tree/master/kubernetes/analysis-with-baseline) | Deployment pipeline with analysis stage by comparing baseline and canary. | +| [secret-management](https://github.com/pipe-cd/examples/tree/master/kubernetes/secret-management) | Demonstrate how to manage sensitive data by using [Secret Management](../user-guide/managing-application/secret-management/) feature. | + +### Terraform Applications + +| Name | Description | +|-----------------------------------------------------------------------------|-------------| +| [simple](https://github.com/pipe-cd/examples/tree/master/terraform/simple) | Automatically applies when any changes were detected. | +| [local-module](https://github.com/pipe-cd/examples/tree/master/terraform/local-module) | Deploy application that using local terraform modules from the same Git repository. | +| [remote-module](https://github.com/pipe-cd/examples/tree/master/terraform/remote-module) | Deploy application that using remote terraform modules from other Git repositories. | +| [wait-approval](https://github.com/pipe-cd/examples/tree/master/terraform/wait-approval) | Deployment pipeline that contains a manual approval stage. | +| [autorollback](https://github.com/pipe-cd/examples/tree/master/terraform/autorollback) | Automatically rollback the changes when deployment was failed. 
| +| [secret-management](https://github.com/pipe-cd/examples/tree/master/terraform/secret-management) | Demonstrate how to manage sensitive data by using [Secret Management](../user-guide/managing-application/secret-management/) feature. | + +### Cloud Run Applications + +| Name | Description | +|-----------------------------------------------------------------------------|-------------| +| [simple](https://github.com/pipe-cd/examples/tree/master/cloudrun/simple) | Quick sync by rolling out the new version and switching all traffic to it. | +| [canary](https://github.com/pipe-cd/examples/tree/master/cloudrun/canary) | Deployment pipeline with canary strategy. | +| [analysis](https://github.com/pipe-cd/examples/tree/master/cloudrun/analysis) | Deployment pipeline that contains an analysis stage. | +| [secret-management](https://github.com/pipe-cd/examples/tree/master/cloudrun/secret-management) | Demonstrate how to manage sensitive data by using [Secret Management](../user-guide/managing-application/secret-management/) feature. | +| [wait-approval](https://github.com/pipe-cd/examples/tree/master/cloudrun/wait-approval) | Deployment pipeline that contains a manual approval stage. | + +### Lambda Applications + +| Name | Description | +|-----------------------------------------------------------------------------|-------------| +| [simple](https://github.com/pipe-cd/examples/tree/master/lambda/simple) | Quick sync by rolling out the new version and switching all traffic to it. | +| [canary](https://github.com/pipe-cd/examples/tree/master/lambda/canary) | Deployment pipeline with canary strategy. | +| [analysis](https://github.com/pipe-cd/examples/tree/master/lambda/analysis) | Deployment pipeline that contains an analysis stage. | +| [secret-management](https://github.com/pipe-cd/examples/tree/master/lambda/secret-management) | Demonstrate how to manage sensitive data by using [Secret Management](../user-guide/managing-application/secret-management/) feature. 
| +| [wait-approval](https://github.com/pipe-cd/examples/tree/master/lambda/wait-approval) | Deployment pipeline that contains a manual approval stage. | +| [remote-git](https://github.com/pipe-cd/examples/tree/master/lambda/remote-git) | Deploy the lambda code sourced from another Git repository. | +| [zip-packing-s3](https://github.com/pipe-cd/examples/tree/master/lambda/zip-packing-s3) | Deployment pipeline of kind Lambda which uses s3 stored zip file as function code. | + +### ECS Applications + +| Name | Description | +|-----------------------------------------------------------------------------|-------------| +| [simple](https://github.com/pipe-cd/examples/tree/master/ecs/simple) | Quick sync by rolling out the new version and switching all traffic to it. | +| [simple-via-servicediscovery](https://github.com/pipe-cd/examples/tree/master/ecs/servicediscovery/simple) | Quick sync by rolling out the new version and switching all traffic to it for ECS Service Discovery. | +| [canary](https://github.com/pipe-cd/examples/tree/master/ecs/canary) | Deployment pipeline with canary strategy. | +| [canary-via-servicediscovery](https://github.com/pipe-cd/examples/tree/master/ecs/servicediscovery/canary) | Deployment pipeline with canary strategy for ECS Service Discovery. | +| [bluegreen](https://github.com/pipe-cd/examples/tree/master/ecs/bluegreen) | Deployment pipeline with blue-green strategy. | +| [secret-management](https://github.com/pipe-cd/examples/tree/master/ecs/secret-management) | Demonstrate how to manage sensitive data by using [Secret Management](../user-guide/managing-application/secret-management/) feature. | +| [wait-approval](https://github.com/pipe-cd/examples/tree/master/ecs/wait-approval) | Deployment pipeline that contains a manual approval stage. | +| [standalone-task](https://github.com/pipe-cd/examples/tree/master/ecs/standalone-task) | Deployment Standalone Task. 
(`Standalone task is only supported for Quick sync`) | + + +### Deployment chain + +| Name | Description | +|-----------------------------------------------------------------------------|-------------| +| [simple](https://github.com/pipe-cd/examples/tree/master/deployment-chain/simple) | Simple deployment chain which uses application name as a filter in chain configuration. | diff --git a/docs/content/en/docs-v0.50.x/faq/_index.md b/docs/content/en/docs-v0.50.x/faq/_index.md new file mode 100644 index 0000000000..e4a99acc8c --- /dev/null +++ b/docs/content/en/docs-v0.50.x/faq/_index.md @@ -0,0 +1,66 @@ +--- +title: "FAQ" +linkTitle: "FAQ" +weight: 9 +description: > + List of frequently asked questions. +--- + +If you have any other questions, please feel free to create an issue in the [pipe-cd/pipecd](https://github.com/pipe-cd/pipecd/issues/new/choose) repository or contact us on [Cloud Native Slack](https://slack.cncf.io) (channel [#pipecd](https://app.slack.com/client/T08PSQ7BQ/C01B27F9T0X)). + +### 1. What kind of application (platform provider) will be supported? + +Currently, PipeCD can be used to deploy `Kubernetes`, `ECS`, `Terraform`, `CloudRun`, `Lambda` applications. + +In the near future we also want to support `Crossplane`... + +### 2. What kind of templating methods for Kubernetes application will be supported? + +Currently, PipeCD is supporting `Helm` and `Kustomize` as templating method for Kubernetes applications. + +### 3. Istio is supported now? + +Yes, you can use PipeCD for both mesh (Istio, SMI) applications and non-mesh applications. + +### 4. What are the differences between PipeCD and FluxCD? + +- Not just Kubernetes applications, PipeCD also provides a unified interface for other cloud services (CloudRun, AWS Lambda...) 
and Terraform +- One tool for both GitOps sync and progressive deployment +- Supports multiple Git repositories +- Has web UI for better visibility + - Log viewer for each deployment + - Visualization of application component/state in realtime + - Show configuration drift in realtime +- Also supports Canary and BlueGreen for non-mesh applications +- Has built-in secrets management +- Shows the delivery performance insights + +### 5. What are the differences between PipeCD and ArgoCD? + +- Not just Kubernetes applications, PipeCD also provides a unified interface for other cloud services (GCP CloudRun, AWS Lambda...) and Terraform +- One tool for both GitOps sync and progressive deployment +- Don't need another CRD or changing the existing manifests for doing Canary/BlueGreen. PipeCD just uses the standard Kubernetes deployment object +- Easier and safer to operate multi-tenancy, multi-cluster for multiple teams (even some teams are running in a private/restricted network) +- Has built-in secrets management +- Shows the delivery performance insights + +### 6. What should I do if I lost my Piped key? + +You can create a new Piped key. Go to the `Piped` tab at `Settings` page, and click the vertical ellipsis of the Piped that you would like to create the new Piped key. Don't forget to delete the old key, too. + +### 7. What is the strong point if PipeCD is used only for Kubernetes? + +- Simple interface, easy to understand no extra CRD required +- Easy to install, upgrade, and manage (both the ControlPlane and the agent Piped) +- Not strict depend on any Kubernetes API, not being part of issues for your Kubernetes cluster versioning upgrade +- Easy to interact with any CI; Plan preview feature gives you an early look at what will be changed in your cluster even before manifests update +- Insights show metrics like lead time, deployment frequency, MTTR, and change failure rate to measure delivery performance + +### 8. Is it open source? 
+ +Yes, PipeCD is a fully open source project with APACHE LICENSE, VERSION 2.0!! + +### 9. How should I investigate high CPU usage or memory usage in piped, or when OOM occurs? + +If you're noticing high CPU usage, memory usage, or facing OOM issues in Piped, you can use the built-in support for `pprof`, a tool for visualization and analysis of profiling data. +`pprof` can help you identify the parts of your application that are consuming the most resources. For more detailed information and examples of how to use `pprof` in Piped, please refer to our [Using Pprof in Piped guide](../managing-piped/using-pprof-in-piped). diff --git a/docs/content/en/docs-v0.50.x/feature-status/_index.md b/docs/content/en/docs-v0.50.x/feature-status/_index.md new file mode 100644 index 0000000000..fe69106533 --- /dev/null +++ b/docs/content/en/docs-v0.50.x/feature-status/_index.md @@ -0,0 +1,142 @@ +--- +title: "Feature Status" +linkTitle: "Feature Status" +weight: 8 +description: > + This page lists the relative maturity of every PipeCD feature. +--- + +Please note that the phases (Incubating, Alpha, Beta, and Stable) are applied to individual features within the project, not to the project as a whole. + +## Feature Phase Definitions + +| Phase | Definition | +|-|-| +| Incubating | Under planning/developing the prototype and still not ready to be used. | +| Alpha | Demo-able, works end-to-end but has limitations. No guarantees on backward compatibility. | +| Beta | **Usable in production**. Documented. | +| Stable | Production hardened. Backward compatibility. Documented. | +## Provider + +### Kubernetes + +| Feature | Phase | +|-|-| +| Quick sync deployment | Beta | +| Deployment with a defined pipeline (e.g. 
canary, analysis) | Beta | +| [Automated rollback](../user-guide/managing-application/rolling-back-a-deployment/) | Beta | +| [Automated configuration drift detection](../user-guide/managing-application/configuration-drift-detection/) | Beta | +| [Application live state](../user-guide/managing-application/application-live-state/) | Beta | +| Support Helm | Beta | +| Support Kustomize | Beta | +| Support Istio service mesh | Beta | +| Support SMI service mesh | Incubating | +| [Plan preview](../user-guide/plan-preview) | Beta | +| [Manifest attachment](../user-guide/managing-application/manifest-attachment) | Alpha | + +### Terraform + +| Feature | Phase | +|-|-| +| Quick sync deployment | Beta | +| Deployment with a defined pipeline (e.g. manual-approval) | Beta | +| [Automated rollback](../user-guide/managing-application/rolling-back-a-deployment/) | Beta | +| [Automated configuration drift detection](../user-guide/managing-application/configuration-drift-detection/) | Alpha | +| [Application live state](../user-guide/managing-application/application-live-state/) | Incubating | +| [Plan preview](../user-guide/plan-preview) | Beta | +| [Manifest attachment](../user-guide/managing-application/manifest-attachment) | Alpha | + +### Cloud Run + +| Feature | Phase | +|-|-| +| Quick sync deployment | Beta | +| Deployment with a defined pipeline (e.g. canary, analysis) | Beta | +| [Automated rollback](../user-guide/managing-application/rolling-back-a-deployment/) | Beta | +| [Automated configuration drift detection](../user-guide/managing-application/configuration-drift-detection/) | Beta | +| [Application live state](../user-guide/managing-application/application-live-state/) | Beta | +| [Plan preview](../user-guide/plan-preview) | Beta | +| [Manifest attachment](../user-guide/managing-application/manifest-attachment) | Alpha | + +### Lambda + +| Feature | Phase | +|-|-| +| Quick sync deployment | Beta | +| Deployment with a defined pipeline (e.g. 
canary, analysis) | Beta | +| [Automated rollback](../user-guide/managing-application/rolling-back-a-deployment/) | Beta | +| [Automated configuration drift detection](../user-guide/managing-application/configuration-drift-detection/) | Alpha | +| [Application live state](../user-guide/managing-application/application-live-state/) | Alpha | +| [Plan preview](../user-guide/plan-preview) | Alpha | +| [Manifest attachment](../user-guide/managing-application/manifest-attachment) | Alpha | + +### Amazon ECS + +| Feature | Phase | +|-|-| +| Quick sync deployment | Beta | +| Deployment with a defined pipeline (e.g. canary, analysis) | Beta | +| [Automated rollback](../user-guide/managing-application/rolling-back-a-deployment/) | Beta | +| [Automated configuration drift detection](../user-guide/managing-application/configuration-drift-detection/) | Alpha *1 | +| [Application live state](../user-guide/managing-application/application-live-state/) | Alpha *1 | +| Quick sync deployment for [ECS Service Discovery](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-discovery.html) | Alpha | +| Deployment with a defined pipeline for [ECS Service Discovery](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-discovery.html) | Alpha | +| [Plan preview](../user-guide/plan-preview) | Alpha | +| [Manifest attachment](../user-guide/managing-application/manifest-attachment) | Alpha | + +*1. Not supported yet for standalone tasks. 
+ +## Piped Agent + +| Feature | Phase | +|-|-| +| [Deployment wait stage](../user-guide/managing-application/customizing-deployment/adding-a-wait-stage/) | Beta | +| [Deployment manual approval stage](../user-guide/managing-application/customizing-deployment/adding-a-manual-approval/) | Beta | +| [Notification](../user-guide/managing-piped/configuring-notifications/) to Slack | Beta | +| [Notification](../user-guide/managing-piped/configuring-notifications/) to external service via webhook | Beta | +| [Secrets management](../user-guide/managing-application/secret-management/) - Storing secrets safely in the Git repository | Beta | +| [Event watcher](../user-guide/event-watcher/) - Updating files in Git automatically for given events | Beta | +| [Pipectl](../user-guide/command-line-tool/) - Command-line tool for interacting with Control Plane | Beta | +| Deployment plugin - Allow executing user-created deployment plugin | Incubating | +| [ADA](../user-guide/managing-application/customizing-deployment/automated-deployment-analysis/) (Automated Deployment Analysis) by Prometheus metrics | Beta | +| [ADA](../user-guide/managing-application/customizing-deployment/automated-deployment-analysis/) by Datadog metrics | Beta | +| [ADA](../user-guide/managing-application/customizing-deployment/automated-deployment-analysis/) by Stackdriver metrics | Incubating | +| [ADA](../user-guide/managing-application/customizing-deployment/automated-deployment-analysis/) by Stackdriver log | Incubating | +| [ADA](../user-guide/managing-application/customizing-deployment/automated-deployment-analysis/) by CloudWatch metrics | Incubating | +| [ADA](../user-guide/managing-application/customizing-deployment/automated-deployment-analysis/) by CloudWatch log | Incubating | +| [ADA](../user-guide/managing-application/customizing-deployment/automated-deployment-analysis/) by HTTP request (smoke test...) 
| Incubating | +| [Remote upgrade](../user-guide/managing-piped/remote-upgrade-remote-config/#remote-upgrade) - Ability to upgrade Piped from the web console | Beta | +| [Remote config](../user-guide/managing-piped/remote-upgrade-remote-config/#remote-config) - Watch and reload configuration from a remote location such as Git | Beta | + +## Control Plane + +| Feature | Phase | +|-|-| +| Project/Piped/Application/Deployment management | Beta | +| Rendering deployment pipeline in realtime | Beta | +| Canceling a deployment from console | Beta | +| Triggering a deployment manually from console | Beta | +| RBAC on PipeCD resources such as Application, Piped... | Alpha | +| Authentication by username/password for static admin | Beta | +| GitHub & GitHub Enterprise Server SSO | Beta | +| Support GCP [Firestore](https://cloud.google.com/firestore) as data store | Beta | +| Support [MySQL v8.0](https://www.mysql.com/) as data store | Beta | +| Support file store as data store | Alpha | +| Support GCP [GCS](https://cloud.google.com/storage) as file store | Beta | +| Support AWS [S3](https://aws.amazon.com/s3/) as file store | Beta | +| Support [Minio](https://github.com/minio/minio) as file store | Beta | +| Support using file storage such as GCS, S3, Minio for both data store and file store (It means no database is required to run control plane) | Incubating | +| [Insights](../user-guide/insights/) - Show the delivery performance of a team or an application | Incubating | +| [Deployment Chain](../user-guide/managing-application/deployment-chain/) - Allow rolling out to multiple clusters gradually or promoting across environments | Alpha | +| [Metrics](../user-guide/managing-controlplane/metrics/) - Dashboards for PipeCD and Piped metrics | Beta | + +## [pipectl](../user-guide/command-line-tool/) + +### [pipectl init](../user-guide/command-line-tool.md#generating-an-application-config-apppipecdyaml) + +| Feature | Phase | +|-|-| +| Kubernetes - QuickSync | Incubating | +| 
ECS - QuickSync | Alpha | +| ECS - Pipeline Sync | Incubating | diff --git a/docs/content/en/docs-v0.50.x/installation/_index.md b/docs/content/en/docs-v0.50.x/installation/_index.md new file mode 100644 index 0000000000..76a1629a37 --- /dev/null +++ b/docs/content/en/docs-v0.50.x/installation/_index.md @@ -0,0 +1,20 @@ +--- +title: "Installation" +linkTitle: "Installation" +weight: 4 +description: > + Complete guideline for installing and configuring PipeCD on your own. +--- + +Before starting to install PipeCD, let’s have a look at PipeCD’s components, determine your role, and which components you will interact with while installing/using PipeCD. You’re recommended to read about PipeCD’s [Control Plane](../concepts/#control-plane) and [Piped](../concepts/#piped) on the concepts page. + +![](/images/architecture-overview-with-roles.png) +

+PipeCD's components with roles +

+ +Basically, there are two types of users/roles that exist in the PipeCD system, which are: +- Developers/Production team: Users who use PipeCD to manage their applications’ deployments. You will interact with Piped and may or may not need to install Piped by yourself. +- Operators/Platform team: Users who operate PipeCD so that other developers can use it. You will interact with the Control Plane and Piped; you will be the one who installs the Control Plane and keeps it up for other Pipeds to connect while managing their applications’ deployments. + +This section contains the guideline for installing PipeCD's Control Plane and Piped step by step. You can choose what to read based on your roles. diff --git a/docs/content/en/docs-v0.50.x/installation/install-control-plane/_index.md b/docs/content/en/docs-v0.50.x/installation/install-control-plane/_index.md new file mode 100644 index 0000000000..fe68c06c8f --- /dev/null +++ b/docs/content/en/docs-v0.50.x/installation/install-control-plane/_index.md @@ -0,0 +1,9 @@ +--- +title: "Install Control Plane" +linkTitle: "Install Control Plane" +weight: 3 +description: > + This page describes how to install a control plane. +--- + +Since Control Plane is a centralized component that manages deployment data and provides gRPC API, it needs some components for storing data or credentials... and so on. We explain how to deploy Control Plane components. diff --git a/docs/content/en/docs-v0.50.x/installation/install-control-plane/installing-controlplane-on-ECS.md b/docs/content/en/docs-v0.50.x/installation/install-control-plane/installing-controlplane-on-ECS.md new file mode 100644 index 0000000000..acf89b201e --- /dev/null +++ b/docs/content/en/docs-v0.50.x/installation/install-control-plane/installing-controlplane-on-ECS.md @@ -0,0 +1,12 @@ +--- +title: "Installing Control Plane on ECS" +linkTitle: "Installing Control Plane on ECS" +weight: 2 +description: > + This page describes how to install control plane on ECS. 
+--- + +Currently, we provide the example of deploying Control Plane to ECS using terraform. + +Please refer to the blog post :) +[PipeCD best practice 02 - control plane on ECS]({{< ref "/blog/control-plane-on-ecs.md" >}} "PipeCD best practice 02 - control plane on ECS"). diff --git a/docs/content/en/docs-v0.50.x/installation/install-control-plane/installing-controlplane-on-k8s.md b/docs/content/en/docs-v0.50.x/installation/install-control-plane/installing-controlplane-on-k8s.md new file mode 100644 index 0000000000..b41a8c71bd --- /dev/null +++ b/docs/content/en/docs-v0.50.x/installation/install-control-plane/installing-controlplane-on-k8s.md @@ -0,0 +1,172 @@ +--- +title: "Installing Control Plane on Kubernetes" +linkTitle: "Installing Control Plane on Kubernetes" +weight: 1 +description: > + This page describes how to install control plane on a Kubernetes cluster. +--- + +## Prerequisites + +- Having a running Kubernetes cluster +- Installed [Helm](https://helm.sh/docs/intro/install/) (3.8.0 or later) + +## Installation + +### 1. Preparing an encryption key + +PipeCD requires a key for encrypting sensitive data or signing JWT token while authenticating. You can use one of the following commands to generate an encryption key. + +``` console +openssl rand 64 | base64 > encryption-key + +# or +cat /dev/urandom | head -c64 | base64 > encryption-key +``` + +### 2. Preparing Control Plane configuration file and installing + +![](/images/control-plane-components.png) +

+Control Plane Architecture +

+ +The Control Plane of PipeCD is constructed by several components, as shown in the above graph (for more in detail please read [Control Plane architecture overview docs](../../../user-guide/managing-controlplane/architecture-overview/)). As mentioned in the graph, the PipeCD's data can be stored in one of the provided fully-managed or self-managed services. So you have to decide which kind of [data store](../../../user-guide/managing-controlplane/architecture-overview/#data-store) and [file store](../../../user-guide/managing-controlplane/architecture-overview/#file-store) you want to use and prepare a Control Plane configuration file suitable for that choice. + +#### Using Firestore and GCS + +PipeCD requires a GCS bucket and service account files to access Firestore and GCS service. Here is an example of configuration file: + +``` yaml +apiVersion: "pipecd.dev/v1beta1" +kind: ControlPlane +spec: + stateKey: {RANDOM_STRING} + datastore: + type: FIRESTORE + config: + namespace: pipecd + environment: dev + project: {YOUR_GCP_PROJECT_NAME} + # Must be a service account with "Cloud Datastore User" and "Cloud Datastore Index Admin" roles + # since PipeCD needs them to creates the needed Firestore composite indexes in the background. + credentialsFile: /etc/pipecd-secret/firestore-service-account + filestore: + type: GCS + config: + bucket: {YOUR_BUCKET_NAME} + # Must be a service account with "Storage Object Admin (roles/storage.objectAdmin)" role on the given bucket + # since PipeCD need to write file object such as deployment log file to that bucket. + credentialsFile: /etc/pipecd-secret/gcs-service-account +``` + +See [ConfigurationReference](../../../user-guide/managing-controlplane/configuration-reference/) for the full configuration. 
+ +After that, install the Control Plane as below: + +``` console +helm upgrade -i pipecd oci://ghcr.io/pipe-cd/chart/pipecd --version {{< blocks/latest_version >}} --namespace={NAMESPACE} \ + --set-file config.data=path-to-control-plane-configuration-file \ + --set-file secret.encryptionKey.data=path-to-encryption-key-file \ + --set-file secret.firestoreServiceAccount.data=path-to-service-account-file \ + --set-file secret.gcsServiceAccount.data=path-to-service-account-file +``` + +Currently, besides `Firestore` PipeCD supports other databases as its datastore such as `MySQL`. As for the filestore, PipeCD supports `AWS S3` and `MINIO` as well. + +For example, in case of using `MySQL` as datastore and `MINIO` as filestore, the ControlPlane configuration will be as follows: + +```yaml +apiVersion: "pipecd.dev/v1beta1" +kind: ControlPlane +spec: + stateKey: {RANDOM_STRING} + datastore: + type: MYSQL + config: + url: {YOUR_MYSQL_ADDRESS} + database: {YOUR_DATABASE_NAME} + filestore: + type: MINIO + config: + endpoint: {YOUR_MINIO_ADDRESS} + bucket: {YOUR_BUCKET_NAME} + accessKeyFile: /etc/pipecd-secret/minio-access-key + secretKeyFile: /etc/pipecd-secret/minio-secret-key + autoCreateBucket: true +``` + +You can find required configurations to use other datastores and filestores from [ConfigurationReference](../../../user-guide/managing-controlplane/configuration-reference/). + +__Caution__: In case of using `MySQL` as Control Plane's datastore, please note that the implementation of PipeCD requires some features that are only available on [MySQL v8](https://dev.mysql.com/doc/refman/8.0/en/), so make sure your MySQL service satisfies this requirement. + +### 3. Accessing the PipeCD web + +If your installation included an [ingress](https://github.com/pipe-cd/pipecd/blob/master/manifests/pipecd/values.yaml#L7), the PipeCD web can be accessed via the ingress's IP address or domain. 
+Otherwise, the private PipeCD web can be accessed by using `kubectl port-forward` to expose the installed Control Plane on your localhost: + +``` console +kubectl port-forward svc/pipecd 8080 --namespace={NAMESPACE} +``` + +Now go to [http://localhost:8080](http://localhost:8080) in your browser, and you will see a page to log in to your project. + +At this point, you have an installed PipeCD Control Plane. To log in, you need to initialize a new project. + +### 4. Initialize a new project + +To create a new project, you need to access the `ops` pod in your installed PipeCD control plane, using the `kubectl port-forward` command: + +```console +kubectl port-forward service/pipecd-ops 9082 --namespace={NAMESPACE} +``` + +Then, access [http://localhost:9082](http://localhost:9082). + +On that page, you will see the list of registered projects and a link to register new projects. Registering a new project requires only a unique ID string and an optional description text. + +Once a new project has been registered, a static admin (username, password) will be automatically generated for the project admin; you can use it to log in via the login form in the above section. + +For more about adding a new project in detail, please read the following [docs](../../../user-guide/managing-controlplane/adding-a-project/). + +### 4'. Upgrade Control Plane version + +To upgrade the PipeCD Control Plane, the preparations and commands are the same as when installing it. You only need to change the version flag in the command to the version you want to upgrade your PipeCD Control Plane to. 
+ +``` console +helm upgrade -i pipecd oci://ghcr.io/pipe-cd/chart/pipecd --version {NEW_VERSION} --namespace={NAMESPACE} \ + --set-file config.data=path-to-control-plane-configuration-file \ + --set-file secret.encryptionKey.data=path-to-encryption-key-file \ + --set-file secret.firestoreServiceAccount.data=path-to-service-account-file \ + --set-file secret.gcsServiceAccount.data=path-to-service-account-file +``` + +## Production Hardening + +This part provides guidance for a production-hardened deployment of the control plane. + +- Publishing the control plane + + You can allow external access to the control plane by enabling the [ingress](https://github.com/pipe-cd/pipecd/blob/master/manifests/pipecd/values.yaml#L7) configuration. + +- End-to-End TLS + + After switching to HTTPS, do not forget to set the `api.args.secureCookie` parameter to be `true` to disallow using cookies over unsecured HTTP connections. + + Additionally, in the case of GKE Ingress, PipeCD also requires a TLS certificate for internal use. This can be a self-signed certificate, generated by this command: + + ``` console + openssl req -x509 -nodes -days 3650 -newkey rsa:2048 -keyout tls.key -out tls.crt -subj "/CN={YOUR_DOMAIN}" + ``` + That key and certificate can be configured via [`secret.internalTLSKey.data`](https://github.com/pipe-cd/pipecd/blob/master/manifests/pipecd/values.yaml#L118) and [`secret.internalTLSCert.data`](https://github.com/pipe-cd/pipecd/blob/master/manifests/pipecd/values.yaml#L121). + + To enable the internal TLS connection, please set the `gateway.internalTLS.enabled` parameter to be `true`. 
+ + Otherwise, the `cloud.google.com/app-protocols` annotation is also should be configured as the following: + + ``` yaml + service: + port: 443 + annotations: + cloud.google.com/app-protocols: '{"service":"HTTP2"}' + ``` diff --git a/docs/content/en/docs-v0.50.x/installation/install-piped/_index.md b/docs/content/en/docs-v0.50.x/installation/install-piped/_index.md new file mode 100644 index 0000000000..71a5199f66 --- /dev/null +++ b/docs/content/en/docs-v0.50.x/installation/install-piped/_index.md @@ -0,0 +1,9 @@ +--- +title: "Install Piped" +linkTitle: "Install Piped" +weight: 3 +description: > + This page describes how to install a Piped. +--- + +Since Piped is a stateless agent, no database or storage is required to run. In addition, a Piped can interact with one or multiple platform providers, so the number of Piped and where they should run is entirely up to your preference. For example, you can run your Pipeds in a Kubernetes cluster to deploy not just Kubernetes applications but your Terraform and Cloud Run applications as well. diff --git a/docs/content/en/docs-v0.50.x/installation/install-piped/installing-on-cloudrun.md b/docs/content/en/docs-v0.50.x/installation/install-piped/installing-on-cloudrun.md new file mode 100644 index 0000000000..786f920829 --- /dev/null +++ b/docs/content/en/docs-v0.50.x/installation/install-piped/installing-on-cloudrun.md @@ -0,0 +1,174 @@ +--- +title: "Installing on Cloud Run" +linkTitle: "Installing on Cloud Run" +weight: 3 +description: > + This page describes how to install Piped on Cloud Run. +--- + +## Prerequisites + +##### Having piped's ID and Key strings +- Ensure that the `piped` has been registered and you are having its PIPED_ID and PIPED_KEY strings. +- If you are not having them, this [page](../../../user-guide/managing-controlplane/registering-a-piped/) guides you how to register a new one. 
+ +##### Preparing SSH key +- If your Git repositories are private, `piped` requires a private SSH key to access those repositories. +- Please checkout [this documentation](https://help.github.com/en/github/authenticating-to-github/generating-a-new-ssh-key-and-adding-it-to-the-ssh-agent) for how to generate a new SSH key pair. Then add the public key to your repositories. (If you are using GitHub, you can add it to Deploy Keys at the repository's Settings page.) + +## Installation + +- Preparing a piped configuration file as the following: + + ``` yaml + apiVersion: pipecd.dev/v1beta1 + kind: Piped + spec: + projectID: {PROJECT_ID} + pipedID: {PIPED_ID} + pipedKeyData: {BASE64_ENCODED_PIPED_KEY} + # Write in a format like "host:443" because the communication is done via gRPC. + apiAddress: {CONTROL_PLANE_API_ADDRESS} + + git: + sshKeyData: {BASE64_ENCODED_PRIVATE_SSH_KEY} + + repositories: + - repoId: {REPO_ID_OR_NAME} + remote: git@github.com:{GIT_ORG}/{GIT_REPO}.git + branch: {GIT_BRANCH} + + # Optional + # Enable this Piped to handle Cloud Run application. + platformProviders: + - name: cloudrun-in-project + type: CLOUDRUN + config: + project: {GCP_PROJECT_ID} + region: {GCP_PROJECT_REGION} + + # Optional + # Uncomment this if you want to enable this Piped to handle Terraform application. + # - name: terraform-gcp + # type: TERRAFORM + + # Optional + # Uncomment this if you want to enable SecretManagement feature. + # https://pipecd.dev//docs/user-guide/managing-application/secret-management/ + # secretManagement: + # type: KEY_PAIR + # config: + # privateKeyData: {BASE64_ENCODED_PRIVATE_KEY} + # publicKeyData: {BASE64_ENCODED_PUBLIC_KEY} + ``` + +See [ConfigurationReference](../../../user-guide/managing-piped/configuration-reference/) for the full configuration. 
+ +- Creating a new secret in [SecretManager](https://cloud.google.com/secret-manager/docs/creating-and-accessing-secrets) to store above configuration data securely + + ``` console + gcloud secrets create cloudrun-piped-config --data-file={PATH_TO_CONFIG_FILE} + ``` + + then make sure that Cloud Run has the ability to access that secret as [this guide](https://cloud.google.com/run/docs/configuring/secrets#access-secret). + +- Running Piped in Cloud Run + + Prepare a Cloud Run service manifest file as below. + + {{< tabpane >}} + {{< tab lang="yaml" header="Piped with Remote-upgrade" >}} +# Enable remote-upgrade feature of Piped. +# https://pipecd.dev/docs/user-guide/managing-piped/remote-upgrade-remote-config/#remote-upgrade +# This allows upgrading Piped to a new version from the web console. + +apiVersion: serving.knative.dev/v1 +kind: Service +metadata: + name: piped + annotaions: + run.googleapis.com/ingress: internal + run.googleapis.com/ingress-status: internal +spec: + template: + metadata: + annotations: + autoscaling.knative.dev/maxScale: '1' # This must be 1. + autoscaling.knative.dev/minScale: '1' # This must be 1. + run.googleapis.com/cpu-throttling: "false" # This is required. + spec: + containerConcurrency: 1 # This must be 1 to ensure Piped work correctly. + containers: + - image: gcr.io/pipecd/launcher:{{< blocks/latest_version >}} + args: + - launcher + - --launcher-admin-port=9086 + - --config-file=/etc/piped-config/config.yaml + ports: + - containerPort: 9086 + volumeMounts: + - mountPath: /etc/piped-config + name: piped-config + resources: + limits: + cpu: 1000m + memory: 2Gi + volumes: + - name: piped-config + secret: + secretName: cloudrun-piped-config + items: + - path: config.yaml + key: latest + {{< /tab >}} + {{< tab lang="yaml" header="Piped" >}} +# This just installs a Piped with the specified version. +# Whenever you want to upgrade that Piped to a new version or update its config data you have to restart it. 
+ +apiVersion: serving.knative.dev/v1 +kind: Service +metadata: + name: piped + annotations: + run.googleapis.com/ingress: internal + run.googleapis.com/ingress-status: internal +spec: + template: + metadata: + annotations: + autoscaling.knative.dev/maxScale: '1' # This must be 1. + autoscaling.knative.dev/minScale: '1' # This must be 1. + run.googleapis.com/cpu-throttling: "false" # This is required. + spec: + containerConcurrency: 1 # This must be 1. + containers: + - image: gcr.io/pipecd/piped:{{< blocks/latest_version >}} + args: + - piped + - --config-file=/etc/piped-config/config.yaml + ports: + - containerPort: 9085 + volumeMounts: + - mountPath: /etc/piped-config + name: piped-config + resources: + limits: + cpu: 1000m + memory: 2Gi + volumes: + - name: piped-config + secret: + secretName: cloudrun-piped-config + items: + - path: config.yaml + key: latest + {{< /tab >}} + {{< /tabpane >}} + + Run the Piped service on Cloud Run with the following command: + + ``` console + gcloud beta run services replace cloudrun-piped-service.yaml + ``` + + Note: Make sure that the created secret is accessible from this Piped service. See more [here](https://cloud.google.com/run/docs/configuring/secrets#access-secret). diff --git a/docs/content/en/docs-v0.50.x/installation/install-piped/installing-on-fargate.md b/docs/content/en/docs-v0.50.x/installation/install-piped/installing-on-fargate.md new file mode 100644 index 0000000000..32031b7fa6 --- /dev/null +++ b/docs/content/en/docs-v0.50.x/installation/install-piped/installing-on-fargate.md @@ -0,0 +1,199 @@ +--- +title: "Installing on ECS Fargate" +linkTitle: "Installing on ECS Fargate" +weight: 4 +description: > + This page describes how to install Piped as a task on an ECS cluster backed by AWS Fargate. +--- + +## Prerequisites + +##### Having piped's ID and Key strings +- Ensure that the `piped` has been registered and you have its PIPED_ID and PIPED_KEY strings. 
+- If you are not having them, this [page](../../../user-guide/managing-controlplane/registering-a-piped/) guides you how to register a new one. + +##### Preparing SSH key +- If your Git repositories are private, `piped` requires a private SSH key to access those repositories. +- Please checkout [this documentation](https://help.github.com/en/github/authenticating-to-github/generating-a-new-ssh-key-and-adding-it-to-the-ssh-agent) for how to generate a new SSH key pair. Then add the public key to your repositories. (If you are using GitHub, you can add it to Deploy Keys at the repository's Settings page.) + +## Installation + +- Preparing a piped configuration file as follows: + + ``` yaml + apiVersion: pipecd.dev/v1beta1 + kind: Piped + spec: + projectID: {PROJECT_ID} + pipedID: {PIPED_ID} + pipedKeyData: {BASE64_ENCODED_PIPED_KEY} + # Write in a format like "host:443" because the communication is done via gRPC. + apiAddress: {CONTROL_PLANE_API_ADDRESS} + + git: + sshKeyData: {BASE64_ENCODED_PRIVATE_SSH_KEY} + + repositories: + - repoId: {REPO_ID_OR_NAME} + remote: git@github.com:{GIT_ORG}/{GIT_REPO}.git + branch: {GIT_BRANCH} + + # Optional + # Enable this Piped to handle ECS application. + platformProviders: + - name: ecs-dev + type: ECS + config: + region: {ECS_RUNNING_REGION} + + # Optional + # Uncomment this if you want to enable this Piped to handle Terraform application. + # - name: terraform-dev + # type: TERRAFORM + + # Optional + # Uncomment this if you want to enable SecretManagement feature. + # https://pipecd.dev//docs/user-guide/managing-application/secret-management/ + # secretManagement: + # type: KEY_PAIR + # config: + # privateKeyData: {BASE64_ENCODED_PRIVATE_KEY} + # publicKeyData: {BASE64_ENCODED_PUBLIC_KEY} + ``` + +See [ConfigurationReference](../../../user-guide/managing-piped/configuration-reference/) for the full configuration. + +- Store the above configuration data to AWS to enable using it while creating piped task. 
Both [AWS Secrets Manager](https://aws.amazon.com/secrets-manager/) and [AWS Systems Manager Parameter Store](https://docs.aws.amazon.com/systems-manager/latest/userguide/systems-manager-parameter-store.html) are available to address this task. + + {{< tabpane >}} + {{< tab lang="bash" header="Store in AWS Secrets Manager" >}} + aws secretsmanager create-secret --name PipedConfig \ + --description "Configuration of piped running as ECS Fargate task" \ + --secret-string `base64 piped-config.yaml` + {{< /tab >}} + {{< tab lang="bash" header="Store in AWS Systems Manager Parameter Store" >}} + aws ssm put-parameter \ + --name PipedConfig \ + --value `base64 piped-config.yaml` \ + --type SecureString + {{< /tab >}} + {{< /tabpane >}} + +- Prepare a task definition for your piped task. Basically, you can define your piped TaskDefinition as a normal TaskDefinition; the only thing to be aware of is that, in case you used [AWS Secrets Manager](https://aws.amazon.com/secrets-manager/) to store the piped configuration, to enable your piped to access its configuration (created as a secret above) you need to add the `secretsmanager:GetSecretValue` policy to your piped task's `executionRole`. Read more in [Required IAM permissions for Amazon ECS secrets](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/specifying-sensitive-data-secrets.html). + + A sample TaskDefinition for Piped is as follows: + + {{< tabpane >}} + {{< tab lang="json" header="Piped with Remote-upgrade" >}} +# Enable remote-upgrade feature of Piped. +# https://pipecd.dev/docs/user-guide/managing-piped/remote-upgrade-remote-config/#remote-upgrade +# This allows upgrading Piped to a new version from the web console. 
+ +{ + "family": "piped", + "executionRoleArn": "{PIPED_TASK_EXECUTION_ROLE_ARN}", + "containerDefinitions": [ + { + "name": "piped", + "essential": true, + "image": "ghcr.io/pipe-cd/launcher:{{< blocks/latest_version >}}", + "entryPoint": [ + "sh", + "-c" + ], + "command": [ + "/bin/sh -c \"launcher launcher --config-data=$(echo $CONFIG_DATA)\"" + ], + "secrets": [ + { + "valueFrom": "{PIPED_SECRET_MANAGER_ARN}", + "name": "CONFIG_DATA" + } + ], + } + ], + "requiresCompatibilities": [ + "FARGATE" + ], + "networkMode": "awsvpc", + "memory": "512", + "cpu": "256" +} + {{< /tab >}} + {{< tab lang="json" header="Piped" >}} +# This just installs a Piped with the specified version. +# Whenever you want to upgrade that Piped to a new version or update its config data you have to restart it. + +{ + "family": "piped", + "executionRoleArn": "{PIPED_TASK_EXECUTION_ROLE_ARN}", + "containerDefinitions": [ + { + "name": "piped", + "essential": true, + "image": "ghcr.io/pipe-cd/piped:{{< blocks/latest_version >}}", + "entryPoint": [ + "sh", + "-c" + ], + "command": [ + "/bin/sh -c \"piped piped --config-data=$(echo $CONFIG_DATA)\"" + ], + "secrets": [ + { + "valueFrom": "{PIPED_SECRET_MANAGER_ARN}", + "name": "CONFIG_DATA" + } + ], + } + ], + "requiresCompatibilities": [ + "FARGATE" + ], + "networkMode": "awsvpc", + "memory": "512", + "cpu": "256" +} + {{< /tab >}} + {{< /tabpane >}} + + Register this piped task definition and start piped task: + + ```console + aws ecs register-task-definition --cli-input-json file://taskdef.json + aws ecs run-task --cluster {ECS_CLUSTER} --task-definition piped + ``` + + Once the task is created, it will run continuously because of the piped execution. Since this task is run without [startedBy](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_StartTask.html#API_StartTask_RequestSyntax) setting, in case the piped is stopped, it will not automatically be restarted. 
To do so, you must define an ECS service to control piped task deployment. + + A sample Service definition to control piped task deployment. + + ```json + { + "cluster": "{ECS_CLUSTER}", + "serviceName": "piped", + "desiredCount": 1, # This must be 1. + "taskDefinition": "{PIPED_TASK_DEFINITION_ARN}", + "deploymentConfiguration": { + "minimumHealthyPercent": 0, + "maximumPercent": 100 + }, + "schedulingStrategy": "REPLICA", + "launchType": "FARGATE", + "networkConfiguration": { + "awsvpcConfiguration": { + "assignPublicIp": "ENABLED", # This is need to enable ECS deployment to pull piped container images. + ... + } + } + } + ``` + + Then start your piped task controller service. + + ```console + aws ecs create-service \ + --cluster {ECS_CLUSTER} \ + --service-name piped \ + --cli-input-json file://service.json + ``` diff --git a/docs/content/en/docs-v0.50.x/installation/install-piped/installing-on-google-cloud-vm.md b/docs/content/en/docs-v0.50.x/installation/install-piped/installing-on-google-cloud-vm.md new file mode 100644 index 0000000000..84cb85160f --- /dev/null +++ b/docs/content/en/docs-v0.50.x/installation/install-piped/installing-on-google-cloud-vm.md @@ -0,0 +1,138 @@ +--- +title: "Installing on Google Cloud VM" +linkTitle: "Installing on Google Cloud VM" +weight: 2 +description: > + This page describes how to install Piped on Google Cloud VM. +--- + +## Prerequisites + +##### Having piped's ID and Key strings +- Ensure that the `piped` has been registered and you are having its `PIPED_ID` and `PIPED_KEY` strings. +- If you are not having them, this [page](../../../user-guide/managing-controlplane/registering-a-piped/) guides you how to register a new one. + +##### Preparing SSH key +- If your Git repositories are private, `piped` requires a private SSH key to access those repositories. 
+- Please checkout [this documentation](https://help.github.com/en/github/authenticating-to-github/generating-a-new-ssh-key-and-adding-it-to-the-ssh-agent) for how to generate a new SSH key pair. Then add the public key to your repositories. (If you are using GitHub, you can add it to Deploy Keys at the repository's Settings page.) + +## Installation + +- Preparing a piped configuration file as the following: + + ``` yaml + apiVersion: pipecd.dev/v1beta1 + kind: Piped + spec: + projectID: {PROJECT_ID} + pipedID: {PIPED_ID} + pipedKeyData: {BASE64_ENCODED_PIPED_KEY} + # Write in a format like "host:443" because the communication is done via gRPC. + apiAddress: {CONTROL_PLANE_API_ADDRESS} + + git: + sshKeyData: {BASE64_ENCODED_PRIVATE_SSH_KEY} + + repositories: + - repoId: {REPO_ID_OR_NAME} + remote: git@github.com:{GIT_ORG}/{GIT_REPO}.git + branch: {GIT_BRANCH} + + # Optional + # Uncomment this if you want to enable this Piped to handle Cloud Run application. + # platformProviders: + # - name: cloudrun-in-project + # type: CLOUDRUN + # config: + # project: {GCP_PROJECT_ID} + # region: {GCP_PROJECT_REGION} + + # Optional + # Uncomment this if you want to enable this Piped to handle Terraform application. + # - name: terraform-gcp + # type: TERRAFORM + + # Optional + # Uncomment this if you want to enable SecretManagement feature. + # https://pipecd.dev//docs/user-guide/managing-application/secret-management/ + # secretManagement: + # type: KEY_PAIR + # config: + # privateKeyData: {BASE64_ENCODED_PRIVATE_KEY} + # publicKeyData: {BASE64_ENCODED_PUBLIC_KEY} + ``` + +See [ConfigurationReference](../../../user-guide/managing-piped/configuration-reference/) for the full configuration. 
+ +- Creating a new secret in [SecretManager](https://cloud.google.com/secret-manager/docs/creating-and-accessing-secrets) to store above configuration data securely + + ``` shell + gcloud secrets create vm-piped-config --data-file={PATH_TO_CONFIG_FILE} + ``` + +- Creating a new Service Account for Piped and giving it needed roles + + ``` shell + gcloud iam service-accounts create vm-piped \ + --description="Using by Piped running on Google Cloud VM" \ + --display-name="vm-piped" + + # Allow Piped to access the created secret. + gcloud secrets add-iam-policy-binding vm-piped-config \ + --member="serviceAccount:vm-piped@{GCP_PROJECT_ID}.iam.gserviceaccount.com" \ + --role="roles/secretmanager.secretAccessor" + + # Allow Piped to write its log messages to Google Cloud Logging service. + gcloud projects add-iam-policy-binding {GCP_PROJECT_ID} \ + --member="serviceAccount:vm-piped@{GCP_PROJECT_ID}.iam.gserviceaccount.com" \ + --role="roles/logging.logWriter" + + # Optional + # If you want to use this Piped to handle Cloud Run application + # run the following command to give it the needed roles. + # https://cloud.google.com/run/docs/reference/iam/roles#additional-configuration + # + # gcloud projects add-iam-policy-binding {GCP_PROJECT_ID} \ + # --member="serviceAccount:vm-piped@{GCP_PROJECT_ID}.iam.gserviceaccount.com" \ + # --role="roles/run.developer" + # + # gcloud iam service-accounts add-iam-policy-binding {GCP_PROJECT_NUMBER}-compute@developer.gserviceaccount.com \ + # --member="serviceAccount:vm-piped@{GCP_PROJECT_ID}.iam.gserviceaccount.com" \ + # --role="roles/iam.serviceAccountUser" + ``` + +- Running Piped on a Google Cloud VM + + {{< tabpane >}} + {{< tab lang="console" header="Piped with Remote-upgrade" >}} +# Enable remote-upgrade feature of Piped. +# https://pipecd.dev/docs/user-guide/managing-piped/remote-upgrade-remote-config/#remote-upgrade +# This allows upgrading Piped to a new version from the web console. 
+ + gcloud compute instances create-with-container vm-piped \ + --container-image="ghcr.io/pipe-cd/launcher:{{< blocks/latest_version >}}" \ + --container-arg="launcher" \ + --container-arg="--config-from-gcp-secret=true" \ + --container-arg="--gcp-secret-id=projects/{GCP_PROJECT_ID}/secrets/vm-piped-config/versions/{SECRET_VERSION}" \ + --network="{VPC_NETWORK}" \ + --subnet="{VPC_SUBNET}" \ + --scopes="cloud-platform" \ + --service-account="vm-piped@{GCP_PROJECT_ID}.iam.gserviceaccount.com" + {{< /tab >}} + {{< tab lang="console" header="Piped" >}} +# This just installs a Piped with the specified version. +# Whenever you want to upgrade that Piped to a new version or update its config data you have to restart it. + + gcloud compute instances create-with-container vm-piped \ + --container-image="ghcr.io/pipe-cd/piped:{{< blocks/latest_version >}}" \ + --container-arg="piped" \ + --container-arg="--config-gcp-secret=projects/{GCP_PROJECT_ID}/secrets/vm-piped-config/versions/{SECRET_VERSION}" \ + --network="{VPC_NETWORK}" \ + --subnet="{VPC_SUBNET}" \ + --scopes="cloud-platform" \ + --service-account="vm-piped@{GCP_PROJECT_ID}.iam.gserviceaccount.com" + {{< /tab >}} + {{< /tabpane >}} + +After that, you can see on PipeCD web at `Settings` page that Piped is connecting to the Control Plane. +You can also view Piped log as described [here](https://cloud.google.com/compute/docs/containers/deploying-containers#viewing_logs). diff --git a/docs/content/en/docs-v0.50.x/installation/install-piped/installing-on-kubernetes.md b/docs/content/en/docs-v0.50.x/installation/install-piped/installing-on-kubernetes.md new file mode 100644 index 0000000000..932888081f --- /dev/null +++ b/docs/content/en/docs-v0.50.x/installation/install-piped/installing-on-kubernetes.md @@ -0,0 +1,246 @@ +--- +title: "Installing on Kubernetes cluster" +linkTitle: "Installing on Kubernetes cluster" +weight: 1 +description: > + This page describes how to install Piped on Kubernetes cluster. 
+--- + +## Prerequisites + +##### Having piped's ID and Key strings +- Ensure that the `piped` has been registered and you are having its PIPED_ID and PIPED_KEY strings. +- If you are not having them, this [page](../../../user-guide/managing-controlplane/registering-a-piped/) guides you how to register a new one. + +##### Preparing SSH key +- If your Git repositories are private, `piped` requires a private SSH key to access those repositories. +- Please checkout [this documentation](https://help.github.com/en/github/authenticating-to-github/generating-a-new-ssh-key-and-adding-it-to-the-ssh-agent) for how to generate a new SSH key pair. Then add the public key to your repositories. (If you are using GitHub, you can add it to Deploy Keys at the repository's Settings page.) + +## In the cluster-wide mode +This way requires installing cluster-level resources. Piped installed with this way can perform deployment workloads against any other namespaces than the where Piped runs on. + +- Preparing a piped configuration file as the following + + ``` yaml + apiVersion: pipecd.dev/v1beta1 + kind: Piped + spec: + projectID: {PROJECT_ID} + pipedID: {PIPED_ID} + pipedKeyFile: /etc/piped-secret/piped-key + # Write in a format like "host:443" because the communication is done via gRPC. + apiAddress: {CONTROL_PLANE_API_ADDRESS} + git: + sshKeyFile: /etc/piped-secret/ssh-key + repositories: + - repoId: {REPO_ID_OR_NAME} + remote: git@github.com:{GIT_ORG}/{GIT_REPO}.git + branch: {GIT_BRANCH} + syncInterval: 1m + ``` + +See [ConfigurationReference](../../../user-guide/managing-piped/configuration-reference/) for the full configuration. + +- Installing by using [Helm](https://helm.sh/docs/intro/install/) (3.8.0 or later) + + {{< tabpane >}} + {{< tab lang="bash" header="Piped" >}} +# This command just installs a Piped with the specified version. +# Whenever you want to upgrade that Piped to a new version or update its config data +# you have to restart it by re-running this command. 
+ +helm upgrade -i dev-piped oci://ghcr.io/pipe-cd/chart/piped --version={{< blocks/latest_version >}} --namespace={NAMESPACE} \ + --set-file config.data={PATH_TO_PIPED_CONFIG_FILE} \ + --set-file secret.data.piped-key={PATH_TO_PIPED_KEY_FILE} \ + --set-file secret.data.ssh-key={PATH_TO_PRIVATE_SSH_KEY_FILE} + {{< /tab >}} + {{< tab lang="bash" header="Piped with Remote-upgrade" >}} +# Enable remote-upgrade feature of Piped. +# https://pipecd.dev/docs/user-guide/managing-piped/remote-upgrade-remote-config/#remote-upgrade +# This allows upgrading Piped to a new version from the web console. +# But we still need to restart Piped when we want to update its config data. + +helm upgrade -i dev-piped oci://ghcr.io/pipe-cd/chart/piped --version={{< blocks/latest_version >}} --namespace={NAMESPACE} \ + --set launcher.enabled=true \ + --set-file config.data={PATH_TO_PIPED_CONFIG_FILE} \ + --set-file secret.data.piped-key={PATH_TO_PIPED_KEY_FILE} \ + --set-file secret.data.ssh-key={PATH_TO_PRIVATE_SSH_KEY_FILE} + {{< /tab >}} + {{< tab lang="bash" header="Piped with Remote-upgrade and Remote-config" >}} +# Enable both remote-upgrade and remote-config features of Piped. +# https://pipecd.dev/docs/user-guide/managing-piped/remote-upgrade-remote-config/#remote-config +# Beside of the ability to upgrade Piped to a new version from the web console, +# remote-config allows loading the Piped config stored in a remote location such as a Git repository. +# Whenever the config data is changed, it loads the new config and restarts Piped to use that new config. 
+ +helm upgrade -i dev-piped oci://ghcr.io/pipe-cd/chart/piped --version={{< blocks/latest_version >}} --namespace={NAMESPACE} \ + --set launcher.enabled=true \ + --set launcher.configFromGitRepo.enabled=true \ + --set launcher.configFromGitRepo.repoUrl=git@github.com:{GIT_ORG}/{GIT_REPO}.git \ + --set launcher.configFromGitRepo.branch={GIT_BRANCH} \ + --set launcher.configFromGitRepo.configFile={RELATIVE_PATH_TO_PIPED_CONFIG_FILE_IN_GIT_REPO} \ + --set launcher.configFromGitRepo.sshKeyFile=/etc/piped-secret/ssh-key \ + --set-file secret.data.piped-key={PATH_TO_PIPED_KEY_FILE} \ + --set-file secret.data.ssh-key={PATH_TO_PRIVATE_SSH_KEY_FILE} + {{< /tab >}} + {{< /tabpane >}} + + Note: Be sure to set `--set args.insecure=true` if your Control Plane has not TLS-enabled yet. + + See [values.yaml](https://github.com/pipe-cd/pipecd/blob/master/manifests/piped/values.yaml) for the full values. + +## In the namespaced mode +The previous way requires installing cluster-level resources. If you want to restrict Piped's permission within the namespace where Piped runs on, this way is for you. +Most parts are identical to the previous way, but some are slightly different. + +- Adding a new cloud provider like below to the previous piped configuration file + + ``` yaml + apiVersion: pipecd.dev/v1beta1 + kind: Piped + spec: + projectID: {PROJECT_ID} + pipedID: {PIPED_ID} + pipedKeyFile: /etc/piped-secret/piped-key + # Write in a format like "host:443" because the communication is done via gRPC. + apiAddress: {CONTROL_PLANE_API_ADDRESS} + git: + sshKeyFile: /etc/piped-secret/ssh-key + repositories: + - repoId: REPO_ID_OR_NAME + remote: git@github.com:{GIT_ORG}/{GIT_REPO}.git + branch: {GIT_BRANCH} + syncInterval: 1m + # This is needed to restrict to limit the access range to within a namespace. 
+ platformProviders: + - name: my-kubernetes + type: KUBERNETES + config: + appStateInformer: + namespace: {NAMESPACE} + ``` + +- Installing by using [Helm](https://helm.sh/docs/intro/install/) (3.8.0 or later) + + {{< tabpane >}} + {{< tab lang="bash" header="Piped" >}} +# This command just installs a Piped with the specified version. +# Whenever you want to upgrade that Piped to a new version or update its config data +# you have to restart it by re-running this command. + +helm upgrade -i dev-piped oci://ghcr.io/pipe-cd/chart/piped --version={{< blocks/latest_version >}} --namespace={NAMESPACE} \ + --set-file config.data={PATH_TO_PIPED_CONFIG_FILE} \ + --set-file secret.data.piped-key={PATH_TO_PIPED_KEY_FILE} \ + --set-file secret.data.ssh-key={PATH_TO_PRIVATE_SSH_KEY_FILE} \ + --set args.enableDefaultKubernetesCloudProvider=false \ + --set rbac.scope=namespace + {{< /tab >}} + {{< tab lang="bash" header="Piped with Remote-upgrade" >}} +# Enable remote-upgrade feature of Piped. +# https://pipecd.dev/docs/user-guide/managing-piped/remote-upgrade-remote-config/#remote-upgrade +# This allows upgrading Piped to a new version from the web console. +# But we still need to restart Piped when we want to update its config data. + +helm upgrade -i dev-piped oci://ghcr.io/pipe-cd/chart/piped --version={{< blocks/latest_version >}} --namespace={NAMESPACE} \ + --set launcher.enabled=true \ + --set-file config.data={PATH_TO_PIPED_CONFIG_FILE} \ + --set-file secret.data.piped-key={PATH_TO_PIPED_KEY_FILE} \ + --set-file secret.data.ssh-key={PATH_TO_PRIVATE_SSH_KEY_FILE} \ + --set args.enableDefaultKubernetesCloudProvider=false \ + --set rbac.scope=namespace + {{< /tab >}} + {{< tab lang="bash" header="Piped with Remote-upgrade and Remote-config" >}} +# Enable both remote-upgrade and remote-config features of Piped. 
+# https://pipecd.dev/docs/user-guide/managing-piped/remote-upgrade-remote-config/#remote-config +# Beside of the ability to upgrade Piped to a new version from the web console, +# remote-config allows loading the Piped config stored in a remote location such as a Git repository. +# Whenever the config data is changed, it loads the new config and restarts Piped to use that new config. + +helm upgrade -i dev-piped oci://ghcr.io/pipe-cd/chart/piped --version={{< blocks/latest_version >}} --namespace={NAMESPACE} \ + --set launcher.enabled=true \ + --set launcher.configFromGitRepo.enabled=true \ + --set launcher.configFromGitRepo.repoUrl=git@github.com:{GIT_ORG}/{GIT_REPO}.git \ + --set launcher.configFromGitRepo.branch={GIT_BRANCH} \ + --set launcher.configFromGitRepo.configFile={RELATIVE_PATH_TO_PIPED_CONFIG_FILE_IN_GIT_REPO} \ + --set launcher.configFromGitRepo.sshKeyFile=/etc/piped-secret/ssh-key \ + --set-file secret.data.piped-key={PATH_TO_PIPED_KEY_FILE} \ + --set-file secret.data.ssh-key={PATH_TO_PRIVATE_SSH_KEY_FILE} \ + --set args.enableDefaultKubernetesCloudProvider=false \ + --set rbac.scope=namespace + {{< /tab >}} + {{< /tabpane >}} + +#### In case on OpenShift less than 4.2 + +OpenShift uses an arbitrarily assigned user ID when it starts a container. +Starting from OpenShift 4.2, it also inserts that user into `/etc/passwd` for using by the application inside the container, +but before that version, the assigned user is missing in that file. That blocks workloads of `ghcr.io/pipe-cd/piped` image. +Therefore if you are running on OpenShift with a version before 4.2, please use `ghcr.io/pipe-cd/piped-okd` image with the following command: + +- Installing by using [Helm](https://helm.sh/docs/intro/install/) (3.8.0 or later) + + {{< tabpane >}} + {{< tab lang="bash" header="Piped" >}} +# This command just installs a Piped with the specified version. 
+# Whenever you want to upgrade that Piped to a new version or update its config data +# you have to restart it by re-running this command. + +helm upgrade -i dev-piped oci://ghcr.io/pipe-cd/chart/piped --version={{< blocks/latest_version >}} --namespace={NAMESPACE} \ + --set-file config.data={PATH_TO_PIPED_CONFIG_FILE} \ + --set-file secret.data.piped-key={PATH_TO_PIPED_KEY_FILE} \ + --set-file secret.data.ssh-key={PATH_TO_PRIVATE_SSH_KEY_FILE} \ + --set args.enableDefaultKubernetesCloudProvider=false \ + --set rbac.scope=namespace + --set args.addLoginUserToPasswd=true \ + --set securityContext.runAsNonRoot=true \ + --set securityContext.runAsUser={UID} \ + --set securityContext.fsGroup={FS_GROUP} \ + --set securityContext.runAsGroup=0 \ + --set image.repository="ghcr.io/pipe-cd/piped-okd" + {{< /tab >}} + {{< tab lang="bash" header="Piped with Remote-upgrade" >}} +# Enable remote-upgrade feature of Piped. +# https://pipecd.dev/docs/user-guide/managing-piped/remote-upgrade-remote-config/#remote-upgrade +# This allows upgrading Piped to a new version from the web console. +# But we still need to restart Piped when we want to update its config data. 
+ +helm upgrade -i dev-piped oci://ghcr.io/pipe-cd/chart/piped --version={{< blocks/latest_version >}} --namespace={NAMESPACE} \ + --set launcher.enabled=true \ + --set-file config.data={PATH_TO_PIPED_CONFIG_FILE} \ + --set-file secret.data.piped-key={PATH_TO_PIPED_KEY_FILE} \ + --set-file secret.data.ssh-key={PATH_TO_PRIVATE_SSH_KEY_FILE} \ + --set args.enableDefaultKubernetesCloudProvider=false \ + --set rbac.scope=namespace + --set args.addLoginUserToPasswd=true \ + --set securityContext.runAsNonRoot=true \ + --set securityContext.runAsUser={UID} \ + --set securityContext.fsGroup={FS_GROUP} \ + --set securityContext.runAsGroup=0 \ + --set launcher.image.repository="ghcr.io/pipe-cd/launcher-okd" + {{< /tab >}} + {{< tab lang="bash" header="Piped with Remote-upgrade and Remote-config" >}} +# Enable both remote-upgrade and remote-config features of Piped. +# https://pipecd.dev/docs/user-guide/managing-piped/remote-upgrade-remote-config/#remote-config +# Beside of the ability to upgrade Piped to a new version from the web console, +# remote-config allows loading the Piped config stored in a remote location such as a Git repository. +# Whenever the config data is changed, it loads the new config and restarts Piped to use that new config. 
+ +helm upgrade -i dev-piped oci://ghcr.io/pipe-cd/chart/piped --version={{< blocks/latest_version >}} --namespace={NAMESPACE} \ + --set launcher.enabled=true \ + --set launcher.configFromGitRepo.enabled=true \ + --set launcher.configFromGitRepo.repoUrl=git@github.com:{GIT_ORG}/{GIT_REPO}.git \ + --set launcher.configFromGitRepo.branch={GIT_BRANCH} \ + --set launcher.configFromGitRepo.configFile={RELATIVE_PATH_TO_PIPED_CONFIG_FILE_IN_GIT_REPO} \ + --set launcher.configFromGitRepo.sshKeyFile=/etc/piped-secret/ssh-key \ + --set-file secret.data.piped-key={PATH_TO_PIPED_KEY_FILE} \ + --set-file secret.data.ssh-key={PATH_TO_PRIVATE_SSH_KEY_FILE} \ + --set args.enableDefaultKubernetesCloudProvider=false \ + --set rbac.scope=namespace + --set args.addLoginUserToPasswd=true \ + --set securityContext.runAsNonRoot=true \ + --set securityContext.runAsUser={UID} \ + --set securityContext.fsGroup={FS_GROUP} \ + --set securityContext.runAsGroup=0 \ + --set launcher.image.repository="ghcr.io/pipe-cd/launcher-okd" + {{< /tab >}} + {{< /tabpane >}} diff --git a/docs/content/en/docs-v0.50.x/installation/install-piped/installing-on-single-machine.md b/docs/content/en/docs-v0.50.x/installation/install-piped/installing-on-single-machine.md new file mode 100644 index 0000000000..018d9cf55e --- /dev/null +++ b/docs/content/en/docs-v0.50.x/installation/install-piped/installing-on-single-machine.md @@ -0,0 +1,52 @@ +--- +title: "Installing on a single machine" +linkTitle: "Installing on a single machine" +weight: 5 +description: > + This page describes how to install a Piped on a single machine. +--- + +## Prerequisites + +##### Having piped's ID and Key strings +- Ensure that the `piped` has been registered and you are having its PIPED_ID and PIPED_KEY strings. +- If you are not having them, this [page](../../../user-guide/managing-controlplane/registering-a-piped/) guides you how to register a new one. 
+ +##### Preparing SSH key +- If your Git repositories are private, `piped` requires a private SSH key to access those repositories. +- Please checkout [this documentation](https://help.github.com/en/github/authenticating-to-github/generating-a-new-ssh-key-and-adding-it-to-the-ssh-agent) for how to generate a new SSH key pair. Then add the public key to your repositories. (If you are using GitHub, you can add it to Deploy Keys at the repository's Settings page.) + +## Installation + +- Downloading the latest `piped` binary for your machine + + https://github.com/pipe-cd/pipecd/releases + +- Preparing a piped configuration file as the following: + + ``` yaml + apiVersion: pipecd.dev/v1beta1 + kind: Piped + spec: + projectID: {PROJECT_ID} + pipedID: {PIPED_ID} + pipedKeyFile: {PATH_TO_PIPED_KEY_FILE} + # Write in a format like "host:443" because the communication is done via gRPC. + apiAddress: {CONTROL_PLANE_API_ADDRESS} + git: + sshKeyFile: {PATH_TO_SSH_KEY_FILE} + repositories: + - repoId: {REPO_ID_OR_NAME} + remote: git@github.com:{GIT_ORG}/{GIT_REPO}.git + branch: {GIT_BRANCH} + syncInterval: 1m + ``` + +See [ConfigurationReference](../../../user-guide/managing-piped/configuration-reference/) for the full configuration. + +- Start running the `piped` + + ``` console + ./piped piped --config-file={PATH_TO_PIPED_CONFIG_FILE} + ``` + diff --git a/docs/content/en/docs-v0.50.x/installation/install-piped/required-permissions.md b/docs/content/en/docs-v0.50.x/installation/install-piped/required-permissions.md new file mode 100644 index 0000000000..7350b65846 --- /dev/null +++ b/docs/content/en/docs-v0.50.x/installation/install-piped/required-permissions.md @@ -0,0 +1,102 @@ +--- +title: "Required Permissions" +linkTitle: "Required Permissions" +weight: 6 +description: > + This page describes what permissions are required for a Piped to deploy applications. +--- + +A Piped requires some permissions to deploy applications, depending on the platform. 
+ +Note: If you run a piped as an ECS task, you need to attach the permissions on the piped task's `task role`, not `task execution role`. + +## For ECSApp + +You need IAM actions like the following example. You can restrict `Resource`. + +```json +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "ecs:CreateService", + "ecs:CreateTaskSet", + "ecs:DeleteTaskSet", + "ecs:DeregisterTaskDefinition", + "ecs:DescribeServices", + "ecs:DescribeTaskDefinition", + "ecs:DescribeTaskSets", + "ecs:DescribeTasks", + "ecs:ListClusters", + "ecs:ListServices", + "ecs:ListTasks", + "ecs:RegisterTaskDefinition", + "ecs:RunTask", + "ecs:TagResource", + "ecs:UpdateService", + "ecs:UpdateServicePrimaryTaskSet", + "elasticloadbalancing:DescribeListeners", + "elasticloadbalancing:DescribeRules", + "elasticloadbalancing:DescribeTargetGroups", + "elasticloadbalancing:ModifyListener", + "elasticloadbalancing:ModifyRule" + ], + "Resource": [ + "*" + ] + }, + { + "Effect": "Allow", + "Action": [ + "iam:PassRole" + ], + "Resource": [ + "arn:aws:iam:::role/", + "arn:aws:iam:::role/" + ] + } + ] +} +``` + +## For LambdaApp + +You need IAM actions like the following example. You can restrict `Resource`. 
+ +```json +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "lambda:CreateAlias", + "lambda:CreateFunction", + "lambda:GetAlias", + "lambda:GetFunction", + "lambda:ListFunctions", + "lambda:PublishVersion", + "lambda:TagResource", + "lambda:UntagResource", + "lambda:UpdateAlias", + "lambda:UpdateFunctionCode", + "lambda:UpdateFunctionConfiguration" + ], + "Resource": [ + "*" + ] + }, + { + "Effect": "Allow", + "Action": [ + "iam:PassRole" + ], + "Resource": [ + "arn:aws:iam:::role/" + ] + } + ] +} +``` \ No newline at end of file diff --git a/docs/content/en/docs-v0.50.x/overview/_index.md b/docs/content/en/docs-v0.50.x/overview/_index.md new file mode 100644 index 0000000000..9fbaf09e67 --- /dev/null +++ b/docs/content/en/docs-v0.50.x/overview/_index.md @@ -0,0 +1,78 @@ +--- +title: "Overview" +linkTitle: "Overview" +weight: 1 +description: > + Overview about PipeCD. +--- + +![](/images/pipecd-explanation.png) +

+PipeCD - a GitOps style continuous delivery solution +

+ +## What Is PipeCD? + +{{% pageinfo %}} +PipeCD provides a unified continuous delivery solution for multiple application kinds on multi-cloud that empowers engineers to deploy faster with more confidence, a GitOps tool that enables doing deployment operations by pull request on Git. +{{% /pageinfo %}} + +## Why PipeCD? + +- Simple, unified and easy to use but powerful pipeline definition to construct your deployment +- Same deployment interface to deploy applications of any platform, including Kubernetes, Terraform, GCP Cloud Run, AWS Lambda, AWS ECS +- No CRD or applications' manifest changes are required; Only need a pipeline definition along with your application manifests +- No deployment credentials are exposed or required outside the application cluster +- Built-in deployment analysis as part of the deployment pipeline to measure impact based on metrics, logs, emitted requests +- Easy to interact with any CI; The CI tests and builds artifacts, PipeCD takes the rest +- Insights show metrics like lead time, deployment frequency, MTTR and change failure rate to measure delivery performance +- Designed to manage thousands of cross-platform applications in multi-cloud for company scale but also work well for small projects + +## PipeCD's Characteristics in detail + +**Visibility** +- Deployment pipeline UI shows clarify what is happening +- Separate logs viewer for each individual deployment +- Realtime visualization of application state +- Deployment notifications to slack, webhook endpoints +- Insights show metrics like lead time, deployment frequency, MTTR and change failure rate to measure delivery performance + +**Automation** +- Automated deployment analysis to measure deployment impact based on metrics, logs, emitted requests +- Automatically roll back to the previous state as soon as analysis or a pipeline stage fails +- Automatically detect configuration drift to notify and render the changes +- Automatically trigger a new deployment when a defined 
event has occurred (e.g. container image pushed, helm chart published, etc) + +**Safety and Security** +- Support single sign-on and role-based access control +- Credentials are not exposed outside the cluster and not saved in the Control Plane +- Piped makes only outbound requests and can run inside a restricted network +- Built-in secrets management + +**Multi-provider & Multi-Tenancy** +- Support multiple application kinds on multi-cloud including Kubernetes, Terraform, Cloud Run, AWS Lambda, Amazon ECS +- Support multiple analysis providers including Prometheus, Datadog, Stackdriver, and more +- Easy to operate multi-cluster, multi-tenancy by separating Control Plane and Piped + +**Open Source** + +- Released as an Open Source project +- Under APACHE 2.0 license, see [LICENSE](https://github.com/pipe-cd/pipecd/blob/master/LICENSE) + +## Where should I go next? + +For a good understanding of the PipeCD's components. +- [Concepts](../concepts): describes each components. +- [FAQ](../faq): describes the difference between PipeCD and other tools. + +If you are an **operator** wanting to install and configure PipeCD for other developers. +- [Quickstart](../quickstart/) +- [Managing Control Plane](../user-guide/managing-controlplane/) +- [Managing Piped](../user-guide/managing-piped/) + +If you are a **user** using PipeCD to deploy your application/infrastructure: +- [User Guide](../user-guide/) +- [Examples](../user-guide/examples) + +If you want to be a **contributor**: +- [Contributor Guide](../contribution-guidelines/) diff --git a/docs/content/en/docs-v0.50.x/quickstart/_index.md b/docs/content/en/docs-v0.50.x/quickstart/_index.md new file mode 100644 index 0000000000..a19b1b3b38 --- /dev/null +++ b/docs/content/en/docs-v0.50.x/quickstart/_index.md @@ -0,0 +1,120 @@ +--- +title: "Quickstart" +linkTitle: "Quickstart" +weight: 3 +description: > + This page describes how to quickly get started with PipeCD on Kubernetes. 
PipeCD consists of two components: the Control Plane and the piped (agent) (ref: [PipeCD concepts](../concepts/)). The Control Plane can be thought of as a regular web service application that can be installed anywhere, while the piped agent is a single binary that can run as a pod in a Kubernetes cluster, a container on ECS, a serverless function like Lambda, Cloud Run, or a process running directly on your local machine.
+
+This page is a guideline for installing PipeCD (both components) into your Kubernetes cluster and deploying a "hello world" application to that same Kubernetes cluster.
To access the PipeCD Control Plane UI, run the following command + +```console +$ kubectl port-forward -n pipecd svc/pipecd 8080 +``` + +You can access the PipeCD console at [http://localhost:8080?project=quickstart](http://localhost:8080?project=quickstart) + +To login, you can use the configured static admin account as below: +- username: `hello-pipecd` +- password: `hello-pipecd` + +And you will access the main page of PipeCD Control Plane console, which looks like this + +![](/images/pipecd-control-plane-mainpage.png) + +For more about PipeCD control plane management, please check [Managing ControlPlane](/docs/user-guide/managing-controlplane/). + +#### 1.2. Installing Piped + +Next, in order to perform CD tasks, you need to install a Piped agent to the cluster. + +From your logged in tab, navigate to the PipeCD setting page at [http://localhost:8080/settings/piped?project=quickstart](http://localhost:8080/settings/piped?project=quickstart). + +You will find the `+ADD` button around the top left of this page, click there and insert information to register the `piped`. + +![](/images/quickstart-adding-piped.png) + +Click on the `Save` button, and then you can see the piped-id and secret-key. + +![](/images/quickstart-piped-registered.png) + +You need to copy two values, `Piped Id` and `Base64 Encoded Piped Key`, and fill in `` and `` respectively this below command + +```console +$ curl -s https://raw.githubusercontent.com/pipe-cd/pipecd/master/quickstart/manifests/piped.yaml | \ + sed -e 's///g' \ + -e 's///g' | \ + kubectl apply -n pipecd -f - +``` + +For more about Piped management, please check [Managing Piped](/docs/user-guide/managing-piped/). + +That's all! You are ready to use PipeCD to manage your application's deployment. 
+ +You can check the readiness of all PipeCD components via command + +```console +$ kubectl get pod -n pipecd +NAME READY STATUS RESTARTS AGE +pipecd-cache-56c7c65ddc-xqcst 1/1 Running 0 38m +pipecd-gateway-58589b55f9-9nbrv 1/1 Running 0 38m +pipecd-minio-677999d5bb-xnb78 1/1 Running 0 38m +pipecd-mysql-6fff49fbc7-hkvt4 1/1 Running 0 38m +pipecd-ops-779d6844db-nvbwn 1/1 Running 0 38m +pipecd-server-5769df7fcb-9hc45 1/1 Running 1 (38m ago) 38m +piped-8477b5d55d-74s5v 1/1 Running 0 97s +``` + +### 2. Deploy a Kubernetes application with PipeCD + +Above is all that is necessary to set up your own PipeCD (both control plane and agent), let's use the installed one to deploy your first Kubernetes application with PipeCD. + +Navigate to the `Applications` page, click on the `+ADD` button on the top left corner. + +Go to the `ADD FROM SUGGESTIONS` tab, then select: +- Piped: `dev` (you just registered) +- PlatformProvider: `kubernetes-default` + +You should see a lot of suggested applications. Select one of listed applications and click the `SAVE` button to register. + +![](/images/quickstart-adding-application-from-suggestions.png) + +After a bit, the first deployment is complete and will automatically sync the application to the state specified in the current Git commit. + +![](/images/quickstart-first-deployment.png) + +For more about manage applications' deployment with PipeCD, referrence to [Managing application](/docs/user-guide/managing-application/) + +### 3. Cleanup +When you’re finished experimenting with PipeCD quickstart mode, you can uninstall it using: + +``` console +$ kubectl delete ns pipecd +``` + +### What's next? + +To prepare your PipeCD for a production environment, please visit the [Installation](../installation/) guideline. For guidelines to use PipeCD to deploy your application in daily usage, please visit the [User guide](../user-guide/) docs. 
diff --git a/docs/content/en/docs-v0.50.x/releases/_index.md b/docs/content/en/docs-v0.50.x/releases/_index.md new file mode 100644 index 0000000000..25ac023e4b --- /dev/null +++ b/docs/content/en/docs-v0.50.x/releases/_index.md @@ -0,0 +1,6 @@ +--- +title: "Releases ⧉" +manualLink: "https://github.com/pipe-cd/pipecd/releases" +manualLinkTarget: "_blank" +weight: 99 +--- \ No newline at end of file diff --git a/docs/content/en/docs-v0.50.x/user-guide/_index.md b/docs/content/en/docs-v0.50.x/user-guide/_index.md new file mode 100755 index 0000000000..5482b97115 --- /dev/null +++ b/docs/content/en/docs-v0.50.x/user-guide/_index.md @@ -0,0 +1,9 @@ +--- +title: "User Guide" +linkTitle: "User Guide" +weight: 5 +description: > + Guideline to use PipeCD, from installation to common features for daily usage. +--- + + diff --git a/docs/content/en/docs-v0.50.x/user-guide/command-line-tool.md b/docs/content/en/docs-v0.50.x/user-guide/command-line-tool.md new file mode 100644 index 0000000000..5133fbc0de --- /dev/null +++ b/docs/content/en/docs-v0.50.x/user-guide/command-line-tool.md @@ -0,0 +1,394 @@ +--- +title: "Command-line tool: pipectl" +linkTitle: "Command-line tool: pipectl" +weight: 9 +description: > + This page describes how to install and use pipectl to manage PipeCD's resources. +--- + +Besides using web UI, PipeCD also provides a command-line tool, pipectl, which allows you to run commands against your project's resources. +You can use pipectl to add and sync applications, wait for a deployment status. + +## Installation + +The Pipectl command-line tool can be installed using one of the following methods: + +### Directly download and set up binary + +1. Download the appropriate version for your platform from [PipeCD Releases](https://github.com/pipe-cd/pipecd/releases). + + We recommend using the latest version of pipectl to avoid unforeseen issues. 
+ Run the following script: + + ``` console + # OS="darwin" or "linux" + curl -Lo ./pipectl https://github.com/pipe-cd/pipecd/releases/download/{{< blocks/latest_version >}}/pipectl_{{< blocks/latest_version >}}_{OS}_amd64 + ``` + +2. Make the pipectl binary executable. + + ``` console + chmod +x ./pipectl + ``` + +3. Move the binary to your PATH. + + ``` console + sudo mv ./pipectl /usr/local/bin/pipectl + ``` + +4. Test to ensure the version you installed is up-to-date. + + ``` console + pipectl version + ``` + +### Using Asdf + +About [Asdf](https://asdf-vm.com/) + +1. Add pipectl plugin to asdf. (If you have not yet `asdf add plugin add pipectl`.) + ```console + asdf plugin add pipectl + ``` + +2. Install pipectl. Available versions are [here](https://github.com/pipe-cd/pipecd/releases). + ```console + asdf install pipectl {VERSION} + ``` + +3. Set a version. + ```console + asdf global pipectl {VERSION} + ``` + +4. Test to ensure the version you installed is up-to-date. + + ``` console + pipectl version + ``` + +### Using Aqua + +About [Aqua](https://aquaproj.github.io/) + +1. Add pipectl to `aqua.yaml`. (If you want to select a version, use `aqua g -i -s pipe-cd/pipecd/pipectl`) + ```console + aqua g -i pipe-cd/pipecd/pipectl + ``` + +2. Install pipectl. + ```console + aqua i + ``` + +3. Test to ensure the version you installed is up-to-date. + ```console + pipectl version + ``` + +### Using Homebrew + +About [Homebrew](https://brew.sh/) + +1. Add the `pipe-cd/tap` and fetch new formulae from GitHub. + ```console + brew tap pipe-cd/tap + brew update + ``` + +2. Install pipectl. + ```console + brew install pipectl + ``` + +3. Test to ensure the version you installed is up-to-date. + ```console + pipectl version + ``` + +### Run in Docker container + +We are storing every version of docker image for pipectl on GitHub Container Registry. +Available versions are [here](https://github.com/pipe-cd/pipecd/releases). 
+ +``` +docker run --rm ghcr.io/pipe-cd/pipectl:{VERSION} -h +``` + +## Authentication + +In order for pipectl to authenticate with PipeCD's Control Plane, it needs an API key, which can be created from `Settings/API Key` tab on the web UI. +There are two kinds of key role: `READ_ONLY` and `READ_WRITE`. Depending on the command, it might require an appropriate role to execute. + +![](/images/settings-api-key.png) +

+Adding a new API key from Settings tab +

+ +When executing a command of pipectl you have to specify either a string of API key via `--api-key` flag or a path to the API key file via `--api-key-file` flag. + +## Usage + +### Help + +Run `help` to know the available commands: + +``` console +$ pipectl --help + +The command line tool for PipeCD. + +Usage: + pipectl [command] + +Available Commands: + application Manage application resources. + deployment Manage deployment resources. + encrypt Encrypt the plaintext entered in either stdin or the --input-file flag. + event Manage event resources. + help Help about any command + init Generate an application config (app.pipecd.yaml) easily and interactively. + piped Manage piped resources. + plan-preview Show plan preview against the specified commit. + version Print the information of current binary. + +Flags: + -h, --help help for pipectl + --log-encoding string The encoding type for logger [json|console|humanize]. (default "humanize") + --log-level string The minimum enabled logging level. (default "info") + --metrics Whether metrics is enabled or not. (default true) + --profile If true enables uploading the profiles to Stackdriver. + --profile-debug-logging If true enables logging debug information of profiler. + --profiler-credentials-file string The path to the credentials file using while sending profiles to Stackdriver. + +Use "pipectl [command] --help" for more information about a command. +``` + +### Adding a new application + +Add a new application into the project: + +``` console +pipectl application add \ + --address=CONTROL_PLANE_API_ADDRESS \ + --api-key=API_KEY \ + --app-name=simple \ + --app-kind=KUBERNETES \ + --piped-id=PIPED_ID \ + --platform-provider=kubernetes-default \ + --repo-id=examples \ + --app-dir=kubernetes/simple +``` + +Run `help` to know what command flags should be specified: + +``` console +$ pipectl application add --help + +Add a new application. 
+ +Usage: + pipectl application add [flags] + +Flags: + --app-dir string The relative path from the root of repository to the application directory. + --app-kind string The kind of application. (KUBERNETES|TERRAFORM|LAMBDA|CLOUDRUN) + --app-name string The application name. + --platform-provider string The platform provider name. One of the registered providers in the piped configuration. The previous name of this field is cloud-provider. + --config-file-name string The configuration file name. (default "app.pipecd.yaml") + --description string The description of the application. + -h, --help help for add + --piped-id string The ID of piped that should handle this application. + --repo-id string The repository ID. One the registered repositories in the piped configuration. + +Global Flags: + --address string The address to Control Plane api. + --api-key string The API key used while authenticating with Control Plane. + --api-key-file string Path to the file containing API key used while authenticating with Control Plane. + --cert-file string The path to the TLS certificate file. + --insecure Whether disabling transport security while connecting to Control Plane. + --log-encoding string The encoding type for logger [json|console|humanize]. (default "humanize") + --log-level string The minimum enabled logging level. (default "info") + --metrics Whether metrics is enabled or not. (default true) + --profile If true enables uploading the profiles to Stackdriver. + --profile-debug-logging If true enables logging debug information of profiler. + --profiler-credentials-file string The path to the credentials file using while sending profiles to Stackdriver. 
+``` + +### Syncing an application + +- Send a request to sync an application and exit immediately when the deployment is triggered: + + ``` console + pipectl application sync \ + --address={CONTROL_PLANE_API_ADDRESS} \ + --api-key={API_KEY} \ + --app-id={APPLICATION_ID} + ``` + +- Send a request to sync an application and wait until the triggered deployment reaches one of the specified statuses: + + ``` console + pipectl application sync \ + --address={CONTROL_PLANE_API_ADDRESS} \ + --api-key={API_KEY} \ + --app-id={APPLICATION_ID} \ + --wait-status=DEPLOYMENT_SUCCESS,DEPLOYMENT_FAILURE + ``` + +### Getting an application + +Display the information of a given application in JSON format: + +``` console +pipectl application get \ + --address={CONTROL_PLANE_API_ADDRESS} \ + --api-key={API_KEY} \ + --app-id={APPLICATION_ID} +``` + +### Listing applications + +Find and display the information of matching applications in JSON format: + +``` console +pipectl application list \ + --address={CONTROL_PLANE_API_ADDRESS} \ + --api-key={API_KEY} \ + --app-name={APPLICATION_NAME} \ + --app-kind=KUBERNETES \ +``` + +### Disable an application + +Disable an application with given id: + +``` console +pipectl application disable \ + --address={CONTROL_PLANE_API_ADDRESS} \ + --api-key={API_KEY} \ + --app-id={APPLICATION_ID} +``` + +### Deleting an application + +Delete an application with given id: + +``` console +pipectl application delete \ + --address={CONTROL_PLANE_API_ADDRESS} \ + --api-key={API_KEY} \ + --app-id={APPLICATION_ID} +``` + +### List deployments + +Show the list of deployments based on filters. 
+ +```console +pipectl deployment list \ + --address={CONTROL_PLANE_API_ADDRESS} \ + --api-key={API_KEY} \ + --app-id={APPLICATION_ID} +``` + +### Waiting a deployment status + +Wait until a given deployment reaches one of the specified statuses: + +``` console +pipectl deployment wait-status \ + --address={CONTROL_PLANE_API_ADDRESS} \ + --api-key={API_KEY} \ + --deployment-id={DEPLOYMENT_ID} \ + --status=DEPLOYMENT_SUCCESS +``` + +### Get deployment stages log + +Get deployment stages log. + +```console +pipectl deployment logs \ + --address={CONTROL_PLANE_API_ADDRESS} \ + --api-key={API_KEY} \ + --deployment-id={DEPLOYMENT_ID} +``` + +### Registering an event for EventWatcher + +Register an event that can be used by EventWatcher: + +``` console +pipectl event register \ + --address={CONTROL_PLANE_API_ADDRESS} \ + --api-key={API_KEY} \ + --name=example-image-pushed \ + --data=gcr.io/pipecd/example:v0.1.0 +``` + +See more on [usage of Event Watcher](./event-watcher.md). + +### Encrypting the data you want to use when deploying + +Encrypt the plaintext entered either in stdin or via the `--input-file` flag. + +You can encrypt it the same way you do [from the web](../managing-application/secret-management/#encrypting-secret-data). + +- From stdin: + + ``` console + pipectl encrypt \ + --address={CONTROL_PLANE_API_ADDRESS} \ + --api-key={API_KEY} \ + --piped-id={PIPED_ID} <{PATH_TO_SECRET_FILE} + ``` + +- From the `--input-file` flag: + + ``` console + pipectl encrypt \ + --address={CONTROL_PLANE_API_ADDRESS} \ + --api-key={API_KEY} \ + --piped-id={PIPED_ID} \ + --input-file={PATH_TO_SECRET_FILE} + ``` + +Note: The docs for pipectl available command is maybe outdated, we suggest users use the `help` command for the updated usage while using pipectl. + +### Generating an application config (app.pipecd.yaml) + + +Generate an app.pipecd.yaml interactively: + +``` console +$ pipectl init +Which platform? 
Enter the number [0]Kubernetes [1]ECS: 1 +Name of the application: myApp +... +``` + +After the above interaction, you can get the config YAML: + +```yaml +apiVersion: pipecd.dev/v1beta1 +kind: ECSApp +spec: + name: myApp + input: + serviceDefinitionFile: serviceDef.yaml + taskDefinitionFile: taskDef.yaml + targetGroups: + primary: + targetGroupArn: arn:aws:elasticloadbalancing:ap-northeast-1:123456789012:targetgroup/xxx/xxx + containerName: web + containerPort: 80 + description: Generated by `pipectl init`. See https://pipecd.dev/docs/user-guide/configuration-reference/ for more. +``` + +See [Feature Status](../feature-status/_index.md#pipectl-init). + +### You want more? + +We always want to add more needed commands into pipectl. Please let us know what command you want to add by creating issues in the [pipe-cd/pipecd](https://github.com/pipe-cd/pipecd/issues) repository. We also welcome your pull request to add the command. diff --git a/docs/content/en/docs-v0.50.x/user-guide/configuration-reference.md b/docs/content/en/docs-v0.50.x/user-guide/configuration-reference.md new file mode 100644 index 0000000000..f2fa64d848 --- /dev/null +++ b/docs/content/en/docs-v0.50.x/user-guide/configuration-reference.md @@ -0,0 +1,839 @@ +--- +title: "Configuration reference" +linkTitle: "Configuration reference" +weight: 11 +description: > + This page describes all configurable fields in the application configuration and analysis template. +--- + +## Kubernetes Application + +``` yaml +apiVersion: pipecd.dev/v1beta1 +kind: KubernetesApp +spec: + input: + pipeline: + ... +``` + +| Field | Type | Description | Required | +|-|-|-|-| +| name | string | The application name. | Yes (if you want to create PipeCD application through the application configuration file) | +| labels | map[string]string | Additional attributes to identify applications. | No | +| description | string | Notes on the Application. 
| No | +| input | [KubernetesDeploymentInput](#kubernetesdeploymentinput) | Input for Kubernetes deployment such as kubectl version, helm version, manifests filter... | No | +| trigger | [DeploymentTrigger](#deploymenttrigger) | Configuration for trigger used to determine should we trigger a new deployment or not. | No | +| planner | [DeploymentPlanner](#deploymentplanner) | Configuration for planner used while planning deployment. | No | +| commitMatcher | [CommitMatcher](#commitmatcher) | Forcibly use QuickSync or Pipeline when commit message matched the specified pattern. | No | +| quickSync | [KubernetesQuickSync](#kubernetesquicksync) | Configuration for quick sync. | No | +| pipeline | [Pipeline](#pipeline) | Pipeline for deploying progressively. | No | +| service | [KubernetesService](#kubernetesservice) | Which Kubernetes resource should be considered as the Service of application. Empty means the first Service resource will be used. | No | +| workloads | [][KubernetesWorkload](#kubernetesworkload) | Which Kubernetes resources should be considered as the Workloads of application. Empty means all Deployment resources. | No | +| trafficRouting | [KubernetesTrafficRouting](#kubernetestrafficrouting) | How to change traffic routing percentages. | No | +| encryption | [SecretEncryption](#secretencryption) | List of encrypted secrets and targets that should be decrypted before using. | No | +| attachment | [Attachment](#attachment) | List of attachment sources and targets that should be attached to manifests before using. | No | +| timeout | duration | The maximum length of time to execute deployment before giving up. Default is 6h. | No | +| notification | [DeploymentNotification](#deploymentnotification) | Additional configuration used while sending notification to external services. | No | +| postSync | [PostSync](#postsync) | Additional configuration used as extra actions once the deployment is triggered. 
| No | +| variantLabel | [KubernetesVariantLabel](#kubernetesvariantlabel) | The label will be configured to variant manifests used to distinguish them. | No | +| eventWatcher | [][EventWatcher](#eventwatcher) | List of configurations for event watcher. | No | +| driftDetection | [DriftDetection](#driftdetection) | Configuration for drift detection. | No | + +### Annotations + +Kubernetes resources can be managed by some annotations provided by PipeCD. + +| Annotation key | Target resource(s) | Possible values | Description | +|-|-|-|-| +| `pipecd.dev/ignore-drift-detection` | any | "true" | Whether the drift detection should ignore this resource. | +| `pipecd.dev/server-side-apply` | any | "true" | Use server side apply instead of client side apply. | +| `pipecd.dev/sync-by-replace` | any | "enabled" | Use `replace` instead of `apply`. | +| `pipecd.dev/force-sync-by-replace` | any | "enabled" | Use `replace --force` instead of `apply`. | + +## Terraform application + +``` yaml +apiVersion: pipecd.dev/v1beta1 +kind: TerraformApp +spec: + input: + pipeline: + ... +``` + +| Field | Type | Description | Required | +|-|-|-|-| +| name | string | The application name. | Yes if you set the application through the application configuration file | +| labels | map[string]string | Additional attributes to identify applications. | No | +| description | string | Notes on the Application. | No | +| input | [TerraformDeploymentInput](#terraformdeploymentinput) | Input for Terraform deployment such as terraform version, workspace... | No | +| trigger | [DeploymentTrigger](#deploymenttrigger) | Configuration for trigger used to determine should we trigger a new deployment or not. | No | +| planner | [DeploymentPlanner](#deploymentplanner) | Configuration for planner used while planning deployment. | No | +| quickSync | [TerraformQuickSync](#terraformquicksync) | Configuration for quick sync. | No | +| pipeline | [Pipeline](#pipeline) | Pipeline for deploying progressively. 
| No | +| encryption | [SecretEncryption](#secretencryption) | List of encrypted secrets and targets that should be decrypted before using. | No | +| attachment | [Attachment](#attachment) | List of attachment sources and targets that should be attached to manifests before using. | No | +| timeout | duration | The maximum length of time to execute deployment before giving up. Default is 6h. | No | +| notification | [DeploymentNotification](#deploymentnotification) | Additional configuration used while sending notification to external services. | No | +| postSync | [PostSync](#postsync) | Additional configuration used as extra actions once the deployment is triggered. | No | +| eventWatcher | [][EventWatcher](#eventwatcher) | List of configurations for event watcher. | No | + +## Cloud Run application + +``` yaml +apiVersion: pipecd.dev/v1beta1 +kind: CloudRunApp +spec: + input: + pipeline: + ... +``` + +| Field | Type | Description | Required | +|-|-|-|-| +| name | string | The application name. | Yes if you set the application through the application configuration file | +| labels | map[string]string | Additional attributes to identify applications. | No | +| description | string | Notes on the Application. | No | +| input | [CloudRunDeploymentInput](#cloudrundeploymentinput) | Input for Cloud Run deployment such as docker image... | No | +| trigger | [DeploymentTrigger](#deploymenttrigger) | Configuration for trigger used to determine should we trigger a new deployment or not. | No | +| planner | [DeploymentPlanner](#deploymentplanner) | Configuration for planner used while planning deployment. | No | +| quickSync | [CloudRunQuickSync](#cloudrunquicksync) | Configuration for quick sync. | No | +| pipeline | [Pipeline](#pipeline) | Pipeline for deploying progressively. | No | +| encryption | [SecretEncryption](#secretencryption) | List of encrypted secrets and targets that should be decrypted before using. 
| No | +| attachment | [Attachment](#attachment) | List of attachment sources and targets that should be attached to manifests before using. | No | +| timeout | duration | The maximum length of time to execute deployment before giving up. Default is 6h. | No | +| notification | [DeploymentNotification](#deploymentnotification) | Additional configuration used while sending notification to external services. | No | +| postSync | [PostSync](#postsync) | Additional configuration used as extra actions once the deployment is triggered. | No | +| eventWatcher | [][EventWatcher](#eventwatcher) | List of configurations for event watcher. | No | + +## Lambda application + +``` yaml +apiVersion: pipecd.dev/v1beta1 +kind: LambdaApp +spec: + pipeline: + ... +``` + +| Field | Type | Description | Required | +|-|-|-|-| +| name | string | The application name. | Yes if you set the application through the application configuration file | +| labels | map[string]string | Additional attributes to identify applications. | No | +| description | string | Notes on the Application. | No | +| input | [LambdaDeploymentInput](#lambdadeploymentinput) | Input for Lambda deployment such as path to function manifest file... | No | +| architectures | []string| Specific architecture for which a function supports (Default x86_64). | No | +| trigger | [DeploymentTrigger](#deploymenttrigger) | Configuration for trigger used to determine should we trigger a new deployment or not. | No | +| planner | [DeploymentPlanner](#deploymentplanner) | Configuration for planner used while planning deployment. | No | +| quickSync | [LambdaQuickSync](#lambdaquicksync) | Configuration for quick sync. | No | +| pipeline | [Pipeline](#pipeline) | Pipeline for deploying progressively. | No | +| encryption | [SecretEncryption](#secretencryption) | List of encrypted secrets and targets that should be decrypted before using. 
| No | +| attachment | [Attachment](#attachment) | List of attachment sources and targets that should be attached to manifests before using. | No | +| timeout | duration | The maximum length of time to execute deployment before giving up. Default is 6h. | No | +| notification | [DeploymentNotification](#deploymentnotification) | Additional configuration used while sending notification to external services. | No | +| postSync | [PostSync](#postsync) | Additional configuration used as extra actions once the deployment is triggered. | No | +| eventWatcher | [][EventWatcher](#eventwatcher) | List of configurations for event watcher. | No | + +## ECS application + +``` yaml +apiVersion: pipecd.dev/v1beta1 +kind: ECSApp +spec: + input: + pipeline: + ... +``` + +| Field | Type | Description | Required | +|-|-|-|-| +| name | string | The application name. | Yes if you set the application through the application configuration file | +| labels | map[string]string | Additional attributes to identify applications. | No | +| description | string | Notes on the Application. | No | +| input | [ECSDeploymentInput](#ecsdeploymentinput) | Input for ECS deployment such as path to TaskDefinition, Service... | No | +| trigger | [DeploymentTrigger](#deploymenttrigger) | Configuration for trigger used to determine should we trigger a new deployment or not. | No | +| planner | [DeploymentPlanner](#deploymentplanner) | Configuration for planner used while planning deployment. | No | +| quickSync | [ECSQuickSync](#ecsquicksync) | Configuration for quick sync. | No | +| pipeline | [Pipeline](#pipeline) | Pipeline for deploying progressively. | No | +| encryption | [SecretEncryption](#secretencryption) | List of encrypted secrets and targets that should be decrypted before using. | No | +| attachment | [Attachment](#attachment) | List of attachment sources and targets that should be attached to manifests before using. 
| No | +| timeout | duration | The maximum length of time to execute deployment before giving up. Default is 6h. | No | +| notification | [DeploymentNotification](#deploymentnotification) | Additional configuration used while sending notification to external services. | No | +| postSync | [PostSync](#postsync) | Additional configuration used as extra actions once the deployment is triggered. | No | +| eventWatcher | [][EventWatcher](#eventwatcher) | List of configurations for event watcher. | No | + +## Analysis Template Configuration + +``` yaml +apiVersion: pipecd.dev/v1beta1 +kind: AnalysisTemplate +spec: + metrics: + grpc_error_rate_percentage: + interval: 1m + provider: prometheus-dev + failureLimit: 1 + expected: + max: 10 + query: awesome_query +``` + +| Field | Type | Description | Required | +|-|-|-|-| +| metrics | map[string][AnalysisMetrics](#analysismetrics) | Template for metrics. | No | + +## Event Watcher Configuration (deprecated) + +```yaml +apiVersion: pipecd.dev/v1beta1 +kind: EventWatcher +spec: + events: + - name: helloworld-image-update + replacements: + - file: helloworld/deployment.yaml + yamlField: $.spec.template.spec.containers[0].image +``` + +| Field | Type | Description | Required | +|-|-|-|-| +| name | string | The event name. | Yes | +| labels | map[string]string | Additional attributes of event. This can make an event definition unique even if the one with the same name exists. | No | +| replacements | [][EventWatcherReplacement](#eventwatcherreplacement) | List of places where will be replaced when the new event matches. | Yes | + +### EventWatcherReplacement +One of `yamlField` or `regex` is required. + +| Field | Type | Description | Required | +|-|-|-|-| +| file | string | The relative path from the repository root to the file to be updated. | Yes | +| yamlField | string | The yaml path to the field to be updated. It requires to start with `$` which represents the root element. e.g. `$.foo.bar[0].baz`. 
| No |
+| regex | string | The regex string that specifies what should be replaced. Only the first capturing group enclosed by `()` will be replaced with the new value. e.g. `host.xz/foo/bar:(v[0-9].[0-9].[0-9])` | No |
+
+## CommitMatcher
+
+| Field | Type | Description | Required |
+|-|-|-|-|
+| quickSync | string | Regular expression string to forcibly do QuickSync when it matches the commit message. | No |
+| pipeline | string | Regular expression string to forcibly do Pipeline when it matches the commit message. | No |
+
+## SecretEncryption
+
+| Field | Type | Description | Required |
+|-|-|-|-|
+| encryptedSecrets | map[string]string | List of encrypted secrets. | No |
+| decryptionTargets | []string | List of files to be decrypted before using. | No |
+
+## Attachment
+
+| Field | Type | Description | Required |
+|-|-|-|-|
+| sources | map[string]string | List of files to attach, where each key is the name used to refer to that file. | No |
+| targets | []string | List of files which should contain the attachments. | No |
+
+## DeploymentPlanner
+
+| Field | Type | Description | Required |
+|-|-|-|-|
+| alwaysUsePipeline | bool | Always use the defined pipeline to deploy the application in all deployments. Default is `false`. | No |
+
+## DeploymentTrigger
+
+| Field | Type | Description | Required |
+|-|-|-|-|
+| onCommit | [OnCommit](#oncommit) | Controls triggering new deployment when new Git commits touched the application. | No |
+| onCommand | [OnCommand](#oncommand) | Controls triggering new deployment when received a new `SYNC` command. | No |
+| onOutOfSync | [OnOutOfSync](#onoutofsync) | Controls triggering new deployment when application is at `OUT_OF_SYNC` state. | No |
+| onChain | [OnChain](#onchain) | Controls triggering new deployment when the application is counted as a node of some chains.
| No |
+
+### OnCommit
+
+| Field | Type | Description | Required |
+|-|-|-|-|
+| disabled | bool | Whether to exclude application from triggering target when new Git commits touched it. Default is `false`. | No |
+| paths | []string | List of directories or files where any changes of them will be considered as touching the application. Regular expression can be used. Empty means watching all changes under the application directory. | No |
+| ignores | []string | List of directories or files where any changes of them will NOT be considered as touching the application. Regular expression can be used. This config has a higher priority compared to `paths`. | No |
+
+### OnCommand
+
+| Field | Type | Description | Required |
+|-|-|-|-|
+| disabled | bool | Whether to exclude application from triggering target when received a new `SYNC` command. Default is `false`. | No |
+
+### OnOutOfSync
+
+| Field | Type | Description | Required |
+|-|-|-|-|
+| disabled | bool | Whether to exclude application from triggering target when application is at `OUT_OF_SYNC` state. Default is `true`. | No |
+| minWindow | duration | Minimum amount of time must be elapsed since the last deployment. This can be used to avoid triggering unnecessary continuous deployments based on `OUT_OF_SYNC` status. Default is `5m`. | No |
+
+### OnChain
+
+| Field | Type | Description | Required |
+|-|-|-|-|
+| disabled | bool | Whether to exclude application from triggering target when application is counted as a node of some chains. Default is `true`. | No |
+
+## Pipeline
+
+| Field | Type | Description | Required |
+|-|-|-|-|
+| stages | [][PipelineStage](#pipelinestage) | List of deployment pipeline stages. | No |
+
+### PipelineStage
+
+| Field | Type | Description | Required |
+|-|-|-|-|
+| id | string | The unique ID of the stage. | No |
+| name | string | One of the provided stage names. | Yes |
+| desc | string | The description about the stage.
| No | +| timeout | duration | The maximum time the stage can be taken to run. | No | +| with | [StageOptions](#stageoptions) | Specific configuration for the stage. This must be one of these [StageOptions](#stageoptions). | No | + +## DeploymentNotification + +| Field | Type | Description | Required | +|-|-|-|-| +| mentions | [][NotificationMention](#notificationmention) | List of users to be notified for each event. | No | + +### NotificationMention + +| Field | Type | Description | Required | +|-|-|-|-| +| event | string | The event to be notified to users. | Yes | +| slack | []string | Deprecated: Please use `slackUsers` instead. List of user IDs for mentioning in Slack. See [here](https://api.slack.com/reference/surfaces/formatting#mentioning-users) for more information on how to check them. | No | +| slackUsers | []string | List of user IDs for mentioning in Slack. See [here](https://api.slack.com/reference/surfaces/formatting#mentioning-users) for more information on how to check them. | No | +| slackGroups | []string | List of group IDs for mentioning in Slack. See [here](https://api.slack.com/reference/surfaces/formatting#mentioning-groups) for more information on how to check them. | No | + +## KubernetesDeploymentInput + +| Field | Type | Description | Required | +|-|-|-|-| +| manifests | []string | List of manifest files in the application directory used to deploy. Empty means all manifest files in the directory will be used. | No | +| kubectlVersion | string | Version of kubectl will be used. Empty means the version set on [piped config](../managing-piped/configuration-reference/#platformproviderkubernetesconfig) or [default version](https://github.com/pipe-cd/pipecd/blob/master/pkg/app/piped/toolregistry/install.go#L29) will be used. | No | +| kustomizeVersion | string | Version of kustomize will be used. Empty means the [default version](https://github.com/pipe-cd/pipecd/blob/master/pkg/app/piped/toolregistry/install.go#L30) will be used. 
| No | +| kustomizeOptions | map[string]string | List of options that should be used by Kustomize commands. | No | +| helmVersion | string | Version of helm will be used. Empty means the [default version](https://github.com/pipe-cd/pipecd/blob/master/pkg/app/piped/toolregistry/install.go#L31) will be used. | No | +| helmChart | [HelmChart](#helmchart) | Where to fetch helm chart. | No | +| helmOptions | [HelmOptions](#helmoptions) | Configurable parameters for helm commands. | No | +| namespace | string | The namespace where manifests will be applied. | No | +| autoRollback | bool | Automatically reverts all deployment changes on failure. Default is `true`. | No | +| autoCreateNamespace | bool | Automatically create a new namespace if it does not exist. Default is `false`. | No | + +### HelmChart + +| Field | Type | Description | Required | +|-|-|-|-| +| gitRemote | string | Git remote address where the chart is placing. Empty means the same repository. | No | +| ref | string | The commit SHA or tag value. Only valid when gitRemote is not empty. | No | +| path | string | Relative path from the repository root to the chart directory. | No | +| repository | string | The name of a registered Helm Chart Repository. | No | +| name | string | The chart name. | No | +| version | string | The chart version. | No | + +### HelmOptions + +| Field | Type | Description | Required | +|-|-|-|-| +| releaseName | string | The release name of helm deployment. By default, the release name is equal to the application name. | No | +| setValues | map[string]string | List of values. | No | +| valueFiles | []string | List of value files should be loaded. Only local files stored under the application directory or remote files served at the http(s) endpoint are allowed. | No | +| setFiles | map[string]string | List of file path for values. | No | +| apiVersions | []string | Kubernetes api versions used for Capabilities.APIVersions. 
| No | +| kubeVersion | string | Kubernetes version used for Capabilities.KubeVersion. | No | + +## KubernetesVariantLabel + +| Field | Type | Description | Required | +|-|-|-|-| +| key | string | The key of the label. Default is `pipecd.dev/variant`. | No | +| primaryValue | string | The label value for PRIMARY variant. Default is `primary`. | No | +| canaryValue | string | The label value for CANARY variant. Default is `canary`. | No | +| baselineValue | string | The label value for BASELINE variant. Default is `baseline`. | No | + +## KubernetesQuickSync + +| Field | Type | Description | Required | +|-|-|-|-| +| addVariantLabelToSelector | bool | Whether the PRIMARY variant label should be added to manifests if they were missing. Default is `false`. | No | +| prune | bool | Whether the resources that are no longer defined in Git should be removed or not. Default is `false` | No | + +## KubernetesService + +| Field | Type | Description | Required | +|-|-|-|-| +| name | string | The name of Service manifest. | No | + +## KubernetesWorkload + +| Field | Type | Description | Required | +|-|-|-|-| +| kind | string | The kind name of workload manifests. Currently, only `Deployment` is supported. In the future, we also want to support `ReplicationController`, `DaemonSet`, `StatefulSet`. | No | +| name | string | The name of workload manifest. | No | + +## KubernetesTrafficRouting + +| Field | Type | Description | Required | +|-|-|-|-| +| method | string | Which traffic routing method will be used. Available values are `istio`, `smi`, `podselector`. Default is `podselector`. | No | +| istio | [IstioTrafficRouting](#istiotrafficrouting)| Istio configuration when the method is `istio`. | No | + +### IstioTrafficRouting + +| Field | Type | Description | Required | +|-|-|-|-| +| editableRoutes | []string | List of routes in the VirtualService that can be changed to update traffic routing. Empty means all routes should be updated. | No | +| host | string | The service host. 
| No | +| virtualService | [IstioVirtualService](#istiovirtualservice) | The reference to VirtualService manifest. Empty means the first VirtualService resource will be used. | No | + +#### IstioVirtualService + +| Field | Type | Description | Required | +|-|-|-|-| +| name | string | The name of VirtualService manifest. | No | + +## TerraformDeploymentInput + +| Field | Type | Description | Required | +|-|-|-|-| +| workspace | string | The terraform workspace name. Empty means `default` workspace. | No | +| terraformVersion | string | The version of terraform should be used. Empty means the pre-installed version will be used. | No | +| vars | []string | List of variables that will be set directly on terraform commands with `-var` flag. The variable must be formatted by `key=value`. | No | +| varFiles | []string | List of variable files that will be set on terraform commands with `-var-file` flag. | No | +| commandFlags | [TerraformCommandFlags](#terraformcommandflags) | List of additional flags will be used while executing terraform commands. | No | +| commandEnvs | [TerraformCommandEnvs](#terraformcommandenvs) | List of additional environment variables will be used while executing terraform commands. | No | +| autoRollback | bool | Automatically reverts all changes from all stages when one of them failed. | No | + +### TerraformCommandFlags + +| Field | Type | Description | Required | +|-|-|-|-| +| shared | []string | List of additional flags used for all Terraform commands. | No | +| init | []string | List of additional flags used for Terraform `init` command. | No | +| plan | []string | List of additional flags used for Terraform `plan` command. | No | +| apply | []string | List of additional flags used for Terraform `apply` command. | No | + +### TerraformCommandEnvs + +| Field | Type | Description | Required | +|-|-|-|-| +| shared | []string | List of additional environment variables used for all Terraform commands. 
| No | +| init | []string | List of additional environment variables used for Terraform `init` command. | No | +| plan | []string | List of additional environment variables used for Terraform `plan` command. | No | +| apply | []string | List of additional environment variables used for Terraform `apply` command. | No | + +## TerraformQuickSync + +| Field | Type | Description | Required | +|-|-|-|-| +| retries | int | How many times to retry applying terraform changes. Default is `0`. | No | + +## CloudRunDeploymentInput + +| Field | Type | Description | Required | +|-|-|-|-| +| serviceManifestFile | string | The name of service manifest file placing in application directory. Default is `service.yaml`. | No | +| autoRollback | bool | Automatically reverts to the previous state when the deployment is failed. Default is `true`. | No | + +## CloudRunQuickSync + +| Field | Type | Description | Required | +|-|-|-|-| + +## LambdaDeploymentInput + +| Field | Type | Description | Required | +|-|-|-|-| +| functionManifestFile | string | The name of function manifest file placing in application directory. Default is `function.yaml`. | No | +| autoRollback | bool | Automatically reverts to the previous state when the deployment is failed. Default is `true`. | No | + +### Specific function.yaml + +One of `image`, `s3Bucket`, or `source` is required. + +- If you use `s3Bucket`, `s3Key` and `s3ObjectVersion` are required. + +- If you use `s3Bucket` or `source`, `handler` and `runtime` are required. + +See [Configuring Lambda application](../managing-application/defining-app-configuration/lambda) for more details. 
+ +| Field | Type | Description | Required | +|------------------|------------------|------------------------------------|----------| +| name | string | Name of the Lambda function | Yes | +| role | string | IAM role ARN | Yes | +| image | string | URI of the container image | No | +| s3Bucket | string | S3 bucket name for code package | No | +| s3Key | string | S3 key for code package | No | +| s3ObjectVersion | string | S3 object version for code package | No | +| source | [source](#source) | Git settings | No | +| handler | string | Lambda function handler | No | +| runtime | string | Runtime environment | No | +| architectures | [][Architecture](#architecture) | Supported architectures | No | +| ephemeralStorage | [EphemeralStorage](#ephemeralstorage)| Ephemeral storage configuration | No | +| memory | int32 | Memory allocation (in MB) | Yes | +| timeout | int32 | Function timeout (in seconds) | Yes | +| tags | map[string]string| Key-value pairs for tags | No | +| environments | map[string]string| Environment variables | No | +| vpcConfig | [VPCConfig](#vpcconfig) | VPC configuration | No | +| layers | []string | ARNs of [layers](https://docs.aws.amazon.com/lambda/latest/dg/chapter-layers.html) to depend on | No | + +#### Source + +| Field | Type | Description | Required | +|-------|--------|--------------------------|----------| +| git | string | Git repository URL | Yes | +| ref | string | Git branch/tag/reference| Yes | +| path | string | Path within the repository | Yes | + +#### Architecture + +| Field | Type | Description | Required | +|-------|--------|------------------------|----------| +| name | string | Name of the architecture | Yes | + +#### EphemeralStorage + +| Field | Type | Description | Required | +|-------|-------|------------------------------|----------| +| size | int32 | Size of the ephemeral storage| Yes | + +#### VPCConfig + +| Field | Type | Description | Required | 
+|-----------------|----------|-----------------------------|----------| +| securityGroupIds| []string | List of security group IDs | No | +| subnetIds | []string | List of subnet IDs | No | + + +## LambdaQuickSync + +| Field | Type | Description | Required | +|-|-|-|-| + +## ECSDeploymentInput + +| Field | Type | Description | Required | +|-|-|-|-| +| serviceDefinitionFile | string | The path ECS Service configuration file. Allow file in both `yaml` and `json` format. The default value is `service.json`. See [here](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service_definition_parameters.html) and [Restrictions](#restrictions-of-service-definition) for parameters.| No | +| taskDefinitionFile | string | The path to ECS TaskDefinition configuration file. Allow file in both `yaml` and `json` format. The default value is `taskdef.json`. See [here](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definition_parameters.html) and [Restrictions](#restrictions-of-task-definition) for parameters. | No | +| targetGroups | [ECSTargetGroupInput](#ecstargetgroupinput) | The target groups configuration, will be used to routing traffic to created task sets. | Yes (if you want to perform progressive delivery) | +| runStandaloneTask | bool | Run standalone tasks during deployments. About standalone task, see [here](https://docs.aws.amazon.com/AmazonECS/latest/userguide/ecs_run_task-v2.html). The default value is `true`. | +| accessType | string | How the ECS service is accessed. One of `ELB` or `SERVICE_DISCOVERY`. See examples [here](https://github.com/pipe-cd/examples/tree/master/ecs/servicediscovery/simple). The default value is `ELB`. | + +### Restrictions of Service Definition + +There are some restrictions in configuring a service definition file. + +- As long as `desiredCount` is 0 or not set, `desiredCount` of your service will NOT be updated in deployments. 
+ - If `desiredCount` is 0 or not set for a new service, the service's `desiredCount` will be 0.
+- `capacityProviderStrategy` is not supported.
+- `clientToken` is not supported.
+- `deploymentController` is required and must be `EXTERNAL`.
+- `loadBalancers` is not supported. Use `targetGroups` in [ECSDeploymentInput](#ecsdeploymentinput) instead.
+- `platformFamily` is not supported.
+- `propagateTags` is always set as `SERVICE`.
+- `taskDefinition` is not supported. PipeCD uses the definition in `taskDefinitionFile` in [ECSDeploymentInput](#ecsdeploymentinput).
+
+### Restrictions of Task Definition
+
+There are some restrictions in configuring a task definition file.
+
+- `placementConstraints` is not supported.
+- `proxyConfiguration` is not supported.
+- `tags` is not supported.
+
+### ECSTargetGroupInput
+
+| Field | Type | Description | Required |
+|-|-|-|-|
+| primary | [ECSTargetGroupObject](#ecstargetgroupobject) | The PRIMARY target group, will be used to register the PRIMARY ECS task set. | Yes |
+| canary | [ECSTargetGroupObject](#ecstargetgroupobject) | The CANARY target group, will be used to register the CANARY ECS task set if exist. It's required to enable PipeCD to perform the multi-stage deployment. | No |
+
+#### ECSTargetGroupObject
+
+| Field | Type | Description | Required |
+|-|-|-|-|
+| targetGroupArn | string | The full Amazon Resource Name (ARN) of the Elastic Load Balancing target group or groups associated with a service or task set. | Yes |
+| containerName | string | The name of the container (as it appears in a container definition) to associate with the load balancer | Yes |
+| containerPort | int | The port on the container to associate with the load balancer. | Yes |
+| LoadBalancerName | string | The name of the load balancer to associate with the Amazon ECS service or task set. | No |
+
+Note: The available values are identical to those found in the aws-sdk-go-v2 Types.LoadBalancer.
For more details, please refer to [this link](https://pkg.go.dev/github.com/aws/aws-sdk-go-v2/service/ecs/types#LoadBalancer) .
+
+## ECSQuickSync
+
+| Field | Type | Description | Required |
+|-|-|-|-|
+| recreate | bool | Whether to delete old tasksets before creating new ones or not. Default to false. | No |
+
+## AnalysisMetrics
+
+| Field | Type | Description | Required |
+|-|-|-|-|
+| provider | string | The unique name of provider defined in the Piped Configuration. | Yes |
+| strategy | string | The strategy name. One of `THRESHOLD` or `PREVIOUS` or `CANARY_BASELINE` or `CANARY_PRIMARY` is available. Defaults to `THRESHOLD`. | No |
+| query | string | A query performed against the [Analysis Provider](../../concepts/#analysis-provider). The stage will be skipped if no data points were returned. | Yes |
+| expected | [AnalysisExpected](#analysisexpected) | The statically defined expected query result. This field is ignored if there was no data point as a result of the query. | Yes if the strategy is `THRESHOLD` |
+| interval | duration | Run a query at specified intervals. | Yes |
+| failureLimit | int | Acceptable number of failures. e.g. If 1 is set, the `ANALYSIS` stage will end with failure after two queries results failed. Defaults to 1. | No |
+| skipOnNoData | bool | If true, it considers as a success when no data returned from the analysis provider. Defaults to false. | No |
+| deviation | string | The stage fails on deviation in the specified direction. One of `LOW` or `HIGH` or `EITHER` is available. This can be used only for `PREVIOUS`, `CANARY_BASELINE` or `CANARY_PRIMARY`. Defaults to `EITHER`. | No |
+| baselineArgs | map[string][string] | The custom arguments to be populated for the Baseline query. They can be referred as `{{ .VariantCustomArgs.xxx }}`. | No |
+| canaryArgs | map[string][string] | The custom arguments to be populated for the Canary query. They can be referred as `{{ .VariantCustomArgs.xxx }}`. 
| No |
+| primaryArgs | map[string][string] | The custom arguments to be populated for the Primary query. They can be referred as `{{ .VariantCustomArgs.xxx }}`. | No |
+| timeout | duration | How long after which the query times out. | No |
+| template | [AnalysisTemplateRef](#analysistemplateref) | Reference to the template to be used. | No |
+
+
+### AnalysisExpected
+
+| Field | Type | Description | Required |
+|-|-|-|-|
+| min | float64 | Failure, if the query result is less than this value. | No |
+| max | float64 | Failure, if the query result is larger than this value. | No |
+
+### AnalysisTemplateRef
+
+| Field | Type | Description | Required |
+|-|-|-|-|
+| name | string | The template name to refer. | Yes |
+| appArgs | map[string]string | The arguments for custom-args. | No |
+
+## AnalysisLog
+
+| Field | Type | Description | Required |
+|-|-|-|-|
+
+## AnalysisHttp
+
+| Field | Type | Description | Required |
+|-|-|-|-|
+
+## SkipOptions
+
+| Field | Type | Description | Required |
+|-|-|-|-|
+| commitMessagePrefixes | []string | List of commit message's prefixes. The stage will be skipped when the prefix of the commit's message matches any of them. Empty means the stage will not be skipped by this condition. | No |
+| paths | []string | List of paths to directories or files. When all commit changes match them, the stage will be skipped. Empty means the stage will not be skipped by this condition. Regular expression can be used. | No |
+
+## StageOptions
+
+### KubernetesPrimaryRolloutStageOptions
+
+| Field | Type | Description | Required |
+|-|-|-|-|
+| suffix | string | Suffix that should be used when naming the PRIMARY variant's resources. Default is `primary`. | No |
+| createService | bool | Whether the PRIMARY service should be created. Default is `false`. | No |
+| addVariantLabelToSelector | bool | Whether the PRIMARY variant label should be added to manifests if they were missing. Default is `false`. 
| No |
+| prune | bool | Whether the resources that are no longer defined in Git should be removed or not. Default is `false` | No |
+
+### KubernetesCanaryRolloutStageOptions
+
+| Field | Type | Description | Required |
+|-|-|-|-|
+| replicas | int | How many pods for CANARY workloads. Default is `1` pod. Alternatively, can be specified a string suffixed by "%" to indicate a percentage value compared to the pod number of PRIMARY | No |
+| suffix | string | Suffix that should be used when naming the CANARY variant's resources. Default is `canary`. | No |
+| createService | bool | Whether the CANARY service should be created. Default is `false`. | No |
+| patches | [][KubernetesResourcePatch](#kubernetesresourcepatch) | List of patches used to customize manifests for CANARY variant. | No |
+
+### KubernetesCanaryCleanStageOptions
+
+| Field | Type | Description | Required |
+|-|-|-|-|
+| | | | |
+
+### KubernetesBaselineRolloutStageOptions
+
+| Field | Type | Description | Required |
+|-|-|-|-|
+| replicas | int | How many pods for BASELINE workloads. Default is `1` pod. Alternatively, can be specified a string suffixed by "%" to indicate a percentage value compared to the pod number of PRIMARY | No |
+| suffix | string | Suffix that should be used when naming the BASELINE variant's resources. Default is `baseline`. | No |
+| createService | bool | Whether the BASELINE service should be created. Default is `false`. | No |
+
+### KubernetesBaselineCleanStageOptions
+
+| Field | Type | Description | Required |
+|-|-|-|-|
+| | | | |
+
+### KubernetesTrafficRoutingStageOptions
+This stage routes traffic with the method specified in [KubernetesTrafficRouting](#kubernetestrafficrouting).
+When using `podselector` method as a traffic routing method, routing is done by updating the Service selector.
+Therefore, note that all traffic will be routed to the primary if the primary variant's service is rolled out by running the `K8S_PRIMARY_ROLLOUT` stage.
+ +| Field | Type | Description | Required | +|-|-|-|-| +| all | string | Which variant should receive all traffic. Available values are "primary", "canary", "baseline". Default is `primary`. | No | +| primary | [Percentage](#percentage) | The percentage of traffic should be routed to PRIMARY variant. | No | +| canary | [Percentage](#percentage) | The percentage of traffic should be routed to CANARY variant. | No | +| baseline | [Percentage](#percentage) | The percentage of traffic should be routed to BASELINE variant. | No | + +### TerraformPlanStageOptions + +| Field | Type | Description | Required | +|-|-|-|-| +| exitOnNoChanges | bool | Whether exiting the pipeline when the result has no changes | No | + +### TerraformApplyStageOptions + +| Field | Type | Description | Required | +|-|-|-|-| +| retries | int | How many times to retry applying terraform changes. Default is `0`. | No | + +### CloudRunPromoteStageOptions + +| Field | Type | Description | Required | +|-|-|-|-| +| percent | [Percentage](#percentage) | Percentage of traffic should be routed to the new version. | No | + +### LambdaCanaryRolloutStageOptions + +| Field | Type | Description | Required | +|-|-|-|-| + +### LambdaPromoteStageOptions + +| Field | Type | Description | Required | +|-|-|-|-| +| percent | [Percentage](#percentage) | Percentage of traffic should be routed to the new version. | No | + +### ECSPrimaryRolloutStageOptions + +| Field | Type | Description | Required | +|-|-|-|-| + +### ECSCanaryRolloutStageOptions + +| Field | Type | Description | Required | +|-|-|-|-| +| scale | [Percentage](#percentage) | The percentage of workloads should be rolled out as CANARY variant's workload. | Yes | + +### ECSTrafficRoutingStageOptions + +| Field | Type | Description | Required | +|-|-|-|-| +| primary | [Percentage](#percentage) | The percentage of traffic should be routed to PRIMARY variant. 
| No | +| canary | [Percentage](#percentage) | The percentage of traffic should be routed to CANARY variant. | No | + +Note: By default, the sum of traffic is rounded to 100. If both `primary` and `canary` numbers are not set, the PRIMARY variant will receive 100% while the CANARY variant will receive 0% of the traffic. + +### AnalysisStageOptions + +| Field | Type | Description | Required | +|-|-|-|-| +| duration | duration | Maximum time to perform the analysis. | Yes | +| metrics | [][AnalysisMetrics](#analysismetrics) | Configuration for analysis by metrics. | No | +| skipOn | [SkipOptions](#skipoptions) | When to skip this stage. | No | + +### WaitStageOptions + +| Field | Type | Description | Required | +|-|-|-|-| +| duration | duration | Time to wait. | Yes | +| skipOn | [SkipOptions](#skipoptions) | When to skip this stage. | No | + +### WaitApprovalStageOptions + +| Field | Type | Description | Required | +|-|-|-|-| +| timeout | duration | The maximum length of time to wait before giving up. Default is 6h. | No | +| approvers | []string | List of username who has permission to approve. | Yes | +| minApproverNum | int | Number of minimum needed approvals to make this stage complete. Default is 1. | No | +| skipOn | [SkipOptions](#skipoptions) | When to skip this stage. | No | + +### CustomSyncStageOptions (deprecated) +| Field | Type | Description | Required | +|-|-|-|-| +| timeout | duration | The maximum time the stage can be taken to run. Default is `6h`| No | +| envs | map[string]string | Environment variables used with scripts. | No | +| run | string | Script run on this stage. | Yes | + +### ScriptRunStageOptions +| Field | Type | Description | Required | +|-|-|-|-| +| run | string | Script run on this stage. | Yes | +| env | map[string]string | Environment variables used with scripts. | No | +| timeout | duration | The maximum time the stage can be taken to run. Default is `6h`| No | +| skipOn | [SkipOptions](#skipoptions) | When to skip this stage. 
| No | + +## PostSync + +| Field | Type | Description | Required | +|-|-|-|-| +| chain | [DeploymentChain](#deploymentchain) | Deployment chain configuration, used to determine and build deployments that should be triggered once the current deployment is triggered. | No | + +### DeploymentChain + +| Field | Type | Description | Required | +|-|-|-|-| +| applications | [][DeploymentChainApplication](#deploymentchainapplication) | The list of applications which should be triggered once deployment of this application rolled out successfully. | Yes | + +#### DeploymentChainApplication + +| Field | Type | Description | Required | +|-|-|-|-| +| name | string | The name of PipeCD application, note that application name is not unique in PipeCD datastore | No | +| kind | string | The kind of the PipeCD application, which should be triggered as a node in deployment chain. The value will be one of: KUBERNETES, TERRAFORM, CLOUDRUN, LAMBDA, ECS. | No | + +## EventWatcher + +| Field | Type | Description | Required | +|-|-|-|-| +| matcher | [EventWatcherMatcher](#eventwatchermatcher) | Which event will be handled. | Yes | +| handler | [EventWatcherHandler](#eventwatcherhandler) | What to do for the event which matched by the above matcher. | Yes | + +### EventWatcherMatcher + +| Field | Type | Description | Required | +|-|-|-|-| +| name | string | The event name. | Yes | +| labels | map[string]string | Additional attributes of event. This can make an event definition unique even if the one with the same name exists. | No | + +### EventWatcherHandler + +| Field | Type | Description | Required | +|-|-|-|-| +| type | string | The handler type. Currently, only `GIT_UPDATE` is supported. | Yes | +| config | [EventWatcherHandlerConfig](#eventwatcherhandlerconfig) | Configuration for the event watcher handler. | Yes | + +### EventWatcherHandlerConfig + +| Field | Type | Description | Required | +|-|-|-|-| +| commitMessage | string | The commit message used to push after replacing values. 
Default message is used if not given. | No |
+| makePullRequest | bool | Whether to create a new branch or not when commit changes in event watcher. Default is `false`. | No |
+| replacements | [][EventWatcherReplacement](#eventwatcherreplacement) | List of places where will be replaced when the new event matches. | Yes |
+
+## DriftDetection
+
+| Field | Type | Description | Required |
+|-|-|-|-|
+| ignoreFields | []string | List of fields path in manifests, which its diff should be ignored. This is available for only `KubernetesApp`. | No |
+
+## PipeCD rich defined types
+
+### Percentage
+A wrapper of type `int` to represent percentage data. Basically, you can pass `10` or `"10"` or `10%` and they will be treated as `10%` in PipeCD.
+
+### KubernetesResourcePatch
+
+| Field | Type | Description | Required |
+|-|-|-|-|
+| target | [KubernetesResourcePatchTarget](#kubernetesresourcepatchtarget) | Which manifest, which field will be the target of patch operations. | Yes |
+| ops | [][KubernetesResourcePatchOp](#kubernetesresourcepatchop) | List of operations should be applied to the above target. | No |
+
+### KubernetesResourcePatchTarget
+
+| Field | Type | Description | Required |
+|-|-|-|-|
+| kind | string | The resource kind. e.g. `ConfigMap` | Yes |
+| name | string | The resource name. e.g. `config-map-name` | Yes |
+| documentRoot | string | In case you want to manipulate the YAML or JSON data specified in a field of the manifest, specify that field's path. The string value of that field will be used as input for the patch operations. Otherwise, the whole manifest will be the target of patch operations. e.g. `$.data.envoy-config` | No |
+
+### KubernetesResourcePatchOp
+
+| Field | Type | Description | Required |
+|-|-|-|-|
+| op | string | The operation type. This must be one of `yaml-replace`, `yaml-add`, `yaml-remove`, `json-replace`, `text-regex`. Default is `yaml-replace`. | No |
+| path | string | The path string pointing to the manipulated field. 
For yaml operations it looks like `$.foo.array[0].bar`. | No | +| value | string | The value string whose content will be used as new value for the field. | No | diff --git a/docs/content/en/docs-v0.50.x/user-guide/event-watcher.md b/docs/content/en/docs-v0.50.x/user-guide/event-watcher.md new file mode 100644 index 0000000000..3af7adf3b5 --- /dev/null +++ b/docs/content/en/docs-v0.50.x/user-guide/event-watcher.md @@ -0,0 +1,265 @@ +--- +title: "Connect between CI and CD with event watcher" +linkTitle: "Event watcher" +weight: 5 +description: > + A helper facility to automatically update files when it finds out a new event. +--- + +![](/images/diff-by-eventwatcher.png) + +The only way to upgrade your application with PipeCD is modifying configuration files managed by the Git repositories. +It brings benefits quite a bit, but it can be painful to manually update them every time in some cases (e.g. continuous deployment to your development environment for debugging, the latest prerelease to the staging environment). + +If you're experiencing any of the above pains, Event watcher is for you. +Event watcher works as a helper facility to seamlessly link CI and CD. This feature lets you automatically update files managed by your Piped when an arbitrary event has occurred. +While it empowers you to build pretty versatile workflows, the canonical use case is that you trigger a new deployment by image updates, package releases, etc. + +This guide walks you through configuring Event watcher and how to push an Event. + +## Prerequisites +Before we get into configuring EventWatcher, be sure to configure Piped. See [here](../managing-piped/configuring-event-watcher/) for more details. + +## Usage +File updating can be done by registering the latest value corresponding to the Event in the Control Plane and comparing it with the current value. + +Therefore, you mainly need to: +1. define which values in which files should be updated when a new Event found. +1. 
integrate a step to push an Event to the Control Plane using `pipectl` into your CI workflow. + +### 1. Defining Events +#### Use the `.pipe/` directory +>NOTE: This way is deprecated and will be removed in the future, so please use the application configuration. + +Prepare EventWatcher configuration files under the `.pipe/` directory at the root of your Git repository. +In that files, you define which values in which files should be updated when the Piped found out a new Event. + +For instance, suppose you want to update the Kubernetes manifest defined in `helloworld/deployment.yaml` when an Event with the name `helloworld-image-update` occurs: + +```yaml +apiVersion: pipecd.dev/v1beta1 +kind: EventWatcher +spec: + events: + - name: helloworld-image-update + replacements: + - file: helloworld/deployment.yaml + yamlField: $.spec.template.spec.containers[0].image +``` + +The full list of configurable `EventWatcher` fields are [here](../configuration-reference/#event-watcher-configuration-deprecated). + +#### Use the application configuration + +Define what to do for which event in the application configuration file of the target application. + +- `matcher`: Which event should be handled. +- `handler`: What to do for the event which is specified by matcher. + +For instance, suppose you want to update the Kubernetes manifest defined in `helloworld/deployment.yaml` when an Event with the name `helloworld-image-update` occurs: +```yaml +apiVersion: pipecd.dev/v1beta1 +kind: KubernetesApp +spec: + name: helloworld + eventWatcher: + - matcher: + name: helloworld-image-update + handler: + type: GIT_UPDATE + config: + replacements: + - file: deployment.yaml + yamlField: $.spec.template.spec.containers[0].image +``` + +The full list of configurable `eventWatcher` fields are [here](../configuration-reference/#eventwatcher). + +### 2. 
Pushing an Event with `pipectl` +To register a new value corresponding to Event such as the above in the Control Plane, you need to perform `pipectl`. +And we highly recommend integrating a step for that into your CI workflow. + +You first need to set-up the `pipectl`: + +- Install it on your CI system or where you want to run according to [this guide](../command-line-tool/#installation). +- Grab the API key to which the `READ_WRITE` role is attached according to [this guide](../command-line-tool/#authentication). + +Once you're all set up, pushing a new Event to the Control Plane by the following command: + +```bash +pipectl event register \ + --address={CONTROL_PLANE_API_ADDRESS} \ + --api-key={API_KEY} \ + --name=helloworld-image-update \ + --data=gcr.io/pipecd/helloworld:v0.2.0 +``` + +You can see the status on the event list page. + +![](/images/event-list-page.png) + + +After a while, Piped will create a commit as shown below: + +```diff + spec: + containers: + - name: helloworld +- image: gcr.io/pipecd/helloworld:v0.1.0 ++ image: gcr.io/pipecd/helloworld:v0.2.0 +``` + +NOTE: Keep in mind that it may take a little while because Piped periodically fetches the new events from the Control Plane. You can change its interval according to [here](../managing-piped/configuration-reference/#eventwatcher). + +### [optional] Using labels +Event watcher is a project-wide feature, hence an event name is unique inside a project. That is, you can update multiple repositories at the same time if you use the same event name for different events. + +On the contrary, if you want to explicitly distinguish those, we recommend using labels. You can make an event definition unique by using any number of labels with arbitrary keys and values. +Suppose you define an event with the labels `env: dev` and `appName: helloworld`: + +When you use the `.pipe/` directory, you can configure like below. 
+```yaml +apiVersion: pipecd.dev/v1beta1 +kind: EventWatcher +spec: + events: + - name: image-update + labels: + env: dev + appName: helloworld + replacements: + - file: helloworld/deployment.yaml + yamlField: $.spec.template.spec.containers[0].image +``` + +The other example is like below. +```yaml +apiVersion: pipecd.dev/v1beta1 +kind: ApplicationKind +spec: + name: helloworld + eventWatcher: + - matcher: + name: image-update + labels: + env: dev + appName: helloworld + handler: + type: GIT_UPDATE + config: + replacements: + - file: deployment.yaml + yamlField: $.spec.template.spec.containers[0].image +``` + +The file update will be executed only when the labels are explicitly specified with the `--labels` flag. + +```bash +pipectl event register \ + --address=CONTROL_PLANE_API_ADDRESS \ + --api-key=API_KEY \ + --name=image-update \ + --labels env=dev,appName=helloworld \ + --data=gcr.io/pipecd/helloworld:v0.2.0 +``` + +Note that it is considered a match only when labels are an exact match. + +### [optional] Using contexts + +You can also attach additional metadata to the event. +This information can be added as a trailer to the git commit when Event Watcher using the GIT_UPDATE handler. +This can be useful when attaching information from the source code repository to the manifest repository. + +For example, you can attach the source code commit link to the manifest repository. 
+ +```bash +pipectl event register \ + --address=CONTROL_PLANE_API_ADDRESS \ + --api-key=API_KEY \ + --name=sample \ + --data=gcr.io/pipecd/helloworld:v0.48.0 \ + --contexts Source-Commit-Hash=xxxxxxx,Source-Commit-URL=https://github.com/pipe-cd/pipecd/commit/xxxxxxx +``` + +```bash +# In manifest repository +$ git show +commit ff46cdc9a3ce87a9a66436269251a4870ac55183 (HEAD -> main, origin/main, origin/HEAD) +Author: ffjlabo +Date: Wed Oct 30 16:56:36 2024 +0900 + + Replace values with "gcr.io/pipecd/helloworld:v0.48.0" set by Event "simple" + + Source-Commit-Hash: xxxxxxx + Source-Commit-URL: https://github.com/pipe-cd/pipecd/commit/xxxxxxx +``` + +![](/images/event-watcher-contexts.png) + +## Examples +Suppose you want to update your configuration file after releasing a new Helm chart. + +You define the configuration for event watcher in `helloworld/app.pipecd.yaml` file like: + +```yaml +apiVersion: pipecd.dev/v1beta1 +kind: KubernetesApp +spec: + input: + helmChart: + name: helloworld + version: 0.1.0 + eventWatcher: + - matcher: + name: image-update + labels: + env: dev + appName: helloworld + handler: + type: GIT_UPDATE + config: + replacements: + - file: app.pipecd.yaml + yamlField: $.spec.input.helmChart.version +``` + +Push a new version `0.2.0` as data when the Helm release is completed. 
+ +```bash +pipectl event register \ + --address=CONTROL_PLANE_API_ADDRESS \ + --api-key=API_KEY \ + --name=helm-release \ + --labels env=dev,appName=helloworld \ + --data=0.2.0 +``` + +Then you'll see that Piped updates as: + +```diff +apiVersion: pipecd.dev/v1beta1 +kind: KubernetesApp +spec: + input: + helmChart: + name: helloworld +- version: 0.1.0 ++ version: 0.2.0 + eventWatcher: + - matcher: + name: image-update + labels: + env: dev + appName: helloworld + handler: + type: GIT_UPDATE + config: + replacements: + - file: app.pipecd.yaml + yamlField: $.spec.input.helmChart.version +``` + +## Github Actions +If you're using Github Actions in your CI workflow, [actions-event-register](https://github.com/marketplace/actions/pipecd-register-event) is for you! +With it, you can easily register events without any installation. diff --git a/docs/content/en/docs-v0.50.x/user-guide/examples/_index.md b/docs/content/en/docs-v0.50.x/user-guide/examples/_index.md new file mode 100755 index 0000000000..aa65fb850e --- /dev/null +++ b/docs/content/en/docs-v0.50.x/user-guide/examples/_index.md @@ -0,0 +1,11 @@ +--- +title: "Examples" +linkTitle: "Examples" +weight: 12 +description: > + Some examples of PipeCD in action! +--- + +One of the best ways to see what PipeCD can do, and learn how to deploy your applications with it, is to see some real examples. + +We have prepared some examples for each kind of application, please visit the [PipeCD examples](../../examples/) page for details. 
diff --git a/docs/content/en/docs-v0.50.x/user-guide/examples/k8s-app-bluegreen-with-istio.md b/docs/content/en/docs-v0.50.x/user-guide/examples/k8s-app-bluegreen-with-istio.md new file mode 100644 index 0000000000..7544f8ca79 --- /dev/null +++ b/docs/content/en/docs-v0.50.x/user-guide/examples/k8s-app-bluegreen-with-istio.md @@ -0,0 +1,126 @@ +--- +title: "BlueGreen deployment for Kubernetes app with Istio" +linkTitle: "BlueGreen k8s app with Istio" +weight: 2 +description: > + How to enable blue-green deployment for Kubernetes application with Istio. +--- + +Similar to [canary deployment](../k8s-app-canary-with-istio/), PipeCD allows you to enable and automate the blue-green deployment strategy for your application based on Istio's weighted routing feature. + +In both canary and blue-green strategies, the old version and the new version of the application get deployed at the same time. +But while the canary strategy slowly routes the traffic to the new version, the blue-green strategy quickly routes all traffic to one of the versions. + +In this guide, we will show you how to configure the application configuration file to apply the blue-green strategy. + +Complete source code for this example is hosted in [pipe-cd/examples](https://github.com/pipe-cd/examples/tree/master/kubernetes/mesh-istio-bluegreen) repository. 
+ +## Before you begin + +- Add a new Kubernetes application by following the instructions in [this guide](../../managing-application/adding-an-application/) +- Ensure having `pipecd.dev/variant: primary` [label](https://github.com/pipe-cd/examples/blob/master/kubernetes/mesh-istio-bluegreen/deployment.yaml#L17) and [selector](https://github.com/pipe-cd/examples/blob/master/kubernetes/mesh-istio-bluegreen/deployment.yaml#L12) in the workload template +- Ensure having at least one Istio's `DestinationRule` and defining the needed subsets (`primary` and `canary`) with `pipecd.dev/variant` label + +``` yaml +apiVersion: networking.istio.io/v1beta1 +kind: DestinationRule +metadata: + name: mesh-istio-bluegreen +spec: + host: mesh-istio-bluegreen + subsets: + - name: primary + labels: + pipecd.dev/variant: primary + - name: canary + labels: + pipecd.dev/variant: canary + trafficPolicy: + tls: + mode: ISTIO_MUTUAL +``` + +- Ensure having at least one Istio's `VirtualService` manifest and all traffic is routed to the `primary` + +``` yaml +apiVersion: networking.istio.io/v1beta1 +kind: VirtualService +metadata: + name: mesh-istio-bluegreen +spec: + hosts: + - mesh-istio-bluegreen.pipecd.dev + gateways: + - mesh-istio-bluegreen + http: + - route: + - destination: + host: mesh-istio-bluegreen + subset: primary + weight: 100 +``` + +## Enabling blue-green strategy + +- Add the following application configuration file into the application directory in the Git repository. 
+ +``` yaml +apiVersion: pipecd.dev/v1beta1 +kind: KubernetesApp +spec: + pipeline: + stages: + - name: K8S_CANARY_ROLLOUT + with: + replicas: 100% + - name: K8S_TRAFFIC_ROUTING + with: + all: canary + - name: WAIT_APPROVAL + - name: K8S_PRIMARY_ROLLOUT + - name: K8S_TRAFFIC_ROUTING + with: + all: primary + - name: K8S_CANARY_CLEAN + trafficRouting: + method: istio + istio: + host: mesh-istio-bluegreen +``` + +- Send a PR to update the container image version in the Deployment manifest and merge it to trigger a new deployment. PipeCD will plan the deployment with the specified blue-green strategy. + +![](/images/example-bluegreen-kubernetes-istio.png) +

+Deployment Details Page +

+
+- Now you have an automated blue-green deployment for your application. 🎉
+
+## Understanding what happened
+
+In this example, you configured the application configuration file to switch all traffic from an old to a new version of the application using Istio's weighted routing feature.
+
+- Stage 1: `K8S_CANARY_ROLLOUT` ensures that the workloads of canary variant (new version) should be deployed. But at this time, they still handle nothing, all traffic is handled by workloads of primary variant.
+The number of workloads (e.g. pod) for canary variant is configured to be 100% of the replicas number of primary variant.
+
+![](/images/example-bluegreen-kubernetes-istio-stage-1.png)
+
+- Stage 2: `K8S_TRAFFIC_ROUTING` ensures that all traffic should be routed to canary variant. Because the `trafficRouting` is configured to use Istio, PipeCD will find Istio's VirtualService resource of this application to control the traffic percentage.
+(You can add an [ANALYSIS](../../managing-application/customizing-deployment/automated-deployment-analysis/) stage after this to validate the new version. When any negative impacts are detected, an auto-rollback stage will be executed to switch all traffic back to the primary variant.)
+
+![](/images/example-bluegreen-kubernetes-istio-stage-2.png)
+
+- Stage 3: `WAIT_APPROVAL` waits for a manual approval from someone in your team.
+
+- Stage 4: `K8S_PRIMARY_ROLLOUT` ensures that all resources of primary variant will be updated to the new version.
+
+![](/images/example-bluegreen-kubernetes-istio-stage-4.png)
+
+- Stage 5: `K8S_TRAFFIC_ROUTING` ensures that all traffic should be routed to primary variant. Now primary variant is running the new version so it means all traffic is handled by the new version.
+
+![](/images/example-bluegreen-kubernetes-istio-stage-5.png)
+
+- Stage 6: `K8S_CANARY_CLEAN` ensures all created resources for canary variant should be destroyed.
+ +![](/images/example-bluegreen-kubernetes-istio-stage-6.png) diff --git a/docs/content/en/docs-v0.50.x/user-guide/examples/k8s-app-bluegreen-with-pod-selector.md b/docs/content/en/docs-v0.50.x/user-guide/examples/k8s-app-bluegreen-with-pod-selector.md new file mode 100644 index 0000000000..c303b64cbe --- /dev/null +++ b/docs/content/en/docs-v0.50.x/user-guide/examples/k8s-app-bluegreen-with-pod-selector.md @@ -0,0 +1,11 @@ +--- +title: "BlueGreen deployment for Kubernetes app with PodSelector" +linkTitle: "BlueGreen k8s app with PodSelector" +weight: 4 +description: > + How to enable blue-green deployment for Kubernetes application with PodSelector. +--- + +> TBA + +For applications that are not deployed on a service mesh, PipeCD can enable blue-green deployment with Kubernetes L4 networking. diff --git a/docs/content/en/docs-v0.50.x/user-guide/examples/k8s-app-canary-with-istio.md b/docs/content/en/docs-v0.50.x/user-guide/examples/k8s-app-canary-with-istio.md new file mode 100644 index 0000000000..286b361ded --- /dev/null +++ b/docs/content/en/docs-v0.50.x/user-guide/examples/k8s-app-canary-with-istio.md @@ -0,0 +1,124 @@ +--- +title: "Canary deployment for Kubernetes app with Istio" +linkTitle: "Canary k8s app with Istio" +weight: 1 +description: > + How to enable canary deployment for Kubernetes application with Istio. +--- + +> Canary release is a technique to reduce the risk of introducing a new software version in production by slowly rolling out the change to a small subset of users before rolling it out to the entire infrastructure and making it available to everybody. +> -- [martinfowler.com/canaryrelease](https://martinfowler.com/bliki/CanaryRelease.html) + +With Istio, we can accomplish this goal by configuring a sequence of rules that route a percentage of traffic to each [variant](../../managing-application/defining-app-configuration/kubernetes/#sync-with-the-specified-pipeline) of the application. 
+And with PipeCD, you can enable and automate the canary strategy for your Kubernetes application even easier. + +In this guide, we will show you how to configure the application configuration file to send 10% of traffic to the new version and keep 90% to the primary variant. Then after waiting for manual approval, you will complete the migration by sending 100% of traffic to the new version. + +Complete source code for this example is hosted in [pipe-cd/examples](https://github.com/pipe-cd/examples/tree/master/kubernetes/mesh-istio-canary) repository. + +## Before you begin + +- Add a new Kubernetes application by following the instructions in [this guide](../../managing-application/adding-an-application/) +- Ensure having `pipecd.dev/variant: primary` [label](https://github.com/pipe-cd/examples/blob/master/kubernetes/mesh-istio-canary/deployment.yaml#L17) and [selector](https://github.com/pipe-cd/examples/blob/master/kubernetes/mesh-istio-canary/deployment.yaml#L12) in the workload template +- Ensure having at least one Istio's `DestinationRule` and defining the needed subsets (`primary` and `canary`) with `pipecd.dev/variant` label + +``` yaml +apiVersion: networking.istio.io/v1beta1 +kind: DestinationRule +metadata: + name: mesh-istio-canary +spec: + host: mesh-istio-canary.default.svc.cluster.local + subsets: + - name: primary + labels: + pipecd.dev/variant: primary + - name: canary + labels: + pipecd.dev/variant: canary +``` + +- Ensure having at least one Istio's `VirtualService` manifest and all traffic is routed to the `primary` + +``` yaml +apiVersion: networking.istio.io/v1beta1 +kind: VirtualService +metadata: + name: mesh-istio-canary +spec: + hosts: + - mesh-istio-canary.pipecd.dev + gateways: + - mesh-istio-canary + http: + - route: + - destination: + host: mesh-istio-canary.default.svc.cluster.local + subset: primary + weight: 100 +``` + +## Enabling canary strategy + +- Add the following application configuration file into the application directory 
in Git. + +``` yaml +apiVersion: pipecd.dev/v1beta1 +kind: KubernetesApp +spec: + pipeline: + stages: + - name: K8S_CANARY_ROLLOUT + with: + replicas: 50% + - name: K8S_TRAFFIC_ROUTING + with: + canary: 10 + primary: 90 + - name: WAIT_APPROVAL + - name: K8S_PRIMARY_ROLLOUT + - name: K8S_TRAFFIC_ROUTING + with: + primary: 100 + - name: K8S_CANARY_CLEAN + trafficRouting: + method: istio + istio: + host: mesh-istio-canary.default.svc.cluster.local +``` + +- Send a PR to update the container image version in the Deployment manifest and merge it to trigger a new deployment. PipeCD will plan the deployment with the specified canary strategy. + +![](/images/example-canary-kubernetes-istio.png) +

+Deployment Details Page +

+ +- Now you have an automated canary deployment for your application. 🎉 + +## Understanding what happened + +In this example, you configured the application configuration file to migrate traffic from an old to a new version of the application using Istio's weighted routing feature. + +- Stage 1: `K8S_CANARY_ROLLOUT` ensures that the workloads of canary variant (new version) should be deployed. But at this time, they still handle nothing, all traffic is handled by workloads of primary variant. +The number of workloads (e.g. pod) for canary variant is configured to be 50% of the replicas number of primary variant. + +![](/images/example-canary-kubernetes-istio-stage-1.png) + +- Stage 2: `K8S_TRAFFIC_ROUTING` ensures that 10% of traffic should be routed to canary variant and 90% to primary variant. Because the `trafficRouting` is configured to use Istio, PipeCD will find Istio's VirtualService resource of this application to control the traffic percentage. + +![](/images/example-canary-kubernetes-istio-stage-2.png) + +- Stage 3: `WAIT_APPROVAL` waits for a manual approval from someone in your team. + +- Stage 4: `K8S_PRIMARY_ROLLOUT` ensures that all resources of primary variant will be updated to the new version. + +![](/images/example-canary-kubernetes-istio-stage-4.png) + +- Stage 5: `K8S_TRAFFIC_ROUTING` ensures that all traffic should be routed to primary variant. Now primary variant is running the new version so it means all traffic is handled by the new version. + +![](/images/example-canary-kubernetes-istio-stage-5.png) + +- Stage 6: `K8S_CANARY_CLEAN` ensures all created resources for canary variant should be destroyed. 
+ +![](/images/example-canary-kubernetes-istio-stage-6.png) diff --git a/docs/content/en/docs-v0.50.x/user-guide/examples/k8s-app-canary-with-pod-selector.md b/docs/content/en/docs-v0.50.x/user-guide/examples/k8s-app-canary-with-pod-selector.md new file mode 100644 index 0000000000..5993bc101e --- /dev/null +++ b/docs/content/en/docs-v0.50.x/user-guide/examples/k8s-app-canary-with-pod-selector.md @@ -0,0 +1,122 @@ +--- +title: "Canary deployment for Kubernetes app with PodSelector" +linkTitle: "Canary k8s app with PodSelector" +weight: 3 +description: > + How to enable canary deployment for Kubernetes application with PodSelector. +--- + +Using a service mesh like [Istio](../k8s-app-canary-with-istio/) helps you do canary deployments more easily with many powerful features, but not all teams are ready to use service mesh in their environment. This page will walk you through using PipeCD to enable canary deployment for Kubernetes application running in a non-mesh environment. + +Basically, the idea behind it is described in this [Kubernetes document](https://kubernetes.io/docs/concepts/cluster-administration/manage-deployment/#canary-deployments); the Service resource uses the common label set to route the traffic to both canary and primary workloads, and percentage of traffic for each variant is based on their replicas number. 
+ +## Enabling canary strategy + +Assume your application has the following `Service` and `Deployment` manifests: + +- service.yaml + +``` yaml +apiVersion: v1 +kind: Service +metadata: + name: helloworld +spec: + selector: + app: helloworld + ports: + - protocol: TCP + port: 9085 +``` + +- deployment.yaml + +``` yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: helloworld + labels: + app: helloworld + pipecd.dev/variant: primary +spec: + replicas: 30 + revisionHistoryLimit: 2 + selector: + matchLabels: + app: helloworld + pipecd.dev/variant: primary + template: + metadata: + labels: + app: helloworld + pipecd.dev/variant: primary + spec: + containers: + - name: helloworld + image: gcr.io/pipecd/helloworld:v0.1.0 + args: + - server + ports: + - containerPort: 9085 +``` + +In PipeCD context, manifests defined in Git are the manifests for primary variant, so please note to ensure that your deployment manifest contains `pipecd.dev/variant: primary` label and selector in the spec. + +To enable canary strategy for this Kubernetes application, you will update your application configuration file to be as below: + +``` yaml +apiVersion: pipecd.dev/v1beta1 +kind: KubernetesApp +spec: + pipeline: + stages: + # Deploy the workloads of CANARY variant. In this case, the number of + # workload replicas of CANARY variant is 50% of the replicas number of PRIMARY variant. + - name: K8S_CANARY_ROLLOUT + with: + replicas: 50% + - name: WAIT_APPROVAL + with: + duration: 10s + # Update the workload of PRIMARY variant to the new version. + - name: K8S_PRIMARY_ROLLOUT + # Destroy all workloads of CANARY variant. + - name: K8S_CANARY_CLEAN +``` + +That is all, now let try to send a PR to update the container image version in the Deployment manifest and merge it to trigger a new deployment. Then, PipeCD will plan the deployment with the specified canary strategy. + +![](/images/example-canary-kubernetes.png) +

+Deployment Details Page +

+ +Complete source code for this example is hosted in [pipe-cd/examples](https://github.com/pipe-cd/examples/tree/master/kubernetes/canary) repository. + +## Understanding what happened + +In this example, you configured your application to be deployed with a canary strategy using a native feature of Kubernetes: pod selector. +The traffic will be routed to both canary and primary workloads because they are sharing the same label: `app: helloworld`. +The percentage of traffic for each variant is based on the respective number of pods. + +Here are what happened in details: + +- Before deploying, all traffic gets routed to primary workloads. + + + +- Stage 1: `K8S_CANARY_ROLLOUT` ensures that the workloads of canary variant (new version) should be deployed. +The number of workloads (e.g. pod) for canary variant is configured to be 50% of the replicas number of primary variant. It means 15 canary pods will be started, and they receive 33.3% traffic while primary workloads receive the remaining 66.7% traffic. + + + +- Stage 2: `WAIT_APPROVAL` waits for a manual approval from someone in your team. + +- Stage 3: `K8S_PRIMARY_ROLLOUT` ensures that all resources of primary variant will be updated to the new version. + + + +- Stage 4: `K8S_CANARY_CLEAN` ensures all created resources for canary variant should be destroyed. After that, the primary workloads running in with the new version will receive all traffic. + + diff --git a/docs/content/en/docs-v0.50.x/user-guide/insights.md b/docs/content/en/docs-v0.50.x/user-guide/insights.md new file mode 100644 index 0000000000..ed77d21ee3 --- /dev/null +++ b/docs/content/en/docs-v0.50.x/user-guide/insights.md @@ -0,0 +1,35 @@ +--- +title: "Insights" +linkTitle: "Insights" +weight: 7 +description: > + This page describes how to see delivery performance. +--- + +![](/images/insights.png) + +### Application metrics + +The topmost block helps you understand how many applications your project has. 
+ +### Deployment metrics + +Based on your executed deployment data, PipeCD provides charts that help you better understand the delivery performance of your organization. + +You can view daily and monthly data visualizations of your entire project, a specific application, or a group of applications that match a list of labels. + +#### Deployment Frequency +How often does your application/project deploy code to production. + +#### Change Failure Rate +How often deployment failures occur in production that require an immediate remedy (fix, rollback...). + +#### Lead Time for Changes +How long does it take to go from code committed to code successfully running on production. + +> WIP + +#### Mean Time To Restore +How long does it generally take to restore service when a service incident occurs. + +> WIP diff --git a/docs/content/en/docs-v0.50.x/user-guide/managing-application/_index.md b/docs/content/en/docs-v0.50.x/user-guide/managing-application/_index.md new file mode 100644 index 0000000000..99468227f5 --- /dev/null +++ b/docs/content/en/docs-v0.50.x/user-guide/managing-application/_index.md @@ -0,0 +1,9 @@ +--- +title: "Managing application" +linkTitle: "Managing application" +weight: 2 +description: > + This guide is for developers who have PipeCD installed for them and are using PipeCD to deploy their applications. +--- + +> Note: You must have at least one activated/running Piped to enable using any of the following features of PipeCD. Please refer to [Piped installation docs](../../installation/install-piped/) if you do not have any Piped in your pocket. 
diff --git a/docs/content/en/docs-v0.50.x/user-guide/managing-application/adding-an-application.md b/docs/content/en/docs-v0.50.x/user-guide/managing-application/adding-an-application.md new file mode 100644 index 0000000000..b9e4599326 --- /dev/null +++ b/docs/content/en/docs-v0.50.x/user-guide/managing-application/adding-an-application.md @@ -0,0 +1,142 @@ +--- +title: "Adding an application" +linkTitle: "Adding an application" +weight: 1 +description: > + This page describes how to add a new application. +--- + +An application is a collection of resources and configurations that are managed together. +It represents the service which you are going to deploy. With PipeCD, all application's manifests and its application configuration (`app.pipecd.yaml`) must be committed into a directory of a Git repository. That directory is called the application directory. + +Each application can be handled by one and only one `piped`. Currently, PipeCD is supporting 5 kinds of application: Kubernetes, Terraform, CloudRun, Lambda, ECS. + +> Note: Be sure your application manifests repository is listed in [Piped managing repositories configuration](../managing-piped/configuration-reference/#gitrepository:~:text=No-,repositories,-%5B%5DRepository). + +Before deploying an application, it must be registered to help PipeCD know +- where the application configuration is placed +- which `piped` should handle it and which platform the application should be deployed to + +Through the web console, you can register a new application in one of the following ways: +- Picking from a list of unused apps suggested by Pipeds while scanning Git repositories (Recommended) +- Manually configuring application information + +(If you prefer to use [`pipectl`](../../command-line-tool/#adding-a-new-application) command-line tool, see its usage for the details.) 
+ +## Picking from a list of unused apps suggested by Pipeds + +You have to __prepare a configuration file__ which contains your application configuration and store that file in the Git repository which your Piped is watching first to enable adding a new application this way. + +The application configuration file name must be suffixed by `.pipecd.yaml` because Piped periodically checks for files with this suffix. + +{{< tabpane >}} +{{< tab lang="yaml" header="KubernetesApp" >}} +# For application's configuration in detail for KubernetesApp, please visit +# https://pipecd.dev/docs/user-guide/managing-application/defining-app-configuration/kubernetes/ + +apiVersion: pipecd.dev/v1beta1 +kind: KubernetesApp +spec: + name: foo + labels: + team: bar +{{< /tab >}} +{{< tab lang="yaml" header="TerraformApp" >}} +# For application's configuration in detail for TerraformApp, please visit +# https://pipecd.dev/docs/user-guide/managing-application/defining-app-configuration/terraform/ + +apiVersion: pipecd.dev/v1beta1 +kind: TerraformApp +spec: + name: foo + labels: + team: bar +{{< /tab >}} +{{< tab lang="yaml" header="LambdaApp" >}} +# For application's configuration in detail for LambdaApp, please visit +# https://pipecd.dev/docs/user-guide/managing-application/defining-app-configuration/lambda/ + +apiVersion: pipecd.dev/v1beta1 +kind: LambdaApp +spec: + name: foo + labels: + team: bar +{{< /tab >}} +{{< tab lang="yaml" header="CloudRunApp" >}} +# For application's configuration in detail for CloudRunApp, please visit +# https://pipecd.dev/docs/user-guide/managing-application/defining-app-configuration/cloudrun/ + +apiVersion: pipecd.dev/v1beta1 +kind: CloudRunApp +spec: + name: foo + labels: + team: bar +{{< /tab >}} +{{< tab lang="yaml" header="ECSApp" >}} +# For application's configuration in detail for ECSApp, please visit +# https://pipecd.dev/docs/user-guide/managing-application/defining-app-configuration/ecs/ + +apiVersion: pipecd.dev/v1beta1 +kind: ECSApp +spec: + 
name: foo + labels: + team: bar +{{< /tab >}} +{{< /tabpane >}} + +To define your application deployment pipeline which contains the guideline to show Piped how to deploy your application, please visit [Defining app configuration](../defining-app-configuration/). + +Go to the PipeCD web console on application list page, click the `+ADD` button at the top left corner of the application list page and then go to the `ADD FROM GIT` tab. + +Select the Piped and Platform Provider that you deploy to, once the Piped that's watching your Git repository catches the new unregistered application configuration file, it will be listed up in this panel. Click `ADD` to complete the registration. + +![](/images/registering-an-application-from-suggestions-new.png) +

+

+ +## Manually configuring application information + +This way, you can postpone the preparation for your application's configuration after submitting all the necessary information about your app on the web console. + +By clicking on `+ADD` button at the application list page, a popup will be revealed from the right side as below: + +![](/images/registering-an-application-manually-new.png) +

+

+ +After filling all the required fields, click `Save` button to complete the application registering. + +Here is the list of fields in the register form: + +| Field | Description | Required | +|-|-|-| +| Name | The application name | Yes | +| Kind | The application kind. Select one of these values: `Kubernetes`, `Terraform`, `CloudRun`, `Lambda` and `ECS`. | Yes | +| Piped | The piped that handles this application. Select one of the registered `piped`s at `Settings/Piped` page. | Yes | +| Repository | The Git repository that contains the application configuration and application manifests. Select one of the registered repositories in `piped` configuration. | Yes | +| Path | The relative path from the root of the Git repository to the directory containing the application configuration and application manifests. Using `./` means the repository root. | Yes | +| Config Filename | The name of application configuration file. Default is `app.pipecd.yaml`. | No | +| Platform Provider | Where the application will be deployed to. Select one of the registered cloud/platform providers in `piped` configuration. This field name previously was `Cloud Provider`. | Yes | + +> Note: Labels couldn't be set via this form. If you want, try the way to register via the application configuration defined in the Git repository. + +After submitting the form, one more step left is adding the application configuration file for that application into the application directory in Git repository same as we prepared in [the above method](../adding-an-application/#picking-from-a-list-of-unused-apps-suggested-by-pipeds). + +Please refer [Define your app's configuration](../defining-app-configuration/) or [pipecd/examples](../../examples/) for the examples of being supported application kind. + +## Updating an application +Regardless of which method you used to register the application, the web console can only be used to disable/enable/delete the application, besides the adding operation. 
All updates on application information must be done via the application configuration file stored in Git as a single source of truth. + +```yaml +apiVersion: pipecd.dev/v1beta1 +kind: AppKind +spec: + name: new-name + labels: + team: new-team +``` + +Refer to [configuration reference](../../configuration-reference/) to see the full list of configurable fields. diff --git a/docs/content/en/docs-v0.50.x/user-guide/managing-application/application-live-state.md b/docs/content/en/docs-v0.50.x/user-guide/managing-application/application-live-state.md new file mode 100644 index 0000000000..6cab5cd950 --- /dev/null +++ b/docs/content/en/docs-v0.50.x/user-guide/managing-application/application-live-state.md @@ -0,0 +1,18 @@ +--- +title: "Application live state" +linkTitle: "Application live state" +weight: 7 +description: > + The live states of application components as well as their health status. +--- + +By default, `piped` continuously monitors the running resources/components of all deployed applications to determine the state of them and then send those results to the control plane. The application state will be visualized and rendered at the application details page in realtime. That helps developers can see what is running in the cluster as well as their health status. The application state includes: +- visual graph of application resources/components. Each resource/component node includes its metadata and health status. +- health status of the whole application. Application health status is `HEALTHY` if and only if the health statuses of all of its resources/components are `HEALTHY`. + +![](/images/application-details.png) +

+Application Details Page +

+ +By clicking on the resource/component node, a popup will be revealed from the right side to show more details about that resource/component. diff --git a/docs/content/en/docs-v0.50.x/user-guide/managing-application/cancelling-a-deployment.md b/docs/content/en/docs-v0.50.x/user-guide/managing-application/cancelling-a-deployment.md new file mode 100644 index 0000000000..457a305e70 --- /dev/null +++ b/docs/content/en/docs-v0.50.x/user-guide/managing-application/cancelling-a-deployment.md @@ -0,0 +1,17 @@ +--- +title: "Cancelling a deployment" +linkTitle: "Cancelling a deployment" +weight: 5 +description: > + This page describes how to cancel a running deployment. +--- + +A running deployment can be cancelled from web UI at the deployment details page. + +If the application rollback is enabled in the application configuration, the rollback process will be executed after the cancelling. You can also explicitly specify to rollback after the cancelling or not from the web UI by clicking on `▼` mark on the right side of the `CANCEL` button to select your option. + +![](/images/cancel-deployment.png) +

+Cancel a Deployment from web UI +

+ diff --git a/docs/content/en/docs-v0.50.x/user-guide/managing-application/configuration-drift-detection.md b/docs/content/en/docs-v0.50.x/user-guide/managing-application/configuration-drift-detection.md new file mode 100644 index 0000000000..6090abf7f5 --- /dev/null +++ b/docs/content/en/docs-v0.50.x/user-guide/managing-application/configuration-drift-detection.md @@ -0,0 +1,101 @@ +--- +title: "Configuration drift detection" +linkTitle: "Configuration drift detection" +weight: 8 +description: > + Automatically detecting the configuration drift. +--- + +Configuration Drift is a phenomenon where running resources of service become more and more different from the definitions in Git as time goes on, due to manual ad-hoc changes and updates. +As PipeCD is using Git as a single source of truth, all application resources and infrastructure changes should be done by making a pull request to Git. Whenever a configuration drift occurs it should be notified to the developers and be fixed. + +PipeCD includes `Configuration Drift Detection` feature, which periodically compares running resources/configurations with the definitions in Git to detect the configuration drift and shows the comparing result in the application details web page as well as sends the notifications to the developers. + +### Detection Result +There are three statuses for the drift detection result: `SYNCED`, `OUT_OF_SYNC`, `DEPLOYING`. + +###### SYNCED + +This status means no configuration drift was detected. All resources/configurations are synced from the definitions in Git. From the application details page, this status is shown by a green "Synced" mark. + +![](/images/application-synced.png) +

+Application is in SYNCED state +

+ +###### OUT_OF_SYNC + +This status means a configuration drift was detected. An application is in this status when at least one of the following conditions is satisfied: +- at least one resource is defined in Git but NOT running in the cluster +- at least one resource is NOT defined in Git but running in the cluster +- at least one resource that is both defined in Git and running in the cluster but NOT in the same configuration + +This status is shown by a red "Out of Sync" mark on the application details page. + +![](/images/application-out-of-sync.png) +

+Application is in OUT_OF_SYNC state +

+ +Click on the "SHOW DETAILS" button to see more details about why the application is in the `OUT_OF_SYNC` status. In the below example, the replicas number of a Deployment was not matching, it was `300` in Git but `3` in the cluster. + +![](/images/application-out-of-sync-details.png) +

+The details shows why the application is in OUT_OF_SYNC state +

+ +###### DEPLOYING + +This status means the application is deploying and the configuration drift detection is temporarily not running. Whenever a new deployment of the application was started, the detection process will temporarily be stopped until that deployment finishes and will be continued after that. + +### How to enable + +This feature is automatically enabled for all applications. + +You can change the checking interval as well as [configure the notification](../../managing-piped/configuring-notifications/) for these events in `piped` configuration. + +Note: If you want to trigger deployment automatically when `OUT_OF_SYNC` occurs, see [Trigger configuration](./triggering-a-deployment/#trigger-configuration). + +### Ignore drift detection for specific fields + +> Note: This feature is currently supported for only Kubernetes application. + +You can also ignore drift detection for specified fields in your application manifests. In other words, even if the selected fields have different values between live state and Git, the application status will not be set to `Out of Sync`. + +For example, suppose you have the application's manifest as below + +```yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: simple +spec: + replicas: 2 + template: + spec: + containers: + - args: + - hi + - hello + image: gcr.io/pipecd/helloworld:v1.0.0 + name: helloworld +``` + +If you want to ignore the drift detection for the two fields +- pod's replicas +- `helloworld` container's args + +Add the following statements to `app.pipecd.yaml` to ignore diff on those fields. + +```yaml +spec: + ... + driftDetection: + ignoreFields: + - apps/v1:Deployment:default:simple#spec.replicas + - apps/v1:Deployment:default:simple#spec.template.spec.containers.0.args +``` + +Note: The `ignoreFields` is in format `apiVersion:kind:namespace:name#yamlFieldPath` + +For more information, see the [configuration reference](../../configuration-reference/#driftdetection). 
diff --git a/docs/content/en/docs-v0.50.x/user-guide/managing-application/customizing-deployment/_index.md b/docs/content/en/docs-v0.50.x/user-guide/managing-application/customizing-deployment/_index.md new file mode 100644 index 0000000000..3f42bbdd32 --- /dev/null +++ b/docs/content/en/docs-v0.50.x/user-guide/managing-application/customizing-deployment/_index.md @@ -0,0 +1,14 @@ +--- +title: "Customizing application's deployment pipeline" +linkTitle: "Customizing deployment" +weight: 3 +description: > + This page describes how to customize an application's deployment pipeline with PipeCD defined stages. +--- + +In the previous section, we knew how to use PipeCD supporting application kind's stages to build up a pipeline that defines how Piped should deploy your application. In this section, aside from the application kind specified stages, we will talk about some commonly defined pipeline stages, which can be used to build up a more fashionable deployment pipeline for your application. + +![](/images/deployment-wait-stage.png) +

+Example deployment with a WAIT stage +

diff --git a/docs/content/en/docs-v0.50.x/user-guide/managing-application/customizing-deployment/adding-a-manual-approval.md b/docs/content/en/docs-v0.50.x/user-guide/managing-application/customizing-deployment/adding-a-manual-approval.md new file mode 100644 index 0000000000..3ee946b5fd --- /dev/null +++ b/docs/content/en/docs-v0.50.x/user-guide/managing-application/customizing-deployment/adding-a-manual-approval.md @@ -0,0 +1,39 @@ +--- +title: "Adding a manual approval stage" +linkTitle: "Manual approval stage" +weight: 2 +description: > + This page describes how to add a manual approval stage. +--- + +While deploying an application to production environments, some teams require manual approvals before continuing. +The manual approval stage enables you to control when the deployment is allowed to continue by requiring a specific person or team to approve. +This stage is named by `WAIT_APPROVAL` and you can add it to your pipeline before some stages should be approved before they can be executed. + +``` yaml +apiVersion: pipecd.dev/v1beta1 +kind: KubernetesApp +spec: + pipeline: + stages: + - name: K8S_CANARY_ROLLOUT + - name: WAIT_APPROVAL + with: + timeout: 6h + approvers: + - user-abc + - name: K8S_PRIMARY_ROLLOUT +``` + +As above example, the deployment requires an approval from `user-abc` before `K8S_PRIMARY_ROLLOUT` stage can be executed. + +The value of user ID in the `approvers` list depends on your [SSO configuration](../../../managing-controlplane/auth/), it must be GitHub's user ID if your SSO was configured to use GitHub provider, it must be Gmail account if your SSO was configured to use Google provider. + +In case the `approvers` field was not configured, anyone in the project who has `Editor` or `Admin` role can approve the deployment pipeline. + +Also, it will end with failure when the time specified in `timeout` has elapsed. Default is `6h`. + +![](/images/deployment-wait-approval-stage.png) +

+Deployment with a WAIT_APPROVAL stage +

diff --git a/docs/content/en/docs-v0.50.x/user-guide/managing-application/customizing-deployment/adding-a-wait-stage.md b/docs/content/en/docs-v0.50.x/user-guide/managing-application/customizing-deployment/adding-a-wait-stage.md new file mode 100644 index 0000000000..f2d381d8f8 --- /dev/null +++ b/docs/content/en/docs-v0.50.x/user-guide/managing-application/customizing-deployment/adding-a-wait-stage.md @@ -0,0 +1,29 @@ +--- +title: "Adding a wait stage" +linkTitle: "Wait stage" +weight: 1 +description: > + This page describes how to add a WAIT stage. +--- + +In addition to waiting for approvals from someones, the deployment pipeline can be configured to wait an amount of time before continuing. +This can be done by adding the `WAIT` stage into the pipeline. This stage has one configurable field is `duration` to configure how long should be waited. + +``` yaml +apiVersion: pipecd.dev/v1beta1 +kind: KubernetesApp +spec: + pipeline: + stages: + - name: K8S_CANARY_ROLLOUT + - name: WAIT + with: + duration: 5m + - name: K8S_PRIMARY_ROLLOUT + - name: K8S_CANARY_CLEAN +``` + +![](/images/deployment-wait-stage.png) +

+Deployment with a WAIT stage +

diff --git a/docs/content/en/docs-v0.50.x/user-guide/managing-application/customizing-deployment/automated-deployment-analysis.md b/docs/content/en/docs-v0.50.x/user-guide/managing-application/customizing-deployment/automated-deployment-analysis.md new file mode 100644 index 0000000000..6a3fec2ddb --- /dev/null +++ b/docs/content/en/docs-v0.50.x/user-guide/managing-application/customizing-deployment/automated-deployment-analysis.md @@ -0,0 +1,295 @@ +--- +title: "Adding an automated deployment analysis stage" +linkTitle: "Automated deployment analysis stage" +weight: 3 +description: > + This page describes how to configure Automated Deployment Analysis feature. +--- + +Automated Deployment Analysis (ADA) evaluates the impact of the deployment you are in the middle of by analyzing the metrics data, log entries, and the responses of the configured HTTP requests. +The analysis of the newly deployed application is often carried out in a manual, ad-hoc or statistically incorrect manner. +ADA automates that and helps to build a robust deployment process. +ADA is available as a stage in the pipeline specified in the application configuration file. + +ADA does the analysis by periodically performing queries against the [Analysis Provider](../../../../concepts/#analysis-provider) and evaluating the results to know the impact of the deployment. Then based on these evaluating results, the deployment can be rolled back immediately to minimize any negative impacts. + +The canonical use case for this stage is to determine if your canary deployment should proceed. + +![](/images/deployment-analysis-stage.png) +

+Automatic rollback based on the analysis result +

+ +## Prerequisites +Before enabling ADA inside the pipeline, all required Analysis Providers must be configured in the Piped Configuration according to [this guide](../../../managing-piped/adding-an-analysis-provider/). + +## Analysis by metrics +### Strategies +You can choose one of the four strategies to fit your use case. + +- `THRESHOLD`: A simple method to compare against a statically defined threshold (same as the typical analysis method up to `v0.18.0`). +- `PREVIOUS`: A method to compare metrics with the last successful deployment. +- `CANARY_BASELINE`: A method to compare the metrics between the Canary and Baseline variants. +- `CANARY_PRIMARY`(not recommended): A method to compare the metrics between the Canary and Primary variants. + +`THRESHOLD` is the simplest strategy, so it's for you if you attempt to evaluate this feature. + +`THRESHOLD` only checks if the query result falls within the statically specified range, whereas others evaluate by checking the deviation of two time-series data. +Therefore, those configuration fields are slightly different from each other. The next section covers how to configure the ADA stage for each strategy. + +### Configuration +Here is an example for the `THRESHOLD` strategy. + +```yaml +apiVersion: pipecd.dev/v1beta1 +kind: KubernetesApp +spec: + pipeline: + stages: + - name: ANALYSIS + with: + duration: 30m + metrics: + - strategy: THRESHOLD + provider: my-prometheus + interval: 5m + expected: + max: 0.01 + query: | + sum (rate(http_requests_total{status=~"5.*"}[5m])) + / + sum (rate(http_requests_total[5m])) +``` + +In the `provider` field, put the name of the provider in Piped configuration prepared in the [Prerequisites](#prerequisites) section. + +The `ANALYSIS` stage will continue to run for the period specified in the `duration` field. +In the meantime, Piped sends the given `query` to the Analysis Provider at each specified `interval`. + +For each query, it checks if the result is within the expected range. 
If it's not expected, this `ANALYSIS` stage will fail (typically the rollback stage will be started). +You can change the acceptable number of failures by setting the `failureLimit` field. + +The other strategies are basically the same, but there are slight differences. Let's take a look at them. + +##### PREVIOUS strategy +In the `PREVIOUS` strategy, Piped queries the analysis provider with the time range when the deployment was previously successful, and compares that metrics with the current metrics. + +```yaml +apiVersion: pipecd.dev/v1beta1 +kind: KubernetesApp +spec: + pipeline: + stages: + - name: ANALYSIS + with: + duration: 30m + metrics: + - strategy: PREVIOUS + provider: my-prometheus + deviation: HIGH + interval: 5m + query: | + sum (rate(http_requests_total{status=~"5.*"}[5m])) + / + sum (rate(http_requests_total[5m])) +``` + +In the `THRESHOLD` strategy, we used `expected` to evaluate the deployment, but here we use `deviation` instead. +The stage fails on deviation in the specified direction. In the above example, it fails if the current metrics is higher than the previous. + +##### CANARY strategy + +**With baseline**: + +In the `CANARY_BASELINE` strategy, Piped checks if there is a significant difference between the metrics of the two running variants, Canary and Baseline. + +```yaml +apiVersion: pipecd.dev/v1beta1 +kind: KubernetesApp +spec: + pipeline: + stages: + - name: ANALYSIS + with: + duration: 30m + metrics: + - strategy: CANARY_BASELINE + provider: my-prometheus + deviation: HIGH + interval: 5m + query: | + sum (rate(http_requests_total{job="foo-{{ .Variant.Name }}", status=~"5.*"}[5m])) + / + sum (rate(http_requests_total{job="foo-{{ .Variant.Name }}"}[5m])) +``` + +Like `PREVIOUS`, you specify the conditions for failure with `deviation`. + +It generates different queries for Canary and Baseline to compare the metrics. You can use the Variant args to template the queries. 
+Analysis Template uses the [Go templating engine](https://golang.org/pkg/text/template/) which only replaces values. This allows variant-specific data to be embedded in the query. + +The available built-in args currently are: + +| Property | Type | Description | +|-|-|-| +| Variant.Name | string | "canary", "baseline", or "primary" will be populated | + +Also, you can define the custom args using `baselineArgs` and `canaryArgs`, and refer to them like `{{ .VariantCustomArgs.job }}`. + +```yaml + metrics: + - strategy: CANARY_BASELINE + provider: my-prometheus + deviation: HIGH + baselineArgs: + job: bar + canaryArgs: + job: baz + interval: 5m + query: cpu_usage{job="{{ .VariantCustomArgs.job }}", status=~"5.*"} +``` + +**With primary (not recommended)**: + +If for some reason you cannot provide the Baseline variant, you can also compare Canary and Primary. +However, we recommend that you compare it with Baseline that is a variant launched at the same time as Canary as much as possible. + +##### Comparison algorithm +The metric comparison algorithm in PipeCD uses a nonparametric statistical test called [Mann-Whitney U test](https://en.wikipedia.org/wiki/Mann%E2%80%93Whitney_U_test) to check for a significant difference between two metrics collections (like Canary and Baseline, or the previous deployment and the current metrics). 
+ +### Example pipelines + +**Analyze the canary variant using the `THRESHOLD` strategy:** + +```yaml +apiVersion: pipecd.dev/v1beta1 +kind: KubernetesApp +spec: + pipeline: + stages: + - name: K8S_CANARY_ROLLOUT + with: + replicas: 20% + - name: ANALYSIS + with: + duration: 30m + metrics: + - provider: my-prometheus + interval: 10m + expected: + max: 0.1 + query: rate(cpu_usage_total{app="foo"}[10m]) + - name: K8S_PRIMARY_ROLLOUT + - name: K8S_CANARY_CLEAN +``` + +**Analyze the primary variant using the `PREVIOUS` strategy:** + +```yaml +apiVersion: pipecd.dev/v1beta1 +kind: KubernetesApp +spec: + pipeline: + stages: + - name: K8S_PRIMARY_ROLLOUT + - name: ANALYSIS + with: + duration: 30m + metrics: + - strategy: PREVIOUS + provider: my-prometheus + interval: 5m + deviation: HIGH + query: rate(cpu_usage_total{app="foo"}[5m]) +``` + +**Analyze the canary variant using the `CANARY_BASELINE` strategy:** + +```yaml +apiVersion: pipecd.dev/v1beta1 +kind: KubernetesApp +spec: + pipeline: + stages: + - name: K8S_CANARY_ROLLOUT + with: + replicas: 20% + - name: K8S_BASELINE_ROLLOUT + with: + replicas: 20% + - name: ANALYSIS + with: + duration: 30m + metrics: + - strategy: CANARY_BASELINE + provider: my-prometheus + interval: 10m + deviation: HIGH + query: rate(cpu_usage_total{app="foo", variant="{{ .Variant.Name }}"}[10m]) + - name: K8S_PRIMARY_ROLLOUT + - name: K8S_CANARY_CLEAN + - name: K8S_BASELINE_CLEAN +``` + +The full list of configurable `ANALYSIS` stage fields are [here](../../../configuration-reference/#analysisstageoptions). + +See more the [example](https://github.com/pipe-cd/examples/blob/master/kubernetes/analysis-by-metrics/app.pipecd.yaml). + +## Analysis by logs + +>TBA + +## Analysis by http + +>TBA + +### [Optional] Analysis Template +Analysis Templating is a feature that allows you to define some shared analysis configurations to be used by multiple applications. These templates must be placed at the `.pipe` directory at the root of the Git repository. 
Any application in that Git repository can use the defined template by specifying the name of the template in the application configuration file. + +```yaml +apiVersion: pipecd.dev/v1beta1 +kind: AnalysisTemplate +spec: + metrics: + http_error_rate: + interval: 30m + provider: my-prometheus + expected: + max: 0 + query: | + sum without(status) (rate(http_requests_total{status=~"5.*", job="{{ .App.Name }}"}[1m])) + / + sum without(status) (rate(http_requests_total{job="{{ .App.Name }}"}[1m])) +``` + +Once the AnalysisTemplate is defined, you can reference it from the application configuration using the `template` field. + +```yaml +apiVersion: pipecd.dev/v1beta1 +kind: KubernetesApp +spec: + pipeline: + stages: + - name: ANALYSIS + with: + duration: 30m + metrics: + - template: + name: http_error_rate +``` + +Analysis Template uses the [Go templating engine](https://golang.org/pkg/text/template/) which only replaces values. This allows deployment-specific data to be embedded in the analysis template. + +The available built-in args are: + +| Property | Type | Description | +|-|-|-| +| App.Name | string | Application Name. | +| K8s.Namespace | string | The Kubernetes namespace where manifests will be applied. | + +Also, custom args are supported. Custom args placeholders can be defined as `{{ .AppCustomArgs.<name> }}`. + +Of course, it can be used in conjunction with [Variant args](#canary-strategy). + +See [here](https://github.com/pipe-cd/examples/blob/master/.pipe/analysis-template.yaml) for more examples. +And the full list of configurable `AnalysisTemplate` fields is [here](/docs/user-guide/configuration-reference/#analysis-template-configuration). 
diff --git a/docs/content/en/docs-v0.50.x/user-guide/managing-application/customizing-deployment/custom-sync.md b/docs/content/en/docs-v0.50.x/user-guide/managing-application/customizing-deployment/custom-sync.md new file mode 100644 index 0000000000..47d7d7a534 --- /dev/null +++ b/docs/content/en/docs-v0.50.x/user-guide/managing-application/customizing-deployment/custom-sync.md @@ -0,0 +1,61 @@ +--- +title: "Custom Sync" +linkTitle: "Custom Sync" +weight: 4 +description: > + Specific guide for configuring Custom Sync +--- + +`CUSTOM_SYNC` is one stage in the pipeline and you can define scripts to run in this stage to perform the deployment. + +> Note: This feature is marked as a deprecated feature and will be removed later. + +## How to configure Custom Sync + +Add a `CUSTOM_SYNC` to your pipeline and write commands to deploy your infrastructure. +The commands run in the directory where this application configuration file exists. + +```yaml +apiVersion: pipecd.dev/v1beta1 +kind: LambdaApp +spec: + name: sam-simple + labels: + env: example + team: abc + planner: + # Must add this configuration to force use CUSTOM_SYNC stage. + alwaysUsePipeline: true + pipeline: + stages: + - name: CUSTOM_SYNC + with: + envs: + AWS_PROFILE: "sample" + run: | + cd sam-app + sam build + echo y | sam deploy --profile $AWS_PROFILE +``` + +![](/images/custom-sync.png) + +Note: +1. You can use `CUSTOM_SYNC` with any current supporting application kind, but keep `alwaysUsePipeline` true to not run the application kind's default `QUICK_SYNC`. +2. Only one `CUSTOM_SYNC` stage should be used in an application pipeline. +3. The commands run with the environment variable `PATH` that refers to `~/.piped/tools` first. 
+ +The public piped image available in PipeCD main repo (ref: [Dockerfile](https://github.com/pipe-cd/pipecd/blob/master/cmd/piped/Dockerfile)) is based on [alpine](https://hub.docker.com/_/alpine/) and only has a few UNIX commands available (ref: [piped-base Dockerfile](https://github.com/pipe-cd/pipecd/blob/master/tool/piped-base/Dockerfile)). If you want to use your commands (`sam` in the above example), you can: + +- Prepare your own environment container image then add [piped binary](https://github.com/pipe-cd/pipecd/releases) to it. +- Build your own container image based on `ghcr.io/pipe-cd/piped` image. +- Manually update your running piped container (not recommended). + +## Auto Rollback + +When `autoRollback` is enabled, the deployment will be rolled back in the same way as [Rolling Back](../../rolling-back-a-deployment). + +When the rolling back process is triggered in the pipeline including `CUSTOM_SYNC`, `CUSTOM_SYNC_ROLLBACK` stage will be added to the deployment pipeline. +`CUSTOM_SYNC_ROLLBACK` is different from the `ROLLBACK` that applications set by default; it runs the same commands as `CUSTOM_SYNC` in the running commit to revert all the applied changes. + +![](/images/custom-sync-rollback.png) diff --git a/docs/content/en/docs-v0.50.x/user-guide/managing-application/customizing-deployment/script-run.md b/docs/content/en/docs-v0.50.x/user-guide/managing-application/customizing-deployment/script-run.md new file mode 100644 index 0000000000..dd4ba0544f --- /dev/null +++ b/docs/content/en/docs-v0.50.x/user-guide/managing-application/customizing-deployment/script-run.md @@ -0,0 +1,190 @@ +--- +title: "Script Run stage" +linkTitle: "Script Run stage" +weight: 4 +description: > + Specific guide for configuring Script Run stage +--- + +`SCRIPT_RUN` stage is one stage in the pipeline and you can execute any commands. + +> Note: This feature is in alpha status. 
Currently you can use it on all application kinds, but the rollback feature is only for the application kind of KubernetesApp. + +## How to configure SCRIPT_RUN stage + +Add a `SCRIPT_RUN` to your pipeline and write commands. + +```yaml +apiVersion: pipecd.dev/v1beta1 +kind: KubernetesApp +spec: + name: canary-with-script-run + labels: + env: example + team: product + pipeline: + stages: + - name: K8S_CANARY_ROLLOUT + with: + replicas: 10% + - name: WAIT + with: + duration: 10s + - name: SCRIPT_RUN + with: + env: + MSG: "execute script1" + run: | + echo $MSG + sleep 10 + - name: K8S_PRIMARY_ROLLOUT + - name: K8S_CANARY_CLEAN + - name: SCRIPT_RUN + with: + env: + MSG: "execute script2" + run: | + echo $MSG + sleep 10 +``` + +You can define the command as `run`. +Also, if you want to some values as variables, you can define them as `env`. + +The commands run in the directory where this application configuration file exists. + +![](/images/script-run.png) + +### Execute the script file + +If your script is so long, you can separate the script as a file. +You can put the file with the app.pipecd.yaml in the same dir and then you can execute the script like this. + +``` +apiVersion: pipecd.dev/v1beta1 +kind: KubernetesApp +spec: + name: script-run + pipeline: + stages: + - name: SCRIPT_RUN + with: + run: | + sh script.sh +``` + +``` +. +├── app.pipecd.yaml +└── script.sh +``` + +## Builtin commands + +Currently, you can use the commands which are installed in the environment for the piped. + +For example, If you are using the container platform and the offcial piped container image, you can use the command below. 
+ +- git +- ssh +- jq +- curl +- commands installed by piped in $PIPED_TOOL_DIR (check at runtime) +- built-in commands installed in the base image + +The public piped image available in PipeCD main repo (ref: [Dockerfile](https://github.com/pipe-cd/pipecd/blob/master/cmd/piped/Dockerfile)) is based on [alpine](https://hub.docker.com/_/alpine/) and only has a few UNIX commands available (ref: [piped-base Dockerfile](https://github.com/pipe-cd/pipecd/blob/master/tool/piped-base/Dockerfile)). + +If you want to use your commands, you can do so with either of the steps below. +- Prepare your own environment container image then add [piped binary](https://github.com/pipe-cd/pipecd/releases) to it. +- Build your own container image based on `ghcr.io/pipe-cd/piped` image. + +## Default environment values + +You can use the environment values related to the deployment. + +| Name | Description | Example | +|-|-|-| +|SR_DEPLOYMENT_ID| The deployment id | 877625fc-196a-40f9-b6a9-99decd5494a0 | +|SR_APPLICATION_ID| The application id | 8d7609e0-9ff6-4dc7-a5ac-39660768606a | +|SR_APPLICATION_NAME| The application name | example | +|SR_TRIGGERED_AT| The timestamp when the deployment is triggered | 1719571113 | +|SR_TRIGGERED_COMMIT_HASH| The commit hash that triggered the deployment | 2bf969a3dad043aaf8ae6419943255e49377da0d | +|SR_REPOSITORY_URL| The repository url configured in the piped config | git@github.com:org/repo.git, https://github.com/org/repo | +|SR_SUMMARY| The summary of the deployment | Sync with the specified pipeline because piped received a command from user via web console or pipectl| +|SR_CONTEXT_RAW| The json encoded string of above values | {"deploymentID":"877625fc-196a-40f9-b6a9-99decd5494a0","applicationID":"8d7609e0-9ff6-4dc7-a5ac-39660768606a","applicationName":"example","triggeredAt":1719571113,"triggeredCommitHash":"2bf969a3dad043aaf8ae6419943255e49377da0d","repositoryURL":"git@github.com:org/repo.git","labels":{"env":"example","team":"product"}} | 
+|SR_LABELS_XXX| The label attached to the deployment. The env name depends on the label name. For example, if a deployment has the labels `env:prd` and `team:server`, `SR_LABELS_ENV` and `SR_LABELS_TEAM` are registered. | prd, server | + +### Use `SR_CONTEXT_RAW` with jq + +You can use jq command to refer to the values from `SR_CONTEXT_RAW`. + +``` + - name: SCRIPT_RUN + with: + run: | + echo "Get deploymentID from SR_CONTEXT_RAW" + echo $SR_CONTEXT_RAW | jq -r '.deploymentID' + sleep 10 + onRollback: | + echo "rollback script-run" +``` + +## Rollback + +> Note: Currently, this feature is only for the application kind of KubernetesApp. + +You can define the command as `onRollback` to execute when to rollback similar to `run`. +Execute the command to rollback SCRIPT_RUN to the point where the deployment was canceled or failed. + +```yaml +apiVersion: pipecd.dev/v1beta1 +kind: KubernetesApp +spec: + name: canary-with-script-run + labels: + env: example + team: product + pipeline: + stages: + - name: K8S_CANARY_ROLLOUT + with: + replicas: 10% + - name: WAIT + with: + duration: 10s + - name: SCRIPT_RUN + with: + env: + MSG: "execute script1" + R_MSG: "rollback script1" + run: | + echo $MSG + sleep 10 + onRollback: | + echo $R_MSG + sleep 10 + - name: K8S_PRIMARY_ROLLOUT + - name: K8S_CANARY_CLEAN +``` + +![](/images/script-run-onRollback.png) + +The command defined as `onRollback` is executed as `SCRIPT_RUN_ROLLBACK` stage after each `ROLLBACK` stage. + +When there are multiple SCRIPT_RUN stages, they are executed in the same order as SCRIPT_RUN on the pipeline. +Also, only for the executed SCRIPT_RUNs are rollbacked. + +For example, consider when deployment proceeds in the following order from 1 to 7. +``` +1. K8S_CANARY_ROLLOUT +2. WAIT +3. SCRIPT_RUN +4. K8S_PRIMARY_ROLLOUT +5. SCRIPT_RUN +6. K8S_CANARY_CLEAN +7. SCRIPT_RUN +``` + +Then +- If 3 is canceled or fails while running, only SCRIPT_RUN of 3 will be rollbacked. 
+ +- If 4 is canceled or fails while running, only SCRIPT_RUN of 3 will be rolled back. +- If 6 is canceled or fails while running, only SCRIPT_RUNs 3 and 5 will be rolled back. The order of execution is 3 -> 5. diff --git a/docs/content/en/docs-v0.50.x/user-guide/managing-application/defining-app-configuration/_index.md b/docs/content/en/docs-v0.50.x/user-guide/managing-application/defining-app-configuration/_index.md new file mode 100644 index 0000000000..6bcca6b06f --- /dev/null +++ b/docs/content/en/docs-v0.50.x/user-guide/managing-application/defining-app-configuration/_index.md @@ -0,0 +1,9 @@ +--- +title: "Defining application's configuration" +linkTitle: "Defining app configuration" +weight: 2 +description: > + This page describes how to configure your application's deployment for each application kind. +--- + +In the previous section, we learned that each PipeCD application requires a configuration file (we call it the application configuration file) that contains the application's information (such as name, label, etc) and also defines how Piped should deploy that application. In this section, we will show you how to define a deployment pipeline like that for each kind of PipeCD supporting application. diff --git a/docs/content/en/docs-v0.50.x/user-guide/managing-application/defining-app-configuration/cloudrun.md b/docs/content/en/docs-v0.50.x/user-guide/managing-application/defining-app-configuration/cloudrun.md new file mode 100644 index 0000000000..7333dedf93 --- /dev/null +++ b/docs/content/en/docs-v0.50.x/user-guide/managing-application/defining-app-configuration/cloudrun.md @@ -0,0 +1,87 @@ +--- +title: "Configuring Cloud Run application" +linkTitle: "Cloud Run" +weight: 3 +description: > + Specific guide to configuring deployment for Cloud Run application. +--- + +Deploying a Cloud Run application requires a `service.yaml` file placed inside the application directory. 
That file contains the service specification used by Cloud Run as following: + +``` yaml +apiVersion: serving.knative.dev/v1 +kind: Service +metadata: + name: SERVICE_NAME +spec: + template: + metadata: + annotations: + autoscaling.knative.dev/maxScale: '5' + spec: + containerConcurrency: 80 + containers: + - args: + - server + image: gcr.io/pipecd/helloworld:v0.5 + ports: + - containerPort: 9085 + resources: + limits: + cpu: 1000m + memory: 128Mi +``` + +## Quick sync + +By default, when the [pipeline](../../../configuration-reference/#cloud-run-application) was not specified, PipeCD triggers a quick sync deployment for the merged pull request. +Quick sync for a Cloud Run deployment will roll out the new version and switch all traffic to it. + +## Sync with the specified pipeline + +The [pipeline](../../../configuration-reference/#cloud-run-application) field in the application configuration is used to customize the way to do the deployment. +You can add a manual approval before routing traffic to the new version or add an analysis stage the do some smoke tests against the new version before allowing them to receive the real traffic. + +These are the provided stages for Cloud Run application you can use to build your pipeline: + +- `CLOUDRUN_PROMOTE` + - promote the new version to receive an amount of traffic + +and other common stages: +- `WAIT` +- `WAIT_APPROVAL` +- `ANALYSIS` + +See the description of each stage at [Customize application deployment](../../customizing-deployment/). + +Here is an example that rolls out the new version gradually: + +``` yaml +apiVersion: pipecd.dev/v1beta1 +kind: CloudRunApp +spec: + pipeline: + stages: + # Promote new version to receive 10% of traffic. + - name: CLOUDRUN_PROMOTE + with: + percent: 10 + - name: WAIT + with: + duration: 10m + # Promote new version to receive 50% of traffic. + - name: CLOUDRUN_PROMOTE + with: + percent: 50 + - name: WAIT + with: + duration: 10m + # Promote new version to receive all traffic. 
+ - name: CLOUDRUN_PROMOTE + with: + percent: 100 +``` + +## Reference + +See [Configuration Reference](../../../configuration-reference/#cloud-run-application) for the full configuration. diff --git a/docs/content/en/docs-v0.50.x/user-guide/managing-application/defining-app-configuration/ecs.md b/docs/content/en/docs-v0.50.x/user-guide/managing-application/defining-app-configuration/ecs.md new file mode 100644 index 0000000000..bf6c6001a2 --- /dev/null +++ b/docs/content/en/docs-v0.50.x/user-guide/managing-application/defining-app-configuration/ecs.md @@ -0,0 +1,164 @@ +--- +title: "Configuring ECS application" +linkTitle: "ECS" +weight: 5 +description: > + Specific guide to configuring deployment for Amazon ECS application. +--- + +There are two main ways to deploy an Amazon ECS application. +- Your application is a one-time or periodic batch job. + - it's a standalone task. + - you need to prepare `TaskDefinition` +- Your application is deployed to run continuously or behind a load balancer. + - you need to prepare `TaskDefinition` and `Service` + +To deploy an Amazon ECS application, the `TaskDefinition` configuration file must be located in the application directory. This file contains all configuration for [ECS TaskDefinition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html) object and will be used by Piped agent while deploying your application/service to the ECS cluster. + +To deploy your application to run continuously or to place it behind a load balancer, You need to create [ECS Service](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs_services.html). The `Service` configuration file also must be located in the application directory. This file contains all configurations for [ECS Service](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs_services.html) object. + +If you're not familiar with ECS, you can get examples for those files from [here](../../../../examples/#ecs-applications). 
+ +Note: + +You can generate an application config file easily and interactively by [`pipectl init`](../../command-line-tool.md#generating-an-application-config-apppipecdyaml). + + +## Quick sync + +By default, when the [pipeline](../../../configuration-reference/#ecs-application) was not specified, PipeCD triggers a quick sync deployment for the merged pull request. +Quick sync for an ECS deployment will roll out the new version and switch all traffic to it immediately. +> In case of standalone task, only Quick sync is supported. + +Here is an example for Quick sync. + + {{< tabpane >}} + {{< tab lang="yaml" header="application" >}} +apiVersion: pipecd.dev/v1beta1 +kind: ECSApp +spec: + name: simple + labels: + env: example + team: xyz + input: + # Path to Service configuration file in Yaml/JSON format. + serviceDefinitionFile: servicedef.yaml + # Path to TaskDefinition configuration file in Yaml/JSON format. + # Default is `taskdef.json` + taskDefinitionFile: taskdef.yaml + targetGroups: + primary: + targetGroupArn: arn:aws:elasticloadbalancing:ap-northeast-1:XXXX:targetgroup/ecs-lb/YYYY + containerName: web + containerPort: 80 + {{< /tab >}} + {{< tab lang="yaml" header="standalone task" >}} +apiVersion: pipecd.dev/v1beta1 +kind: ECSApp +spec: + name: standalonetask-fargate + labels: + env: example + team: xyz + input: + # Path to TaskDefinition configuration file in Yaml/JSON format. + # Default is `taskdef.json` + taskDefinitionFile: taskdef.yaml + clusterArn: arn:aws:ecs:ap-northeast-1:XXXX:cluster/test-cluster + awsvpcConfiguration: + assignPublicIp: ENABLED + subnets: + - subnet-YYYY + - subnet-YYYY + securityGroups: + - sg-YYYY + {{< /tab >}} + {{< /tabpane >}} + +## Sync with the specified pipeline + +The [pipeline](../../../configuration-reference/#ecs-application) field in the application configuration is used to customize the way to do the deployment. 
+You can add a manual approval before routing traffic to the new version or add an analysis stage the do some smoke tests against the new version before allowing them to receive the real traffic. + +These are the provided stages for ECS application you can use to build your pipeline: + +- `ECS_CANARY_ROLLOUT` + - deploy workloads of the new version as CANARY variant, but it is still receiving no traffic. +- `ECS_PRIMARY_ROLLOUT` + - deploy workloads of the new version as PRIMARY variant, but it is still receiving no traffic. +- `ECS_TRAFFIC_ROUTING` + - routing traffic to the specified variants. +- `ECS_CANARY_CLEAN` + - destroy all workloads of CANARY variant. + +and other common stages: +- `WAIT` +- `WAIT_APPROVAL` +- `ANALYSIS` + +See the description of each stage at [Customize application deployment](../../customizing-deployment/). + +Here is an example that rolls out the new version gradually: + +``` yaml +apiVersion: pipecd.dev/v1beta1 +kind: ECSApp +spec: + input: + # Path to Service configuration file in Yaml/JSON format. + serviceDefinitionFile: servicedef.yaml + # Path to TaskDefinition configuration file in Yaml/JSON format. + # Default is `taskdef.json` + taskDefinitionFile: taskdef.yaml + targetGroups: + primary: + targetGroupArn: arn:aws:elasticloadbalancing:ap-northeast-1:XXXX:targetgroup/ecs-canary-blue/YYYY + containerName: web + containerPort: 80 + canary: + targetGroupArn: arn:aws:elasticloadbalancing:ap-northeast-1:XXXX:targetgroup/ecs-canary-green/YYYY + containerName: web + containerPort: 80 + pipeline: + stages: + # Deploy the workloads of CANARY variant, the number of workload + # for CANARY variant is equal to 30% of PRIMARY's workload. + # But this is still receiving no traffic. + - name: ECS_CANARY_ROLLOUT + with: + scale: 30 + # Change the traffic routing state where + # the CANARY workloads will receive the specified percentage of traffic. + # This is known as multi-phase canary strategy. 
+ - name: ECS_TRAFFIC_ROUTING + with: + canary: 20 + # Optional: We can also add an ANALYSIS stage to verify the new version. + # If this stage finds any not good metrics of the new version, + # a rollback process to the previous version will be executed. + - name: ANALYSIS + # Update the workload of PRIMARY variant to the new version. + - name: ECS_PRIMARY_ROLLOUT + # Change the traffic routing state where + # the PRIMARY workloads will receive 100% of the traffic. + - name: ECS_TRAFFIC_ROUTING + with: + primary: 100 + # Destroy all workloads of CANARY variant. + - name: ECS_CANARY_CLEAN +``` + +## NOTE + +- When you use an ELB for deployments, all listener rules that have the same target groups as configured in app.pipecd.yaml will be controlled. + - That means you need to link target groups to your listener rules before deployments. + - For more information and diagrams, see [Issue#4733 [ECS] Modify ELB listener rules other than defaults without adding config](https://github.com/pipe-cd/pipecd/pull/4733). +- When you use [Service Connect](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-connect.html), you cannot use Canary or Blue/Green deployment yet because Service Connect does not support the external deployment yet. +- When you use AutoScaling for a service, you can disable reconciling `desiredCount` by following steps. + 1. Create a service without defining `desiredCount` in the service definition file. See [Restrictions of Service Definition](../../../configuration-reference/#restrictions-of-service-definition). + 2. Configure AutoScaling by yourself. + +## Reference + +See [Configuration Reference](../../../configuration-reference/#ecs-application) for the full configuration. 
diff --git a/docs/content/en/docs-v0.50.x/user-guide/managing-application/defining-app-configuration/kubernetes.md b/docs/content/en/docs-v0.50.x/user-guide/managing-application/defining-app-configuration/kubernetes.md new file mode 100644 index 0000000000..8669874db1 --- /dev/null +++ b/docs/content/en/docs-v0.50.x/user-guide/managing-application/defining-app-configuration/kubernetes.md @@ -0,0 +1,121 @@ +--- +title: "Configuring Kubernetes application" +linkTitle: "Kubernetes" +weight: 1 +description: > + Specific guide to configuring deployment for Kubernetes application. +--- + +Based on the application configuration and the pull request changes, PipeCD plans how to execute the deployment: doing quick sync or doing progressive sync with the specified pipeline. + +Note: + +You can generate an application config file easily and interactively by [`pipectl init`](../../command-line-tool.md#generating-an-application-config-apppipecdyaml). + + +## Quick sync + +Quick sync is a fast way to sync application to the state specified in the target Git commit without any progressive strategy. It just applies all the defined manifests to sync the application. +The quick sync will be planned in one of the following cases: +- no pipeline was specified in the application configuration file +- [pipeline](../../../configuration-reference/#pipeline) was specified but the PR did not make any changes on workload (e.g. Deployment's pod template) or config (e.g. ConfigMap, Secret) + +For example, the application configuration as below is missing the pipeline field. This means any pull request that touches the application will trigger a quick sync deployment. + +``` yaml +apiVersion: pipecd.dev/v1beta1 +kind: KubernetesApp +spec: + input: + helmChart: + repository: pipecd + name: helloworld + version: v0.3.0 +``` + +In another case, even when the pipeline was specified, a PR that just changes the Deployment's replicas number for scaling will also trigger a quick sync deployment. 
+ +## Sync with the specified pipeline + +The `pipeline` field in the application configuration is used to customize the way to do deployment by specifying and configuring the execution stages. You may want to configure those stages to enable a progressive deployment with a strategy like canary, blue-green, a manual approval, an analysis stage. + +To enable customization, PipeCD defines three variants for each Kubernetes application: primary (aka stable), baseline and canary. +- `primary` runs the current version of code and configuration. +- `baseline` runs the same version of code and configuration as the primary variant. (Creating a brand-new baseline workload ensures that the metrics produced are free of any effects caused by long-running processes.) +- `canary` runs the proposed change of code or configuration. + +Depending on the configured pipeline, any variants can exist and receive the traffic during the deployment process but once the deployment is completed, only the `primary` variant should be remained. + +These are the provided stages for Kubernetes application you can use to build your pipeline: + +- `K8S_PRIMARY_ROLLOUT` + - update the primary resources to the state defined in the target commit +- `K8S_CANARY_ROLLOUT` + - generate canary resources based on the definition of the primary resource in the target commit and apply them +- `K8S_CANARY_CLEAN` + - remove all canary resources +- `K8S_BASELINE_ROLLOUT` + - generate baseline resources based on the definition of the primary resource in the target commit and apply them +- `K8S_BASELINE_CLEAN` + - remove all baseline resources +- `K8S_TRAFFIC_ROUTING` + - split traffic between variants + +and other common stages: +- `WAIT` +- `WAIT_APPROVAL` +- `ANALYSIS` + +See the description of each stage at [Customize application deployment](../../customizing-deployment/). + +## Manifest Templating + +In addition to plain-YAML, PipeCD also supports Helm and Kustomize for templating application manifests. 
+ +A helm chart can be loaded from: +- the same git repository with the application directory, we call as a `local chart` + +``` yaml +apiVersion: pipecd.dev/v1beta1 +kind: KubernetesApp +spec: + input: + helmChart: + path: ../../local/helm-charts/helloworld +``` + +- a different git repository, we call as a `remote git chart` + +``` yaml +apiVersion: pipecd.dev/v1beta1 +kind: KubernetesApp +spec: + input: + helmChart: + gitRemote: git@github.com:pipe-cd/manifests.git + ref: v0.5.0 + path: manifests/helloworld +``` + +- a Helm chart repository, we call as a `remote chart` + +``` yaml +apiVersion: pipecd.dev/v1beta1 +kind: KubernetesApp +spec: + input: + helmChart: + repository: pipecd + name: helloworld + version: v0.5.0 +``` + +A kustomize base can be loaded from: +- the same git repository with the application directory, we call as a `local base` +- a different git repository, we call as a `remote base` + +See [Examples](../../../examples/#kubernetes-applications) for more specific. + +## Reference + +See [Configuration Reference](../../../configuration-reference/#kubernetes-application) for the full configuration. diff --git a/docs/content/en/docs-v0.50.x/user-guide/managing-application/defining-app-configuration/lambda.md b/docs/content/en/docs-v0.50.x/user-guide/managing-application/defining-app-configuration/lambda.md new file mode 100644 index 0000000000..d6bf0a15e8 --- /dev/null +++ b/docs/content/en/docs-v0.50.x/user-guide/managing-application/defining-app-configuration/lambda.md @@ -0,0 +1,171 @@ +--- +title: "Configuring Lambda application" +linkTitle: "Lambda" +weight: 4 +description: > + Specific guide to configuring deployment for Lambda application. +--- + +Deploying a Lambda application requires a `function.yaml` file placing inside the application directory. That file contains values to be used to deploy Lambda function on your AWS cluster. 
+Currently, Piped supports deploying all types of Lambda deployment packages: +- container images (called [container image as Lambda function](https://aws.amazon.com/blogs/aws/new-for-aws-lambda-container-image-support/)) +- `.zip` file archives (which stored in AWS S3) + +Besides, Piped also supports deploying your Lambda function __directly from the function source code__ which is stored in a remote git repository. + +#### Deploy container image as Lambda function + +A sample `function.yaml` file for container image as Lambda function used deployment as follows: + +```yaml +apiVersion: pipecd.dev/v1beta1 +kind: LambdaFunction +spec: + name: SimpleFunction + image: ecr.ap-northeast-1.amazonaws.com/lambda-test:v0.0.1 + role: arn:aws:iam::76xxxxxxx:role/lambda-role + # The amount of memory available to the Lambda application + # at runtime. The value can be any multiple of 1 MB. + memory: 512 + # Timeout of the Lambda application, the value must + # in between 1 to 900 seconds. + timeout: 30 + tags: + app: simple + environments: + FOO: bar + # ephemeralStorage is optional value. If you define a ephemeral storage to lambda, you can + # use this field. The value must be in between 512 to 10240 MB. + ephemeralStorage: + size: 512 + # vpcConfig is optional value. If you define a vpc configuration to lambda, you can + # use this field. + vpcConfig: + securityGroupIds: + - sg-01234 + - sg-56789 + subnetIds: + - subnet-01234 + - subnet-56789 +``` + +Except the `tags` and the `environments` field, all others are required fields for the deployment to run. + +The `role` value represents the service role (for your Lambda function to run), not for Piped agent to deploy your Lambda application. To be able to pull container images from AWS ECR, besides policies to run as usual, you need to add `Lambda.ElasticContainerRegistry` __read__ permission to your Lambda function service role. 
+ +The `environments` field represents environment variables that can be accessed by your Lambda application at runtime. __In case of no value set for this field, all environment variables for the deploying Lambda application will be revoked__, so make sure you set all currently required environment variables of your running Lambda application on `function.yaml` if you migrate your app to PipeCD deployment. + +#### Deploy .zip file archives as Lambda function + +It's recommended to use container image as Lambda function due to its simplicity, but as mentioned above, below is a sample `function.yaml` file for Lambda which uses zip packing source code stored in AWS S3. + +```yaml +apiVersion: pipecd.dev/v1beta1 +kind: LambdaFunction +spec: + name: SimpleZipPackingS3Function + role: arn:aws:iam::76xxxxxxx:role/lambda-role + # --- 3 next lines allow Piped to determine your Lambda function code stored in AWS S3. + s3Bucket: pipecd-sample-lambda + s3Key: pipecd-sample-src + s3ObjectVersion: 1pTK9_v0Kd7I8Sk4n6abzCL + # --- + handler: app.lambdaHandler + runtime: nodejs14.x + memory: 512 + timeout: 30 + environments: + FOO: bar + tags: + app: simple-zip-s3 +``` + +Value for the `runtime` field should be listed in [AWS Lambda runtimes official docs](https://docs.aws.amazon.com/lambda/latest/dg/lambda-runtimes.html). All other fields setting are remained as in the case of using [container image as Lambda function](#deploy-container-image-as-lambda-function) pattern. + +#### Deploy source code directly as Lambda function + +In case you don’t have a separated CI pipeline that provides artifacts (such as container image, built zip files) as its outputs and want to set up a simple pipeline to deploy the Lambda function directly from your source code, this deployment package is for you. 
+ +```yaml +apiVersion: pipecd.dev/v1beta1 +kind: LambdaFunction +spec: + name: SimpleCanaryZipFunction + role: arn:aws:iam::76xxxxxxx:role/lambda-role + # source configuration use to determine the source code of your Lambda function. + source: + # git remote address where the source code is placing. + git: git@github.com:username/lambda-function-code.git + # the commit SHA or tag for remote git. Use branch name means you will always use + # the latest code of that branch as Lambda function code which is NOT recommended. + ref: dede7cdea5bbd3fdbcc4674bfcd2b2f9e0579603 + # relative path from the repository root directory to the function code directory. + path: hello-world + handler: app.lambdaHandler + runtime: nodejs14.x + memory: 128 + timeout: 5 + tags: + app: canary-zip +``` + +All other fields setting are remained as in the case of using [.zip archives as Lambda function](#deploy-zip-file-archives-as-lambda-function) pattern. + +## Quick sync + +By default, when the [pipeline](../../../configuration-reference/#lambda-application) was not specified, PipeCD triggers a quick sync deployment for the merged pull request. +Quick sync for a Lambda deployment will roll out the new version and switch all traffic to it. + +## Sync with the specified pipeline + +The [pipeline](../../../configuration-reference/#lambda-application) field in the application configuration is used to customize the way to do the deployment. +You can add a manual approval before routing traffic to the new version or add an analysis stage the do some smoke tests against the new version before allowing them to receive the real traffic. + +These are the provided stages for Lambda application you can use to build your pipeline: + +- `LAMBDA_CANARY_ROLLOUT` + - deploy workloads of the new version, but it is still receiving no traffic. +- `LAMBDA_PROMOTE` + - promote the new version to receive an amount of traffic. 
+ +and other common stages: +- `WAIT` +- `WAIT_APPROVAL` +- `ANALYSIS` + +See the description of each stage at [Customize application deployment](../../customizing-deployment/). + +Here is an example that rolls out the new version gradually: + +``` yaml +apiVersion: pipecd.dev/v1beta1 +kind: LambdaApp +spec: + pipeline: + stages: + # Deploy workloads of the new version. + # But this is still receiving no traffic. + - name: LAMBDA_CANARY_ROLLOUT + # Promote new version to receive 10% of traffic. + - name: LAMBDA_PROMOTE + with: + percent: 10 + - name: WAIT + with: + duration: 10m + # Promote new version to receive 50% of traffic. + - name: LAMBDA_PROMOTE + with: + percent: 50 + - name: WAIT + with: + duration: 10m + # Promote new version to receive all traffic. + - name: LAMBDA_PROMOTE + with: + percent: 100 +``` + +## Reference + +See [Configuration Reference](../../../configuration-reference/#lambda-application) for the full configuration. diff --git a/docs/content/en/docs-v0.50.x/user-guide/managing-application/defining-app-configuration/terraform.md b/docs/content/en/docs-v0.50.x/user-guide/managing-application/defining-app-configuration/terraform.md new file mode 100644 index 0000000000..351992e133 --- /dev/null +++ b/docs/content/en/docs-v0.50.x/user-guide/managing-application/defining-app-configuration/terraform.md @@ -0,0 +1,42 @@ +--- +title: "Configuring Terraform application" +linkTitle: "Terraform" +weight: 2 +description: > + Specific guide to configuring deployment for Terraform application. +--- + +## Quick Sync + +By default, when the [pipeline](../../../configuration-reference/#terraform-application) was not specified, PipeCD triggers a quick sync deployment for the merged pull request. +Quick sync for a Terraform deployment does `terraform plan` and if there are any changes detected it applies those changes automatically. 
+
+## Sync with the specified pipeline
+
+The [pipeline](../../../configuration-reference/#terraform-application) field in the application configuration is used to customize the way to do the deployment.
+You can add a manual approval before doing `terraform apply` or add an analysis stage after applying the changes to determine the impact of those changes.
+
+These are the provided stages for Terraform application you can use to build your pipeline:
+
+- `TERRAFORM_PLAN`
+  - do the terraform plan and show the changes that will be applied
+- `TERRAFORM_APPLY`
+  - apply all the infrastructure changes
+
+and other common stages:
+- `WAIT`
+- `WAIT_APPROVAL`
+- `ANALYSIS`
+
+See the description of each stage at [Customize application deployment](../../customizing-deployment/).
+
+## Module location
+
+A Terraform module can be loaded from:
+
+- the same git repository with the application directory, which we call a `local module`
+- a different git repository, which we call a `remote module`
+
+## Reference
+
+See [Configuration Reference](../../../configuration-reference/#terraform-application) for the full configuration.
diff --git a/docs/content/en/docs-v0.50.x/user-guide/managing-application/deployment-chain.md b/docs/content/en/docs-v0.50.x/user-guide/managing-application/deployment-chain.md
new file mode 100644
index 0000000000..052a539234
--- /dev/null
+++ b/docs/content/en/docs-v0.50.x/user-guide/managing-application/deployment-chain.md
@@ -0,0 +1,64 @@
+---
+title: "Deployment chain"
+linkTitle: "Deployment chain"
+weight: 11
+description: >
+  Specific guide for configuring chain of deployments.
+---
+
+For users who want to use PipeCD to build a complex deployment flow, which contains multiple applications across multiple application kinds and rolls them out to multiple clusters gradually or promotes across environments, this guideline will show you how to use PipeCD to achieve that requirement.
+
+## Configuration
+
+The idea of this feature is to trigger the whole deployment chain when a specified deployment is triggered. To enable triggering the deployment chain, we need to add a configuration section named `postSync` which contains all configurations that will be used when the deployment is triggered. For this `Deployment Chain` feature, the configuration is under the `postSync.chain` section.
+
+A canonical configuration looks as below:
+
+```yaml
+apiVersion: pipecd.dev/v1beta1
+kind: TerraformApp
+spec:
+  input:
+    ...
+  pipeline:
+    ...
+  postSync:
+    chain:
+      applications:
+        # Find all applications with name `application-2` and trigger them.
+        - name: application-2
+        # Find all applications with name `application-3` of kind `KUBERNETES`
+        # and trigger them.
+        - name: application-3
+          kind: KUBERNETES
+```
+
+As a result, the above configuration will be used to create a deployment chain like the below figure.
+
+![](/images/deployment-chain-figure.png)
+
+In the context of the deployment chain in PipeCD, a chain is made up of many `blocks`, and each block contains multiple `nodes`, each of which is a reference to a deployment. The first block in the chain always contains only one node, which is the deployment that triggers the whole chain. Other blocks of the chain are built using filters which are configurable via the `postSync.chain.applications` section. As for the above example, the second block `Block 2` contains 2 different nodes, which are 2 different PipeCD applications with the same name `application-2`.
+
+__Tip__:
+
+1. If you followed all the configuration references and built your deployment chain configuration, but some deployments in your defined chain are not triggered as you want, please re-check those deployments' [`trigger configuration`](../triggering-a-deployment/#trigger-configuration). The `onChain` trigger is __disabled by default__; you need to enable that configuration to enable your deployment to be triggered as a node in the deployment chain.
+2. Values configured under `postSync.chain.applications` - which we call the __Application matcher__ - are merged using the `AND` operator. Currently, only `name` and `kind` are supported, but `labels` will also be supported soon.
+
+See [Examples](../../examples/#deployment-chain) for more specific examples.
+
+## Deployment chain characteristics
+
+Something you need to care about while creating your deployment chain with PipeCD:
+
+1. The deployment chain blocks are run in sequence, one by one. But all nodes in the same block are run in parallel, so you should ensure that all nodes (deployments) in the same block do not depend on each other.
+2. Once a node in a block has finished with `FAILURE` or `CANCELLED` status, the containing block will be set to fail, and all other nodes which have not yet finished will be set to `CANCELLED` status (those nodes will be rolled back if they're in the middle of their deploying process). Consequently, all blocks after that failed block will be set to `CANCELLED` status and be stopped.
+
+## Console view
+
+![](/images/deployment-chain-console.png)
+
+The UI for this deployment chain feature is currently under development; we can only __view deployments in the chain one by one__ on the deployments page and deployment detail page as usual.
+
+## Reference
+
+See [Configuration Reference](../../configuration-reference/#postsync) for the full configuration.
diff --git a/docs/content/en/docs-v0.50.x/user-guide/managing-application/manifest-attachment.md b/docs/content/en/docs-v0.50.x/user-guide/managing-application/manifest-attachment.md
new file mode 100644
index 0000000000..affb77e2ce
--- /dev/null
+++ b/docs/content/en/docs-v0.50.x/user-guide/managing-application/manifest-attachment.md
@@ -0,0 +1,65 @@
+---
+title: "Manifest attachment"
+linkTitle: "Manifest attachment"
+weight: 10
+description: >
+  Attach configuration across manifest files during deployment.
+--- + +For insensitive data which needs to be attached/mounted as a configuration of other resources, Kubernetes ConfigMaps is a simple and bright idea. How about the other application kinds, which need something as simple as k8s ConfigMaps? PipeCD has attachment feature for your usecase. + +## Configuration + +Suppose you have `config.yaml` file which contains + +```yaml +mysql: + rootPassword: "test" + database: "pipecd" +``` + +Then your application configuration will be configured like this + +```yaml +apiVersion: pipecd.dev/v1beta1 +kind: ECSApp +spec: + name: secret-management + labels: + env: example + team: xyz + input: + ... + attachment: + sources: + config: config.yaml + targets: + - taskdef.yaml +``` + +The configuration says that: The file `config.yaml` will be used as an attachment for others, its content will be referred as `config`. The target files, that can use the `config.yaml` file as an attachment, are currently configured to `taskdef.yaml` file. + +And in the "target" file, which uses `config.yaml` file content + +```yaml +... +containerDefinitions: + - command: "echo {{ .attachment.config }}" + image: nginx:1 + cpu: 100 + memory: 100 + name: web +... +``` + +In all cases, `Piped` will perform attaching the attachment file content at last, right before using it to handle any deployment tasks. + +__Tip__: + +This feature can be used in combo with PipeCD [SecretManagement feature](../secret-management). You can encrypt your secret data using PipeCD secret encryption function, it will be decrypted and placed in your configuration files; then the PipeCD attachment feature will attach that decrypted configuration to the manifest of resource, which requires that configuration. + +See examples for detail. 
+
+## Examples
+
+- [examples/ecs/attachment](https://github.com/pipe-cd/examples/tree/master/ecs/attachment)
diff --git a/docs/content/en/docs-v0.50.x/user-guide/managing-application/rolling-back-a-deployment.md b/docs/content/en/docs-v0.50.x/user-guide/managing-application/rolling-back-a-deployment.md
new file mode 100644
index 0000000000..4997f41bb5
--- /dev/null
+++ b/docs/content/en/docs-v0.50.x/user-guide/managing-application/rolling-back-a-deployment.md
@@ -0,0 +1,21 @@
+---
+title: "Rolling back a deployment"
+linkTitle: "Rolling back a deployment"
+weight: 6
+description: >
+  This page describes when a deployment is rolled back automatically and how to manually roll back a deployment.
+---
+
+Rolling back a deployment can be automated by enabling the `autoRollback` field in the application configuration. When `autoRollback` is enabled, the deployment will be rolled back if any of the following conditions are met:
+- a stage of the deployment pipeline failed
+- an analysis stage determined that the deployment had a negative impact
+- any error occurs while deploying
+
+When the rolling back process is triggered, a new `ROLLBACK` stage will be added to the deployment pipeline and it reverts all the applied changes.
+
+![](/images/rolled-back-deployment.png)
+

+A deployment was rolled back +

+ +Alternatively, manually rolling back a running deployment can be done from web UI by clicking on `Cancel with rollback` button. diff --git a/docs/content/en/docs-v0.50.x/user-guide/managing-application/secret-management.md b/docs/content/en/docs-v0.50.x/user-guide/managing-application/secret-management.md new file mode 100755 index 0000000000..c1ddc15912 --- /dev/null +++ b/docs/content/en/docs-v0.50.x/user-guide/managing-application/secret-management.md @@ -0,0 +1,122 @@ +--- +title: "Secret management" +linkTitle: "Secret management" +weight: 9 +description: > + Storing secrets safely in the Git repository. +--- + +When doing GitOps, user wants to use Git as a single source of truth. But storing credentials like Kubernetes Secret or Terraform's credentials directly in Git is not safe. +This feature helps you keep that sensitive information safely in Git, right next to your application manifests. + +Basically, the flow will look like this: +- user encrypts their secret data via the PipeCD's Web UI and stores the encrypted data in Git +- `Piped` decrypts them before doing deployment tasks + +## Prerequisites + +Before using this feature, `Piped` needs to be started with a key pair for secret encryption. + +You can use the following command to generate a key pair: + +``` console +openssl genpkey -algorithm RSA -pkeyopt rsa_keygen_bits:2048 -out private-key +openssl pkey -in private-key -pubout -out public-key +``` + +Then specify them while [installing](../../../installation/install-piped/installing-on-kubernetes) the `Piped` with these options: + +``` console +--set-file secret.data.secret-public-key=PATH_TO_PUBLIC_KEY_FILE \ +--set-file secret.data.secret-private-key=PATH_TO_PRIVATE_KEY_FILE +``` + +Finally, enable this feature in Piped configuration file with `secretManagement` field as below: + +``` yaml +apiVersion: pipecd.dev/v1beta1 +kind: Piped +spec: + pipedID: your-piped-id + ... 
+ secretManagement: + type: KEY_PAIR + config: + privateKeyFile: /etc/piped-secret/secret-private-key + publicKeyFile: /etc/piped-secret/secret-public-key +``` + +## Encrypting secret data + +In order to encrypt the secret data, go to the application list page and click on the options icon at the right side of the application row, choose "Encrypt Secret" option. +After that, input your secret data and click on "ENCRYPT" button. +The encrypted data should be shown for you. Copy it to store in Git. + +![](/images/sealed-secret-application-list.png) +

+Application list page +

+ +
+ +![](/images/sealed-secret-encrypting-form.png) +

+The form for encrypting secret data +

+
+## Storing encrypted secrets in Git
+
+To make encrypted secrets available to an application, they must be specified in the application configuration file of that application.
+
+- `encryptedSecrets` contains a list of the encrypted secrets.
+- `decryptionTargets` contains a list of files that are using one of the encrypted secrets and should be decrypted by `Piped`.
+
+``` yaml
+apiVersion: pipecd.dev/v1beta1
+# One of Piped defined app kind such as: KubernetesApp
+kind: {APPLICATION_KIND}
+spec:
+  encryption:
+    encryptedSecrets:
+      password: encrypted-data
+    decryptionTargets:
+      - secret.yaml
+```
+
+## Accessing encrypted secrets
+
+Any file in the application directory can use the `.encryptedSecrets` context to access secrets you have encrypted and stored in the application configuration.
+
+For example,
+
+- Accessing by a Kubernetes Secret manifest
+
+``` yaml
+apiVersion: v1
+kind: Secret
+metadata:
+  name: simple-sealed-secret
+data:
+  password: "{{ .encryptedSecrets.password }}"
+```
+
+- Configuring an ENV variable of a Lambda function to use an encrypted secret
+
+``` yaml
+apiVersion: pipecd.dev/v1beta1
+kind: LambdaFunction
+spec:
+  name: HelloFunction
+  environments:
+    KEY: "{{ .encryptedSecrets.key }}"
+```
+
+In all cases, `Piped` will decrypt the encrypted secrets and render the decryption target files before using them to handle any deployment tasks.
+ +## Examples + +- [examples/kubernetes/secret-management](https://github.com/pipe-cd/examples/tree/master/kubernetes/secret-management) +- [examples/cloudrun/secret-management](https://github.com/pipe-cd/examples/tree/master/cloudrun/secret-management) +- [examples/lambda/secret-management](https://github.com/pipe-cd/examples/tree/master/lambda/secret-management) +- [examples/terraform/secret-management](https://github.com/pipe-cd/examples/tree/master/terraform/secret-management) +- [examples/ecs/secret-management](https://github.com/pipe-cd/examples/tree/master/ecs/secret-management) diff --git a/docs/content/en/docs-v0.50.x/user-guide/managing-application/triggering-a-deployment.md b/docs/content/en/docs-v0.50.x/user-guide/managing-application/triggering-a-deployment.md new file mode 100644 index 0000000000..3fcb5559ab --- /dev/null +++ b/docs/content/en/docs-v0.50.x/user-guide/managing-application/triggering-a-deployment.md @@ -0,0 +1,50 @@ +--- +title: "Triggering a deployment" +linkTitle: "Triggering a deployment" +weight: 4 +description: > + This page describes when a deployment is triggered automatically and how to manually trigger a deployment. +--- + +PipeCD uses Git as a single source of truth; all application resources are defined declaratively and immutably in Git. Whenever a developer wants to update the application or infrastructure, they will send a pull request to that Git repository to propose the change. The state defined in Git is the desired state for the application and infrastructure running in the cluster. + +PipeCD applies the proposed changes to running resources in the cluster by triggering needed deployments for applications. The deployment mission is syncing all running resources of the application in the cluster to the state specified in the newest commit in Git. + +By default, when a new merged pull request touches an application, a new deployment for that application will be triggered to execute the sync process. 
But users can configure the application to control when a new deployment should be triggered or not. For example, using [`onOutOfSync`](#trigger-configuration) to enable the ability to attempt to resolve the `OUT_OF_SYNC` state whenever a configuration drift has been detected.
+
+### Trigger configuration
+
+Configuration for the trigger used to determine whether we trigger a new deployment. There are several configurable types:
+- `onCommit`: Controls triggering a new deployment when new Git commits touched the application.
+- `onCommand`: Controls triggering a new deployment when a new `SYNC` command is received.
+- `onOutOfSync`: Controls triggering a new deployment when the application is at `OUT_OF_SYNC` state.
+- `onChain`: Controls triggering a new deployment when the application is counted as a node of some chains.
+
+See [Configuration Reference](../../configuration-reference/#deploymenttrigger) for the full configuration.
+
+After a new deployment was triggered, it will be queued to be handled by the appropriate `piped`. At this time the deployment pipeline has not been decided yet.
+`piped` schedules all deployments of applications to ensure that for each application only one deployment will be executed at the same time.
+When no deployment of an application is running, `piped` picks a queued one to plan the deploying pipeline.
+`piped` plans the deploying pipeline based on the application configuration and the diff between the running state and the specified state in the newest commit.
+For example:
+
+- when the merged pull request updated a Deployment's container image or updated a mounting ConfigMap or Secret, `piped` planner will decide that the deployment should use the specified pipeline to do a progressive deployment.
+- when the merged pull request just updated the `replicas` number, `piped` planner will decide to use a quick sync to scale the resources.
+ +You can force `piped` planner to decide to use the [QuickSync](../../../concepts/#sync-strategy) or the specified pipeline based on the commit message by configuring [CommitMatcher](../../configuration-reference/#commitmatcher) in the application configuration. + +After being planned, the deployment will be executed as the decided pipeline. The deployment execution including the state of each stage as well as their logs can be viewed in realtime at the deployment details page. + +![](/images/deployment-details.png) +

+A Running Deployment at the Deployment Details Page +

+ +As explained above, by default all deployments will be triggered automatically by checking the merged commits but you also can manually trigger a new deployment from web UI. +By clicking on `SYNC` button at the application details page, a new deployment for that application will be triggered to sync the application to be the state specified at the newest commit of the master branch (default branch). + +![](/images/application-details.png) +

+Application Details Page +

+ diff --git a/docs/content/en/docs-v0.50.x/user-guide/managing-controlplane/_index.md b/docs/content/en/docs-v0.50.x/user-guide/managing-controlplane/_index.md new file mode 100644 index 0000000000..51de59988b --- /dev/null +++ b/docs/content/en/docs-v0.50.x/user-guide/managing-controlplane/_index.md @@ -0,0 +1,7 @@ +--- +title: "Managing Control Plane" +linkTitle: "Managing Control Plane" +weight: 4 +description: > + This guide is for administrators and operators wanting to install and configure PipeCD for other developers. +--- diff --git a/docs/content/en/docs-v0.50.x/user-guide/managing-controlplane/adding-a-project.md b/docs/content/en/docs-v0.50.x/user-guide/managing-controlplane/adding-a-project.md new file mode 100644 index 0000000000..e162c6adf5 --- /dev/null +++ b/docs/content/en/docs-v0.50.x/user-guide/managing-controlplane/adding-a-project.md @@ -0,0 +1,24 @@ +--- +title: "Adding a project" +linkTitle: "Adding a project" +weight: 2 +description: > + This page describes how to set up a new project. +--- + +The control plane ops can add a new project for a team. +Project adding can be simply done from an internal web page prepared for the ops. +Because that web service is running in an `ops` pod, so in order to access it, using `kubectl port-forward` command to forward a local port to a port on the `ops` pod as following: + +``` console +kubectl port-forward service/pipecd-ops 9082 --namespace={NAMESPACE} +``` + +Then, access to [http://localhost:9082](http://localhost:9082). + +On that page, you will see the list of registered projects and a link to register new projects. +Registering a new project requires only a unique ID string and an optional description text. + +Once a new project has been registered, a static admin (username, password) will be automatically generated for the project admin. You can send that information to the project admin. The project admin first uses the provided static admin information to log in to PipeCD. 
After that, they can change the static admin information, configure SSO and RBAC, or disable the static admin user.
+
+__Caution:__ The Role-Based Access Control (RBAC) setting is required to enable your team to log in using SSO, so please make sure you have that set up before disabling the static admin user.
\ No newline at end of file
diff --git a/docs/content/en/docs-v0.50.x/user-guide/managing-controlplane/architecture-overview.md b/docs/content/en/docs-v0.50.x/user-guide/managing-controlplane/architecture-overview.md
new file mode 100644
index 0000000000..4166700b69
--- /dev/null
+++ b/docs/content/en/docs-v0.50.x/user-guide/managing-controlplane/architecture-overview.md
@@ -0,0 +1,40 @@
+---
+title: "Architecture overview"
+linkTitle: "Architecture overview"
+weight: 1
+description: >
+  This page describes the architecture of the control plane.
+---
+
+![](/images/control-plane-components.png)
+

+Component Architecture +

+ +The control plane is a centralized part of PipeCD. It contains several services as below to manage the application, deployment data and handle all requests from `piped`s and web clients: + +##### Server + +`server` handles all incoming gRPC requests from `piped`s, web clients, incoming HTTP requests such as auth callback from third party services. +It also serves all web assets including HTML, JS, CSS... +This service can be easily scaled by updating the pod number. + +##### Cache + +`cache` is a single pod service for caching internal data used by `server` service. Currently, this `cache` service is powered by `redis`. +You can configure the control plane to use a fully-managed redis cache service instead of launching a cache pod in your cluster. + +##### Ops + +`ops` is a single pod service for operating PipeCD owner's tasks. +For example, it provides an internal web page for adding and managing projects; it periodically removes the old data; it collects and saves the deployment insights. + +##### Data Store + +`Data store` is a storage for storing model data such as applications and deployments. This can be a fully-managed service such as GCP [Firestore](https://cloud.google.com/firestore), GCP [Cloud SQL](https://cloud.google.com/sql) or AWS [RDS](https://aws.amazon.com/rds/) (currently we choose [MySQL v8](https://www.mysql.com/) as supported relational data store). You can also configure the control plane to use a self-managed MySQL server. +When installing the control plane, you have to choose one of the provided data store services. + +##### File Store + +`File store` is a storage for storing stage logs, application live states. This can be a fully-managed service such as GCP [GCS](https://cloud.google.com/storage), AWS [S3](https://aws.amazon.com/s3/), or a self-managed service such as [Minio](https://github.com/minio/minio). +When installing the control plane, you have to choose one of the provided file store services. 
diff --git a/docs/content/en/docs-v0.50.x/user-guide/managing-controlplane/auth.md b/docs/content/en/docs-v0.50.x/user-guide/managing-controlplane/auth.md new file mode 100644 index 0000000000..a86d9e5f79 --- /dev/null +++ b/docs/content/en/docs-v0.50.x/user-guide/managing-controlplane/auth.md @@ -0,0 +1,183 @@ +--- +title: "Authentication and authorization" +linkTitle: "Authentication and authorization" +weight: 3 +description: > + This page describes about PipeCD Authentication and Authorization. +--- + +![](/images/settings-project-v0.38.x.png) + +### Static Admin + +When the PipeCD owner [adds a new project](../adding-a-project/), an admin account will be automatically generated for the project. After that, PipeCD owner sends that static admin information including username, password strings to the project admin, who can use that information to log in to PipeCD web with the admin role. + +After logging, the project admin should change the provided username and password. Or disable the static admin account after configuring the single sign-on for the project. + +### Single Sign-On (SSO) + +Single sign-on (SSO) allows users to log in to PipeCD by relying on a trusted third-party service. + +**Supported service** + +- GitHub +- Generic OIDC + +> Note: In the future, we want to support such as Google Gmail, Bitbucket... + +#### Github + +Before configuring the SSO, you need an OAuth application of the using service. For example, GitHub SSO requires creating a GitHub OAuth application as described in this page: + +https://docs.github.com/en/developers/apps/creating-an-oauth-app + +The authorization callback URL should be `https://YOUR_PIPECD_ADDRESS/auth/callback`. + +![](/images/settings-update-sso.png) + +#### Generic OIDC + +PipeCD supports any OIDC provider, with tested providers including Keycloak, Auth0, and AWS Cognito. The only supported authentication flow currently is the Authorization Code Grant. 
+ +Requirements: + +- The IdToken will be used to decide the user's role and username. +- The IdToken must contain information about the Username and Role. + - Supported Claims Key for Username (in order of priority): `username`, `preferred_username`,`name`, `cognito:username` + - Supported Claims Key for Role (in order of priority): `groups`, `roles`, `cognito:groups`, `custom:roles`, `custom:groups` + - Supported Claims Key for Avatar (in order of priority): `picture`, `avatar_url` + +Provider Configuration Examples: + +##### Keycloak + +- **Client authentication**: On +- **Valid redirect URIs**: `https://YOUR_PIPECD_ADDRESS/auth/callback` +- **Client scopes**: Add a new mapper to the `-dedicated` scope. For instance, map Group Membership information to the groups claim (Full group path should be off). + +- **Control Plane configuration**: + + ```yaml + apiVersion: "pipecd.dev/v1beta1" + kind: ControlPlane + spec: + sharedSSOConfigs: + - name: oidc + provider: OIDC + oidc: + clientId: + clientSecret: + issuer: https:///realms/ + redirect_uri: https:///auth/callback + scopes: + - openid + - profile + ``` + +##### Auth0 + +- **Allowed Callback URLs**: `https://YOUR_PIPECD_ADDRESS/auth/callback` +- **Control Plane configuration**: + + ```yaml + apiVersion: "pipecd.dev/v1beta1" + kind: ControlPlane + spec: + sharedSSOConfigs: + - name: oidc + provider: OIDC + oidc: + clientId: + clientSecret: + issuer: https:// + redirect_uri: https:///auth/callback + scopes: + - openid + - profile + ``` + +- **Roles/Groups Claims** + For Role or Groups information mapping using Auth0 Actions, here is an example for setting `custom:roles`: + + ```javascript + exports.onExecutePostLogin = async (event, api) => { + let namespace = "custom"; + if (namespace && !namespace.endsWith("/")) { + namespace += ":"; + } + api.idToken.setCustomClaim(namespace + "roles", event.authorization.roles); + }; + ``` + +##### AWS Cognito + +- **Allowed Callback URLs**: 
`https://YOUR_PIPECD_ADDRESS/auth/callback` + +- **Control Plane configuration**: + + ```yaml + apiVersion: "pipecd.dev/v1beta1" + kind: ControlPlane + spec: + sharedSSOConfigs: + - name: oidc + provider: OIDC + oidc: + clientId: + clientSecret: + issuer: https://cognito-idp..amazonaws.com/ + redirect_uri: https:///auth/callback + scopes: + - openid + - profile + ``` + +The project can be configured to use a shared SSO configuration (shared OAuth application) instead of needing a new one. In that case, while creating the project, the PipeCD owner specifies the name of the shared SSO configuration should be used, and then the project admin can skip configuring SSO at the settings page. + +### Role-Based Access Control (RBAC) + +Role-based access control (RBAC) allows restricting access on the PipeCD web-based on the roles of user groups within the project. Before using this feature, the SSO must be configured. + +PipeCD provides three built-in roles: + +- `Viewer`: has only permissions to view existing resources or data. +- `Editor`: has all viewer permissions, plus permissions for actions that modify state, such as manually syncing application, canceling deployment... +- `Admin`: has all editor permissions, plus permissions for updating project configurations. + +#### Configuring the PipeCD's roles + +The below table represents PipeCD's resources with actions on those resources. + +| resource | get | list | create | update | delete | +| :---------- | :-: | :--: | :----: | :----: | :----: | +| application | ○ | ○ | ○ | ○ | ○ | +| deployment | ○ | ○ | | ○ | | +| event | | ○ | | | | +| piped | ○ | ○ | ○ | ○ | | +| project | ○ | | | ○ | | +| apiKey | | ○ | ○ | ○ | | +| insight | ○ | | | | | + +Each role is defined as a combination of multiple policies under this format. + +``` +resources=RESOURCE_NAMES;actions=ACTION_NAMES +``` + +The `*` represents all resources and all actions for a resource. 
+
+```
+resources=*;actions=ACTION_NAMES
+resources=RESOURCE_NAMES;actions=*
+resources=*;actions=*
+```
+
+#### Configuring the PipeCD's user groups
+
+User Group represents a relation with a specific team (GitHub)/group (Google) and an arbitrary role. All users belonging to a team/group will have all permissions of that team/group.
+
+In case of using the GitHub team as a PipeCD user group, the PipeCD user group must be set in lowercase. For example, if your GitHub team is named `ORG/ABC-TEAM`, the PipeCD user group would be set as `ORG/abc-team`. (This follows the GitHub team URL format: github.com/orgs/{organization-name}/teams/{TEAM-NAME})
+
+Note: You CANNOT assign multiple roles to a team/group; create a new role with suitable permissions instead.
+
+![](/images/settings-add-user-group.png)
diff --git a/docs/content/en/docs-v0.50.x/user-guide/managing-controlplane/configuration-reference.md b/docs/content/en/docs-v0.50.x/user-guide/managing-controlplane/configuration-reference.md
new file mode 100644
index 0000000000..0554faa006
--- /dev/null
+++ b/docs/content/en/docs-v0.50.x/user-guide/managing-controlplane/configuration-reference.md
@@ -0,0 +1,176 @@
+---
+title: "Configuration reference"
+linkTitle: "Configuration reference"
+weight: 6
+description: >
+  This page describes all configurable fields in the Control Plane configuration.
+---
+
+``` yaml
+apiVersion: pipecd.dev/v1beta1
+kind: ControlPlane
+spec:
+  address: https://your-pipecd-address
+  ...
+```
+
+## Control Plane Configuration
+
+| Field | Type | Description | Required |
+|-|-|-|-|
+| stateKey | string | A randomly generated string used to sign oauth state. | Yes |
+| datastore | [DataStore](#datastore) | Storage for storing application, deployment data. | Yes |
+| filestore | [FileStore](#filestore) | File storage for storing deployment logs and application states. | Yes |
+| cache | [Cache](#cache) | Internal cache configuration. | No |
+| address | string | The address to the control plane. 
This is required if SSO is enabled. | No |
+| insightCollector | [InsightCollector](#insightcollector) | Option to run collector of Insights feature. | No |
+| sharedSSOConfigs | [][SharedSSOConfig](#sharedssoconfig) | List of shared SSO configurations that can be used by any project. | No |
+| projects | [][Project](#project) | List of debugging/quickstart projects. Please note: do not use this to configure projects running in production. | No |
+
+## DataStore
+
+| Field | Type | Description | Required |
+|-|-|-|-|
+| type | string | Which type of data store should be used. Can be one of the following values
`FIRESTORE`, `MYSQL`. | Yes | +| config | [DataStoreConfig](#datastoreconfig) | Specific configuration for the datastore type. This must be one of these DataStoreConfig. | Yes | + +## DataStoreConfig + +Must be one of the following objects: + +### DataStoreFireStoreConfig + +| Field | Type | Description | Required | +|-|-|-|-| +| namespace | string | The root path element considered as a logical namespace, e.g. `pipecd`. | Yes | +| environment | string | The second path element considered as a logical environment, e.g. `dev`. All pipecd collections will have path formatted according to `{namespace}/{environment}/{collection-name}`. | Yes | +| collectionNamePrefix | string | The prefix for collection name. This can be used to avoid conflicts with existing collections in your Firestore database. | No | +| project | string | The name of GCP project hosting the Firestore. | Yes | +| credentialsFile | string | The path to the service account file for accessing Firestores. | No | + + +### DataStoreMySQLConfig + +| Field | Type | Description | Required | +|-|-|-|-| +| url | string | The address to MySQL server. Should attach with the database port info as `127.0.0.1:3307` in case you want to use another port than the default value. | Yes | +| database | string | The name of database. | No (If you set it via URL) | +| usernameFile | string | Path to the file containing the username. | No | +| passwordFile | string | Path to the file containing the password. | No | + + +## FileStore + +| Field | Type | Description | Required | +|-|-|-|-| +| type | string | Which type of file store should be used. Can be one of the following values
`GCS`, `S3`, `MINIO` | Yes | +| config | [FileStoreConfig](#filestoreconfig) | Specific configuration for the filestore type. This must be one of these FileStoreConfig. | Yes | + +## FileStoreConfig + +Must be one of the following objects: + +### FileStoreGCSConfig + +| Field | Type | Description | Required | +|-|-|-|-| +| bucket | string | The bucket name. | Yes | +| credentialsFile | string | The path to the service account file for accessing GCS. | No | + +### FileStoreS3Config + +| Field | Type | Description | Required | +|-|-|-|-| +| bucket | string | The AWS S3 bucket name. | Yes | +| region | string | The AWS region name. | Yes | +| profile | string | The AWS profile name. Default value is `default`. | No | +| credentialsFile | string | The path to AWS credential file. Requires only if you want to auth by specified credential file, by default PipeCD will use `$HOME/.aws/credentials` file. | No | +| roleARN | string | The IAM role arn to use when assuming an role. Requires only if you want to auth by `WebIdentity` pattern. | No | +| tokenFile | string | The path to the WebIdentity token PipeCD should use to assume a role with. Requires only if you want to auth by `WebIdentity` pattern. | No | + +### FileStoreMinioConfig + +| Field | Type | Description | Required | +|-|-|-|-| +| endpoint | string | The address of Minio. | Yes | +| bucket | string | The bucket name. | Yes | +| accessKeyFile | string | The path to the access key file. | No | +| secretKeyFile | string | The path to the secret key file. | No | +| autoCreateBucket | bool | Whether the given bucket should be made automatically if not exists. | No | + +## Cache + +| Field | Type | Description | Required | +|-|-|-|-| +| ttl | duration | The time that in-memory cache items are stored before they are considered as stale. | Yes | + +## Project + +| Field | Type | Description | Required | +|-|-|-|-| +| id | string | The unique identifier of the project. 
| Yes | +| desc | string | The description about the project. | No | +| staticAdmin | [ProjectStaticUser](#projectstaticuser) | Static admin account of the project. | Yes | + +## ProjectStaticUser + +| Field | Type | Description | Required | +|-|-|-|-| +| username | string | The username string. | Yes | +| passwordHash | string | The bcrypt hashed value of the password string. | Yes | + +## InsightCollector + +| Field | Type | Description | Required | +|-|-|-|-| +| application | [InsightCollectorApplication](#insightcollectorapplication) | Application metrics collector. | No | +| deployment | [InsightCollectorDeployment](#insightcollectordeployment) | Deployment metrics collector. | No | + +## InsightCollectorApplication + +| Field | Type | Description | Required | +|-|-|-|-| +| enabled | bool | Whether to enable. Default is `true` | No | +| schedule | string | When collector will be executed. Default is `0 * * * *` | No | + +## InsightCollectorDeployment + +| Field | Type | Description | Required | +|-|-|-|-| +| enabled | bool | Whether to enable. Default is `true` | No | +| schedule | string | When collector will be executed. Default is `30 * * * *` | No | +| chunkMaxCount | int | The maximum number of deployment items could be stored in a chunk. Default is `1000` | No | + +## SharedSSOConfig + +| Field | Type | Description | Required | +|-|-|-|-| +| name | string | The unique name of the configuration. | Yes | +| provider | string | The SSO service provider. Currently, only `GITHUB` and `OIDC` is supported. | Yes | +| sessionTtl | int | The time to live of session for SSO login. Unit is `hour`. Default is 7 * 24 hours. | No | +| github | [SSOConfigGitHub](#ssoconfiggithub) | GitHub sso configuration. | No | +| oidc | [SSOConfigOIDC](#ssoconfigoidc) | OIDC sso configuration. | No | + +## SSOConfigGitHub + +| Field | Type | Description | Required | +|-|-|-|-| +| clientId | string | The client id string of GitHub oauth app. 
| Yes | +| clientSecret | string | The client secret string of GitHub oauth app. | Yes | +| baseUrl | string | The address of GitHub service. Required if enterprise. | No | +| uploadUrl | string | The upload url of GitHub service. | No | +| proxyUrl | string | The address of the proxy used while communicating with the GitHub service. | No | + +## SSOConfigOIDC + +| Field | Type | Description | Required | +|-|-|-|-| +| clientId | string | The client id string of OpenID Connect oauth app. | Yes | +| clientSecret | string | The client secret string of OpenID Connect oauth app. | Yes | +| issuer | string | The address of OpenID Connect service. | Yes | +| redirectUri | string | The address of the redirect URI. | Yes | +| authorizationEndpoint | string | The address of the authorization endpoint. | No | +| tokenEndpoint | string | The address of the token endpoint. | No | +| userInfoEndpoint | string | The address of the user info endpoint. | No | +| proxyUrl | string | The address of the proxy used while communicating with the OpenID Connect service. | No | +| scopes | []string | Scopes to request from the OpenID Connect service. Default is `openid`. Some providers may require other scopes. | No | diff --git a/docs/content/en/docs-v0.50.x/user-guide/managing-controlplane/registering-a-piped.md b/docs/content/en/docs-v0.50.x/user-guide/managing-controlplane/registering-a-piped.md new file mode 100644 index 0000000000..9719f26f8d --- /dev/null +++ b/docs/content/en/docs-v0.50.x/user-guide/managing-controlplane/registering-a-piped.md @@ -0,0 +1,16 @@ +--- +title: "Registering a piped" +linkTitle: "Registering a piped" +weight: 4 +description: > + This page describes how to register a new piped to a project. +--- + +The list of pipeds are shown in the Settings page. Anyone who has the project admin role can register a new piped by clicking on the `+ADD` button. + +
+
+![](/images/settings-add-piped.png)
+
+<p style="text-align: center;">
+Registering a new piped
+</p>
+
diff --git a/docs/content/en/docs-v0.50.x/user-guide/managing-piped/_index.md b/docs/content/en/docs-v0.50.x/user-guide/managing-piped/_index.md new file mode 100644 index 0000000000..8ce33fc697 --- /dev/null +++ b/docs/content/en/docs-v0.50.x/user-guide/managing-piped/_index.md @@ -0,0 +1,11 @@ +--- +title: "Managing Piped" +linkTitle: "Managing Piped" +weight: 3 +description: > + This guide is for administrators and operators wanting to install and configure piped for other developers. +--- + +In order to use Piped you need to register through PipeCD control plane, so please refer [register a Piped docs](../managing-controlplane/registering-a-piped/) if you do not have already. After registering successfully, you can monitor your Piped live state via the PipeCD console on the settings page. + +![piped-list-page](/images/piped-list-page.png) diff --git a/docs/content/en/docs-v0.50.x/user-guide/managing-piped/adding-a-git-repository.md b/docs/content/en/docs-v0.50.x/user-guide/managing-piped/adding-a-git-repository.md new file mode 100644 index 0000000000..97bf68b200 --- /dev/null +++ b/docs/content/en/docs-v0.50.x/user-guide/managing-piped/adding-a-git-repository.md @@ -0,0 +1,41 @@ +--- +title: "Adding a git repository" +linkTitle: "Adding git repository" +weight: 2 +description: > + This page describes how to add a new Git repository. +--- + +In the `piped` configuration file, we specify the list of Git repositories should be handled by the `piped`. +A Git repository contains one or more deployable applications where each application is put inside a directory called as [application directory](../../../concepts/#application-directory). +That directory contains an application configuration file as well as application manifests. +The `piped` periodically checks the new commits and fetches the needed manifests from those repositories for executing the deployment. + +A single `piped` can be configured to handle one or more Git repositories. 
+In order to enable a new Git repository, let's add a new [GitRepository](../configuration-reference/#gitrepository) block to the `repositories` field in the `piped` configuration file. + +For example, with the following snippet, `piped` will take the `master` branch of [pipe-cd/examples](https://github.com/pipe-cd/examples) repository as a target Git repository for doing deployments. + +``` yaml +apiVersion: pipecd.dev/v1beta1 +kind: Piped +spec: + ... + repositories: + - repoId: examples + remote: git@github.com:pipe-cd/examples.git + branch: master +``` + +In most of the cases, we want to deal with private Git repositories. For accessing those private repositories, `piped` needs a private SSH key, which can be configured while [installing](../../../installation/install-piped/installing-on-kubernetes/) with `secret.sshKey` in the Helm chart. + +``` console +helm install dev-piped pipecd/piped --version={VERSION} \ + --set-file config.data={PATH_TO_PIPED_CONFIG_FILE} \ + --set-file secret.data.piped-key={PATH_TO_PIPED_KEY_FILE} \ + --set-file secret.data.ssh-key={PATH_TO_PRIVATE_SSH_KEY_FILE} +``` + +You can see this [configuration reference](../configuration-reference/#git) for more configurable fields about Git commands. + +Currently, `piped` allows configuring only one private SSH key for all specified Git repositories. So you can configure the same SSH key for all of those private repositories, or break them into separate `piped`s. In the near future, we also want to update `piped` to support loading multiple SSH keys. 
diff --git a/docs/content/en/docs-v0.50.x/user-guide/managing-piped/adding-a-platform-provider.md b/docs/content/en/docs-v0.50.x/user-guide/managing-piped/adding-a-platform-provider.md new file mode 100644 index 0000000000..d231c26e38 --- /dev/null +++ b/docs/content/en/docs-v0.50.x/user-guide/managing-piped/adding-a-platform-provider.md @@ -0,0 +1,132 @@ +--- +title: "Adding a platform provider" +linkTitle: "Adding platform provider" +weight: 4 +description: > + This page describes how to add a platform provider to enable its applications. +--- + +PipeCD supports multiple platforms and multiple application kinds which run on those platforms. +Platform provider defines which platform and where the application should be deployed to. +So while registering a new application, the name of a configured platform provider is required. + +Currently, PipeCD is supporting these five kinds of platform providers: `KUBERNETES`, `ECS`, `TERRAFORM`, `CLOUDRUN`, `LAMBDA`. +A new platform provider can be enabled by adding a [PlatformProvider](../configuration-reference/#platformprovider) struct to the piped configuration file. +A piped can have one or multiple platform provider instances from the same or different platform provider kind. + +The next sections show the specific configuration for each kind of platform provider. + +### Configuring Kubernetes platform provider + +By default, piped deploys Kubernetes application to the cluster where the piped is running in. An external cluster can be connected by specifying the `masterURL` and `kubeConfigPath` in the [configuration](../configuration-reference/#platformproviderkubernetesconfig). + +And, the default resources (defined at [here](https://github.com/pipe-cd/pipecd/blob/master/pkg/app/piped/platformprovider/kubernetes/resourcekey.go)) from all namespaces of the Kubernetes cluster will be watched for rendering the application state in realtime and detecting the configuration drift. 
In case you want to restrict piped to watch only a single namespace, let specify the namespace in the [KubernetesAppStateInformer](../configuration-reference/#kubernetesappstateinformer) field. You can also add other resources or exclude resources to/from the watching targets by that field. + +Below configuration snippet just specifies a name and type of platform provider. It means the platform provider `kubernetes-dev` will connect to the Kubernetes cluster where the piped is running in, and this platform provider watches all of the predefined resources from all namespaces inside that cluster. + +``` yaml +apiVersion: pipecd.dev/v1beta1 +kind: Piped +spec: + ... + platformProviders: + - name: kubernetes-dev + type: KUBERNETES +``` + +See [ConfigurationReference](../configuration-reference/#platformproviderkubernetesconfig) for the full configuration. + +### Configuring Terraform platform provider + +A terraform platform provider contains a list of shared terraform variables that will be applied while running the deployment of its applications. + +``` yaml +apiVersion: pipecd.dev/v1beta1 +kind: Piped +spec: + ... + platformProviders: + - name: terraform-dev + type: TERRAFORM + config: + vars: + - "project=pipecd" +``` + +See [ConfigurationReference](../configuration-reference/#platformproviderterraformconfig) for the full configuration. + +### Configuring Cloud Run platform provider + +Adding a Cloud Run provider requires the name of the Google Cloud project and the region name where Cloud Run service is running. A service account file for accessing to Cloud Run is also required if the machine running the piped does not have enough permissions to access. + +``` yaml +apiVersion: pipecd.dev/v1beta1 +kind: Piped +spec: + ... 
+  platformProviders:
+    - name: cloudrun-dev
+      type: CLOUDRUN
+      config:
+        project: {GCP_PROJECT}
+        region: {CLOUDRUN_REGION}
+        credentialsFile: {PATH_TO_THE_SERVICE_ACCOUNT_FILE}
+```
+
+See [ConfigurationReference](../configuration-reference/#platformprovidercloudrunconfig) for the full configuration.
+
+### Configuring Lambda platform provider
+
+Adding a Lambda provider requires the region name where Lambda service is running.
+
+```yaml
+apiVersion: pipecd.dev/v1beta1
+kind: Piped
+spec:
+  ...
+  platformProviders:
+    - name: lambda-dev
+      type: LAMBDA
+      config:
+        region: {LAMBDA_REGION}
+        profile: default
+        credentialsFile: {PATH_TO_THE_CREDENTIAL_FILE}
+```
+
+You will generally need your AWS credentials to authenticate with Lambda. Piped provides multiple methods of loading these credentials.
+It attempts to retrieve credentials in the following order:
+1. From the environment variables. Available environment variables are `AWS_ACCESS_KEY_ID` or `AWS_ACCESS_KEY` and `AWS_SECRET_ACCESS_KEY` or `AWS_SECRET_KEY`.
+2. From the given credentials file. (the `credentialsFile` field in the above sample)
+3. From the pod running in EKS cluster via STS (SecurityTokenService).
+4. From the EC2 Instance Role.
+
+Therefore, you don't have to set credentialsFile if you use the environment variables or the EC2 Instance Role. Keep in mind the IAM role/user that you use with your Piped must possess the IAM policy permission for at least `Lambda.Function` and `Lambda.Alias` resources control (list/read/write).
+
+See [ConfigurationReference](../configuration-reference/#platformproviderlambdaconfig) for the full configuration.
+
+### Configuring ECS platform provider
+
+Adding an ECS provider requires the region name where ECS cluster is running.
+
+```yaml
+apiVersion: pipecd.dev/v1beta1
+kind: Piped
+spec:
+  ... 
+ platformProviders: + - name: ecs-dev + type: ECS + config: + region: {ECS_CLUSTER_REGION} + profile: default + credentialsFile: {PATH_TO_THE_CREDENTIAL_FILE} +``` + +Just same as Lambda platform provider, there are several ways to authorize Piped agent to enable it performs deployment jobs. +It attempts to retrieve credentials in the following order: +1. From the environment variables. Available environment variables are `AWS_ACCESS_KEY_ID` or `AWS_ACCESS_KEY` and `AWS_SECRET_ACCESS_KEY` or `AWS_SECRET_KEY`. +2. From the given credentials file. (the `credentialsFile field in above sample`) +3. From the pod running in EKS cluster via STS (SecurityTokenService). +4. From the EC2 Instance Role. + +See [ConfigurationReference](../configuration-reference/#platformproviderecsconfig) for the full configuration. diff --git a/docs/content/en/docs-v0.50.x/user-guide/managing-piped/adding-an-analysis-provider.md b/docs/content/en/docs-v0.50.x/user-guide/managing-piped/adding-an-analysis-provider.md new file mode 100644 index 0000000000..cc87d3a416 --- /dev/null +++ b/docs/content/en/docs-v0.50.x/user-guide/managing-piped/adding-an-analysis-provider.md @@ -0,0 +1,55 @@ +--- +title: "Adding an analysis provider" +linkTitle: "Adding analysis provider" +weight: 6 +description: > + This page describes how to add an analysis provider for doing deployment analysis. +--- + +To enable [Automated deployment analysis](../../managing-application/customizing-deployment/automated-deployment-analysis/) feature, you have to set the needed information for Piped to connect to the [Analysis Provider](../../../concepts/#analysis-provider). + +Currently, PipeCD supports the following providers: +- [Prometheus](https://prometheus.io/) +- [Datadog](https://datadoghq.com/) + + +## Prometheus +Piped queries the [range query endpoint](https://prometheus.io/docs/prometheus/latest/querying/api/#range-queries) to obtain metrics used to evaluate the deployment. 
+ +You need to define the Prometheus server address accessible to Piped. + +```yaml +apiVersion: pipecd.dev/v1beta1 +kind: Piped +spec: + analysisProviders: + - name: prometheus-dev + type: PROMETHEUS + config: + address: https://your-prometheus.dev +``` +The full list of configurable fields are [here](../configuration-reference/#analysisproviderprometheusconfig). + +## Datadog +Piped queries the [MetricsApi.QueryMetrics](https://docs.datadoghq.com/api/latest/metrics/#query-timeseries-points) endpoint to obtain metrics used to evaluate the deployment. + +```yaml +apiVersion: pipecd.dev/v1beta1 +kind: Piped +spec: + analysisProviders: + - name: datadog-dev + type: DATADOG + config: + apiKeyFile: /etc/piped-secret/datadog-api-key + applicationKeyFile: /etc/piped-secret/datadog-application-key +``` + +The full list of configurable fields are [here](../configuration-reference/#analysisproviderdatadogconfig). + +If you choose `Helm` as the installation method, we recommend using `--set-file` to mount the key files while performing the [upgrading process](../../../installation/install-piped/installing-on-kubernetes/#in-the-cluster-wide-mode). + +```console +--set-file secret.data.datadog-api-key={PATH_TO_API_KEY_FILE} \ +--set-file secret.data.datadog-application-key={PATH_TO_APPLICATION_KEY_FILE} +``` diff --git a/docs/content/en/docs-v0.50.x/user-guide/managing-piped/adding-helm-chart-repository-or-registry.md b/docs/content/en/docs-v0.50.x/user-guide/managing-piped/adding-helm-chart-repository-or-registry.md new file mode 100644 index 0000000000..79581d2d65 --- /dev/null +++ b/docs/content/en/docs-v0.50.x/user-guide/managing-piped/adding-helm-chart-repository-or-registry.md @@ -0,0 +1,60 @@ +--- +title: "Adding a Helm chart repository or registry" +linkTitle: "Adding Helm chart repo or registry" +weight: 5 +description: > + This page describes how to add a new Helm chart repository or registry. 
+--- + +PipeCD supports Kubernetes applications that are using Helm for templating and packaging. In addition to being able to deploy a Helm chart that is sourced from the same Git repository (`local chart`) or from a different Git repository (`remote git chart`), an application can use a chart sourced from a Helm chart repository. + +### Adding Helm chart repository + +A Helm [chart repository](https://helm.sh/docs/topics/chart_repository/) is a location backed by an HTTP server where packaged charts can be stored and shared. Before an application can be configured to use a chart from a Helm chart repository, that chart repository must be enabled in the related `piped` by adding the [ChartRepository](../configuration-reference/#chartrepository) struct to the piped configuration file. + +``` yaml +# piped configuration file +apiVersion: pipecd.dev/v1beta1 +kind: Piped +spec: + ... + chartRepositories: + - name: pipecd + address: https://charts.pipecd.dev +``` + +For example, the above snippet enables the official chart repository of PipeCD project. After that, you can configure the Kubernetes application to load a chart from that chart repository for executing the deployment. + +``` yaml +# Application configuration file. +apiVersion: pipecd.dev/v1beta1 +kind: KubernetesApp +spec: + input: + # Helm chart sourced from a Helm Chart Repository. + helmChart: + repository: pipecd + name: helloworld + version: v0.5.0 +``` + +In case the chart repository is backed by HTTP basic authentication, the username and password strings are required in [configuration](../configuration-reference/#chartrepository). + +### Adding Helm chart registry + +A Helm chart [registry](https://helm.sh/docs/topics/registries/) is a mechanism enabled by default in Helm 3.8.0 and later that allows the OCI registry to be used for storage and distribution of Helm charts. 
+ +Before an application can be configured to use a chart from a registry, that registry must be enabled in the related `piped` by adding the [ChartRegistry](../configuration-reference/#chartregistry) struct to the piped configuration file if authentication is enabled at the registry. + +``` yaml +# piped configuration file +apiVersion: pipecd.dev/v1beta1 +kind: Piped +spec: + ... + chartRegistries: + - type: OCI + address: registry.example.com + username: sample-username + password: sample-password +``` diff --git a/docs/content/en/docs-v0.50.x/user-guide/managing-piped/configuration-reference.md b/docs/content/en/docs-v0.50.x/user-guide/managing-piped/configuration-reference.md new file mode 100644 index 0000000000..207807ced3 --- /dev/null +++ b/docs/content/en/docs-v0.50.x/user-guide/managing-piped/configuration-reference.md @@ -0,0 +1,273 @@ +--- +title: "Configuration reference" +linkTitle: "Configuration reference" +weight: 9 +description: > + This page describes all configurable fields in the piped configuration. +--- + +``` yaml +apiVersion: pipecd.dev/v1beta1 +kind: Piped +spec: + projectID: ... + pipedID: ... + ... +``` + +## Piped Configuration + +| Field | Type | Description | Required | +|-|-|-|-| +| projectID | string | The identifier of the PipeCD project where this piped belongs to. | Yes | +| pipedID | string | The generated ID for this piped. | Yes | +| pipedKeyFile | string | The path to the file containing the generated key string for this piped. | Yes | +| pipedKeyData | string | Base64 encoded string of Piped key. Either pipedKeyFile or pipedKeyData must be set. | Yes | +| apiAddress | string | The address used to connect to the Control Plane's API in format `host:port`. | Yes | +| syncInterval | duration | How often to check whether an application should be synced. Default is `1m`. | No | +| appConfigSyncInterval | duration | How often to check whether application configuration files should be synced. Default is `1m`. 
| No | +| git | [Git](#git) | Git configuration needed for Git commands. | No | +| repositories | [][Repository](#gitrepository) | List of Git repositories this piped will handle. | No | +| chartRepositories | [][ChartRepository](#chartrepository) | List of Helm chart repositories that should be added while starting up. | No | +| chartRegistries | [][ChartRegistry](#chartregistry) | List of helm chart registries that should be logged in while starting up. | No | +| platformProviders | [][PlatformProvider](#platformprovider) | List of platform providers can be used by this piped. | No | +| analysisProviders | [][AnalysisProvider](#analysisprovider) | List of analysis providers can be used by this piped. | No | +| eventWatcher | [EventWatcher](#eventwatcher) | Optional Event watcher settings. | No | +| secretManagement | [SecretManagement](#secretmanagement) | The using secret management method. | No | +| notifications | [Notifications](#notifications) | Sending notifications to Slack, Webhook... | No | +| appSelector | map[string]string | List of labels to filter all applications this piped will handle. Currently, it is only be used to filter the applications suggested for adding from the control plane. | No | + +## Git + +| Field | Type | Description | Required | +|-|-|-|-| +| username | string | The username that will be configured for `git` user. Default is `piped`. | No | +| email | string | The email that will be configured for `git` user. Default is `pipecd.dev@gmail.com`. | No | +| sshConfigFilePath | string | Where to write ssh config file. Default is `$HOME/.ssh/config`. | No | +| host | string | The host name. Default is `github.com`. | No | +| hostName | string | The hostname or IP address of the remote git server. Default is the same value with Host. | No | +| sshKeyFile | string | The path to the private ssh key file. This will be used to clone the source code of the specified git repositories. 
| No | +| sshKeyData | string | Base64 encoded string of SSH key. | No | +| password | string | The base64 encoded password for git used while cloning the above Git repository. | No | + +## GitRepository + +| Field | Type | Description | Required | +|-|-|-|-| +| repoID | string | Unique identifier to the repository. This must be unique in the piped scope. | Yes | +| remote | string | Remote address of the repository used to clone the source code. e.g. `git@github.com:org/repo.git` | Yes | +| branch | string | The branch to be handled. | Yes | + +## ChartRepository + +| Field | Type | Description | Required | +|-|-|-|-| +| type | string | The repository type. Currently, HTTP and GIT are supported. Default is HTTP. | No | +| name | string | The name of the Helm chart repository. Note that this is not a Git repository but a [Helm chart repository](https://helm.sh/docs/topics/chart_repository/). | Yes if type is HTTP | +| address | string | The address to the Helm chart repository. | Yes if type is HTTP | +| username | string | Username used for the repository backed by HTTP basic authentication. | No | +| password | string | Password used for the repository backed by HTTP basic authentication. | No | +| insecure | bool | Whether to skip TLS certificate checks for the repository or not. | No | +| gitRemote | string | Remote address of the Git repository used to clone Helm charts. | Yes if type is GIT | +| sshKeyFile | string | The path to the private ssh key file used while cloning Helm charts from the above Git repository. | No | + +## ChartRegistry + +| Field | Type | Description | Required | +|-|-|-|-| +| type | string | The registry type. Currently, only OCI is supported. Default is OCI. | No | +| address | string | The address to the registry. | Yes | +| username | string | Username used for the registry authentication. | No | +| password | string | Password used for the registry authentication. 
| No | + +## PlatformProvider + +| Field | Type | Description | Required | +|-|-|-|-| +| name | string | The name of the platform provider. | Yes | +| type | string | The platform provider type. Must be one of the following values:
`KUBERNETES`, `TERRAFORM`, `ECS`, `CLOUDRUN`, `LAMBDA`. | Yes | +| config | [PlatformProviderConfig](#platformproviderconfig) | Specific configuration for the specified type of platform provider. | No | + +## PlatformProviderConfig + +Must be one of the following structs: + +### PlatformProviderKubernetesConfig + +| Field | Type | Description | Required | +|-|-|-|-| +| masterURL | string | The master URL of the kubernetes cluster. Empty means in-cluster. | No | +| kubectlVersion | string | Version of kubectl which will be used to connect to your cluster. Empty means the version set on [piped config](../user-guide/managing-piped/configuration-reference/#platformproviderkubernetesconfig) or [default version](https://github.com/pipe-cd/pipecd/blob/master/tool/piped-base/install-kubectl.sh#L24) will be used. | No | +| kubeConfigPath | string | The path to the kubeconfig file. Empty means in-cluster. | No | +| appStateInformer | [KubernetesAppStateInformer](#kubernetesappstateinformer) | Configuration for application resource informer. | No | + +### PlatformProviderTerraformConfig + +| Field | Type | Description | Required | +|-|-|-|-| +| vars | []string | List of variables that will be set directly on terraform commands with `-var` flag. The variable must be formatted by `key=value`. | No | +| driftDetectionEnabled | bool | Enable drift detection. This is a temporary option and will be possibly removed in the future release. Default is `true` | No | + +### PlatformProviderCloudRunConfig + +| Field | Type | Description | Required | +|-|-|-|-| +| project | string | The GCP project hosting the Cloud Run service. | Yes | +| region | string | The region of running Cloud Run service. | Yes | +| credentialsFile | string | The path to the service account file for accessing Cloud Run service. | No | + +### PlatformProviderLambdaConfig + +| Field | Type | Description | Required | +|-|-|-|-| +| region | string | The region of running Lambda service. 
| Yes | +| credentialsFile | string | The path to the credential file for logging into AWS cluster. If this value is not provided, piped will read credential info from environment variables. It expects the format [~/.aws/credentials](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-files.html). | No | +| roleARN | string | The IAM role arn to use when assuming a role. Required if you want to use the AWS SecurityTokenService. | No | +| tokenFile | string | The path to the WebIdentity token the SDK should use to assume a role with. Required if you want to use the AWS SecurityTokenService. | No | +| profile | string | The profile to use for logging into AWS cluster. The default value is `default`. | No | +| awsAPIPollingInterval | duration | The interval of periodical calls of AWS APIs. Currently, this is an interval of refreshing the live state of Lambda functions. Default is 15s. | No | + +### PlatformProviderECSConfig + +| Field | Type | Description | Required | +|-|-|-|-| +| region | string | The region of running ECS cluster. | Yes | +| credentialsFile | string | The path to the credential file for logging into AWS cluster. If this value is not provided, piped will read credential info from environment variables. It expects the format [~/.aws/credentials](https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-files.html) | No | +| roleARN | string | The IAM role arn to use when assuming a role. Required if you want to use the AWS SecurityTokenService. | No | +| tokenFile | string | The path to the WebIdentity token the SDK should use to assume a role with. Required if you want to use the AWS SecurityTokenService. | No | +| profile | string | The profile to use for logging into AWS cluster. The default value is `default`. | No | + +## KubernetesAppStateInformer + +| Field | Type | Description | Required | +|-|-|-|-| +| namespace | string | Only watches the specified namespace. Empty means watching all namespaces. 
| No | +| includeResources | [][KubernetesResourceMatcher](#kubernetesresourcematcher) | List of resources that should be added to the watching targets. | No | +| excludeResources | [][KubernetesResourceMatcher](#kubernetesresourcematcher) | List of resources that should be ignored from the watching targets. | No | + +### KubernetesResourceMatcher + +| Field | Type | Description | Required | +|-|-|-|-| +| apiVersion | string | The APIVersion of the kubernetes resource. | Yes | +| kind | string | The kind name of the kubernetes resource. Empty means all kinds are matching. | No | + +## AnalysisProvider + +| Field | Type | Description | Required | +|-|-|-|-| +| name | string | The unique name of the analysis provider. | Yes | +| type | string | The provider type. Currently, only PROMETHEUS, DATADOG are available. | Yes | +| config | [AnalysisProviderConfig](#analysisproviderconfig) | Specific configuration for the specified type of analysis provider. | Yes | + +## AnalysisProviderConfig + +Must be one of the following structs: + +### AnalysisProviderPrometheusConfig +| Field | Type | Description | Required | +|-|-|-|-| +| address | string | The Prometheus server address. | Yes | +| usernameFile | string | The path to the username file. | No | +| passwordFile | string | The path to the password file. | No | + +### AnalysisProviderDatadogConfig +| Field | Type | Description | Required | +|-|-|-|-| +| address | string | The address of Datadog API server. Only "datadoghq.com", "us3.datadoghq.com", "datadoghq.eu", "ddog-gov.com" are available. Defaults to "datadoghq.com" | No | +| apiKeyFile | string | The path to the api key file. | Yes | +| applicationKeyFile | string | The path to the application key file. | Yes | +| apiKeyData | string | Base64 API Key for Datadog API server. Either apiKeyData or apiKeyFile must be set | No | +| applicationKeyData | string | Base64 Application Key for Datadog API server. 
 Either applicationKeyFile or applicationKeyData must be set | No | + +## EventWatcher + +| Field | Type | Description | Required | +|-|-|-|-| +| checkInterval | duration | Interval to fetch the latest event and compare it with one defined in EventWatcher config files. Defaults to `1m`. | No | +| gitRepos | [][EventWatcherGitRepo](#eventwatchergitrepo) | The configuration list of git repositories to be observed. Only the repositories in this list will be observed by Piped. | No | + +### EventWatcherGitRepo + +| Field | Type | Description | Required | +|-|-|-|-| +| repoId | string | Id of the git repository. This must be unique within the repos' elements. | Yes | +| commitMessage | string | The commit message used to push after replacing values. Default message is used if not given. | No | +| includes | []string | The paths to EventWatcher files to be included. Patterns can be used like `foo/*.yaml`. | No | +| excludes | []string | The paths to EventWatcher files to be excluded. Patterns can be used like `foo/*.yaml`. This is prioritized if both includes and this are given. | No | + +## SecretManagement + +| Field | Type | Description | Required | +|-|-|-|-| +| type | string | Which management method should be used. Default is `KEY_PAIR`. | Yes | +| config | [SecretManagementConfig](#secretmanagementconfig) | Configuration for the secret management method to be used. | Yes | + +## SecretManagementConfig + +Must be one of the following structs: + +### SecretManagementKeyPair + +| Field | Type | Description | Required | +|-|-|-|-| +| privateKeyFile | string | Path to the private RSA key file. | Yes | +| privateKeyData | string | Base64 encoded string of private RSA key. Either privateKeyFile or privateKeyData must be set. | No | +| publicKeyFile | string | Path to the public RSA key file. | Yes | +| publicKeyData | string | Base64 encoded string of public RSA key. Either publicKeyFile or publicKeyData must be set. 
| No | + +### SecretManagementGCPKMS + +> WIP + +## Notifications + +| Field | Type | Description | Required | +|-|-|-|-| +| routes | [][NotificationRoute](#notificationroute) | List of notification routes. | No | +| receivers | [][NotificationReceiver](#notificationreceiver) | List of notification receivers. | No | + +### NotificationRoute + +| Field | Type | Description | Required | +|-|-|-|-| +| name | string | The name of the route. | Yes | +| receiver | string | The name of receiver who will receive all matched events. | Yes | +| events | []string | List of events that should be routed to the receiver. | No | +| ignoreEvents | []string | List of events that should be ignored. | No | +| groups | []string | List of event groups that should be routed to the receiver. | No | +| ignoreGroups | []string | List of event groups that should be ignored. | No | +| apps | []string | List of applications where their events should be routed to the receiver. | No | +| ignoreApps | []string | List of applications where their events should be ignored. | No | +| labels | map[string]string | List of labels where their events should be routed to the receiver. | No | +| ignoreLabels | map[string]string | List of labels where their events should be ignored. | No | + + +### NotificationReceiver + +| Field | Type | Description | Required | +|-|-|-|-| +| name | string | The name of the receiver. | Yes | +| slack | [NotificationReceiverSlack](#notificationreceiverslack) | Configuration for slack receiver. | No | +| webhook | [NotificationReceiverWebhook](#notificationreceiverwebhook) | Configuration for webhook receiver. | No | + +#### NotificationReceiverSlack + +| Field | Type | Description | Required | +|-|-|-|-| +| hookURL | string | The hookURL of a slack channel. 
| Yes | +| oauthToken | string | [The token for Slack API use.](https://api.slack.com/authentication/basics) (deprecated)| No | +| oauthTokenData | string | Base64 encoded string of [The token for Slack API use.](https://api.slack.com/authentication/basics) | No | +| oauthTokenFile | string | The path to the oautoken file | No | +| channelID | string | The channel id which slack api send to. | No | +| mentionedAccounts | []string | The accounts to which slack api referes. This field supports both `@username` and `username` writing styles.| No | +| mentionedGroups | []string | The groups to which slack api referes. This field supports both `` and `groupname` writing styles.| No | + +#### NotificationReceiverWebhook + +| Field | Type | Description | Required | +|-|-|-|-| +| url | string | The URL where notification event will be sent to. | Yes | +| signatureKey | string | The HTTP header key used to store the configured signature in each event. Default is "PipeCD-Signature". | No | +| signatureValue | string | The value of signature included in header of each event request. It can be used to verify the received events. | No | +| signatureValueFile | string | The path to the signature value file. | No | diff --git a/docs/content/en/docs-v0.50.x/user-guide/managing-piped/configuring-event-watcher.md b/docs/content/en/docs-v0.50.x/user-guide/managing-piped/configuring-event-watcher.md new file mode 100644 index 0000000000..1a7b0ae10c --- /dev/null +++ b/docs/content/en/docs-v0.50.x/user-guide/managing-piped/configuring-event-watcher.md @@ -0,0 +1,62 @@ +--- +title: "Configuring event watcher" +linkTitle: "Configuring event watcher" +weight: 7 +description: > + This page describes how to configure piped to enable event watcher. +--- + +To enable [EventWatcher](../../event-watcher/), you have to configure your piped at first. 
+ +### Grant write permission +The [SSH key used by Piped](../configuration-reference/#git) must be a key with write-access because piped needs to commit and push to your git repository when any incoming event matches. + +### Specify Git repositories to be observed +Piped watches events only for the Git repositories specified in the `gitRepos` list. +You need to add all repositories you want to enable Eventwatcher. + +```yaml +apiVersion: pipecd.dev/v1beta1 +kind: Piped +spec: + eventWatcher: + gitRepos: + - repoId: repo-1 + - repoId: repo-2 + - repoId: repo-3 +``` + +### [optional] Specify Eventwatcher files Piped will use +>NOTE: This way is valid only for defining events using [.pipe/](../../event-watcher/#use-the-pipe-directory). + +If multiple Pipeds handle a single repository, you can prevent conflicts by splitting into the multiple EventWatcher files and setting `includes/excludes` to specify the files that should be monitored by this Piped. + +Say for instance, if you only want the Piped to use the Eventwatcher files under `.pipe/dev/`: + +```yaml +apiVersion: pipecd.dev/v1beta1 +kind: Piped +spec: + eventWatcher: + gitRepos: + - repoId: repo-1 + commitMessage: Update values by Event watcher + includes: + - dev/*.yaml +``` + +`excludes` is prioritized if both `includes` and `excludes` are given. + +The full list of configurable fields are [here](../configuration-reference/#eventwatcher). + +### [optional] Settings for git user +By default, every git commit uses `piped` as a username and `pipecd.dev@gmail.com` as an email. You can change it with the [git](../configuration-reference/#git) field. 
+ +```yaml +apiVersion: pipecd.dev/v1beta1 +kind: Piped +spec: + git: + username: foo + email: foo@example.com +``` diff --git a/docs/content/en/docs-v0.50.x/user-guide/managing-piped/configuring-notifications.md b/docs/content/en/docs-v0.50.x/user-guide/managing-piped/configuring-notifications.md new file mode 100644 index 0000000000..4d45719d15 --- /dev/null +++ b/docs/content/en/docs-v0.50.x/user-guide/managing-piped/configuring-notifications.md @@ -0,0 +1,138 @@ +--- +title: "Configuring notifications" +linkTitle: "Configuring notifications" +weight: 8 +description: > + This page describes how to configure piped to send notifications to external services. +--- + +PipeCD events (deployment triggered, planned, completed, analysis result, piped started...) can be sent to external services like Slack or a Webhook service. While forwarding those events to a chat service helps developers have a quick and convenient way to know the deployment's current status, forwarding to a Webhook service may be useful for triggering other related tasks like CI jobs. + +PipeCD events are emitted and sent by the `piped` component. So all the needed configurations can be specified in the `piped` configuration file. +Notification configuration including: +- a list of `Route`s which used to match events and decide where the event should be sent to +- a list of `Receiver`s which used to know how to send events to the external service + +[Notification Route](../configuration-reference/#notificationroute) matches events based on their metadata like `name`, `group`, `app`, `labels`. +Below is the list of supporting event names and their groups. + +| Event | Group | Supported | Description | +|-|-|-|-| +| DEPLOYMENT_TRIGGERED | DEPLOYMENT |

| | +| DEPLOYMENT_PLANNED | DEPLOYMENT |

| | +| DEPLOYMENT_STARTED | DEPLOYMENT |

| | +| DEPLOYMENT_APPROVED | DEPLOYMENT |

| | +| DEPLOYMENT_WAIT_APPROVAL | DEPLOYMENT |

| | +| DEPLOYMENT_ROLLING_BACK | DEPLOYMENT |

| PipeCD sends a notification when a deployment is completed, while it does not send a notification when a deployment status changes to DEPLOYMENT_ROLLING_BACK because it is not a completion status. See [#4547](https://github.com/pipe-cd/pipecd/issues/4547) | +| DEPLOYMENT_SUCCEEDED | DEPLOYMENT |

| | +| DEPLOYMENT_FAILED | DEPLOYMENT |

| | +| DEPLOYMENT_CANCELLED | DEPLOYMENT |

| | +| DEPLOYMENT_TRIGGER_FAILED | DEPLOYMENT |

| | +| APPLICATION_SYNCED | APPLICATION_SYNC |

| | +| APPLICATION_OUT_OF_SYNC | APPLICATION_SYNC |

| | +| APPLICATION_HEALTHY | APPLICATION_HEALTH |

| | +| APPLICATION_UNHEALTHY | APPLICATION_HEALTH |

| | +| PIPED_STARTED | PIPED |

| | +| PIPED_STOPPED | PIPED |

| | + +### Sending notifications to Slack + +``` yaml +apiVersion: pipecd.dev/v1beta1 +kind: Piped +spec: + notifications: + routes: + # Sending all event which contains labels `env: dev` to dev-slack-channel. + - name: dev-slack + labels: + env: dev + receiver: dev-slack-channel + # Only sending deployment started and completed events which contains + # labels `env: prod` and `team: pipecd` to prod-slack-channel. + - name: prod-slack + events: + - DEPLOYMENT_TRIGGERED + - DEPLOYMENT_SUCCEEDED + labels: + env: prod + team: pipecd + receiver: prod-slack-channel + - name: integration-slack + receiver: integration-slack-api + receivers: + - name: dev-slack-channel + slack: + hookURL: https://slack.com/dev + - name: prod-slack-channel + slack: + hookURL: https://slack.com/prod + - name: integration-slack-api + slack: + oauthTokenData: "token" + channelID: "testid" + - name: hookurl-with-mentioned-accounts + slack: + hookURL: https://slack.com/dev, + mentionedAccounts: + - '@user1' + - 'user2' + - name: integration-slack-api-with-mentioned-accounts + slack: + oauthTokenData: token + channelID: testid + mentionedAccounts: + - '@user1' + - 'user2' + - name: integration-slack-api-with-oauthTokenData-and-mentioned-groups + slack: + oauthTokenData: token + channelID: testid + mentionedGroups: + - 'group1' + - '' + - name: integration-slack-api-with-oauthTokenData-and-mentioned-both-accounts-and-groups + slack: + oauthTokenData: token + channelID: testid + mentionedAccounts: + - 'user1' + - '@user2' + mentionedGroups: + - 'groupID1' + - '' +``` + + +![](/images/slack-notification-deployment.png) +

+Deployment was triggered, planned and completed successfully +

+ +![](/images/slack-notification-piped-started.png) +

+A piped has been started +

+ + +For detailed configuration, please check the [configuration reference for Notifications](../configuration-reference/#notifications) section. + +### Sending notifications to external services via webhook + +``` yaml +apiVersion: pipecd.dev/v1beta1 +kind: Piped +spec: + notifications: + routes: + # Sending all events an external service. + - name: all-events-to-a-external-service + receiver: a-webhook-service + receivers: + - name: a-webhook-service + webhook: + url: {WEBHOOK_SERVICE_URL} + signatureValue: {RANDOM_SIGNATURE_STRING} +``` + +For detailed configuration, please check the [configuration reference for NotificationReceiverWebhook](../configuration-reference/#notificationreceiverwebhook) section. diff --git a/docs/content/en/docs-v0.50.x/user-guide/managing-piped/remote-upgrade-remote-config.md b/docs/content/en/docs-v0.50.x/user-guide/managing-piped/remote-upgrade-remote-config.md new file mode 100644 index 0000000000..eec51632dd --- /dev/null +++ b/docs/content/en/docs-v0.50.x/user-guide/managing-piped/remote-upgrade-remote-config.md @@ -0,0 +1,39 @@ +--- +title: "Remote upgrade and remote config" +linkTitle: "Remote upgrade and remote config" +weight: 1 +description: > + This page describes how to use remote upgrade and remote config features. +--- + +## Remote upgrade + +The remote upgrade is the ability to restart the currently running Piped with another version from the web console. +This reduces the effort involved in updating Piped to newer versions. +All Pipeds that are running by the provided Piped container image can be enabled to use this feature. +It means Pipeds running on a Kubernetes cluster, a virtual machine, a serverless service can be upgraded remotely from the web console. + +Basically, in order to use this feature you must run Piped with `/launcher` command instead of `/piped` command as usual. +Please check the [installation](../../../installation/install-piped/) guide on each environment to see the details. 
+ +After starting Piped with the remote-upgrade feature, you can go to the Settings page then click on `UPGRADE` button on the top-right corner. +A dialog will be shown for selecting which Pipeds you want to upgrade and what version they should run. + +![](/images/settings-remote-upgrade.png) +

+Select a list of Pipeds to upgrade from Settings page +

+ +## Remote config + +Although the remote-upgrade allows you remotely restart your Pipeds to run any new version you want, if your Piped is loading its config locally where Piped is running, you still need to manually restart Piped after adding any change on that config data. Remote-config is for you to remove that kind of manual operation. + +Remote-config is the ability to load Piped config data from a remote location such as a Git repository. Not only that, but it also watches the config periodically to detect any changes on that config and restarts Piped to reflect the new configuration automatically. + +This feature requires the remote-upgrade feature to be enabled simultaneously. Currently, we only support remote config from a Git repository, but other remote locations could be supported in the future. Please check the [installation](../../../installation/install-piped/) guide on each environment to know how to configure Piped to load a remote config file. + + +## Summary + +- By `remote-upgrade` you can upgrade your Piped to a newer version by clicking on the web console +- By `remote-config` you can enforce your Piped to use the latest config data just by updating its config file stored in a Git repository diff --git a/docs/content/en/docs-v0.50.x/user-guide/managing-piped/runtime-options.md b/docs/content/en/docs-v0.50.x/user-guide/managing-piped/runtime-options.md new file mode 100644 index 0000000000..6b8ba10365 --- /dev/null +++ b/docs/content/en/docs-v0.50.x/user-guide/managing-piped/runtime-options.md @@ -0,0 +1,80 @@ +--- +title: "Runtime Options" +linkTitle: "Runtime Options" +weight: 11 +description: > + This page describes configurable options for executing Piped and launcher. +--- + +You can configure some options when running Piped and launcher. + +## Options for Piped + +``` +Usage: + piped piped [flags] + +Flags: + --add-login-user-to-passwd Whether to add login user to $HOME/passwd. 
This is typically for applications running as a random user ID. + --admin-port int The port number used to run a HTTP server for admin tasks such as metrics, healthz. (default 9085) + --app-manifest-cache-count int The number of app manifests to cache. The cache-key contains the commit hash. The default is 150. (default 150) + --cert-file string The path to the TLS certificate file. + --config-aws-secret string The ARN of secret that contains Piped config and be stored in AWS Secrets Manager. + --config-aws-ssm-parameter string The name of parameter of Piped config stored in AWS Systems Manager Parameter Store. SecureString is also supported. + --config-data string The base64 encoded string of the configuration data. + --config-file string The path to the configuration file. + --config-gcp-secret string The resource ID of secret that contains Piped config and be stored in GCP SecretManager. + --enable-default-kubernetes-cloud-provider Whether the default kubernetes provider is enabled or not. This feature is deprecated. + --grace-period duration How long to wait for graceful shutdown. (default 30s) + -h, --help help for piped + --insecure Whether disabling transport security while connecting to control-plane. + --launcher-version string The version of launcher which initialized this Piped. + --tools-dir string The path to directory where to install needed tools such as kubectl, helm, kustomize. (default "~/.piped/tools") + +Global Flags: + --log-encoding string The encoding type for logger [json|console|humanize]. (default "humanize") + --log-level string The minimum enabled logging level. (default "info") + --metrics Whether metrics is enabled or not. (default true) + --profile If true enables uploading the profiles to Stackdriver. + --profile-debug-logging If true enables logging debug information of profiler. + --profiler-credentials-file string The path to the credentials file using while sending profiles to Stackdriver. 
+``` + +## Options for launcher + +``` +Usage: + launcher launcher [flags] + +Flags: + --aws-secret-id string The ARN of secret that contains Piped config in AWS Secrets Manager service. + --aws-ssm-parameter string The name of parameter of Piped config stored in AWS Systems Manager Parameter Store. SecureString is also supported. + --cert-file string The path to the TLS certificate file. + --check-interval duration Interval to periodically check desired config/version to restart Piped. Default is 1m. (default 1m0s) + --config-data string The base64 encoded string of the configuration data. + --config-file string The path to the configuration file. + --config-from-aws-secret Whether to load Piped config that is being stored in AWS Secrets Manager service. + --config-from-aws-ssm-parameter-store Whether to load Piped config that is being stored in AWS Systems Manager Parameter Store. + --config-from-gcp-secret Whether to load Piped config that is being stored in GCP SecretManager service. + --config-from-git-repo Whether to load Piped config that is being stored in a git repository. + --default-version string The version should be run when no desired version was specified. Empty means using the same version with Launcher. + --gcp-secret-id string The resource ID of secret that contains Piped config in GCP SecretManager service. + --git-branch string Branch of git repository to for Piped config. + --git-piped-config-file string Relative path within git repository to locate Piped config file. + --git-repo-url string The remote URL of git repository to fetch Piped config. + --git-ssh-key-data string Base64 encoded value of SSH private key to fetch Piped config from the private git repository. + --git-ssh-key-file string The path to SSH private key to fetch Piped config from private git repository. + --grace-period duration How long to wait for graceful shutdown. (default 30s) + -h, --help help for launcher + --home-dir string The working directory of Launcher. 
+ --insecure Whether disabling transport security while connecting to control-plane. + --launcher-admin-port int The port number used to run a HTTP server for admin tasks such as metrics, healthz. + +Global Flags: + --log-encoding string The encoding type for logger [json|console|humanize]. (default "humanize") + --log-level string The minimum enabled logging level. (default "info") + --metrics Whether metrics is enabled or not. (default true) + --profile If true enables uploading the profiles to Stackdriver. + --profile-debug-logging If true enables logging debug information of profiler. + --profiler-credentials-file string The path to the credentials file using while sending profiles to Stackdriver. +``` diff --git a/docs/content/en/docs-v0.50.x/user-guide/managing-piped/using-pprof-in-piped.md b/docs/content/en/docs-v0.50.x/user-guide/managing-piped/using-pprof-in-piped.md new file mode 100644 index 0000000000..647db3f3b6 --- /dev/null +++ b/docs/content/en/docs-v0.50.x/user-guide/managing-piped/using-pprof-in-piped.md @@ -0,0 +1,48 @@ +--- +title: "Using Pprof in Piped" +linkTitle: "Using Pprof in Piped" +weight: 10 +description: > + This guide is for developers who want to use pprof for performance profiling in Piped. +--- + +Piped provides built-in support for pprof, a tool for visualization and analysis of profiling data. It's a part of the standard Go library. + +In Piped, several routes are registered to serve the profiling data in a format understood by the pprof tool. Here are the routes: + +- `/debug/pprof/`: This route serves an index page that lists the available profiling data. +- `/debug/pprof/profile`: This route serves CPU profiling data. +- `/debug/pprof/trace`: This route serves execution trace data. + +You can access these routes to get the profiling data. For example, to get the CPU profiling data, you can access the `/debug/pprof/profile` route. + +Note that using these features in a production environment may impact performance. 
+ +This document explains the basic usage of [pprof](https://pkg.go.dev/net/http/pprof) in Piped. For more detailed information or specific use cases, please refer to the official Go documentation. + +## How to use pprof + +1. Access the pprof index page + ```bash + curl http://localhost:9085/debug/pprof/ + ``` + This will return an HTML page that lists the available profiling data. + +2. Get the CPU Profile + ```bash + curl http://localhost:9085/debug/pprof/profile > cpu.pprof + ``` + This will save the CPU profiling data to a file named cpu.pprof. You can then analyze this data using the pprof tool: + ```bash + go tool pprof cpu.pprof + ``` + +3. Get the Execution Trace + ```bash + curl http://localhost:9085/debug/pprof/trace > trace.out + ``` + This will save the execution trace data to a file named trace.out. You can then view this trace using the go tool trace command: + ```bash + go tool trace trace.out + ``` + Please replace localhost:9085 with the actual address and port of your Piped's admin server. diff --git a/docs/content/en/docs-v0.50.x/user-guide/metrics.md b/docs/content/en/docs-v0.50.x/user-guide/metrics.md new file mode 100644 index 0000000000..fee0d7e9c2 --- /dev/null +++ b/docs/content/en/docs-v0.50.x/user-guide/metrics.md @@ -0,0 +1,124 @@ +--- +title: "Metrics" +linkTitle: "Metrics" +weight: 8 +description: > + This page describes how to enable monitoring system for collecting PipeCD' metrics. +--- + +PipeCD comes with a monitoring system including Prometheus, Alertmanager, and Grafana. +This page walks you through how to set up and use them. + +## Monitoring overview + +![](/images/metrics-architecture.png) +

+Monitoring Architecture +

+ +Both the Control plane and piped agent have their own "admin servers" (the default port number is 9085), which are simple HTTP servers providing operational information such as health status, running version, go profile, and monitoring metrics. + +The piped agent collects its metrics and periodically sends them to the Control plane. The Control plane then compacts its resource usage and cluster information with the metrics sent by the piped agent and re-publishes them via its admin server. When the PipeCD monitoring feature is turned on, Prometheus, Alertmanager, and Grafana are deployed with the Control plane, and Prometheus retrieves metrics information from the Control plane's admin server. + +Developers managing the piped agent can also get metrics directly from the piped agent and monitor them with their custom monitoring service. + +## Enable monitoring system +To enable monitoring system for PipeCD, you first need to set the following value to `helm install` when [installing](../../../installation/install-controlplane/#2-preparing-control-plane-configuration-file-and-installing). + +``` +--set monitoring.enabled=true +``` + +## Dashboards +If you've already enabled monitoring system in the previous section, you can access Grafana using port forwarding: + +``` +kubectl port-forward -n {NAMESPACE} svc/{PIPECD_RELEASE_NAME}-grafana 3000:80 +``` + +#### Control Plane dashboards +There are three dashboards related to Control Plane: +- Overview - usage stats of PipeCD +- Incoming Requests - gRPC and HTTP requests stats to check for any negative impact on users +- Go - processes stats of PipeCD components + +#### Piped dashboards +Visualize the metrics of Piped registered in the Control plane. +- Overview - usage stats of piped agents +- Process - resource usage of piped agent +- Go - processes stats of piped agents. + +#### Cluster dashboards +Because cluster dashboards tracks cluster-wide metrics, defaults to disable. 
You can enable it with: + +``` +--set monitoring.clusterStats=true +``` + +There are three dashboards that track metrics for: +- Node - nodes stats within the Kubernetes cluster where PipeCD runs on +- Pod - stats for pods that make PipeCD up +- Prometheus - stats for Prometheus itself + +## Alert notifications +If you want to send alert notifications to external services like Slack, you need to set an alertmanager configuration file. + +For example, let's say you use Slack as a receiver. Create `values.yaml` and put the following configuration there. + +```yaml +prometheus: + alertmanagerFiles: + alertmanager.yml: + global: + slack_api_url: {YOUR_WEBHOOK_URL} + route: + receiver: slack-notifications + receivers: + - name: slack-notifications + slack_configs: + - channel: '#your-channel' +``` + +And give it to the `helm install` command when [installing](../../../installation/install-controlplane/#2-preparing-control-plane-configuration-file-and-installing). + +``` +--values=values.yaml +``` + +See [here](https://prometheus.io/docs/alerting/latest/configuration/) for more details on AlertManager's configuration. + +## Piped agent metrics + +| Metric | Type | Description | +| --- | --- | --- | +| `cloudprovider_kubernetes_tool_calls_total` | counter | Number of calls made to run the tool like kubectl, kustomize. | +| `deployment_status` | gauge | The current status of deployment. 1 for current status, 0 for others. | +| `livestatestore_kubernetes_api_requests_total` | counter | Number of requests sent to kubernetes api server. | +| `livestatestore_kubernetes_resource_events_total` | counter | Number of resource events received from kubernetes server. | +| `plan_preview_command_handled_total` | counter | Total number of plan-preview commands handled at piped. | +| `plan_preview_command_handling_seconds` | histogram | Histogram of handling seconds of plan-preview commands. 
| +| `plan_preview_command_received_total` | counter | Total number of plan-preview commands received at piped. | + +## Control plane metrics + +All Piped's metrics are sent to the control plane so that they are also available on the control plane's metrics server. + +| Metric | Type | Description | +| --- | --- | --- | +| `cache_get_operation_total` | counter | Number of cache get operation while processing. | +| `grpcapi_create_deployment_total` | counter | Number of successful CreateDeployment RPC with project label. | +| `http_request_duration_milliseconds` | histogram | Histogram of request latencies in milliseconds. | +| `http_requests_total` | counter | Total number of HTTP requests. | +| `insight_application_total` | gauge | Number of applications currently controlled by control plane. | + +## Health Checking + +The below components expose their endpoint for health checking. +- server +- ops +- piped +- launcher (only when you run with designating the `launcher-admin-port` option.) + +The spec of the health check endpoint is as below. +- Path: `/healthz` +- Port: the same as admin server's port. 9085 by default. diff --git a/docs/content/en/docs-v0.50.x/user-guide/plan-preview.md b/docs/content/en/docs-v0.50.x/user-guide/plan-preview.md new file mode 100644 index 0000000000..c0a212d5df --- /dev/null +++ b/docs/content/en/docs-v0.50.x/user-guide/plan-preview.md @@ -0,0 +1,52 @@ +--- +title: "Confidently review your changes with Plan Preview" +linkTitle: "Plan preview" +weight: 6 +description: > + Enables the ability to preview the deployment plan against a given commit before merging. +--- + +In order to help developers review the pull request with a better experience and more confidence to approve it to trigger the actual deployments, +PipeCD provides a way to preview the deployment plan of all updated applications by that pull request. 
+ +Here are what will be included currently in the result of plan-preview process: + +- which application will be deployed once the pull request got merged +- which deployment strategy (QUICK_SYNC or PIPELINE_SYNC) will be used +- which resources will be added, deleted, or modified + +This feature will available for all application kinds: KUBERNETES, TERRAFORM, CLOUD_RUN, LAMBDA and Amazon ECS. + +![](/images/plan-preview-comment.png) +

+PlanPreview with GitHub actions pipe-cd/actions-plan-preview +

+ +## Prerequisites + +- Ensure the version of your Piped is at least `v0.11.0`. +- Having an API key that has `READ_WRITE` role to authenticate with PipeCD's Control Plane. A new key can be generated from `settings/api-key` page of your PipeCD web. + +## Usage + +Plan-preview result can be requested by using `pipectl` command-line tool as below: + +``` console +pipectl plan-preview \ + --address={ PIPECD_CONTROL_PLANE_ADDRESS } \ + --api-key={ PIPECD_API_KEY } \ + --repo-remote-url={ REPO_REMOTE_GIT_SSH_URL } \ + --head-branch={ HEAD_BRANCH } \ + --head-commit={ HEAD_COMMIT } \ + --base-branch={ BASE_BRANCH } +``` + +You can run it locally or integrate it to your CI system to run automatically when a new pull request is opened/updated. Use `--help` to see more options. + +``` console +pipectl plan-preview --help +``` + +## GitHub Actions + +If you are using GitHub Actions, you can seamlessly integrate our prepared [actions-plan-preview](https://github.com/pipe-cd/actions-plan-preview) to your workflows. This automatically comments the plan-preview result on the pull request when it is opened or updated. You can also trigger to run plan-preview manually by leave a comment `/pipecd plan-preview` on the pull request. diff --git a/docs/content/en/docs-v0.50.x/user-guide/terraform-provider-pipecd.md b/docs/content/en/docs-v0.50.x/user-guide/terraform-provider-pipecd.md new file mode 100644 index 0000000000..5175deb0c7 --- /dev/null +++ b/docs/content/en/docs-v0.50.x/user-guide/terraform-provider-pipecd.md @@ -0,0 +1,68 @@ +--- +title: "PipeCD Terraform provider" +linkTitle: "PipeCD Terraform provider" +weight: 10 +description: > + This page describes how to manage PipeCD resources with Terraform using terraform-provider-pipecd. +--- + +Besides using web UI and command line tool, PipeCD community also provides Terraform module, [terraform-provider-pipecd](https://registry.terraform.io/providers/pipe-cd/pipecd/latest), which allows you to manage PipeCD resources. 
+This provider enables us to add, update, and delete PipeCD resources as Infrastructure as Code (IaC). Storing resources as code in a version control system like Git repository ensures more reliability, security, and makes it more friendly for engineers to manage PipeCD resources with the power of Git. + +## Usage + +### Setup Terraform provider +Add terraform block to declare that you use PipeCD Terraform provider. You need to input a control plane's host name and API key via provider settings or environment variables. API key is available on the web UI. + +```hcl +terraform { + required_providers { + pipecd = { + source = "pipe-cd/pipecd" + version = "0.1.0" + } + } + required_version = ">= 1.4" +} + +provider "pipecd" { + # pipecd_host = "" // optional, if not set, read from environments as PIPECD_HOST + # pipecd_api_key = "" // optional, if not set, read from environments as PIPECD_API_KEY +} +``` + +### Manage Piped agent +Add `pipecd_piped` resource to manage a Piped agent. + +```hcl +resource "pipecd_piped" "mypiped" { + name = "mypiped" + description = "This is my piped" + id = "my-piped-id" +} +``` + +### Adding a new application +Add `pipecd_application` resource to manage an application. + +```hcl +// CloudRun Application +resource "pipecd_application" "main" { + kind = "CLOUDRUN" + name = "example-application" + description = "This is the simple application" + platform_provider = "cloudrun-inproject" + piped_id = "your-piped-id" + git = { + repository_id = "examples" + remote = "git@github.com:pipe-cd/examples.git" + branch = "master" + path = "cloudrun/simple" + filename = "app.pipecd.yaml" + } +} +``` + +### You want more? + +We always want to add more needed resources into the Terraform provider. Please let the maintainers know what resources you want to add by creating issues in the [pipe-cd/terraform-provider-pipecd](https://github.com/pipe-cd/terraform-provider-pipecd/) repository. We also welcome your pull request to contribute! 
diff --git a/docs/layouts/docs-v0.50.x/baseof.html b/docs/layouts/docs-v0.50.x/baseof.html new file mode 100644 index 0000000000..20af4e6526 --- /dev/null +++ b/docs/layouts/docs-v0.50.x/baseof.html @@ -0,0 +1,32 @@ + + + + {{ partial "head.html" . }} + + +
+ {{ partial "navbar.html" . }} +
+
+
+
+ + +
+ {{ partial "version-banner.html" . }} + {{ if not .Site.Params.ui.breadcrumb_disable }}{{ partial "breadcrumb.html" . }}{{ end }} + {{ block "main" . }}{{ end }} +
+
+
+ {{ partial "footer.html" . }} +
+ {{ partial "scripts.html" . }} + + \ No newline at end of file diff --git a/docs/layouts/docs-v0.50.x/baseof.print.html b/docs/layouts/docs-v0.50.x/baseof.print.html new file mode 100644 index 0000000000..d37e99012b --- /dev/null +++ b/docs/layouts/docs-v0.50.x/baseof.print.html @@ -0,0 +1,26 @@ + + + + {{ partial "head.html" . }} + + +
+ {{ partial "navbar.html" . }} +
+
+
+
+
+
+
+
+
+ {{ block "main" . }}{{ end }} +
+
+
+ {{ partial "footer.html" . }} +
+ {{ partial "scripts.html" . }} + + diff --git a/docs/layouts/docs-v0.50.x/list.html b/docs/layouts/docs-v0.50.x/list.html new file mode 100644 index 0000000000..b59b8d0047 --- /dev/null +++ b/docs/layouts/docs-v0.50.x/list.html @@ -0,0 +1,32 @@ +{{ define "main" }} +
+

{{ .Title }}

+ {{ with .Params.description }}
{{ . | markdownify }}
{{ end }} +
+ {{ $context := . }} + {{ if .Site.Params.Taxonomy.taxonomyPageHeader }} + {{ range $index, $taxo := .Site.Params.Taxonomy.taxonomyPageHeader }} + {{ partial "taxonomy_terms_article.html" (dict "context" $context "taxo" $taxo ) }} + {{ end }} + {{ else }} + {{ range $taxo, $taxo_map := .Site.Taxonomies }} + {{ partial "taxonomy_terms_article.html" (dict "context" $context "taxo" $taxo ) }} + {{ end }} + {{ end }} + {{ if (and (not .Params.hide_readingtime) (.Site.Params.ui.readingtime.enable)) }} + {{ partial "reading-time.html" . }} + {{ end }} +
+ {{ .Content }} + {{ partial "section-index.html" . }} + {{ if (and (not .Params.hide_feedback) (.Site.Params.ui.feedback.enable) (.Site.GoogleAnalytics)) }} + {{ partial "feedback.html" .Site.Params.ui.feedback }} +
+ {{ end }} + {{ if (.Site.DisqusShortname) }} +
+ {{ partial "disqus-comment.html" . }} + {{ end }} + {{ partial "page-meta-lastmod.html" . }} +
+{{ end }} diff --git a/docs/layouts/docs-v0.50.x/list.print.html b/docs/layouts/docs-v0.50.x/list.print.html new file mode 100644 index 0000000000..1b04015886 --- /dev/null +++ b/docs/layouts/docs-v0.50.x/list.print.html @@ -0,0 +1,3 @@ +{{ define "main" }} +{{ partial "print/render" . }} +{{ end }} diff --git a/docs/layouts/docs-v0.50.x/single.html b/docs/layouts/docs-v0.50.x/single.html new file mode 100644 index 0000000000..00cb3ab911 --- /dev/null +++ b/docs/layouts/docs-v0.50.x/single.html @@ -0,0 +1,3 @@ +{{ define "main" }} +{{ .Render "content" }} +{{ end }} \ No newline at end of file diff --git a/docs/main.go b/docs/main.go index 2086c8ac72..bf1f025bb1 100644 --- a/docs/main.go +++ b/docs/main.go @@ -28,7 +28,7 @@ import ( const dir = "/public" // Don't update here manually. /hack/gen-release-docs.sh does. -const latestPath = "/docs-v0.49.x/" +const latestPath = "/docs-v0.50.x/" func main() { var ( diff --git a/manifests/pipecd/Chart.lock b/manifests/pipecd/Chart.lock index 1d0066feee..83add409e6 100644 --- a/manifests/pipecd/Chart.lock +++ b/manifests/pipecd/Chart.lock @@ -10,6 +10,6 @@ dependencies: version: 0.93.3 - name: tempo repository: https://grafana.github.io/helm-charts - version: 1.10.2 -digest: sha256:2fa3e0df6d19e699e8ae52b415bed1a56f9890701eaa04cd48f4419eb4da01cc -generated: "2024-08-08T10:45:50.558122029+09:00" + version: 1.14.0 +digest: sha256:c6d00e5535f3d61210cca8a16521f2186ddaf5d9cf6f2da68cdfa9eba08473f0 +generated: "2024-12-09T09:56:27.110590328+09:00" diff --git a/manifests/pipecd/Chart.yaml b/manifests/pipecd/Chart.yaml index 848e9c0bc4..1dddc0b00a 100644 --- a/manifests/pipecd/Chart.yaml +++ b/manifests/pipecd/Chart.yaml @@ -32,6 +32,6 @@ dependencies: repository: "https://open-telemetry.github.io/opentelemetry-helm-charts" condition: monitoring.enabled - name: tempo - version: "1.10.2" + version: "1.14.0" repository: "https://grafana.github.io/helm-charts" condition: monitoring.enabled diff --git a/manifests/pipecd/values.yaml 
b/manifests/pipecd/values.yaml index 231922b7ef..33b6a4236e 100644 --- a/manifests/pipecd/values.yaml +++ b/manifests/pipecd/values.yaml @@ -274,6 +274,8 @@ tempo: # modified by @pipecd, to expand template remoteWriteUrl # @default -- Dynamically generated tempo configmap config: | + memberlist: + cluster_label: "{{ .Release.Name }}.{{ .Release.Namespace }}" multitenancy_enabled: {{ .Values.tempo.multitenancyEnabled }} usage_report: reporting_enabled: {{ .Values.tempo.reportingEnabled }} diff --git a/pkg/app/launcher/cmd/launcher/launcher.go b/pkg/app/launcher/cmd/launcher/launcher.go index 8402f6b34f..8a31e985ca 100644 --- a/pkg/app/launcher/cmd/launcher/launcher.go +++ b/pkg/app/launcher/cmd/launcher/launcher.go @@ -306,7 +306,7 @@ func (l *launcher) run(ctx context.Context, input cli.Input) error { } // Start new piped process. - runningPiped, err = l.launchNewPiped(version, config, workingDir, input.Logger) + runningPiped, err = l.launchNewPiped(ctx, version, config, workingDir, input.Logger) if err != nil { input.Logger.Error("LAUNCHER: failed while launching new Piped", zap.Error(err)) return err @@ -404,7 +404,7 @@ func (l *launcher) cleanOldPiped(cmd *lifecycle.Command, workingDir string, logg return nil } -func (l *launcher) launchNewPiped(version string, config []byte, workingDir string, logger *zap.Logger) (*lifecycle.Command, error) { +func (l *launcher) launchNewPiped(ctx context.Context, version string, config []byte, workingDir string, logger *zap.Logger) (*lifecycle.Command, error) { if err := os.MkdirAll(workingDir, 0755); err != nil { return nil, fmt.Errorf("could not create working directory %s (%w)", workingDir, err) } @@ -436,7 +436,7 @@ func (l *launcher) launchNewPiped(version string, config []byte, workingDir stri args := makePipedArgs(os.Args[2:], configFilePath) logger.Info(fmt.Sprintf("LAUNCHER: start running Piped %s with args %v", version, args)) - return lifecycle.RunBinary(pipedPath, args) + return lifecycle.RunBinary(ctx, pipedPath, 
args) } func (l *launcher) loadConfigData(ctx context.Context) ([]byte, error) { diff --git a/pkg/app/piped/toolregistry/tool_darwin.go b/pkg/app/piped/toolregistry/tool_darwin.go index 044c90cecb..c2d715e138 100644 --- a/pkg/app/piped/toolregistry/tool_darwin.go +++ b/pkg/app/piped/toolregistry/tool_darwin.go @@ -16,7 +16,7 @@ package toolregistry var kubectlInstallScript = ` cd {{ .WorkingDir }} -curl -LO https://storage.googleapis.com/kubernetes-release/release/v{{ .Version }}/bin/darwin/amd64/kubectl +curl -LO https://dl.k8s.io/release/v{{ .Version }}/bin/darwin/amd64/kubectl mv kubectl {{ .BinDir }}/kubectl-{{ .Version }} chmod +x {{ .BinDir }}/kubectl-{{ .Version }} {{ if .AsDefault }} diff --git a/pkg/app/piped/toolregistry/tool_linux.go b/pkg/app/piped/toolregistry/tool_linux.go index 6f3d4c146c..6dbdf730bf 100644 --- a/pkg/app/piped/toolregistry/tool_linux.go +++ b/pkg/app/piped/toolregistry/tool_linux.go @@ -16,7 +16,7 @@ package toolregistry var kubectlInstallScript = ` cd {{ .WorkingDir }} -curl -LO https://storage.googleapis.com/kubernetes-release/release/v{{ .Version }}/bin/linux/amd64/kubectl +curl -LO https://dl.k8s.io/release/v{{ .Version }}/bin/linux/amd64/kubectl mv kubectl {{ .BinDir }}/kubectl-{{ .Version }} chmod +x {{ .BinDir }}/kubectl-{{ .Version }} {{ if .AsDefault }} diff --git a/pkg/app/pipedv1/appconfigreporter/appconfigreporter.go b/pkg/app/pipedv1/appconfigreporter/appconfigreporter.go index 68cb3aefa9..2044e16135 100644 --- a/pkg/app/pipedv1/appconfigreporter/appconfigreporter.go +++ b/pkg/app/pipedv1/appconfigreporter/appconfigreporter.go @@ -27,7 +27,7 @@ import ( "google.golang.org/grpc" "github.com/pipe-cd/pipecd/pkg/app/server/service/pipedservice" - "github.com/pipe-cd/pipecd/pkg/config" + config "github.com/pipe-cd/pipecd/pkg/configv1" "github.com/pipe-cd/pipecd/pkg/git" "github.com/pipe-cd/pipecd/pkg/model" ) @@ -375,26 +375,21 @@ func (r *Reporter) readApplicationInfo(repoDir, repoID, cfgRelPath string) (*mod if err != nil { 
return nil, fmt.Errorf("failed to open the configuration file: %w", err) } - cfg, err := config.DecodeYAML(b) + cfg, err := config.DecodeYAML[*config.GenericApplicationSpec](b) if err != nil { return nil, fmt.Errorf("failed to decode configuration file: %w", err) } - spec, ok := cfg.GetGenericApplication() - if !ok { - return nil, fmt.Errorf("unsupported application kind %q", cfg.Kind) - } - - kind, ok := cfg.Kind.ToApplicationKind() - if !ok { + if !cfg.Kind.IsApplicationKind() { return nil, fmt.Errorf("%q is not application config kind", cfg.Kind) } + + spec := cfg.Spec if spec.Name == "" { return nil, fmt.Errorf("missing application name: %w", errMissingRequiredField) } return &model.ApplicationInfo{ Name: spec.Name, - Kind: kind, Labels: spec.Labels, RepoId: repoID, Path: filepath.Dir(cfgRelPath), diff --git a/pkg/app/pipedv1/appconfigreporter/appconfigreporter_test.go b/pkg/app/pipedv1/appconfigreporter/appconfigreporter_test.go index 63084dc33b..91886949a6 100644 --- a/pkg/app/pipedv1/appconfigreporter/appconfigreporter_test.go +++ b/pkg/app/pipedv1/appconfigreporter/appconfigreporter_test.go @@ -21,7 +21,7 @@ import ( "github.com/stretchr/testify/assert" "go.uber.org/zap" - "github.com/pipe-cd/pipecd/pkg/config" + config "github.com/pipe-cd/pipecd/pkg/configv1" "github.com/pipe-cd/pipecd/pkg/model" ) diff --git a/pkg/app/pipedv1/cmd/piped/grpcapi/plugin_api.go b/pkg/app/pipedv1/cmd/piped/grpcapi/plugin_api.go index ad48acfe0f..2558c4b884 100644 --- a/pkg/app/pipedv1/cmd/piped/grpcapi/plugin_api.go +++ b/pkg/app/pipedv1/cmd/piped/grpcapi/plugin_api.go @@ -20,7 +20,7 @@ import ( "github.com/pipe-cd/pipecd/pkg/app/pipedv1/cmd/piped/service" "github.com/pipe-cd/pipecd/pkg/app/server/service/pipedservice" - "github.com/pipe-cd/pipecd/pkg/config" + config "github.com/pipe-cd/pipecd/pkg/configv1" "github.com/pipe-cd/pipecd/pkg/crypto" "github.com/pipe-cd/pipecd/pkg/model" diff --git a/pkg/app/pipedv1/cmd/piped/grpcapi/plugin_api_test.go 
b/pkg/app/pipedv1/cmd/piped/grpcapi/plugin_api_test.go index 301d82353b..5d36cceee9 100644 --- a/pkg/app/pipedv1/cmd/piped/grpcapi/plugin_api_test.go +++ b/pkg/app/pipedv1/cmd/piped/grpcapi/plugin_api_test.go @@ -17,7 +17,7 @@ package grpcapi import ( "testing" - "github.com/pipe-cd/pipecd/pkg/config" + config "github.com/pipe-cd/pipecd/pkg/configv1" "github.com/pipe-cd/pipecd/pkg/model" ) diff --git a/pkg/app/pipedv1/cmd/piped/piped.go b/pkg/app/pipedv1/cmd/piped/piped.go index f3364031f7..b2e2bcfdd6 100644 --- a/pkg/app/pipedv1/cmd/piped/piped.go +++ b/pkg/app/pipedv1/cmd/piped/piped.go @@ -19,14 +19,18 @@ import ( "context" "crypto/tls" "encoding/base64" + "encoding/json" "fmt" + "net" "net/http" "net/http/pprof" "os" "os/exec" "path" "path/filepath" + "strconv" "strings" + "sync" "time" secretmanager "cloud.google.com/go/secretmanager/apiv1" @@ -47,7 +51,6 @@ import ( "sigs.k8s.io/yaml" "github.com/pipe-cd/pipecd/pkg/admin" - "github.com/pipe-cd/pipecd/pkg/app/pipedv1/apistore/analysisresultstore" "github.com/pipe-cd/pipecd/pkg/app/pipedv1/apistore/applicationstore" "github.com/pipe-cd/pipecd/pkg/app/pipedv1/apistore/commandstore" "github.com/pipe-cd/pipecd/pkg/app/pipedv1/apistore/deploymentstore" @@ -61,12 +64,13 @@ import ( "github.com/pipe-cd/pipecd/pkg/app/pipedv1/statsreporter" "github.com/pipe-cd/pipecd/pkg/app/pipedv1/trigger" "github.com/pipe-cd/pipecd/pkg/app/server/service/pipedservice" - "github.com/pipe-cd/pipecd/pkg/cache/memorycache" "github.com/pipe-cd/pipecd/pkg/cli" - "github.com/pipe-cd/pipecd/pkg/config" + config "github.com/pipe-cd/pipecd/pkg/configv1" "github.com/pipe-cd/pipecd/pkg/crypto" "github.com/pipe-cd/pipecd/pkg/git" + "github.com/pipe-cd/pipecd/pkg/lifecycle" "github.com/pipe-cd/pipecd/pkg/model" + pluginapi "github.com/pipe-cd/pipecd/pkg/plugin/api/v1alpha1" "github.com/pipe-cd/pipecd/pkg/rpc" "github.com/pipe-cd/pipecd/pkg/rpc/rpcauth" "github.com/pipe-cd/pipecd/pkg/rpc/rpcclient" @@ -88,6 +92,7 @@ type piped struct { adminPort 
int pluginServicePort int toolsDir string + pluginsDir string enableDefaultKubernetesCloudProvider bool gracePeriod time.Duration addLoginUserToPasswd bool @@ -104,6 +109,7 @@ func NewCommand() *cobra.Command { adminPort: 9085, pluginServicePort: 9087, toolsDir: path.Join(home, ".piped", "tools"), + pluginsDir: path.Join(home, ".piped", "plugins"), gracePeriod: 30 * time.Second, maxRecvMsgSize: 1024 * 1024 * 10, // 10MB } @@ -155,14 +161,14 @@ func (p *piped) run(ctx context.Context, input cli.Input) (runErr error) { // Register all metrics. registry := registerMetrics(cfg.PipedID, cfg.ProjectID, p.launcherVersion) - // Configure SSH config if needed. - if cfg.Git.ShouldConfigureSSHConfig() { - if err := git.AddSSHConfig(cfg.Git); err != nil { - input.Logger.Error("failed to configure ssh-config", zap.Error(err)) - return err - } - input.Logger.Info("successfully configured ssh-config") - } + // // Configure SSH config if needed. + // if cfg.Git.ShouldConfigureSSHConfig() { + // if err := git.AddSSHConfig(cfg.Git); err != nil { + // input.Logger.Error("failed to configure ssh-config", zap.Error(err)) + // return err + // } + // input.Logger.Info("successfully configured ssh-config") + // } pipedKey, err := cfg.LoadPipedKey() if err != nil { @@ -170,19 +176,20 @@ func (p *piped) run(ctx context.Context, input cli.Input) (runErr error) { return err } - // Make gRPC client and connect to the API. + // Make gRPC client and connect to the Control Plane API. apiClient, err := p.createAPIClient(ctx, cfg.APIAddress, cfg.ProjectID, cfg.PipedID, pipedKey, input.Logger) if err != nil { input.Logger.Error("failed to create gRPC client to control plane", zap.Error(err)) return err } + // Setup the tracer provider. + // We don't set the global tracer provider because 3rd-party library may use the global one. 
tracerProvider, err := p.createTracerProvider(ctx, cfg.APIAddress, cfg.ProjectID, cfg.PipedID, pipedKey) if err != nil { input.Logger.Error("failed to create tracer provider", zap.Error(err)) return err } - // we don't set the global tracer provider because 3rd-party library may use the global one. // Send the newest piped meta to the control-plane. if err := p.sendPipedMeta(ctx, apiClient, cfg, input.Logger); err != nil { @@ -291,16 +298,6 @@ func (p *piped) run(ctx context.Context, input cli.Input) (runErr error) { eventLister = store.Lister() } - analysisResultStore := analysisresultstore.NewStore(apiClient, input.Logger) - - // Create memory caches. - appManifestsCache := memorycache.NewTTLCache(ctx, time.Hour, time.Minute) - - // Start running application live state reporter. - { - // TODO: Implement the live state reporter controller. - } - // Start running plugin service server. { var ( @@ -324,10 +321,49 @@ func (p *piped) run(ctx context.Context, input cli.Input) (runErr error) { }) } - decrypter, err := p.initializeSecretDecrypter(cfg) - if err != nil { - input.Logger.Error("failed to initialize secret decrypter", zap.Error(err)) - return err + // Start plugins that registered in the configuration. + { + // Start all plugins and keep their commands to stop them later. + plugins, err := p.runPlugins(ctx, cfg.Plugins, input.Logger) + if err != nil { + input.Logger.Error("failed to run plugins", zap.Error(err)) + return err + } + + group.Go(func() error { + <-ctx.Done() + wg := &sync.WaitGroup{} + for _, plg := range plugins { + wg.Add(1) + go func() { + defer wg.Done() + if err := plg.GracefulStop(p.gracePeriod); err != nil { + input.Logger.Error("failed to stop plugin", zap.Error(err)) + } + }() + } + wg.Wait() + return nil + }) + } + + // Make grpc clients to connect to plugins. 
+ pluginClis := make([]pluginapi.PluginClient, 0, len(cfg.Plugins)) + options := []rpcclient.DialOption{ + rpcclient.WithBlock(), + rpcclient.WithInsecure(), + } + for _, plg := range cfg.Plugins { + cli, err := pluginapi.NewClient(ctx, net.JoinHostPort("localhost", strconv.Itoa(plg.Port)), options...) + if err != nil { + input.Logger.Error("failed to create client to connect plugin", zap.String("plugin", plg.Name), zap.Error(err)) + } + pluginClis = append(pluginClis, cli) + } + + // Start running application live state reporter. + { + // TODO: Implement the live state reporter controller. } // Start running application application drift detector. @@ -340,14 +376,10 @@ func (p *piped) run(ctx context.Context, input cli.Input) (runErr error) { c := controller.NewController( apiClient, gitClient, + pluginClis, deploymentLister, commandLister, - applicationLister, - analysisResultStore, notifier, - decrypter, - cfg, - appManifestsCache, p.gracePeriod, input.Logger, tracerProvider, @@ -548,18 +580,18 @@ func (p *piped) loadConfig(ctx context.Context) (*config.PipedSpec, error) { return nil, err } - extract := func(cfg *config.Config) (*config.PipedSpec, error) { + extract := func(cfg *config.Config[*config.PipedSpec, config.PipedSpec]) (*config.PipedSpec, error) { if cfg.Kind != config.KindPiped { return nil, fmt.Errorf("wrong configuration kind for piped: %v", cfg.Kind) } if p.enableDefaultKubernetesCloudProvider { - cfg.PipedSpec.EnableDefaultKubernetesPlatformProvider() + cfg.Spec.EnableDefaultKubernetesPlatformProvider() } - return cfg.PipedSpec, nil + return cfg.Spec, nil } if p.configFile != "" { - cfg, err := config.LoadFromYAML(p.configFile) + cfg, err := config.LoadFromYAML[*config.PipedSpec](p.configFile) if err != nil { return nil, err } @@ -572,7 +604,7 @@ func (p *piped) loadConfig(ctx context.Context) (*config.PipedSpec, error) { return nil, fmt.Errorf("the given config-data isn't base64 encoded: %w", err) } - cfg, err := config.DecodeYAML(data) + cfg, 
err := config.DecodeYAML[*config.PipedSpec](data) if err != nil { return nil, err } @@ -584,7 +616,7 @@ func (p *piped) loadConfig(ctx context.Context) (*config.PipedSpec, error) { if err != nil { return nil, fmt.Errorf("failed to load config from SecretManager (%w)", err) } - cfg, err := config.DecodeYAML(data) + cfg, err := config.DecodeYAML[*config.PipedSpec](data) if err != nil { return nil, err } @@ -596,7 +628,7 @@ func (p *piped) loadConfig(ctx context.Context) (*config.PipedSpec, error) { if err != nil { return nil, fmt.Errorf("failed to load config from AWS Secrets Manager (%w)", err) } - cfg, err := config.DecodeYAML(data) + cfg, err := config.DecodeYAML[*config.PipedSpec](data) if err != nil { return nil, err } @@ -606,6 +638,35 @@ func (p *piped) loadConfig(ctx context.Context) (*config.PipedSpec, error) { return nil, fmt.Errorf("one of config-file, config-gcp-secret or config-aws-secret must be set") } +func (p *piped) runPlugins(ctx context.Context, pluginsCfg []config.PipedPlugin, logger *zap.Logger) ([]*lifecycle.Command, error) { + plugins := make([]*lifecycle.Command, 0, len(pluginsCfg)) + for _, pCfg := range pluginsCfg { + // Download plugin binary to piped's pluginsDir. + pPath, err := lifecycle.DownloadBinary(pCfg.URL, p.pluginsDir, pCfg.Name, logger) + if err != nil { + return nil, fmt.Errorf("failed to download plugin %s: %w", pCfg.Name, err) + } + + // Build plugin's args. + args := make([]string, 0, 0) + args = append(args, "--piped-plugin-service", net.JoinHostPort("localhost", strconv.Itoa(p.pluginServicePort))) + b, err := json.Marshal(pCfg) + if err != nil { + return nil, fmt.Errorf("failed to prepare plugin %s config: %w", pCfg.Name, err) + } + args = append(args, "--config", string(b)) + + // Run the plugin binary. 
+ cmd, err := lifecycle.RunBinary(ctx, pPath, args) + if err != nil { + return nil, fmt.Errorf("failed to run plugin %s: %w", pCfg.Name, err) + } + + plugins = append(plugins, cmd) + } + return plugins, nil +} + // TODO: Remove this once the decryption task by plugin call to the plugin service is implemented. func (p *piped) initializeSecretDecrypter(cfg *config.PipedSpec) (crypto.Decrypter, error) { sm := cfg.SecretManagement @@ -660,15 +721,12 @@ func (p *piped) sendPipedMeta(ctx context.Context, client pipedservice.Client, c return err } - var ( - req = &pipedservice.ReportPipedMetaRequest{ - Version: version.Get().Version, - Config: string(maskedCfg), - Repositories: repos, - PlatformProviders: make([]*model.Piped_PlatformProvider, 0, len(cfg.PlatformProviders)), - } - retry = pipedservice.NewRetry(5) - ) + req := &pipedservice.ReportPipedMetaRequest{ + Version: version.Get().Version, + Config: string(maskedCfg), + Repositories: repos, + PlatformProviders: make([]*model.Piped_PlatformProvider, 0, len(cfg.PlatformProviders)), + } // Configure the list of specified platform providers. 
for _, cp := range cfg.PlatformProviders { @@ -695,19 +753,21 @@ func (p *piped) sendPipedMeta(ctx context.Context, client pipedservice.Client, c } } - for retry.WaitNext(ctx) { + retry := pipedservice.NewRetry(5) + _, err = retry.Do(ctx, func() (interface{}, error) { if res, err := client.ReportPipedMeta(ctx, req); err == nil { cfg.Name = res.Name if cfg.WebAddress == "" { cfg.WebAddress = res.WebBaseUrl } - return nil + return nil, nil } logger.Warn("failed to report piped meta to control-plane, wait to the next retry", zap.Int("calls", retry.Calls()), zap.Error(err), ) - } + return nil, err + }) return err } diff --git a/pkg/app/pipedv1/controller/controller.go b/pkg/app/pipedv1/controller/controller.go index 70b10339ab..a2827a5f2d 100644 --- a/pkg/app/pipedv1/controller/controller.go +++ b/pkg/app/pipedv1/controller/controller.go @@ -35,10 +35,10 @@ import ( "github.com/pipe-cd/pipecd/pkg/app/pipedv1/controller/controllermetrics" "github.com/pipe-cd/pipecd/pkg/app/server/service/pipedservice" - "github.com/pipe-cd/pipecd/pkg/cache" - "github.com/pipe-cd/pipecd/pkg/config" "github.com/pipe-cd/pipecd/pkg/git" "github.com/pipe-cd/pipecd/pkg/model" + pluginapi "github.com/pipe-cd/pipecd/pkg/plugin/api/v1alpha1" + "github.com/pipe-cd/pipecd/pkg/plugin/api/v1alpha1/deployment" ) type apiClient interface { @@ -71,23 +71,10 @@ type commandLister interface { ListStageCommands(deploymentID, stageID string) []model.ReportableCommand } -type applicationLister interface { - Get(id string) (*model.Application, bool) -} - -type analysisResultStore interface { - GetLatestAnalysisResult(ctx context.Context, applicationID string) (*model.AnalysisResult, error) - PutLatestAnalysisResult(ctx context.Context, applicationID string, analysisResult *model.AnalysisResult) error -} - type notifier interface { Notify(event model.NotificationEvent) } -type secretDecrypter interface { - Decrypt(string) (string, error) -} - type DeploymentController interface { Run(ctx context.Context) 
error } @@ -98,18 +85,16 @@ var ( ) type controller struct { - apiClient apiClient - pluginRegistry PluginRegistry - gitClient gitClient - deploymentLister deploymentLister - commandLister commandLister - applicationLister applicationLister - analysisResultStore analysisResultStore - notifier notifier - secretDecrypter secretDecrypter // TODO: Remove this - pipedCfg *config.PipedSpec // TODO: Remove this, use pipedConfig instead - appManifestsCache cache.Cache - + apiClient apiClient + gitClient gitClient + deploymentLister deploymentLister + commandLister commandLister + notifier notifier + + // gRPC clients to communicate with plugins. + pluginClients []pluginapi.PluginClient + // Map from stage name to the plugin client. + stageBasedPluginsMap map[string]pluginapi.PluginClient // Map from application ID to the planner // of a pending deployment of that application. planners map[string]*planner @@ -141,31 +126,22 @@ type controller struct { func NewController( apiClient apiClient, gitClient gitClient, + pluginClients []pluginapi.PluginClient, deploymentLister deploymentLister, commandLister commandLister, - applicationLister applicationLister, - analysisResultStore analysisResultStore, notifier notifier, - sd secretDecrypter, - pipedCfg *config.PipedSpec, - appManifestsCache cache.Cache, gracePeriod time.Duration, logger *zap.Logger, tracerProvider trace.TracerProvider, ) DeploymentController { return &controller{ - apiClient: apiClient, - pluginRegistry: DefaultPluginRegistry(), - gitClient: gitClient, - deploymentLister: deploymentLister, - commandLister: commandLister, - applicationLister: applicationLister, - analysisResultStore: analysisResultStore, - notifier: notifier, - secretDecrypter: sd, - appManifestsCache: appManifestsCache, - pipedCfg: pipedCfg, + apiClient: apiClient, + gitClient: gitClient, + pluginClients: pluginClients, + deploymentLister: deploymentLister, + commandLister: commandLister, + notifier: notifier, planners: 
make(map[string]*planner), donePlanners: make(map[string]time.Time), @@ -196,6 +172,23 @@ func (c *controller) Run(ctx context.Context) error { c.workspaceDir = dir c.logger.Info(fmt.Sprintf("workspace directory was configured to %s", c.workspaceDir)) + // Build the list of stages that can be handled by piped's plugins. + stagesBasedPluginsMap := make(map[string]pluginapi.PluginClient) + for _, plugin := range c.pluginClients { + resp, err := plugin.FetchDefinedStages(ctx, &deployment.FetchDefinedStagesRequest{}) + if err != nil { + return err + } + for _, stage := range resp.GetStages() { + if _, ok := stagesBasedPluginsMap[stage]; ok { + c.logger.Error("duplicated stage name", zap.String("stage", stage)) + return fmt.Errorf("duplicated stage name %s", stage) + } + stagesBasedPluginsMap[stage] = plugin + } + } + c.stageBasedPluginsMap = stagesBasedPluginsMap + ticker := time.NewTicker(c.syncInternal) defer ticker.Stop() c.logger.Info("start syncing planners and schedulers") @@ -440,18 +433,13 @@ func (c *controller) startNewPlanner(ctx context.Context, d *model.Deployment) ( } } - pluginClient, ok := c.pluginRegistry.Plugin(d.Kind) - if !ok { - logger.Error("no plugin client for the application kind", zap.String("kind", d.Kind.String())) - return nil, fmt.Errorf("no plugin client for the application kind %s", d.Kind.String()) - } - planner := newPlanner( d, commitHash, configFilename, workingDir, - pluginClient, + c.pluginClients, // FIXME: Find a way to ensure the plugins only related to deployment. 
+ c.stageBasedPluginsMap, c.apiClient, c.gitClient, c.notifier, @@ -591,13 +579,8 @@ func (c *controller) startNewScheduler(ctx context.Context, d *model.Deployment) workingDir, c.apiClient, c.gitClient, - c.commandLister, - c.applicationLister, - c.analysisResultStore, + c.stageBasedPluginsMap, c.notifier, - c.secretDecrypter, - c.pipedCfg, - c.appManifestsCache, c.logger, c.tracerProvider, ) @@ -628,25 +611,22 @@ func (c *controller) startNewScheduler(ctx context.Context, d *model.Deployment) } func (c *controller) getMostRecentlySuccessfulDeployment(ctx context.Context, applicationID string) (*model.ApplicationDeploymentReference, error) { - var ( - err error - resp *pipedservice.GetApplicationMostRecentDeploymentResponse - retry = pipedservice.NewRetry(3) - req = &pipedservice.GetApplicationMostRecentDeploymentRequest{ - ApplicationId: applicationID, - Status: model.DeploymentStatus_DEPLOYMENT_SUCCESS, - } - ) + req := &pipedservice.GetApplicationMostRecentDeploymentRequest{ + ApplicationId: applicationID, + Status: model.DeploymentStatus_DEPLOYMENT_SUCCESS, + } - for retry.WaitNext(ctx) { - if resp, err = c.apiClient.GetApplicationMostRecentDeployment(ctx, req); err == nil { + d, err := pipedservice.NewRetry(3).Do(ctx, func() (interface{}, error) { + resp, err := c.apiClient.GetApplicationMostRecentDeployment(ctx, req) + if err == nil { return resp.Deployment, nil } - if !pipedservice.Retriable(err) { - return nil, err - } + return nil, pipedservice.NewRetriableErr(err) + }) + if err != nil { + return nil, err } - return nil, err + return d.(*model.ApplicationDeploymentReference), nil } func (c *controller) shouldStartPlanningDeployment(ctx context.Context, d *model.Deployment) (plannable, cancel bool, cancelReason string, err error) { @@ -669,49 +649,38 @@ func (c *controller) shouldStartPlanningDeployment(ctx context.Context, d *model } func (c *controller) cancelDeployment(ctx context.Context, d *model.Deployment, reason string) error { - var ( - err error 
- req = &pipedservice.ReportDeploymentCompletedRequest{ - DeploymentId: d.Id, - Status: model.DeploymentStatus_DEPLOYMENT_CANCELLED, - StatusReason: reason, - StageStatuses: nil, - DeploymentChainId: d.DeploymentChainId, - DeploymentChainBlockIndex: d.DeploymentChainBlockIndex, - CompletedAt: time.Now().Unix(), - } - retry = pipedservice.NewRetry(10) - ) + req := &pipedservice.ReportDeploymentCompletedRequest{ + DeploymentId: d.Id, + Status: model.DeploymentStatus_DEPLOYMENT_CANCELLED, + StatusReason: reason, + StageStatuses: nil, + DeploymentChainId: d.DeploymentChainId, + DeploymentChainBlockIndex: d.DeploymentChainBlockIndex, + CompletedAt: time.Now().Unix(), + } - for retry.WaitNext(ctx) { - if _, err = c.apiClient.ReportDeploymentCompleted(ctx, req); err == nil { - return nil + _, err := pipedservice.NewRetry(10).Do(ctx, func() (interface{}, error) { + _, err := c.apiClient.ReportDeploymentCompleted(ctx, req) + if err != nil { + return nil, fmt.Errorf("failed to report deployment status to control-plane: %w", err) } - err = fmt.Errorf("failed to report deployment status to control-plane: %v", err) - } + return nil, nil + }) return err } -type appLiveResourceLister struct { - platformProvider string - appID string -} - func reportApplicationDeployingStatus(ctx context.Context, c apiClient, appID string, deploying bool) error { - var ( - err error - retry = pipedservice.NewRetry(10) - req = &pipedservice.ReportApplicationDeployingStatusRequest{ - ApplicationId: appID, - Deploying: deploying, - } - ) + req := &pipedservice.ReportApplicationDeployingStatusRequest{ + ApplicationId: appID, + Deploying: deploying, + } - for retry.WaitNext(ctx) { - if _, err = c.ReportApplicationDeployingStatus(ctx, req); err == nil { - return nil + _, err := pipedservice.NewRetry(10).Do(ctx, func() (interface{}, error) { + _, err := c.ReportApplicationDeployingStatus(ctx, req) + if err != nil { + return nil, fmt.Errorf("failed to report application deploying status to control-plane: 
%w", err) } - err = fmt.Errorf("failed to report application deploying status to control-plane: %w", err) - } + return nil, nil + }) return err } diff --git a/pkg/app/pipedv1/controller/planner.go b/pkg/app/pipedv1/controller/planner.go index 2547e968d5..2984aef823 100644 --- a/pkg/app/pipedv1/controller/planner.go +++ b/pkg/app/pipedv1/controller/planner.go @@ -90,7 +90,8 @@ func newPlanner( lastSuccessfulCommitHash string, lastSuccessfulConfigFilename string, workingDir string, - pluginClient pluginapi.PluginClient, + pluginClients []pluginapi.PluginClient, + stageBasedPluginsMap map[string]pluginapi.PluginClient, apiClient apiClient, gitClient gitClient, notifier notifier, @@ -106,22 +107,13 @@ func newPlanner( zap.String("working-dir", workingDir), ) - // TODO: Fix this. Passed by args - tmp := make(map[string]pluginapi.PluginClient) - tmp["K8S_SYNC"] = pluginClient - - plugins := make([]pluginapi.PluginClient, 0, len(tmp)) - for _, v := range tmp { - plugins = append(plugins, v) - } - p := &planner{ deployment: d, lastSuccessfulCommitHash: lastSuccessfulCommitHash, lastSuccessfulConfigFilename: lastSuccessfulConfigFilename, workingDir: workingDir, - stageBasedPluginsMap: tmp, - plugins: plugins, + stageBasedPluginsMap: stageBasedPluginsMap, + plugins: pluginClients, apiClient: apiClient, gitClient: gitClient, metadataStore: metadatastore.NewMetadataStore(apiClient, d), @@ -250,8 +242,6 @@ func (p *planner) buildPlan(ctx context.Context, runningDS, targetDS *deployment Deployment: p.deployment, RunningDeploymentSource: runningDS, TargetDeploymentSource: targetDS, - // TODO: Add more planner input fields. - // we need passing PluginConfig } // Build deployment target versions. 
@@ -507,23 +497,10 @@ func (p *planner) buildPipelineSyncStages(ctx context.Context, cfg *config.Gener } func (p *planner) reportDeploymentPlanned(ctx context.Context, out *plannerOutput) error { - var ( - err error - retry = pipedservice.NewRetry(10) - req = &pipedservice.ReportDeploymentPlannedRequest{ - DeploymentId: p.deployment.Id, - Summary: out.Summary, - StatusReason: "The deployment has been planned", - RunningCommitHash: p.lastSuccessfulCommitHash, - RunningConfigFilename: p.lastSuccessfulConfigFilename, - Versions: out.Versions, - Stages: out.Stages, - DeploymentChainId: p.deployment.DeploymentChainId, - DeploymentChainBlockIndex: p.deployment.DeploymentChainBlockIndex, - } - ) - users, groups, err := p.getApplicationNotificationMentions(model.NotificationEventType_EVENT_DEPLOYMENT_PLANNED) + if err != nil { + p.logger.Error("failed to get the list of users or groups", zap.Error(err)) + } defer func() { p.notifier.Notify(model.NotificationEvent{ @@ -537,13 +514,25 @@ func (p *planner) reportDeploymentPlanned(ctx context.Context, out *plannerOutpu }) }() - for retry.WaitNext(ctx) { - if _, err = p.apiClient.ReportDeploymentPlanned(ctx, req); err == nil { - return nil - } - err = fmt.Errorf("failed to report deployment status to control-plane: %v", err) + req := &pipedservice.ReportDeploymentPlannedRequest{ + DeploymentId: p.deployment.Id, + Summary: out.Summary, + StatusReason: "The deployment has been planned", + RunningCommitHash: p.lastSuccessfulCommitHash, + RunningConfigFilename: p.lastSuccessfulConfigFilename, + Versions: out.Versions, + Stages: out.Stages, + DeploymentChainId: p.deployment.DeploymentChainId, + DeploymentChainBlockIndex: p.deployment.DeploymentChainBlockIndex, } + _, err = pipedservice.NewRetry(10).Do(ctx, func() (interface{}, error) { + _, err := p.apiClient.ReportDeploymentPlanned(ctx, req) + if err != nil { + return nil, fmt.Errorf("failed to report deployment status to control-plane: %w", err) + } + return nil, nil + }) if err 
!= nil { p.logger.Error("failed to mark deployment to be planned", zap.Error(err)) } @@ -551,21 +540,6 @@ func (p *planner) reportDeploymentPlanned(ctx context.Context, out *plannerOutpu } func (p *planner) reportDeploymentFailed(ctx context.Context, reason string) error { - var ( - err error - now = p.nowFunc() - req = &pipedservice.ReportDeploymentCompletedRequest{ - DeploymentId: p.deployment.Id, - Status: model.DeploymentStatus_DEPLOYMENT_FAILURE, - StatusReason: reason, - StageStatuses: nil, - DeploymentChainId: p.deployment.DeploymentChainId, - DeploymentChainBlockIndex: p.deployment.DeploymentChainBlockIndex, - CompletedAt: now.Unix(), - } - retry = pipedservice.NewRetry(10) - ) - users, groups, err := p.getApplicationNotificationMentions(model.NotificationEventType_EVENT_DEPLOYMENT_FAILED) if err != nil { p.logger.Error("failed to get the list of users or groups", zap.Error(err)) @@ -583,13 +557,24 @@ func (p *planner) reportDeploymentFailed(ctx context.Context, reason string) err }) }() - for retry.WaitNext(ctx) { - if _, err = p.apiClient.ReportDeploymentCompleted(ctx, req); err == nil { - return nil - } - err = fmt.Errorf("failed to report deployment status to control-plane: %v", err) + req := &pipedservice.ReportDeploymentCompletedRequest{ + DeploymentId: p.deployment.Id, + Status: model.DeploymentStatus_DEPLOYMENT_FAILURE, + StatusReason: reason, + StageStatuses: nil, + DeploymentChainId: p.deployment.DeploymentChainId, + DeploymentChainBlockIndex: p.deployment.DeploymentChainBlockIndex, + CompletedAt: p.nowFunc().Unix(), } + _, err = pipedservice.NewRetry(10).Do(ctx, func() (interface{}, error) { + _, err := p.apiClient.ReportDeploymentCompleted(ctx, req) + if err != nil { + return nil, fmt.Errorf("failed to report deployment status to control-plane: %w", err) + } + return nil, nil + }) + if err != nil { p.logger.Error("failed to mark deployment to be failed", zap.Error(err)) } @@ -597,21 +582,6 @@ func (p *planner) reportDeploymentFailed(ctx 
context.Context, reason string) err } func (p *planner) reportDeploymentCancelled(ctx context.Context, commander, reason string) error { - var ( - err error - now = p.nowFunc() - req = &pipedservice.ReportDeploymentCompletedRequest{ - DeploymentId: p.deployment.Id, - Status: model.DeploymentStatus_DEPLOYMENT_CANCELLED, - StatusReason: reason, - StageStatuses: nil, - DeploymentChainId: p.deployment.DeploymentChainId, - DeploymentChainBlockIndex: p.deployment.DeploymentChainBlockIndex, - CompletedAt: now.Unix(), - } - retry = pipedservice.NewRetry(10) - ) - users, groups, err := p.getApplicationNotificationMentions(model.NotificationEventType_EVENT_DEPLOYMENT_CANCELLED) if err != nil { p.logger.Error("failed to get the list of users or groups", zap.Error(err)) @@ -629,13 +599,24 @@ func (p *planner) reportDeploymentCancelled(ctx context.Context, commander, reas }) }() - for retry.WaitNext(ctx) { - if _, err = p.apiClient.ReportDeploymentCompleted(ctx, req); err == nil { - return nil - } - err = fmt.Errorf("failed to report deployment status to control-plane: %v", err) + req := &pipedservice.ReportDeploymentCompletedRequest{ + DeploymentId: p.deployment.Id, + Status: model.DeploymentStatus_DEPLOYMENT_CANCELLED, + StatusReason: reason, + StageStatuses: nil, + DeploymentChainId: p.deployment.DeploymentChainId, + DeploymentChainBlockIndex: p.deployment.DeploymentChainBlockIndex, + CompletedAt: p.nowFunc().Unix(), } + _, err = pipedservice.NewRetry(10).Do(ctx, func() (interface{}, error) { + _, err := p.apiClient.ReportDeploymentCompleted(ctx, req) + if err != nil { + return nil, fmt.Errorf("failed to report deployment status to control-plane: %w", err) + } + return nil, nil + }) + if err != nil { p.logger.Error("failed to mark deployment to be cancelled", zap.Error(err)) } diff --git a/pkg/app/pipedv1/controller/pluginregistry.go b/pkg/app/pipedv1/controller/pluginregistry.go deleted file mode 100644 index 436bfda150..0000000000 --- 
a/pkg/app/pipedv1/controller/pluginregistry.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright 2024 The PipeCD Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package controller provides a piped component -// that handles all of the not completed deployments by managing a pool of planners and schedulers. -// Whenever a new PENDING deployment is detected, controller spawns a new planner for deciding -// the deployment pipeline and update the deployment status to PLANNED. -// Whenever a new PLANNED deployment is detected, controller spawns a new scheduler -// for scheduling and running its pipeline executors. -package controller - -import ( - "sync" - - "github.com/pipe-cd/pipecd/pkg/model" - pluginapi "github.com/pipe-cd/pipecd/pkg/plugin/api/v1alpha1" -) - -type PluginRegistry interface { - Plugin(k model.ApplicationKind) (pluginapi.PluginClient, bool) -} - -type pluginRegistry struct { - plugins map[model.ApplicationKind]pluginapi.PluginClient - mu sync.RWMutex -} - -func (r *pluginRegistry) Plugin(k model.ApplicationKind) (pluginapi.PluginClient, bool) { - r.mu.RLock() - defer r.mu.RUnlock() - - e, ok := r.plugins[k] - if !ok { - return nil, false - } - - return e, true -} - -var defaultPluginRegistry = &pluginRegistry{ - plugins: make(map[model.ApplicationKind]pluginapi.PluginClient), -} - -func DefaultPluginRegistry() PluginRegistry { - return defaultPluginRegistry -} - -func init() { - // TODO: Register all available built-in plugins. 
- - // NOTE: If you want to directry test the plugin, you can use the following code. - - // defaultPluginRegistry.mu.Lock() - // defer defaultPluginRegistry.mu.Unlock() - - // options := []rpcclient.DialOption{ - // rpcclient.WithBlock(), - // rpcclient.WithInsecure(), - // } - - // cli, err := platform.NewClient(context.Background(), "localhost:10000", options...) - // if err != nil { - // panic(err) - // } - - // defaultPluginRegistry.plugins[model.ApplicationKind_KUBERNETES] = cli -} diff --git a/pkg/app/pipedv1/controller/scheduler.go b/pkg/app/pipedv1/controller/scheduler.go index 63d942782b..b3f8671423 100644 --- a/pkg/app/pipedv1/controller/scheduler.go +++ b/pkg/app/pipedv1/controller/scheduler.go @@ -18,8 +18,6 @@ import ( "context" "encoding/json" "fmt" - "io" - "path/filepath" "time" "go.opentelemetry.io/otel/attribute" @@ -29,37 +27,28 @@ import ( "go.uber.org/zap" "github.com/pipe-cd/pipecd/pkg/app/pipedv1/controller/controllermetrics" - "github.com/pipe-cd/pipecd/pkg/app/pipedv1/deploysource" - "github.com/pipe-cd/pipecd/pkg/app/pipedv1/executor" - registry "github.com/pipe-cd/pipecd/pkg/app/pipedv1/executor/registry" "github.com/pipe-cd/pipecd/pkg/app/pipedv1/metadatastore" "github.com/pipe-cd/pipecd/pkg/app/server/service/pipedservice" - "github.com/pipe-cd/pipecd/pkg/cache" - "github.com/pipe-cd/pipecd/pkg/config" + config "github.com/pipe-cd/pipecd/pkg/configv1" "github.com/pipe-cd/pipecd/pkg/model" + pluginapi "github.com/pipe-cd/pipecd/pkg/plugin/api/v1alpha1" + "github.com/pipe-cd/pipecd/pkg/plugin/api/v1alpha1/deployment" ) // scheduler is a dedicated object for a specific deployment of a single application. type scheduler struct { - // Readonly deployment model. 
- deployment *model.Deployment - workingDir string - executorRegistry registry.Registry - apiClient apiClient - gitClient gitClient - commandLister commandLister - applicationLister applicationLister - analysisResultStore analysisResultStore - metadataStore metadatastore.MetadataStore - notifier notifier - secretDecrypter secretDecrypter - pipedConfig *config.PipedSpec - appManifestsCache cache.Cache - logger *zap.Logger - tracer trace.Tracer - - targetDSP deploysource.Provider - runningDSP deploysource.Provider + deployment *model.Deployment + workingDir string + + stageBasedPluginsMap map[string]pluginapi.PluginClient + + apiClient apiClient + gitClient gitClient + metadataStore metadatastore.MetadataStore + notifier notifier + + targetDS *deployment.DeploymentSource + runningDS *deployment.DeploymentSource // Current status of each stages. // We stores their current statuses into this field @@ -67,7 +56,10 @@ type scheduler struct { // We may need a mutex for this field in the future // when the stages can be executed concurrently. 
stageStatuses map[string]model.StageStatus - genericApplicationConfig config.GenericApplicationSpec + genericApplicationConfig *config.GenericApplicationSpec + + logger *zap.Logger + tracer trace.Tracer done atomic.Bool doneTimestamp time.Time @@ -83,13 +75,8 @@ func newScheduler( workingDir string, apiClient apiClient, gitClient gitClient, - commandLister commandLister, - applicationLister applicationLister, - analysisResultStore analysisResultStore, + stageBasedPluginsMap map[string]pluginapi.PluginClient, notifier notifier, - sd secretDecrypter, - pipedConfig *config.PipedSpec, - appManifestsCache cache.Cache, logger *zap.Logger, tracerProvider trace.TracerProvider, ) *scheduler { @@ -104,17 +91,11 @@ func newScheduler( s := &scheduler{ deployment: d, workingDir: workingDir, - executorRegistry: registry.DefaultRegistry(), + stageBasedPluginsMap: stageBasedPluginsMap, apiClient: apiClient, gitClient: gitClient, - commandLister: commandLister, - applicationLister: applicationLister, - analysisResultStore: analysisResultStore, metadataStore: metadatastore.NewMetadataStore(apiClient, d), notifier: notifier, - secretDecrypter: sd, - pipedConfig: pipedConfig, - appManifestsCache: appManifestsCache, doneDeploymentStatus: d.Status, cancelledCh: make(chan *model.ReportableCommand, 1), logger: logger, @@ -233,49 +214,16 @@ func (s *scheduler) Run(ctx context.Context) error { ) deploymentStatus = model.DeploymentStatus_DEPLOYMENT_SUCCESS - repoCfg := config.PipedRepository{ - RepoID: s.deployment.GitPath.Repo.Id, - Remote: s.deployment.GitPath.Repo.Remote, - Branch: s.deployment.GitPath.Repo.Branch, - } - - s.targetDSP = deploysource.NewProvider( - filepath.Join(s.workingDir, "target-deploysource"), - deploysource.NewGitSourceCloner(s.gitClient, repoCfg, "target", s.deployment.Trigger.Commit.Hash), - *s.deployment.GitPath, - s.secretDecrypter, - ) - - if s.deployment.RunningCommitHash != "" { - gp := *s.deployment.GitPath - gp.ConfigFilename = 
s.deployment.RunningConfigFilename - - s.runningDSP = deploysource.NewProvider( - filepath.Join(s.workingDir, "running-deploysource"), - deploysource.NewGitSourceCloner(s.gitClient, repoCfg, "running", s.deployment.RunningCommitHash), - gp, - s.secretDecrypter, - ) - } - - // We use another deploy source provider to load the application configuration at the target commit. - // This provider is configured with a nil secretDecrypter - // because decrypting the sealed secrets is not required. - // We need only the application configuration spec. - configDSP := deploysource.NewProvider( - filepath.Join(s.workingDir, "target-config"), - deploysource.NewGitSourceCloner(s.gitClient, repoCfg, "target", s.deployment.Trigger.Commit.Hash), - *s.deployment.GitPath, - nil, - ) - ds, err := configDSP.Get(ctx, io.Discard) + /// TODO: prepare the targetDS and runningDS + var targetDS *deployment.DeploymentSource + cfg, err := config.DecodeYAML[*config.GenericApplicationSpec](targetDS.GetApplicationConfig()) if err != nil { deploymentStatus = model.DeploymentStatus_DEPLOYMENT_FAILURE - statusReason = fmt.Sprintf("Unable to prepare application configuration source data at target commit (%v)", err) + statusReason = fmt.Sprintf("Failed to decode application configuration at target commit (%v)", err) s.reportDeploymentCompleted(ctx, deploymentStatus, statusReason, "") return err } - s.genericApplicationConfig = *ds.GenericApplicationConfig + s.genericApplicationConfig = cfg.Spec ctx, span := s.tracer.Start( newContextWithDeploymentSpan(ctx, s.deployment), @@ -303,10 +251,12 @@ func (s *scheduler) Run(ctx context.Context) error { for i, ps := range s.deployment.Stages { lastStage = s.deployment.Stages[i] + // Ignore the stage if it is already completed. if ps.Status == model.StageStatus_STAGE_SUCCESS { continue } - if !ps.Visible || ps.Name == model.StageRollback.String() { + // Ignore the rollback stage, we did it later by another loop. 
+ if ps.Rollback { continue } @@ -324,7 +274,7 @@ func (s *scheduler) Run(ctx context.Context) error { var ( result model.StageStatus - sig, handler = executor.NewStopSignal() + sig, handler = NewStopSignal() doneCh = make(chan struct{}) ) @@ -337,9 +287,7 @@ func (s *scheduler) Run(ctx context.Context) error { )) defer span.End() - result = s.executeStage(sig, *ps, func(in executor.Input) (executor.Executor, bool) { - return s.executorRegistry.Executor(model.Stage(ps.Name), in) - }) + result = s.executeStage(sig, ps) switch result { case model.StageStatus_STAGE_SUCCESS: @@ -394,7 +342,7 @@ func (s *scheduler) Run(ctx context.Context) error { if result == model.StageStatus_STAGE_FAILURE { deploymentStatus = model.DeploymentStatus_DEPLOYMENT_FAILURE // The stage was failed because of timing out. - if sig.Signal() == executor.StopSignalTimeout { + if sig.Signal() == StopSignalTimeout { statusReason = fmt.Sprintf("Timed out while executing stage %s", ps.Id) } else { statusReason = fmt.Sprintf("Failed while executing stage %s", ps.Id) @@ -427,11 +375,11 @@ func (s *scheduler) Run(ctx context.Context) error { for _, stage := range rollbackStages { // Start running rollback stage. var ( - sig, handler = executor.NewStopSignal() + sig, handler = NewStopSignal() doneCh = make(chan struct{}) ) go func() { - rbs := *stage + rbs := stage rbs.Requires = []string{lastStage.Id} _, span := s.tracer.Start(ctx, rbs.Name, trace.WithAttributes( @@ -442,9 +390,7 @@ func (s *scheduler) Run(ctx context.Context) error { )) defer span.End() - result := s.executeStage(sig, rbs, func(in executor.Input) (executor.Executor, bool) { - return s.executorRegistry.RollbackExecutor(s.deployment.Kind, in) - }) + result := s.executeStage(sig, rbs) switch result { case model.StageStatus_STAGE_SUCCESS: @@ -485,23 +431,19 @@ func (s *scheduler) Run(ctx context.Context) error { return nil } -// executeStage finds the executor for the given stage and execute. 
-func (s *scheduler) executeStage(sig executor.StopSignal, ps model.PipelineStage, executorFactory func(executor.Input) (executor.Executor, bool)) (finalStatus model.StageStatus) { +// executeStage finds the plugin for the given stage and execute. +// At the time this executeStage is called, the stage status is before model.StageStatus_STAGE_RUNNING. +// As the first step, it updates the stage status to model.StageStatus_STAGE_RUNNING. +// And that will be treated as the original status of the given stage. +func (s *scheduler) executeStage(sig StopSignal, ps *model.PipelineStage) (finalStatus model.StageStatus) { var ( ctx = sig.Context() originalStatus = ps.Status ) - defer func() { - // When the piped has been terminated (PS kill) while the stage is still running - // we should not mark the log persister as completed. - if !finalStatus.IsCompleted() && sig.Terminated() { - return - } - }() // Check whether to execute the script rollback stage or not. // If the base stage is executed, the script rollback stage will be executed. - if ps.Name == model.StageScriptRunRollback.String() { + if ps.Rollback { baseStageID := ps.Metadata["baseStageID"] if baseStageID == "" { return @@ -525,24 +467,18 @@ func (s *scheduler) executeStage(sig executor.StopSignal, ps model.PipelineStage originalStatus = model.StageStatus_STAGE_RUNNING } - // Check the existence of the specified cloud provider. - if !s.pipedConfig.HasPlatformProvider(s.deployment.PlatformProvider, s.deployment.Kind) { - s.logger.Error(fmt.Sprintf("This piped is not having the specified platform provider in this deployment: %v", s.deployment.PlatformProvider)) - if err := s.reportStageStatus(ctx, ps.Id, model.StageStatus_STAGE_FAILURE, ps.Requires); err != nil { - s.logger.Error("failed to report stage status", zap.Error(err)) - } + // Find the executor plugin for this stage. 
+ plugin, ok := s.stageBasedPluginsMap[ps.Name] + if !ok { + err := fmt.Errorf("no registered plugin that can perform for stage %s", ps.Name) + s.logger.Error(err.Error()) + s.reportStageStatus(ctx, ps.Id, model.StageStatus_STAGE_FAILURE, ps.Requires) return model.StageStatus_STAGE_FAILURE } // Load the stage configuration. - var stageConfig config.PipelineStage - var stageConfigFound bool - if ps.Predefined { - // FIXME: stageConfig, stageConfigFound = pln.GetPredefinedStage(ps.Id) - } else { - stageConfig, stageConfigFound = s.genericApplicationConfig.GetStage(ps.Index) - } - + // TODO: Check this works with pre-defined stages. (stages added to the pipeline without user-defined configuration) + stageConfig, stageConfigFound := s.genericApplicationConfig.GetStageByte(ps.Index) if !stageConfigFound { s.logger.Error("Unable to find the stage configuration") if err := s.reportStageStatus(ctx, ps.Id, model.StageStatus_STAGE_FAILURE, ps.Requires); err != nil { @@ -551,50 +487,24 @@ func (s *scheduler) executeStage(sig executor.StopSignal, ps model.PipelineStage return model.StageStatus_STAGE_FAILURE } - app, ok := s.applicationLister.Get(s.deployment.ApplicationId) - if !ok { - s.logger.Error(fmt.Sprintf("Application %s for this deployment was not found (Maybe it was disabled).", s.deployment.ApplicationId)) - s.reportStageStatus(ctx, ps.Id, model.StageStatus_STAGE_FAILURE, ps.Requires) - return model.StageStatus_STAGE_FAILURE - } - - cmdLister := stageCommandLister{ - lister: s.commandLister, - deploymentID: s.deployment.Id, - stageID: ps.Id, - } - aStore := appAnalysisResultStore{ - store: s.analysisResultStore, - applicationID: app.Id, - } - input := executor.Input{ - Stage: &ps, - StageConfig: stageConfig, - Deployment: s.deployment, - Application: app, - PipedConfig: s.pipedConfig, - TargetDSP: s.targetDSP, - RunningDSP: s.runningDSP, - GitClient: s.gitClient, - CommandLister: cmdLister, - MetadataStore: s.metadataStore, - AppManifestsCache: s.appManifestsCache, - 
AnalysisResultStore: aStore, - Logger: s.logger, - Notifier: s.notifier, - } - - // Find the executor for this stage. - ex, ok := executorFactory(input) - if !ok { - err := fmt.Errorf("no registered executor for stage %s", ps.Name) - s.logger.Error(err.Error()) + // Start running executor. + res, err := plugin.ExecuteStage(ctx, &deployment.ExecuteStageRequest{ + Input: &deployment.ExecutePluginInput{ + Deployment: s.deployment, + Stage: ps, + StageConfig: stageConfig, + RunningDeploymentSource: s.runningDS, // TODO: prepare this + TargetDeploymentSource: s.targetDS, // TODO: prepare this + }, + }) + if err != nil { + s.logger.Error("failed to execute stage", zap.Error(err)) s.reportStageStatus(ctx, ps.Id, model.StageStatus_STAGE_FAILURE, ps.Requires) return model.StageStatus_STAGE_FAILURE } - // Start running executor. - status := ex.Execute(sig) + // Determine the final status of the stage. + status := determineStageStatus(sig.Signal(), originalStatus, res.Status) // Commit deployment state status in the following cases: // - Apply state successfully. @@ -617,9 +527,25 @@ func (s *scheduler) executeStage(sig executor.StopSignal, ps model.PipelineStage return originalStatus } +// determineStageStatus determines the final status of the stage based on the given stop signal. +// Normal is the case when the stop signal is StopSignalNone. 
+func determineStageStatus(sig StopSignalType, ori, got model.StageStatus) model.StageStatus { + switch sig { + case StopSignalNone: + return got + case StopSignalTerminate: + return ori + case StopSignalCancel: + return model.StageStatus_STAGE_CANCELLED + case StopSignalTimeout: + return model.StageStatus_STAGE_FAILURE + default: + return model.StageStatus_STAGE_FAILURE + } +} + func (s *scheduler) reportStageStatus(ctx context.Context, stageID string, status model.StageStatus, requires []string) error { var ( - err error now = s.nowFunc() req = &pipedservice.ReportStageStatusChangedRequest{ DeploymentId: s.deployment.Id, @@ -635,21 +561,19 @@ func (s *scheduler) reportStageStatus(ctx context.Context, stageID string, statu // Update stage status at local. s.stageStatuses[stageID] = status - // Update stage status on the remote. - for retry.WaitNext(ctx) { - _, err = s.apiClient.ReportStageStatusChanged(ctx, req) - if err == nil { - break + _, err := retry.Do(ctx, func() (interface{}, error) { + _, err := s.apiClient.ReportStageStatusChanged(ctx, req) + if err != nil { + return nil, fmt.Errorf("failed to report stage status to control-plane: %v", err) } - err = fmt.Errorf("failed to report stage status to control-plane: %v", err) - } + return nil, nil + }) return err } func (s *scheduler) reportDeploymentStatusChanged(ctx context.Context, status model.DeploymentStatus, desc string) error { var ( - err error retry = pipedservice.NewRetry(10) req = &pipedservice.ReportDeploymentStatusChangedRequest{ DeploymentId: s.deployment.Id, @@ -661,19 +585,19 @@ func (s *scheduler) reportDeploymentStatusChanged(ctx context.Context, status mo ) // Update deployment status on remote. 
- for retry.WaitNext(ctx) { - if _, err = s.apiClient.ReportDeploymentStatusChanged(ctx, req); err == nil { - return nil + _, err := retry.Do(ctx, func() (interface{}, error) { + _, err := s.apiClient.ReportDeploymentStatusChanged(ctx, req) + if err != nil { + return nil, fmt.Errorf("failed to report deployment status to control-plane: %v", err) } - err = fmt.Errorf("failed to report deployment status to control-plane: %v", err) - } + return nil, nil + }) return err } func (s *scheduler) reportDeploymentCompleted(ctx context.Context, status model.DeploymentStatus, desc, cancelCommander string) error { var ( - err error now = s.nowFunc() req = &pipedservice.ReportDeploymentCompletedRequest{ DeploymentId: s.deployment.Id, @@ -737,12 +661,13 @@ func (s *scheduler) reportDeploymentCompleted(ctx context.Context, status model. }() // Update deployment status on remote. - for retry.WaitNext(ctx) { - if _, err = s.apiClient.ReportDeploymentCompleted(ctx, req); err == nil { - return nil + _, err := retry.Do(ctx, func() (interface{}, error) { + _, err := s.apiClient.ReportDeploymentCompleted(ctx, req) + if err != nil { + return nil, fmt.Errorf("failed to report deployment status to control-plane: %v", err) } - err = fmt.Errorf("failed to report deployment status to control-plane: %w", err) - } + return nil, nil + }) return err } @@ -764,7 +689,6 @@ func (p *scheduler) getApplicationNotificationMentions(event model.NotificationE func (s *scheduler) reportMostRecentlySuccessfulDeployment(ctx context.Context) error { var ( - err error req = &pipedservice.ReportApplicationMostRecentDeploymentRequest{ ApplicationId: s.deployment.ApplicationId, Status: model.DeploymentStatus_DEPLOYMENT_SUCCESS, @@ -782,35 +706,13 @@ func (s *scheduler) reportMostRecentlySuccessfulDeployment(ctx context.Context) retry = pipedservice.NewRetry(10) ) - for retry.WaitNext(ctx) { - if _, err = s.apiClient.ReportApplicationMostRecentDeployment(ctx, req); err == nil { - return nil + _, err := 
retry.Do(ctx, func() (interface{}, error) { + _, err := s.apiClient.ReportApplicationMostRecentDeployment(ctx, req) + if err != nil { + return nil, fmt.Errorf("failed to report most recent successful deployment: %v", err) } - err = fmt.Errorf("failed to report most recent successful deployment: %w", err) - } + return nil, nil + }) return err } - -type stageCommandLister struct { - lister commandLister - deploymentID string - stageID string -} - -func (s stageCommandLister) ListCommands() []model.ReportableCommand { - return s.lister.ListStageCommands(s.deploymentID, s.stageID) -} - -type appAnalysisResultStore struct { - store analysisResultStore - applicationID string -} - -func (a appAnalysisResultStore) GetLatestAnalysisResult(ctx context.Context) (*model.AnalysisResult, error) { - return a.store.GetLatestAnalysisResult(ctx, a.applicationID) -} - -func (a appAnalysisResultStore) PutLatestAnalysisResult(ctx context.Context, analysisResult *model.AnalysisResult) error { - return a.store.PutLatestAnalysisResult(ctx, a.applicationID, analysisResult) -} diff --git a/pkg/app/pipedv1/controller/scheduler_test.go b/pkg/app/pipedv1/controller/scheduler_test.go new file mode 100644 index 0000000000..793e937b9c --- /dev/null +++ b/pkg/app/pipedv1/controller/scheduler_test.go @@ -0,0 +1,310 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package controller + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "go.uber.org/zap/zaptest" + "google.golang.org/grpc" + + "github.com/pipe-cd/pipecd/pkg/app/server/service/pipedservice" + config "github.com/pipe-cd/pipecd/pkg/configv1" + "github.com/pipe-cd/pipecd/pkg/model" + pluginapi "github.com/pipe-cd/pipecd/pkg/plugin/api/v1alpha1" + "github.com/pipe-cd/pipecd/pkg/plugin/api/v1alpha1/deployment" +) + +func TestDetermineStageStatus(t *testing.T) { + testcases := []struct { + name string + sig StopSignalType + ori model.StageStatus + got model.StageStatus + expected model.StageStatus + }{ + { + name: "No stop signal, should get got status", + sig: StopSignalNone, + ori: model.StageStatus_STAGE_RUNNING, + got: model.StageStatus_STAGE_SUCCESS, + expected: model.StageStatus_STAGE_SUCCESS, + }, { + name: "Terminated signal given, should get original status", + sig: StopSignalTerminate, + ori: model.StageStatus_STAGE_RUNNING, + got: model.StageStatus_STAGE_SKIPPED, + expected: model.StageStatus_STAGE_RUNNING, + }, { + name: "Timeout signal given, should get failed status", + sig: StopSignalTimeout, + ori: model.StageStatus_STAGE_RUNNING, + got: model.StageStatus_STAGE_RUNNING, + expected: model.StageStatus_STAGE_FAILURE, + }, { + name: "Cancel signal given, should get cancelled status", + sig: StopSignalCancel, + ori: model.StageStatus_STAGE_RUNNING, + got: model.StageStatus_STAGE_RUNNING, + expected: model.StageStatus_STAGE_CANCELLED, + }, { + name: "Unknown signal type given, should get failed status", + sig: StopSignalType("unknown"), + ori: model.StageStatus_STAGE_RUNNING, + got: model.StageStatus_STAGE_RUNNING, + expected: model.StageStatus_STAGE_FAILURE, + }, + } + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + got := determineStageStatus(tc.sig, tc.ori, tc.got) + assert.Equal(t, tc.expected, got) + }) + } +} + +type fakeExecutorPluginClient struct { + pluginapi.PluginClient +} + +func (m 
*fakeExecutorPluginClient) ExecuteStage(ctx context.Context, req *deployment.ExecuteStageRequest, opts ...grpc.CallOption) (*deployment.ExecuteStageResponse, error) { + return &deployment.ExecuteStageResponse{ + Status: model.StageStatus_STAGE_SUCCESS, + }, nil +} + +type fakeApiClient struct { + apiClient +} + +func (f *fakeApiClient) ReportStageStatusChanged(ctx context.Context, req *pipedservice.ReportStageStatusChangedRequest, opts ...grpc.CallOption) (*pipedservice.ReportStageStatusChangedResponse, error) { + return nil, nil +} + +func TestExecuteStage(t *testing.T) { + logger := zaptest.NewLogger(t) + + testcases := []struct { + name string + deployment *model.Deployment + stageStatuses map[string]model.StageStatus + applicationConfig *config.GenericApplicationSpec + expected model.StageStatus + }{ + { + name: "stage not started yet, everything go right", + deployment: &model.Deployment{ + Stages: []*model.PipelineStage{ + { + Id: "stage-id", + Name: "stage-name", + Index: 0, + Status: model.StageStatus_STAGE_NOT_STARTED_YET, + }, + }, + }, + stageStatuses: map[string]model.StageStatus{ + "stage-id": model.StageStatus_STAGE_NOT_STARTED_YET, + }, + expected: model.StageStatus_STAGE_SUCCESS, + }, + { + name: "stage is rollback but base stage not started yet, should not trigger anything", + deployment: &model.Deployment{ + Stages: []*model.PipelineStage{ + { + Id: "stage-rollback-id", + Name: "stage-rollback-name", + Status: model.StageStatus_STAGE_NOT_STARTED_YET, + Rollback: true, + Metadata: map[string]string{ + "baseStageId": "stage-id", + }, + }, + }, + }, + stageStatuses: map[string]model.StageStatus{ + "stage-id": model.StageStatus_STAGE_NOT_STARTED_YET, + }, + expected: model.StageStatus_STAGE_NOT_STARTED_YET, + }, + { + name: "stage is rollback but base stage is skipped, should not trigger anything", + deployment: &model.Deployment{ + Stages: []*model.PipelineStage{ + { + Id: "stage-rollback-id", + Name: "stage-rollback-name", + Status: 
model.StageStatus_STAGE_NOT_STARTED_YET, + Rollback: true, + Metadata: map[string]string{ + "baseStageId": "stage-id", + }, + }, + }, + }, + stageStatuses: map[string]model.StageStatus{ + "stage-id": model.StageStatus_STAGE_SKIPPED, + }, + expected: model.StageStatus_STAGE_NOT_STARTED_YET, + }, + { + name: "stage which can not be handled by the current scheduler, should be set as failed", + deployment: &model.Deployment{ + Stages: []*model.PipelineStage{ + { + Id: "stage-id", + Name: "stage-name-not-found", + Status: model.StageStatus_STAGE_NOT_STARTED_YET, + }, + }, + }, + stageStatuses: map[string]model.StageStatus{ + "stage-id": model.StageStatus_STAGE_NOT_STARTED_YET, + }, + expected: model.StageStatus_STAGE_FAILURE, + }, + { + name: "stage without config, should be set as failed", + deployment: &model.Deployment{ + Stages: []*model.PipelineStage{ + { + Id: "stage-id", + Name: "stage-name", + Index: 0, + Status: model.StageStatus_STAGE_NOT_STARTED_YET, + }, + }, + }, + stageStatuses: map[string]model.StageStatus{ + "stage-id": model.StageStatus_STAGE_NOT_STARTED_YET, + }, + applicationConfig: &config.GenericApplicationSpec{ + Pipeline: &config.DeploymentPipeline{ + Stages: []config.PipelineStage{}, + }, + }, + expected: model.StageStatus_STAGE_FAILURE, + }, + } + + sig, handler := NewStopSignal() + defer handler.Terminate() + + for _, tc := range testcases { + t.Run(tc.name, func(t *testing.T) { + s := &scheduler{ + apiClient: &fakeApiClient{}, + stageBasedPluginsMap: map[string]pluginapi.PluginClient{ + "stage-name": &fakeExecutorPluginClient{}, + }, + genericApplicationConfig: &config.GenericApplicationSpec{ + Pipeline: &config.DeploymentPipeline{ + Stages: []config.PipelineStage{ + {ID: "stage-id", Name: "stage-name"}, + }, + }, + }, + deployment: tc.deployment, + stageStatuses: tc.stageStatuses, + logger: logger, + nowFunc: time.Now, + } + + if tc.applicationConfig != nil { + s.genericApplicationConfig = tc.applicationConfig + } + + finalStatus := 
s.executeStage(sig, s.deployment.Stages[0]) + assert.Equal(t, tc.expected, finalStatus) + }) + } +} + +func TestExecuteStage_SignalTerminated(t *testing.T) { + logger := zaptest.NewLogger(t) + sig, handler := NewStopSignal() + + s := &scheduler{ + apiClient: &fakeApiClient{}, + stageBasedPluginsMap: map[string]pluginapi.PluginClient{ + "stage-name": &fakeExecutorPluginClient{}, + }, + genericApplicationConfig: &config.GenericApplicationSpec{ + Pipeline: &config.DeploymentPipeline{ + Stages: []config.PipelineStage{ + {ID: "stage-id", Name: "stage-name"}, + }, + }, + }, + deployment: &model.Deployment{ + Stages: []*model.PipelineStage{ + { + Id: "stage-id", + Name: "stage-name", + Index: 0, + Status: model.StageStatus_STAGE_NOT_STARTED_YET, + }, + }, + }, + stageStatuses: map[string]model.StageStatus{}, + logger: logger, + nowFunc: time.Now, + } + + handler.Terminate() + finalStatus := s.executeStage(sig, s.deployment.Stages[0]) + assert.Equal(t, model.StageStatus_STAGE_FAILURE, finalStatus) +} + +func TestExecuteStage_SignalCancelled(t *testing.T) { + logger := zaptest.NewLogger(t) + sig, handler := NewStopSignal() + + s := &scheduler{ + apiClient: &fakeApiClient{}, + stageBasedPluginsMap: map[string]pluginapi.PluginClient{ + "stage-name": &fakeExecutorPluginClient{}, + }, + genericApplicationConfig: &config.GenericApplicationSpec{ + Pipeline: &config.DeploymentPipeline{ + Stages: []config.PipelineStage{ + {ID: "stage-id", Name: "stage-name"}, + }, + }, + }, + deployment: &model.Deployment{ + Stages: []*model.PipelineStage{ + { + Id: "stage-id", + Name: "stage-name", + Index: 0, + Status: model.StageStatus_STAGE_NOT_STARTED_YET, + }, + }, + }, + stageStatuses: map[string]model.StageStatus{}, + logger: logger, + nowFunc: time.Now, + } + + handler.Cancel() + finalStatus := s.executeStage(sig, s.deployment.Stages[0]) + assert.Equal(t, model.StageStatus_STAGE_FAILURE, finalStatus) +} diff --git a/pkg/app/pipedv1/executor/stopsignal.go 
b/pkg/app/pipedv1/controller/stopsignal.go similarity index 99% rename from pkg/app/pipedv1/executor/stopsignal.go rename to pkg/app/pipedv1/controller/stopsignal.go index 3949b3b109..b6298764cc 100644 --- a/pkg/app/pipedv1/executor/stopsignal.go +++ b/pkg/app/pipedv1/controller/stopsignal.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package executor +package controller import ( "context" diff --git a/pkg/app/pipedv1/executor/stopsignal_test.go b/pkg/app/pipedv1/controller/stopsignal_test.go similarity index 98% rename from pkg/app/pipedv1/executor/stopsignal_test.go rename to pkg/app/pipedv1/controller/stopsignal_test.go index 113869ceb4..93deb50283 100644 --- a/pkg/app/pipedv1/executor/stopsignal_test.go +++ b/pkg/app/pipedv1/controller/stopsignal_test.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package executor +package controller import ( "testing" diff --git a/pkg/app/pipedv1/eventwatcher/eventwatcher.go b/pkg/app/pipedv1/eventwatcher/eventwatcher.go index 46755ae528..98ecddd864 100644 --- a/pkg/app/pipedv1/eventwatcher/eventwatcher.go +++ b/pkg/app/pipedv1/eventwatcher/eventwatcher.go @@ -37,7 +37,7 @@ import ( "github.com/pipe-cd/pipecd/pkg/app/server/service/pipedservice" "github.com/pipe-cd/pipecd/pkg/backoff" - "github.com/pipe-cd/pipecd/pkg/config" + config "github.com/pipe-cd/pipecd/pkg/configv1" "github.com/pipe-cd/pipecd/pkg/git" "github.com/pipe-cd/pipecd/pkg/model" "github.com/pipe-cd/pipecd/pkg/regexpool" @@ -257,7 +257,7 @@ func (w *watcher) run(ctx context.Context, repo git.Repo, repoCfg config.PipedRe } } - appCfg, err := config.LoadApplication(repo.GetPath(), app.GitPath.GetApplicationConfigFilePath(), app.Kind) + appCfg, err := config.LoadApplication(repo.GetPath(), app.GitPath.GetApplicationConfigFilePath()) if err != nil { w.logger.Error("failed to load application 
configuration", zap.Error(err)) continue diff --git a/pkg/app/pipedv1/executor/executor.go b/pkg/app/pipedv1/executor/executor.go deleted file mode 100644 index e843bdc520..0000000000 --- a/pkg/app/pipedv1/executor/executor.go +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright 2024 The PipeCD Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package executor - -import ( - "context" - - "go.uber.org/zap" - - "github.com/pipe-cd/pipecd/pkg/app/pipedv1/deploysource" - "github.com/pipe-cd/pipecd/pkg/app/pipedv1/metadatastore" - "github.com/pipe-cd/pipecd/pkg/cache" - "github.com/pipe-cd/pipecd/pkg/config" - "github.com/pipe-cd/pipecd/pkg/git" - "github.com/pipe-cd/pipecd/pkg/model" -) - -type Executor interface { - // Execute starts running executor until completion - // or the StopSignal has emitted. 
- Execute(sig StopSignal) model.StageStatus -} - -type Factory func(in Input) Executor - -type CommandLister interface { - ListCommands() []model.ReportableCommand -} - -type AnalysisResultStore interface { - GetLatestAnalysisResult(ctx context.Context) (*model.AnalysisResult, error) - PutLatestAnalysisResult(ctx context.Context, analysisResult *model.AnalysisResult) error -} - -type Notifier interface { - Notify(event model.NotificationEvent) -} - -type GitClient interface { - Clone(ctx context.Context, repoID, remote, branch, destination string) (git.Repo, error) -} - -type Input struct { - Stage *model.PipelineStage - StageConfig config.PipelineStage - // Readonly deployment model. - Deployment *model.Deployment - Application *model.Application - PipedConfig *config.PipedSpec - // Deploy source at target commit - TargetDSP deploysource.Provider - // Deploy source at running commit - RunningDSP deploysource.Provider - GitClient GitClient - CommandLister CommandLister - MetadataStore metadatastore.MetadataStore - AppManifestsCache cache.Cache - AnalysisResultStore AnalysisResultStore - Logger *zap.Logger - Notifier Notifier -} - -// DetermineStageStatus determines the final status of the stage based on the given stop signal. -// Normal is the case when the stop signal is StopSignalNone. -func DetermineStageStatus(sig StopSignalType, ori, got model.StageStatus) model.StageStatus { - switch sig { - case StopSignalNone: - return got - case StopSignalTerminate: - return ori - case StopSignalCancel: - return model.StageStatus_STAGE_CANCELLED - case StopSignalTimeout: - return model.StageStatus_STAGE_FAILURE - default: - return model.StageStatus_STAGE_FAILURE - } -} diff --git a/pkg/app/pipedv1/executor/executor_test.go b/pkg/app/pipedv1/executor/executor_test.go deleted file mode 100644 index 88e1142164..0000000000 --- a/pkg/app/pipedv1/executor/executor_test.go +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright 2024 The PipeCD Authors. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package executor - -import ( - "testing" - - "github.com/stretchr/testify/assert" - - "github.com/pipe-cd/pipecd/pkg/model" -) - -func TestDetermineStageStatus(t *testing.T) { - testcases := []struct { - name string - sig StopSignalType - ori model.StageStatus - got model.StageStatus - expected model.StageStatus - }{ - { - name: "No stop signal, should get got status", - sig: StopSignalNone, - ori: model.StageStatus_STAGE_RUNNING, - got: model.StageStatus_STAGE_SUCCESS, - expected: model.StageStatus_STAGE_SUCCESS, - }, { - name: "Terminated signal given, should get original status", - sig: StopSignalTerminate, - ori: model.StageStatus_STAGE_RUNNING, - got: model.StageStatus_STAGE_SKIPPED, - expected: model.StageStatus_STAGE_RUNNING, - }, { - name: "Timeout signal given, should get failed status", - sig: StopSignalTimeout, - ori: model.StageStatus_STAGE_RUNNING, - got: model.StageStatus_STAGE_RUNNING, - expected: model.StageStatus_STAGE_FAILURE, - }, { - name: "Cancel signal given, should get cancelled status", - sig: StopSignalCancel, - ori: model.StageStatus_STAGE_RUNNING, - got: model.StageStatus_STAGE_RUNNING, - expected: model.StageStatus_STAGE_CANCELLED, - }, { - name: "Unknown signal type given, should get failed status", - sig: StopSignalType("unknown"), - ori: model.StageStatus_STAGE_RUNNING, - got: model.StageStatus_STAGE_RUNNING, - expected: model.StageStatus_STAGE_FAILURE, - }, - } - - for _, tc 
:= range testcases { - t.Run(tc.name, func(t *testing.T) { - got := DetermineStageStatus(tc.sig, tc.ori, tc.got) - assert.Equal(t, tc.expected, got) - }) - } -} diff --git a/pkg/app/pipedv1/executor/registry/registry.go b/pkg/app/pipedv1/executor/registry/registry.go deleted file mode 100644 index dd4c3afbf1..0000000000 --- a/pkg/app/pipedv1/executor/registry/registry.go +++ /dev/null @@ -1,93 +0,0 @@ -// Copyright 2024 The PipeCD Authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package registry - -import ( - "fmt" - "sync" - - "github.com/pipe-cd/pipecd/pkg/app/pipedv1/executor" - "github.com/pipe-cd/pipecd/pkg/model" -) - -type Registry interface { - Executor(stage model.Stage, in executor.Input) (executor.Executor, bool) - RollbackExecutor(kind model.ApplicationKind, in executor.Input) (executor.Executor, bool) -} - -type registry struct { - factories map[model.Stage]executor.Factory - rollbackFactories map[model.RollbackKind]executor.Factory - mu sync.RWMutex -} - -func (r *registry) Register(stage model.Stage, f executor.Factory) error { - r.mu.Lock() - defer r.mu.Unlock() - - if _, ok := r.factories[stage]; ok { - return fmt.Errorf("executor for %s stage has already been registered", stage) - } - r.factories[stage] = f - return nil -} - -func (r *registry) RegisterRollback(kind model.RollbackKind, f executor.Factory) error { - r.mu.Lock() - defer r.mu.Unlock() - - if _, ok := r.rollbackFactories[kind]; ok { - return fmt.Errorf("rollback executor for %s application kind has already been registered", kind.String()) - } - r.rollbackFactories[kind] = f - return nil -} - -func (r *registry) Executor(stage model.Stage, in executor.Input) (executor.Executor, bool) { - r.mu.RLock() - defer r.mu.RUnlock() - - f, ok := r.factories[stage] - if !ok { - return nil, false - } - return f(in), true -} - -func (r *registry) RollbackExecutor(kind model.ApplicationKind, in executor.Input) (executor.Executor, bool) { - r.mu.RLock() - defer r.mu.RUnlock() - var rollbackKind model.RollbackKind - if in.Stage.Name == model.StageCustomSyncRollback.String() { - rollbackKind = model.RollbackKind_Rollback_CUSTOM_SYNC - } else { - rollbackKind = kind.ToRollbackKind() - } - - f, ok := r.rollbackFactories[rollbackKind] - if !ok { - return nil, false - } - return f(in), true -} - -var defaultRegistry = ®istry{ - factories: make(map[model.Stage]executor.Factory), - rollbackFactories: make(map[model.RollbackKind]executor.Factory), -} - -func DefaultRegistry() 
Registry { - return defaultRegistry -} diff --git a/pkg/app/pipedv1/notifier/notifier.go b/pkg/app/pipedv1/notifier/notifier.go index 488315aae3..9d4da780df 100644 --- a/pkg/app/pipedv1/notifier/notifier.go +++ b/pkg/app/pipedv1/notifier/notifier.go @@ -25,7 +25,7 @@ import ( "go.uber.org/zap" "golang.org/x/sync/errgroup" - "github.com/pipe-cd/pipecd/pkg/config" + config "github.com/pipe-cd/pipecd/pkg/configv1" "github.com/pipe-cd/pipecd/pkg/model" "github.com/pipe-cd/pipecd/pkg/version" ) @@ -57,31 +57,31 @@ func NewNotifier(cfg *config.PipedSpec, logger *zap.Logger) (*Notifier, error) { } handlers := make([]handler, 0, len(cfg.Notifications.Routes)) - for _, route := range cfg.Notifications.Routes { - receiver, ok := receivers[route.Receiver] - if !ok { - return nil, fmt.Errorf("missing receiver %s that is used in route %s", route.Receiver, route.Name) - } - - var sd sender - switch { - case receiver.Slack != nil: - slacksender, err := newSlackSender(receiver.Name, *receiver.Slack, cfg.WebAddress, logger) - if err != nil { - return nil, fmt.Errorf("failed to create slack sender: %w", err) - } - sd = slacksender - case receiver.Webhook != nil: - sd = newWebhookSender(receiver.Name, *receiver.Webhook, cfg.WebAddress, logger) - default: - continue - } - - handlers = append(handlers, handler{ - matcher: newMatcher(route), - sender: sd, - }) - } + // for _, route := range cfg.Notifications.Routes { + // // receiver, ok := receivers[route.Receiver] + // // if !ok { + // // return nil, fmt.Errorf("missing receiver %s that is used in route %s", route.Receiver, route.Name) + // // } + + // var sd sender + // switch { + // case receiver.Slack != nil: + // slacksender, err := newSlackSender(receiver.Name, *receiver.Slack, cfg.WebAddress, logger) + // if err != nil { + // return nil, fmt.Errorf("failed to create slack sender: %w", err) + // } + // sd = slacksender + // case receiver.Webhook != nil: + // sd = newWebhookSender(receiver.Name, *receiver.Webhook, cfg.WebAddress, 
logger) + // default: + // continue + // } + + // handlers = append(handlers, handler{ + // matcher: newMatcher(route), + // sender: sd, + // }) + // } return &Notifier{ config: cfg, diff --git a/pkg/app/pipedv1/plugin/kubernetes/config/application.go b/pkg/app/pipedv1/plugin/kubernetes/config/application.go index 359d31bc34..64ad7b2868 100644 --- a/pkg/app/pipedv1/plugin/kubernetes/config/application.go +++ b/pkg/app/pipedv1/plugin/kubernetes/config/application.go @@ -35,6 +35,9 @@ type KubernetesApplicationSpec struct { // name: replication-controller-name Workloads []K8sResourceReference `json:"workloads"` + // The label will be configured to variant manifests used to distinguish them. + VariantLabel KubernetesVariantLabel `json:"variantLabel"` + // TODO: Define fields for KubernetesApplicationSpec. } @@ -48,9 +51,41 @@ type KubernetesDeploymentInput struct { // List of manifest files in the application directory used to deploy. // Empty means all manifest files in the directory will be used. Manifests []string `json:"manifests,omitempty"` + // Version of kubectl will be used. + KubectlVersion string `json:"kubectlVersion,omitempty"` // The namespace where manifests will be applied. Namespace string `json:"namespace,omitempty"` + // Automatically create a new namespace if it does not exist. + // Default is false. + AutoCreateNamespace bool `json:"autoCreateNamespace,omitempty"` + // TODO: Define fields for KubernetesDeploymentInput. } + +type KubernetesVariantLabel struct { + // The key of the label. + // Default is pipecd.dev/variant. + Key string `json:"key" default:"pipecd.dev/variant"` + // The label value for PRIMARY variant. + // Default is primary. + PrimaryValue string `json:"primaryValue" default:"primary"` + // The label value for CANARY variant. + // Default is canary. + CanaryValue string `json:"canaryValue" default:"canary"` + // The label value for BASELINE variant. + // Default is baseline. 
+ BaselineValue string `json:"baselineValue" default:"baseline"` +} + +type KubernetesDeployTargetConfig struct { + // The master URL of the kubernetes cluster. + // Empty means in-cluster. + MasterURL string `json:"masterURL,omitempty"` + // The path to the kubeconfig file. + // Empty means in-cluster. + KubeConfigPath string `json:"kubeConfigPath,omitempty"` + // Version of kubectl will be used. + KubectlVersion string `json:"kubectlVersion"` +} diff --git a/pkg/app/pipedv1/plugin/kubernetes/deployment/server.go b/pkg/app/pipedv1/plugin/kubernetes/deployment/server.go index 8487cfeb50..135f8eed73 100644 --- a/pkg/app/pipedv1/plugin/kubernetes/deployment/server.go +++ b/pkg/app/pipedv1/plugin/kubernetes/deployment/server.go @@ -15,15 +15,19 @@ package deployment import ( + "cmp" "context" + "encoding/json" "time" kubeconfig "github.com/pipe-cd/pipecd/pkg/app/pipedv1/plugin/kubernetes/config" "github.com/pipe-cd/pipecd/pkg/app/pipedv1/plugin/kubernetes/provider" + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/plugin/kubernetes/toolregistry" config "github.com/pipe-cd/pipecd/pkg/configv1" "github.com/pipe-cd/pipecd/pkg/model" "github.com/pipe-cd/pipecd/pkg/plugin/api/v1alpha1/deployment" - "github.com/pipe-cd/pipecd/pkg/regexpool" + "github.com/pipe-cd/pipecd/pkg/plugin/logpersister" + "github.com/pipe-cd/pipecd/pkg/plugin/signalhandler" "go.uber.org/zap" "google.golang.org/grpc" @@ -31,8 +35,18 @@ import ( "google.golang.org/grpc/status" ) +const ( + defaultKubectlVersion = "1.18.2" +) + +type toolClient interface { + InstallTool(ctx context.Context, name, version, script string) (string, error) +} + type toolRegistry interface { - InstallTool(ctx context.Context, name, version string) (path string, err error) + Kubectl(ctx context.Context, version string) (string, error) + Kustomize(ctx context.Context, version string) (string, error) + Helm(ctx context.Context, version string) (string, error) } type loader interface { @@ -51,23 +65,36 @@ type applier interface { 
ForceReplaceManifest(ctx context.Context, manifest provider.Manifest) error } +type logPersister interface { + StageLogPersister(deploymentID, stageID string) logpersister.StageLogPersister +} + type DeploymentService struct { deployment.UnimplementedDeploymentServiceServer - RegexPool *regexpool.Pool - Logger *zap.Logger - ToolRegistry toolRegistry - Loader loader + // this field is set with the plugin configuration + // the plugin configuration is sent from piped while initializing the plugin + pluginConfig *config.PipedPlugin + + logger *zap.Logger + toolRegistry toolRegistry + loader loader + logPersister logPersister } // NewDeploymentService creates a new planService. func NewDeploymentService( logger *zap.Logger, + toolClient toolClient, + logPersister logPersister, ) *DeploymentService { + toolRegistry := toolregistry.NewRegistry(toolClient) + return &DeploymentService{ - RegexPool: regexpool.DefaultPool(), - Logger: logger.Named("planner"), - ToolRegistry: nil, // TODO: set the tool registry + logger: logger.Named("planner"), + toolRegistry: toolRegistry, + loader: provider.NewLoader(toolRegistry), + logPersister: logPersister, } } @@ -95,7 +122,7 @@ func (a *DeploymentService) DetermineStrategy(ctx context.Context, request *depl return nil, status.Error(codes.Internal, err.Error()) } - strategy, summary := determineStrategy(runnings, targets, cfg.Spec.Workloads, a.Logger) + strategy, summary := determineStrategy(runnings, targets, cfg.Spec.Workloads, a.logger) return &deployment.DetermineStrategyResponse{ SyncStrategy: strategy, @@ -158,7 +185,10 @@ func (a *DeploymentService) FetchDefinedStages(context.Context, *deployment.Fetc } func (a *DeploymentService) loadManifests(ctx context.Context, deploy *model.Deployment, spec *kubeconfig.KubernetesApplicationSpec, deploymentSource *deployment.DeploymentSource) ([]provider.Manifest, error) { - manifests, err := a.Loader.LoadManifests(ctx, provider.LoaderInput{ + manifests, err := a.loader.LoadManifests(ctx, 
provider.LoaderInput{ + PipedID: deploy.GetPipedId(), + AppID: deploy.GetApplicationId(), + CommitHash: deploy.GetTrigger().GetCommit().GetHash(), AppName: deploy.GetApplicationName(), AppDir: deploymentSource.GetApplicationDirectory(), ConfigFilename: deploymentSource.GetApplicationConfigFilename(), @@ -175,3 +205,172 @@ func (a *DeploymentService) loadManifests(ctx context.Context, deploy *model.Dep return manifests, nil } + +// ExecuteStage performs stage-defined tasks. +// It returns stage status after execution without error. +// Error only be raised if the given stage is not supported. +func (a *DeploymentService) ExecuteStage(ctx context.Context, request *deployment.ExecuteStageRequest) (response *deployment.ExecuteStageResponse, _ error) { + lp := a.logPersister.StageLogPersister(request.GetInput().GetDeployment().GetId(), request.GetInput().GetStage().GetId()) + defer func() { + // When termination signal received and the stage is not completed yet, we should not mark the log persister as completed. + // This can occur when the piped is shutting down while the stage is still running. 
+ if !response.GetStatus().IsCompleted() && signalhandler.Terminated() { + return + } + lp.Complete(time.Minute) + }() + + switch request.GetInput().GetStage().GetName() { + case StageK8sSync.String(): + return &deployment.ExecuteStageResponse{ + Status: a.executeK8sSyncStage(ctx, lp, request.GetInput()), + }, nil + case StageK8sRollback.String(): + return &deployment.ExecuteStageResponse{ + Status: a.executeK8sRollbackStage(ctx, lp, request.GetInput()), + }, nil + default: + return nil, status.Error(codes.InvalidArgument, "unimplemented or unsupported stage") + } +} + +func (a *DeploymentService) executeK8sSyncStage(ctx context.Context, lp logpersister.StageLogPersister, input *deployment.ExecutePluginInput) model.StageStatus { + lp.Infof("Start syncing the deployment") + + cfg, err := config.DecodeYAML[*kubeconfig.KubernetesApplicationSpec](input.GetTargetDeploymentSource().GetApplicationConfig()) + if err != nil { + lp.Errorf("Failed while decoding application config (%v)", err) + return model.StageStatus_STAGE_FAILURE + } + + lp.Infof("Loading manifests at commit %s for handling", input.GetDeployment().GetTrigger().GetCommit().GetHash()) + manifests, err := a.loadManifests(ctx, input.GetDeployment(), cfg.Spec, input.GetTargetDeploymentSource()) + if err != nil { + lp.Errorf("Failed while loading manifests (%v)", err) + return model.StageStatus_STAGE_FAILURE + } + lp.Successf("Successfully loaded %d manifests", len(manifests)) + + // Because the loaded manifests are read-only + // we duplicate them to avoid updating the shared manifests data in cache. + // TODO: implement duplicateManifests function + + // When addVariantLabelToSelector is true, ensure that all workloads + // have the variant label in their selector. + var ( + variantLabel = cfg.Spec.VariantLabel.Key + primaryVariant = cfg.Spec.VariantLabel.PrimaryValue + ) + // TODO: handle cfg.Spec.QuickSync.AddVariantLabelToSelector + + // Add variant annotations to all manifests. 
+ for i := range manifests { + manifests[i].AddAnnotations(map[string]string{ + variantLabel: primaryVariant, + }) + } + + // TODO: implement annotateConfigHash to ensure restart of workloads when config changes + + // Get the deploy target config. + var deployTargetConfig kubeconfig.KubernetesDeployTargetConfig + deployTarget := a.pluginConfig.FindDeployTarget(input.GetDeployment().GetDeployTargets()[0]) // TODO: check if there is a deploy target + if err := json.Unmarshal(deployTarget.Config, &deployTargetConfig); err != nil { // TODO: do not unmarshal the config here, but in the initialization of the plugin + lp.Errorf("Failed while unmarshalling deploy target config (%v)", err) + return model.StageStatus_STAGE_FAILURE + } + + // Get the kubectl tool path. + kubectlPath, err := a.toolRegistry.Kubectl(ctx, cmp.Or(cfg.Spec.Input.KubectlVersion, deployTargetConfig.KubectlVersion, defaultKubectlVersion)) + if err != nil { + lp.Errorf("Failed while getting kubectl tool (%v)", err) + return model.StageStatus_STAGE_FAILURE + } + + // Create the applier for the target cluster. + applier := provider.NewApplier(provider.NewKubectl(kubectlPath), cfg.Spec.Input, deployTargetConfig, a.logger) + + // Start applying all manifests to add or update running resources. + if err := applyManifests(ctx, applier, manifests, cfg.Spec.Input.Namespace, lp); err != nil { + lp.Errorf("Failed while applying manifests (%v)", err) + return model.StageStatus_STAGE_FAILURE + } + + // TODO: implement prune resources + + return model.StageStatus_STAGE_SUCCESS +} + +func (a *DeploymentService) executeK8sRollbackStage(ctx context.Context, lp logpersister.StageLogPersister, input *deployment.ExecutePluginInput) model.StageStatus { + if input.GetDeployment().GetRunningCommitHash() == "" { + lp.Errorf("Unable to determine the last deployed commit to rollback. 
It seems this is the first deployment.") + return model.StageStatus_STAGE_FAILURE + } + + lp.Info("Start rolling back the deployment") + + cfg, err := config.DecodeYAML[*kubeconfig.KubernetesApplicationSpec](input.GetRunningDeploymentSource().GetApplicationConfig()) + if err != nil { + lp.Errorf("Failed while decoding application config (%v)", err) + return model.StageStatus_STAGE_FAILURE + } + + lp.Infof("Loading manifests at commit %s for handling", input.GetDeployment().GetRunningCommitHash()) + manifests, err := a.loadManifests(ctx, input.GetDeployment(), cfg.Spec, input.GetRunningDeploymentSource()) + if err != nil { + lp.Errorf("Failed while loading manifests (%v)", err) + return model.StageStatus_STAGE_FAILURE + } + lp.Successf("Successfully loaded %d manifests", len(manifests)) + + // Because the loaded manifests are read-only + // we duplicate them to avoid updating the shared manifests data in cache. + // TODO: implement duplicateManifests function + + // When addVariantLabelToSelector is true, ensure that all workloads + // have the variant label in their selector. + var ( + variantLabel = cfg.Spec.VariantLabel.Key + primaryVariant = cfg.Spec.VariantLabel.PrimaryValue + ) + // TODO: handle cfg.Spec.QuickSync.AddVariantLabelToSelector + + // Add variant annotations to all manifests. + for i := range manifests { + manifests[i].AddAnnotations(map[string]string{ + variantLabel: primaryVariant, + }) + } + + // TODO: implement annotateConfigHash to ensure restart of workloads when config changes + + // Get the deploy target config. 
+ var deployTargetConfig kubeconfig.KubernetesDeployTargetConfig + deployTarget := a.pluginConfig.FindDeployTarget(input.GetDeployment().GetDeployTargets()[0]) // TODO: check if there is a deploy target + if err := json.Unmarshal(deployTarget.Config, &deployTargetConfig); err != nil { // TODO: do not unmarshal the config here, but in the initialization of the plugin + lp.Errorf("Failed while unmarshalling deploy target config (%v)", err) + return model.StageStatus_STAGE_FAILURE + } + + // Get the kubectl tool path. + kubectlPath, err := a.toolRegistry.Kubectl(ctx, cmp.Or(cfg.Spec.Input.KubectlVersion, deployTargetConfig.KubectlVersion, defaultKubectlVersion)) + if err != nil { + lp.Errorf("Failed while getting kubectl tool (%v)", err) + return model.StageStatus_STAGE_FAILURE + } + + // Create the applier for the target cluster. + applier := provider.NewApplier(provider.NewKubectl(kubectlPath), cfg.Spec.Input, deployTargetConfig, a.logger) + + // Start applying all manifests to add or update running resources. + if err := applyManifests(ctx, applier, manifests, cfg.Spec.Input.Namespace, lp); err != nil { + lp.Errorf("Failed while applying manifests (%v)", err) + return model.StageStatus_STAGE_FAILURE + } + + // TODO: implement prune resources + // TODO: delete all resources of CANARY variant + // TODO: delete all resources of BASELINE variant + + return model.StageStatus_STAGE_SUCCESS +} diff --git a/pkg/app/pipedv1/plugin/kubernetes/provider/applier.go b/pkg/app/pipedv1/plugin/kubernetes/provider/applier.go new file mode 100644 index 0000000000..f7b168ce77 --- /dev/null +++ b/pkg/app/pipedv1/plugin/kubernetes/provider/applier.go @@ -0,0 +1,166 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package provider
+
+import (
+	"context"
+	"errors"
+
+	"go.uber.org/zap"
+
+	"github.com/pipe-cd/pipecd/pkg/app/pipedv1/plugin/kubernetes/config"
+)
+
+type kubectl interface {
+	Apply(ctx context.Context, kubeconfig, namespace string, manifest Manifest) error
+	Create(ctx context.Context, kubeconfig, namespace string, manifest Manifest) error
+	Replace(ctx context.Context, kubeconfig, namespace string, manifest Manifest) error
+	ForceReplace(ctx context.Context, kubeconfig, namespace string, manifest Manifest) error
+	Delete(ctx context.Context, kubeconfig, namespace string, key ResourceKey) error
+	Get(ctx context.Context, kubeconfig, namespace string, key ResourceKey) (Manifest, error)
+	CreateNamespace(ctx context.Context, kubeconfig, namespace string) error
+}
+
+type Applier struct {
+	kubectl kubectl
+
+	input        config.KubernetesDeploymentInput
+	deployTarget config.KubernetesDeployTargetConfig
+	logger       *zap.Logger
+}
+
+func NewApplier(kubectl kubectl, input config.KubernetesDeploymentInput, cp config.KubernetesDeployTargetConfig, logger *zap.Logger) *Applier {
+	return &Applier{
+		kubectl:      kubectl,
+		input:        input,
+		deployTarget: cp,
+		logger:       logger.Named("kubernetes-applier"),
+	}
+}
+
+// ApplyManifest applies the given manifest.
+func (a *Applier) ApplyManifest(ctx context.Context, manifest Manifest) error { + if a.input.AutoCreateNamespace { + err := a.kubectl.CreateNamespace( + ctx, + a.deployTarget.KubeConfigPath, + a.getNamespaceToRun(manifest.Key), + ) + if err != nil && !errors.Is(err, errResourceAlreadyExists) { + return err + } + } + + return a.kubectl.Apply( + ctx, + a.deployTarget.KubeConfigPath, + a.getNamespaceToRun(manifest.Key), + manifest, + ) +} + +// CreateManifest uses kubectl to create the given manifests. +func (a *Applier) CreateManifest(ctx context.Context, manifest Manifest) error { + if a.input.AutoCreateNamespace { + err := a.kubectl.CreateNamespace( + ctx, + a.deployTarget.KubeConfigPath, + a.getNamespaceToRun(manifest.Key), + ) + if err != nil && !errors.Is(err, errResourceAlreadyExists) { + return err + } + } + + return a.kubectl.Create( + ctx, + a.deployTarget.KubeConfigPath, + a.getNamespaceToRun(manifest.Key), + manifest, + ) +} + +// ReplaceManifest uses kubectl to replace the given manifests. +func (a *Applier) ReplaceManifest(ctx context.Context, manifest Manifest) error { + err := a.kubectl.Replace( + ctx, + a.deployTarget.KubeConfigPath, + a.getNamespaceToRun(manifest.Key), + manifest, + ) + if err == nil { + return nil + } + + if errors.Is(err, errorReplaceNotFound) { + return ErrNotFound + } + + return err +} + +// ForceReplaceManifest uses kubectl to forcefully replace the given manifests. +func (a *Applier) ForceReplaceManifest(ctx context.Context, manifest Manifest) error { + err := a.kubectl.ForceReplace( + ctx, + a.deployTarget.KubeConfigPath, + a.getNamespaceToRun(manifest.Key), + manifest, + ) + if err == nil { + return nil + } + + if errors.Is(err, errorReplaceNotFound) { + return ErrNotFound + } + + return err +} + +// Delete deletes the given resource from Kubernetes cluster. +// If the resource key is different, this returns ErrNotFound. 
+func (a *Applier) Delete(ctx context.Context, k ResourceKey) (err error) { + m, err := a.kubectl.Get( + ctx, + a.deployTarget.KubeConfigPath, + a.getNamespaceToRun(k), + k, + ) + + if err != nil { + return err + } + + if k.String() != m.Body.GetAnnotations()[LabelResourceKey] { + return ErrNotFound + } + + return a.kubectl.Delete( + ctx, + a.deployTarget.KubeConfigPath, + a.getNamespaceToRun(k), + k, + ) +} + +// getNamespaceToRun returns namespace used on kubectl apply/delete commands. +// priority: config.KubernetesDeploymentInput > kubernetes.ResourceKey +func (a *Applier) getNamespaceToRun(k ResourceKey) string { + if a.input.Namespace != "" { + return a.input.Namespace + } + return k.Namespace +} diff --git a/pkg/app/pipedv1/plugin/kubernetes/provider/applier_test.go b/pkg/app/pipedv1/plugin/kubernetes/provider/applier_test.go new file mode 100644 index 0000000000..3b414d2c02 --- /dev/null +++ b/pkg/app/pipedv1/plugin/kubernetes/provider/applier_test.go @@ -0,0 +1,593 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package provider + +import ( + "context" + "errors" + "testing" + + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/plugin/kubernetes/config" + "go.uber.org/zap" +) + +type mockKubectl struct { + ApplyFunc func(ctx context.Context, kubeconfig, namespace string, manifest Manifest) error + CreateFunc func(ctx context.Context, kubeconfig, namespace string, manifest Manifest) error + ReplaceFunc func(ctx context.Context, kubeconfig, namespace string, manifest Manifest) error + ForceReplaceFunc func(ctx context.Context, kubeconfig, namespace string, manifest Manifest) error + DeleteFunc func(ctx context.Context, kubeconfig, namespace string, key ResourceKey) error + GetFunc func(ctx context.Context, kubeconfig, namespace string, key ResourceKey) (Manifest, error) + CreateNamespaceFunc func(ctx context.Context, kubeconfig, namespace string) error +} + +var ( + errUnexpectedCall = errors.New("unexpected call") +) + +func (m *mockKubectl) Apply(ctx context.Context, kubeconfig, namespace string, manifest Manifest) error { + if m.ApplyFunc != nil { + return m.ApplyFunc(ctx, kubeconfig, namespace, manifest) + } + return errUnexpectedCall +} + +func (m *mockKubectl) Create(ctx context.Context, kubeconfig, namespace string, manifest Manifest) error { + if m.CreateFunc != nil { + return m.CreateFunc(ctx, kubeconfig, namespace, manifest) + } + return errUnexpectedCall +} + +func (m *mockKubectl) Replace(ctx context.Context, kubeconfig, namespace string, manifest Manifest) error { + if m.ReplaceFunc != nil { + return m.ReplaceFunc(ctx, kubeconfig, namespace, manifest) + } + return errUnexpectedCall +} + +func (m *mockKubectl) ForceReplace(ctx context.Context, kubeconfig, namespace string, manifest Manifest) error { + if m.ForceReplaceFunc != nil { + return m.ForceReplaceFunc(ctx, kubeconfig, namespace, manifest) + } + return errUnexpectedCall +} + +func (m *mockKubectl) Delete(ctx context.Context, kubeconfig, namespace string, key ResourceKey) error { + if m.DeleteFunc != nil { + 
return m.DeleteFunc(ctx, kubeconfig, namespace, key) + } + return errUnexpectedCall +} + +func (m *mockKubectl) Get(ctx context.Context, kubeconfig, namespace string, key ResourceKey) (Manifest, error) { + if m.GetFunc != nil { + return m.GetFunc(ctx, kubeconfig, namespace, key) + } + return Manifest{}, errUnexpectedCall +} + +func (m *mockKubectl) CreateNamespace(ctx context.Context, kubeconfig, namespace string) error { + if m.CreateNamespaceFunc != nil { + return m.CreateNamespaceFunc(ctx, kubeconfig, namespace) + } + return errUnexpectedCall +} + +func TestApplier_ApplyManifest(t *testing.T) { + t.Parallel() + + var ( + errNamespaceCreation = errors.New("namespace creation error") + errApply = errors.New("apply error") + ) + + testCases := []struct { + name string + autoCreateNamespace bool + createNamespaceErr error + applyErr error + expectedErr error + }{ + { + name: "successful apply without namespace creation", + autoCreateNamespace: false, + expectedErr: nil, + }, + { + name: "successful apply with namespace creation", + autoCreateNamespace: true, + expectedErr: nil, + }, + { + name: "namespace creation error", + autoCreateNamespace: true, + createNamespaceErr: errNamespaceCreation, + expectedErr: errNamespaceCreation, + }, + { + name: "apply error", + autoCreateNamespace: false, + applyErr: errApply, + expectedErr: errApply, + }, + { + name: "successful apply with existing namespace", + autoCreateNamespace: true, + createNamespaceErr: errResourceAlreadyExists, + expectedErr: nil, + }, + { + name: "apply error after successful namespace creation", + autoCreateNamespace: true, + applyErr: errApply, + expectedErr: errApply, + }, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + mockKubectl := &mockKubectl{ + CreateNamespaceFunc: func(ctx context.Context, kubeconfig, namespace string) error { + return tc.createNamespaceErr + }, + ApplyFunc: func(ctx context.Context, kubeconfig, namespace string, 
manifest Manifest) error { + return tc.applyErr + }, + } + + applier := NewApplier( + mockKubectl, + config.KubernetesDeploymentInput{ + AutoCreateNamespace: tc.autoCreateNamespace, + }, + config.KubernetesDeployTargetConfig{ + KubeConfigPath: "test-kubeconfig", + }, + zap.NewNop(), + ) + + manifest := Manifest{ + Key: ResourceKey{ + Namespace: "test-namespace", + }, + } + + err := applier.ApplyManifest(context.Background(), manifest) + if !errors.Is(err, tc.expectedErr) { + t.Errorf("expected error %v, got %v", tc.expectedErr, err) + } + }) + } +} + +func TestApplier_CreateManifest(t *testing.T) { + t.Parallel() + + var ( + errNamespaceCreation = errors.New("namespace creation error") + errCreate = errors.New("create error") + ) + + testCases := []struct { + name string + autoCreateNamespace bool + createNamespaceErr error + createErr error + expectedErr error + }{ + { + name: "successful create without namespace creation", + autoCreateNamespace: false, + expectedErr: nil, + }, + { + name: "successful create with namespace creation", + autoCreateNamespace: true, + expectedErr: nil, + }, + { + name: "namespace creation error", + autoCreateNamespace: true, + createNamespaceErr: errNamespaceCreation, + expectedErr: errNamespaceCreation, + }, + { + name: "create error", + autoCreateNamespace: false, + createErr: errCreate, + expectedErr: errCreate, + }, + { + name: "successful create with existing namespace", + autoCreateNamespace: true, + createNamespaceErr: errResourceAlreadyExists, + expectedErr: nil, + }, + { + name: "create error after successful namespace creation", + autoCreateNamespace: true, + createErr: errCreate, + expectedErr: errCreate, + }, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + mockKubectl := &mockKubectl{ + CreateNamespaceFunc: func(ctx context.Context, kubeconfig, namespace string) error { + return tc.createNamespaceErr + }, + CreateFunc: func(ctx context.Context, kubeconfig, 
namespace string, manifest Manifest) error { + return tc.createErr + }, + } + + applier := NewApplier( + mockKubectl, + config.KubernetesDeploymentInput{ + AutoCreateNamespace: tc.autoCreateNamespace, + }, + config.KubernetesDeployTargetConfig{ + KubeConfigPath: "test-kubeconfig", + }, + zap.NewNop(), + ) + + manifest := Manifest{ + Key: ResourceKey{ + Namespace: "test-namespace", + }, + } + + err := applier.CreateManifest(context.Background(), manifest) + if !errors.Is(err, tc.expectedErr) { + t.Errorf("expected error %v, got %v", tc.expectedErr, err) + } + }) + } +} + +func TestApplier_ReplaceManifest(t *testing.T) { + t.Parallel() + + var ( + errReplace = errors.New("replace error") + ) + + testCases := []struct { + name string + replaceErr error + expectedErr error + }{ + { + name: "successful replace", + expectedErr: nil, + }, + { + name: "replace error", + replaceErr: errReplace, + expectedErr: errReplace, + }, + { + name: "replace not found error", + replaceErr: errorReplaceNotFound, + expectedErr: ErrNotFound, + }, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + mockKubectl := &mockKubectl{ + ReplaceFunc: func(ctx context.Context, kubeconfig, namespace string, manifest Manifest) error { + return tc.replaceErr + }, + } + + applier := NewApplier( + mockKubectl, + config.KubernetesDeploymentInput{}, + config.KubernetesDeployTargetConfig{ + KubeConfigPath: "test-kubeconfig", + }, + zap.NewNop(), + ) + + manifest := Manifest{ + Key: ResourceKey{ + Namespace: "test-namespace", + }, + } + + err := applier.ReplaceManifest(context.Background(), manifest) + if !errors.Is(err, tc.expectedErr) { + t.Errorf("expected error %v, got %v", tc.expectedErr, err) + } + }) + } +} + +func TestApplier_ForceReplaceManifest(t *testing.T) { + t.Parallel() + + var ( + errForceReplace = errors.New("force replace error") + ) + + testCases := []struct { + name string + forceReplaceErr error + expectedErr error + }{ + { + name: 
"successful force replace", + expectedErr: nil, + }, + { + name: "force replace error", + forceReplaceErr: errForceReplace, + expectedErr: errForceReplace, + }, + { + name: "force replace not found error", + forceReplaceErr: errorReplaceNotFound, + expectedErr: ErrNotFound, + }, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + mockKubectl := &mockKubectl{ + ForceReplaceFunc: func(ctx context.Context, kubeconfig, namespace string, manifest Manifest) error { + return tc.forceReplaceErr + }, + } + + applier := NewApplier( + mockKubectl, + config.KubernetesDeploymentInput{}, + config.KubernetesDeployTargetConfig{ + KubeConfigPath: "test-kubeconfig", + }, + zap.NewNop(), + ) + + manifest := Manifest{ + Key: ResourceKey{ + Namespace: "test-namespace", + }, + } + + err := applier.ForceReplaceManifest(context.Background(), manifest) + if !errors.Is(err, tc.expectedErr) { + t.Errorf("expected error %v, got %v", tc.expectedErr, err) + } + }) + } +} + +func TestApplier_Delete(t *testing.T) { + t.Parallel() + + var ( + errGet = errors.New("get error") + errDelete = errors.New("delete error") + ) + + testCases := []struct { + name string + getErr error + deleteErr error + manifest string + resourceKey ResourceKey + expectedErr error + }{ + { + name: "successful delete", + manifest: ` +apiVersion: v1 +kind: ConfigMap +metadata: + name: test-config + annotations: + pipecd.dev/resource-key: "v1:ConfigMap::test-config" +`, + resourceKey: ResourceKey{ + APIVersion: "v1", + Kind: "ConfigMap", + Namespace: "", + Name: "test-config", + }, + expectedErr: nil, + }, + { + name: "get error", + getErr: errGet, + manifest: ` +apiVersion: v1 +kind: ConfigMap +metadata: + name: test-config + annotations: + pipecd.dev/resource-key: "v1:ConfigMap::test-config" +`, + resourceKey: ResourceKey{ + APIVersion: "v1", + Kind: "ConfigMap", + Namespace: "", + Name: "test-config", + }, + expectedErr: errGet, + }, + { + name: "delete error", + 
deleteErr: errDelete, + manifest: ` +apiVersion: v1 +kind: ConfigMap +metadata: + name: test-config + annotations: + pipecd.dev/resource-key: "v1:ConfigMap::test-config" +`, + resourceKey: ResourceKey{ + APIVersion: "v1", + Kind: "ConfigMap", + Namespace: "", + Name: "test-config", + }, + expectedErr: errDelete, + }, + { + name: "resource key mismatch", + manifest: ` +apiVersion: v1 +kind: ConfigMap +metadata: + name: test-config + annotations: + pipecd.dev/resource-key: "v1:ConfigMap::test-config" +`, + resourceKey: ResourceKey{ + APIVersion: "v1", + Kind: "ConfigMap", + Namespace: "", + Name: "another-config", + }, + expectedErr: ErrNotFound, + }, + { + name: "successful delete with namespace", + manifest: ` +apiVersion: v1 +kind: ConfigMap +metadata: + name: test-config + namespace: test-namespace + annotations: + pipecd.dev/resource-key: "v1:ConfigMap:test-namespace:test-config" +`, + resourceKey: ResourceKey{ + APIVersion: "v1", + Kind: "ConfigMap", + Namespace: "test-namespace", + Name: "test-config", + }, + expectedErr: nil, + }, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + manifests := mustParseManifests(t, tc.manifest) + manifest := manifests[0] + + mockKubectl := &mockKubectl{ + GetFunc: func(ctx context.Context, kubeconfig, namespace string, key ResourceKey) (Manifest, error) { + if tc.getErr != nil { + return Manifest{}, tc.getErr + } + return manifest, nil + }, + DeleteFunc: func(ctx context.Context, kubeconfig, namespace string, key ResourceKey) error { + return tc.deleteErr + }, + } + + applier := NewApplier( + mockKubectl, + config.KubernetesDeploymentInput{}, + config.KubernetesDeployTargetConfig{ + KubeConfigPath: "test-kubeconfig", + }, + zap.NewNop(), + ) + + err := applier.Delete(context.Background(), tc.resourceKey) + if !errors.Is(err, tc.expectedErr) { + t.Errorf("expected error %v, got %v", tc.expectedErr, err) + } + }) + } +} + +func TestApplier_getNamespaceToRun(t *testing.T) { 
+ t.Parallel() + + testCases := []struct { + name string + inputNamespace string + resourceKey ResourceKey + expected string + }{ + { + name: "input namespace is used", + inputNamespace: "input-namespace", + resourceKey: ResourceKey{ + Namespace: "resource-namespace", + }, + expected: "input-namespace", + }, + { + name: "resource key namespace is used", + inputNamespace: "", + resourceKey: ResourceKey{ + Namespace: "resource-namespace", + }, + expected: "resource-namespace", + }, + { + name: "both namespaces are empty", + inputNamespace: "", + resourceKey: ResourceKey{ + Namespace: "", + }, + expected: "", + }, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + applier := &Applier{ + input: config.KubernetesDeploymentInput{ + Namespace: tc.inputNamespace, + }, + } + + result := applier.getNamespaceToRun(tc.resourceKey) + if result != tc.expected { + t.Errorf("expected %v, got %v", tc.expected, result) + } + }) + } +} diff --git a/pkg/app/pipedv1/plugin/kubernetes/provider/kubectl.go b/pkg/app/pipedv1/plugin/kubernetes/provider/kubectl.go new file mode 100644 index 0000000000..66c195b06e --- /dev/null +++ b/pkg/app/pipedv1/plugin/kubernetes/provider/kubectl.go @@ -0,0 +1,253 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package provider + +import ( + "bytes" + "context" + "errors" + "fmt" + "os/exec" + "strings" +) + +var ( + errorReplaceNotFound = errors.New("specified resource is not found") + errorNotFoundLiteral = "Error from server (NotFound)" + errResourceAlreadyExists = errors.New("resource already exists") + errAlreadyExistsLiteral = "Error from server (AlreadyExists)" +) + +// Kubectl is a wrapper for kubectl command. +type Kubectl struct { + execPath string +} + +// NewKubectl creates a new Kubectl instance. +func NewKubectl(path string) *Kubectl { + return &Kubectl{ + execPath: path, + } +} + +// Apply runs kubectl apply command with the given manifest. +func (c *Kubectl) Apply(ctx context.Context, kubeconfig, namespace string, manifest Manifest) (err error) { + // TODO: record the metrics for the kubectl apply command. + + data, err := manifest.YamlBytes() + if err != nil { + return err + } + + args := make([]string, 0, 8) + if kubeconfig != "" { + args = append(args, "--kubeconfig", kubeconfig) + } + if namespace != "" { + args = append(args, "--namespace", namespace) + } + + args = append(args, "apply") + if annotation := manifest.Body.GetAnnotations()[LabelServerSideApply]; annotation == UseServerSideApply { + args = append(args, "--server-side") + } + args = append(args, "-f", "-") + + cmd := exec.CommandContext(ctx, c.execPath, args...) + r := bytes.NewReader(data) + cmd.Stdin = r + + out, err := cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("failed to apply: %s (%w)", string(out), err) + } + return nil +} + +// Create runs kubectl create command with the given manifest. +func (c *Kubectl) Create(ctx context.Context, kubeconfig, namespace string, manifest Manifest) (err error) { + // TODO: record the metrics for the kubectl create command. 
+ + data, err := manifest.YamlBytes() + if err != nil { + return err + } + + args := make([]string, 0, 7) + if kubeconfig != "" { + args = append(args, "--kubeconfig", kubeconfig) + } + if namespace != "" { + args = append(args, "--namespace", namespace) + } + args = append(args, "create", "-f", "-") + + cmd := exec.CommandContext(ctx, c.execPath, args...) + r := bytes.NewReader(data) + cmd.Stdin = r + + out, err := cmd.CombinedOutput() + if err != nil { + return fmt.Errorf("failed to create: %s (%w)", string(out), err) + } + return nil +} + +// Replace runs kubectl replace command with the given manifest. +func (c *Kubectl) Replace(ctx context.Context, kubeconfig, namespace string, manifest Manifest) (err error) { + // TODO: record the metrics for the kubectl replace command. + + data, err := manifest.YamlBytes() + if err != nil { + return err + } + + args := make([]string, 0, 7) + if kubeconfig != "" { + args = append(args, "--kubeconfig", kubeconfig) + } + if namespace != "" { + args = append(args, "--namespace", namespace) + } + args = append(args, "replace", "-f", "-") + + cmd := exec.CommandContext(ctx, c.execPath, args...) + r := bytes.NewReader(data) + cmd.Stdin = r + + out, err := cmd.CombinedOutput() + if err == nil { + return nil + } + + if strings.Contains(string(out), errorNotFoundLiteral) { + return errorReplaceNotFound + } + + return fmt.Errorf("failed to replace: %s (%w)", string(out), err) +} + +// ForceReplace runs kubectl replace --force command with the given manifest. +func (c *Kubectl) ForceReplace(ctx context.Context, kubeconfig, namespace string, manifest Manifest) (err error) { + // TODO: record the metrics for the kubectl replace --force command. 
+
+	data, err := manifest.YamlBytes()
+	if err != nil {
+		return err
+	}
+
+	args := make([]string, 0, 7)
+	if kubeconfig != "" {
+		args = append(args, "--kubeconfig", kubeconfig)
+	}
+	if namespace != "" {
+		args = append(args, "--namespace", namespace)
+	}
+	args = append(args, "replace", "--force", "-f", "-")
+
+	cmd := exec.CommandContext(ctx, c.execPath, args...)
+	r := bytes.NewReader(data)
+	cmd.Stdin = r
+
+	out, err := cmd.CombinedOutput()
+	if err == nil {
+		return nil
+	}
+
+	if strings.Contains(string(out), errorNotFoundLiteral) {
+		return errorReplaceNotFound
+	}
+
+	return fmt.Errorf("failed to replace: %s (%w)", string(out), err)
+}
+
+// Delete runs kubectl delete command with the given resource key.
+func (c *Kubectl) Delete(ctx context.Context, kubeconfig, namespace string, r ResourceKey) (err error) {
+	// TODO: record the metrics for the kubectl delete command.
+
+	args := make([]string, 0, 7)
+	if kubeconfig != "" {
+		args = append(args, "--kubeconfig", kubeconfig)
+	}
+	if namespace != "" {
+		args = append(args, "--namespace", namespace)
+	}
+	args = append(args, "delete", r.Kind, r.Name)
+
+	cmd := exec.CommandContext(ctx, c.execPath, args...)
+	out, err := cmd.CombinedOutput()
+
+	if strings.Contains(string(out), "(NotFound)") {
+		return fmt.Errorf("failed to delete: %s, (%w), %v", string(out), ErrNotFound, err)
+	}
+	if err != nil {
+		return fmt.Errorf("failed to delete: %s, %v", string(out), err)
+	}
+	return nil
+}
+
+// Get runs kubectl get with the given resource key.
+func (c *Kubectl) Get(ctx context.Context, kubeconfig, namespace string, r ResourceKey) (m Manifest, err error) {
+	// TODO: record the metrics for the kubectl get command.
+
+	args := make([]string, 0, 7)
+	if kubeconfig != "" {
+		args = append(args, "--kubeconfig", kubeconfig)
+	}
+	if namespace != "" {
+		args = append(args, "--namespace", namespace)
+	}
+	args = append(args, "get", r.Kind, r.Name, "-o", "yaml")
+
+	cmd := exec.CommandContext(ctx, c.execPath, args...)
+ out, err := cmd.CombinedOutput() + + if strings.Contains(string(out), "(NotFound)") { + return Manifest{}, fmt.Errorf("not found manifest %v, (%w), %v", r, ErrNotFound, err) + } + if err != nil { + return Manifest{}, fmt.Errorf("failed to get: %s, %v", string(out), err) + } + ms, err := ParseManifests(string(out)) + if err != nil { + return Manifest{}, fmt.Errorf("failed to parse manifests %v: %v", r, err) + } + if len(ms) == 0 { + return Manifest{}, fmt.Errorf("not found manifest %v, (%w)", r, ErrNotFound) + } + return ms[0], nil +} + +// CreateNamespace runs kubectl create namespace with the given namespace. +func (c *Kubectl) CreateNamespace(ctx context.Context, kubeconfig, namespace string) (err error) { + // TODO: record the metrics for the kubectl create namespace command. + + args := make([]string, 0, 7) + if kubeconfig != "" { + args = append(args, "--kubeconfig", kubeconfig) + } + args = append(args, "create", "namespace", namespace) + + cmd := exec.CommandContext(ctx, c.execPath, args...) + out, err := cmd.CombinedOutput() + + if strings.Contains(string(out), errAlreadyExistsLiteral) { + return errResourceAlreadyExists + } + if err != nil { + return fmt.Errorf("failed to create namespace: %s, %v", string(out), err) + } + return nil +} diff --git a/pkg/app/pipedv1/plugin/kubernetes/provider/kubernetes.go b/pkg/app/pipedv1/plugin/kubernetes/provider/kubernetes.go index 325af6d7b8..11402b398d 100644 --- a/pkg/app/pipedv1/plugin/kubernetes/provider/kubernetes.go +++ b/pkg/app/pipedv1/plugin/kubernetes/provider/kubernetes.go @@ -21,9 +21,22 @@ var ( ) const ( - LabelSyncReplace = "pipecd.dev/sync-by-replace" // Use replace instead of apply. - LabelForceSyncReplace = "pipecd.dev/force-sync-by-replace" // Use replace --force instead of apply. - AnnotationOrder = "pipecd.dev/order" // The order number of resource used to sort them before using. + // labels + LabelManagedBy = "pipecd.dev/managed-by" // Always be piped. 
+ LabelPiped = "pipecd.dev/piped" // The id of piped handling this application. + LabelApplication = "pipecd.dev/application" // The application this resource belongs to. + LabelSyncReplace = "pipecd.dev/sync-by-replace" // Use replace instead of apply. + LabelForceSyncReplace = "pipecd.dev/force-sync-by-replace" // Use replace --force instead of apply. + LabelServerSideApply = "pipecd.dev/server-side-apply" // Use server side apply instead of client side apply. + LabelCommitHash = "pipecd.dev/commit-hash" // Hash value of the deployed commit. + LabelResourceKey = "pipecd.dev/resource-key" // The resource key generated by apiVersion, namespace and name. e.g. apps/v1/Deployment/namespace/demo-app + LabelOriginalAPIVersion = "pipecd.dev/original-api-version" // The api version defined in git configuration. e.g. apps/v1 - UseReplaceEnabled = "enabled" + // annotations + AnnotationOrder = "pipecd.dev/order" // The order number of resource used to sort them before using. + + // label/annotation values + ManagedByPiped = "piped" + UseReplaceEnabled = "enabled" + UseServerSideApply = "true" ) diff --git a/pkg/app/pipedv1/plugin/kubernetes/provider/loader.go b/pkg/app/pipedv1/plugin/kubernetes/provider/loader.go index 2dcf8f0e87..c62f9ac2e7 100644 --- a/pkg/app/pipedv1/plugin/kubernetes/provider/loader.go +++ b/pkg/app/pipedv1/plugin/kubernetes/provider/loader.go @@ -42,6 +42,12 @@ const ( ) type LoaderInput struct { + // for annotations to manage the application live state. 
+ PipedID string + CommitHash string + AppID string + + // for templating manifests AppName string AppDir string ConfigFilename string @@ -71,11 +77,30 @@ type ToolRegistry interface { Helm(ctx context.Context, version string) (string, error) } +func NewLoader(registry ToolRegistry) *Loader { + return &Loader{ + toolRegistry: registry, + } +} + func (l *Loader) LoadManifests(ctx context.Context, input LoaderInput) (manifests []Manifest, err error) { defer func() { // Override namespace if set because ParseManifests does not parse it // if namespace is not explicitly specified in the manifests. setNamespace(manifests, input.Namespace) + + // Add builtin annotations for tracking application live state. + for i := range manifests { + manifests[i].AddAnnotations(map[string]string{ + LabelManagedBy: ManagedByPiped, + LabelPiped: input.PipedID, + LabelApplication: input.AppID, + LabelOriginalAPIVersion: manifests[i].Key.APIVersion, + LabelResourceKey: manifests[i].Key.String(), + LabelCommitHash: input.CommitHash, + }) + } + sortManifests(manifests) }() diff --git a/pkg/app/pipedv1/plugin/kubernetes/provider/loader_test.go b/pkg/app/pipedv1/plugin/kubernetes/provider/loader_test.go index 86e40b1842..15f61719f7 100644 --- a/pkg/app/pipedv1/plugin/kubernetes/provider/loader_test.go +++ b/pkg/app/pipedv1/plugin/kubernetes/provider/loader_test.go @@ -31,6 +31,15 @@ import ( "github.com/pipe-cd/pipecd/pkg/app/pipedv1/plugin/toolregistry/toolregistrytest" ) +func mustParseManifests(t *testing.T, data string) []Manifest { + t.Helper() + + manifests, err := ParseManifests(data) + require.NoError(t, err) + + return manifests +} + func TestParseManifests(t *testing.T) { tests := []struct { name string diff --git a/pkg/app/pipedv1/plugin/kubernetes/provider/manifest.go b/pkg/app/pipedv1/plugin/kubernetes/provider/manifest.go index 93b3cc7b9e..f082bc4a22 100644 --- a/pkg/app/pipedv1/plugin/kubernetes/provider/manifest.go +++ b/pkg/app/pipedv1/plugin/kubernetes/provider/manifest.go 
@@ -14,11 +14,14 @@ package provider -import "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" +import ( + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "sigs.k8s.io/yaml" +) // Manifest represents a Kubernetes resource manifest. type Manifest struct { - Key ResourceKey + Key ResourceKey Body *unstructured.Unstructured } @@ -27,3 +30,23 @@ func (m *Manifest) UnmarshalJSON(data []byte) error { m.Body = new(unstructured.Unstructured) return m.Body.UnmarshalJSON(data) } + +func (m *Manifest) YamlBytes() ([]byte, error) { + return yaml.Marshal(m.Body) +} + +func (m Manifest) AddAnnotations(annotations map[string]string) { + if len(annotations) == 0 { + return + } + + annos := m.Body.GetAnnotations() + if annos == nil { + m.Body.SetAnnotations(annotations) + return + } + for k, v := range annotations { + annos[k] = v + } + m.Body.SetAnnotations(annos) +} diff --git a/pkg/app/pipedv1/plugin/kubernetes/server.go b/pkg/app/pipedv1/plugin/kubernetes/server.go index 76103cb955..6d963493d8 100644 --- a/pkg/app/pipedv1/plugin/kubernetes/server.go +++ b/pkg/app/pipedv1/plugin/kubernetes/server.go @@ -16,10 +16,15 @@ package main import ( "context" + "net" + "strconv" "time" "github.com/pipe-cd/pipecd/pkg/app/pipedv1/plugin/kubernetes/deployment" + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/plugin/toolregistry" "github.com/pipe-cd/pipecd/pkg/cli" + "github.com/pipe-cd/pipecd/pkg/plugin/logpersister" + "github.com/pipe-cd/pipecd/pkg/plugin/pipedapi" "github.com/pipe-cd/pipecd/pkg/rpc" "github.com/spf13/cobra" "go.uber.org/zap" @@ -27,11 +32,12 @@ import ( ) type server struct { - apiPort int - gracePeriod time.Duration - tls bool - certFile string - keyFile string + apiPort int + pipedPluginServicePort int + gracePeriod time.Duration + tls bool + certFile string + keyFile string enableGRPCReflection bool } @@ -49,6 +55,8 @@ func NewServerCommand() *cobra.Command { } cmd.Flags().IntVar(&s.apiPort, "api-port", s.apiPort, "The port number used to run a grpc server for 
external apis.") + cmd.Flags().IntVar(&s.pipedPluginServicePort, "piped-plugin-service-port", s.pipedPluginServicePort, "The port number used to connect to the piped plugin service.") // TODO: we should discuss about the name of this flag, or we should use environment variable instead. + cmd.MarkFlagRequired("piped-plugin-service-port") cmd.Flags().DurationVar(&s.gracePeriod, "grace-period", s.gracePeriod, "How long to wait for graceful shutdown.") cmd.Flags().BoolVar(&s.tls, "tls", s.tls, "Whether running the gRPC server with TLS or not.") @@ -68,11 +76,19 @@ func (s *server) run(ctx context.Context, input cli.Input) (runErr error) { group, ctx := errgroup.WithContext(ctx) + pipedapiClient, err := pipedapi.NewClient(ctx, net.JoinHostPort("localhost", strconv.Itoa(s.pipedPluginServicePort)), nil) + if err != nil { + input.Logger.Error("failed to create piped plugin service client", zap.Error(err)) + return err + } + // Start a gRPC server for handling external API requests. { var ( service = deployment.NewDeploymentService( input.Logger, + toolregistry.NewToolRegistry(pipedapiClient), + logpersister.NewPersister(pipedapiClient, input.Logger), ) opts = []rpc.Option{ rpc.WithPort(s.apiPort), @@ -80,6 +96,7 @@ func (s *server) run(ctx context.Context, input cli.Input) (runErr error) { rpc.WithLogger(input.Logger), rpc.WithLogUnaryInterceptor(input.Logger), rpc.WithRequestValidationUnaryInterceptor(), + rpc.WithSignalHandlingUnaryInterceptor(), } ) if s.tls { diff --git a/pkg/app/pipedv1/plugin/toolregistry/toolregistry.go b/pkg/app/pipedv1/plugin/toolregistry/toolregistry.go index 4e674b79c9..debf54df2c 100644 --- a/pkg/app/pipedv1/plugin/toolregistry/toolregistry.go +++ b/pkg/app/pipedv1/plugin/toolregistry/toolregistry.go @@ -24,6 +24,12 @@ type ToolRegistry struct { client service.PluginServiceClient } +func NewToolRegistry(client service.PluginServiceClient) *ToolRegistry { + return &ToolRegistry{ + client: client, + } +} + func (r *ToolRegistry) InstallTool(ctx 
context.Context, name, version, script string) (path string, err error) { res, err := r.client.InstallTool(ctx, &service.InstallToolRequest{ Name: name, diff --git a/pkg/app/pipedv1/trigger/cache.go b/pkg/app/pipedv1/trigger/cache.go index 8ae227060a..16cd645d16 100644 --- a/pkg/app/pipedv1/trigger/cache.go +++ b/pkg/app/pipedv1/trigger/cache.go @@ -57,23 +57,22 @@ func (s *lastTriggeredCommitStore) Put(applicationID, commit string) error { } func (s *lastTriggeredCommitStore) getLastTriggeredDeployment(ctx context.Context, applicationID string) (*model.ApplicationDeploymentReference, error) { - var ( - err error - resp *pipedservice.GetApplicationMostRecentDeploymentResponse - retry = pipedservice.NewRetry(3) - req = &pipedservice.GetApplicationMostRecentDeploymentRequest{ - ApplicationId: applicationID, - Status: model.DeploymentStatus_DEPLOYMENT_PENDING, - } - ) + req := &pipedservice.GetApplicationMostRecentDeploymentRequest{ + ApplicationId: applicationID, + Status: model.DeploymentStatus_DEPLOYMENT_PENDING, + } - for retry.WaitNext(ctx) { - if resp, err = s.apiClient.GetApplicationMostRecentDeployment(ctx, req); err == nil { + d, err := pipedservice.NewRetry(3).Do(ctx, func() (interface{}, error) { + resp, err := s.apiClient.GetApplicationMostRecentDeployment(ctx, req) + if err == nil { return resp.Deployment, nil } - if !pipedservice.Retriable(err) { - return nil, err - } + return nil, pipedservice.NewRetriableErr(err) + }) + + if err != nil { + return nil, err } - return nil, err + + return d.(*model.ApplicationDeploymentReference), nil } diff --git a/pkg/app/pipedv1/trigger/deployment.go b/pkg/app/pipedv1/trigger/deployment.go index 9adb939467..f73ba30725 100644 --- a/pkg/app/pipedv1/trigger/deployment.go +++ b/pkg/app/pipedv1/trigger/deployment.go @@ -23,7 +23,7 @@ import ( "github.com/google/uuid" "github.com/pipe-cd/pipecd/pkg/app/server/service/pipedservice" - "github.com/pipe-cd/pipecd/pkg/config" + config "github.com/pipe-cd/pipecd/pkg/configv1" 
"github.com/pipe-cd/pipecd/pkg/git" "github.com/pipe-cd/pipecd/pkg/model" ) @@ -109,29 +109,26 @@ func buildDeployment( } func reportMostRecentlyTriggeredDeployment(ctx context.Context, client apiClient, d *model.Deployment) error { - var ( - err error - req = &pipedservice.ReportApplicationMostRecentDeploymentRequest{ - ApplicationId: d.ApplicationId, - Status: model.DeploymentStatus_DEPLOYMENT_PENDING, - Deployment: &model.ApplicationDeploymentReference{ - DeploymentId: d.Id, - Trigger: d.Trigger, - Summary: d.Summary, - Version: d.Version, - Versions: d.Versions, - StartedAt: d.CreatedAt, - CompletedAt: d.CompletedAt, - }, + req := &pipedservice.ReportApplicationMostRecentDeploymentRequest{ + ApplicationId: d.ApplicationId, + Status: model.DeploymentStatus_DEPLOYMENT_PENDING, + Deployment: &model.ApplicationDeploymentReference{ + DeploymentId: d.Id, + Trigger: d.Trigger, + Summary: d.Summary, + Version: d.Version, + Versions: d.Versions, + StartedAt: d.CreatedAt, + CompletedAt: d.CompletedAt, + }, + } + _, err := pipedservice.NewRetry(10).Do(ctx, func() (interface{}, error) { + _, err := client.ReportApplicationMostRecentDeployment(ctx, req) + if err != nil { + return nil, fmt.Errorf("failed to report most recent successful deployment: %w", err) } - retry = pipedservice.NewRetry(10) - ) + return nil, nil + }) - for retry.WaitNext(ctx) { - if _, err = client.ReportApplicationMostRecentDeployment(ctx, req); err == nil { - return nil - } - err = fmt.Errorf("failed to report most recent successful deployment: %w", err) - } return err } diff --git a/pkg/app/pipedv1/trigger/deployment_chain.go b/pkg/app/pipedv1/trigger/deployment_chain.go index 3f214dad93..1efc47fd28 100644 --- a/pkg/app/pipedv1/trigger/deployment_chain.go +++ b/pkg/app/pipedv1/trigger/deployment_chain.go @@ -19,7 +19,7 @@ import ( "fmt" "github.com/pipe-cd/pipecd/pkg/app/server/service/pipedservice" - "github.com/pipe-cd/pipecd/pkg/config" + config "github.com/pipe-cd/pipecd/pkg/configv1" 
"github.com/pipe-cd/pipecd/pkg/model" ) diff --git a/pkg/app/pipedv1/trigger/determiner.go b/pkg/app/pipedv1/trigger/determiner.go index 5968935269..3def2dae1f 100644 --- a/pkg/app/pipedv1/trigger/determiner.go +++ b/pkg/app/pipedv1/trigger/determiner.go @@ -23,7 +23,7 @@ import ( "go.uber.org/zap" "github.com/pipe-cd/pipecd/pkg/app/server/service/pipedservice" - "github.com/pipe-cd/pipecd/pkg/config" + config "github.com/pipe-cd/pipecd/pkg/configv1" "github.com/pipe-cd/pipecd/pkg/filematcher" "github.com/pipe-cd/pipecd/pkg/git" "github.com/pipe-cd/pipecd/pkg/model" diff --git a/pkg/app/pipedv1/trigger/trigger.go b/pkg/app/pipedv1/trigger/trigger.go index ee9c067fb8..0057394459 100644 --- a/pkg/app/pipedv1/trigger/trigger.go +++ b/pkg/app/pipedv1/trigger/trigger.go @@ -27,7 +27,7 @@ import ( "github.com/pipe-cd/pipecd/pkg/app/server/service/pipedservice" "github.com/pipe-cd/pipecd/pkg/cache/memorycache" - "github.com/pipe-cd/pipecd/pkg/config" + config "github.com/pipe-cd/pipecd/pkg/configv1" "github.com/pipe-cd/pipecd/pkg/git" "github.com/pipe-cd/pipecd/pkg/model" ) @@ -222,7 +222,7 @@ func (t *Trigger) checkRepoCandidates(ctx context.Context, repoID string, cs []c continue } - appCfg, err := config.LoadApplication(gitRepo.GetPath(), app.GitPath.GetApplicationConfigFilePath(), app.Kind) + appCfg, err := config.LoadApplication(gitRepo.GetPath(), app.GitPath.GetApplicationConfigFilePath()) if err != nil { t.logger.Error("failed to load application config file", zap.String("app", app.Name), diff --git a/pkg/app/server/service/pipedservice/service.go b/pkg/app/server/service/pipedservice/service.go index d8a79ed67c..46f3cbcaf3 100644 --- a/pkg/app/server/service/pipedservice/service.go +++ b/pkg/app/server/service/pipedservice/service.go @@ -47,6 +47,12 @@ func Retriable(err error) bool { } } +// NewRetriableErr returns a new backoff.Error for the given error. +// Based on the error code, it determines whether the error is retriable or not. 
+func NewRetriableErr(err error) *backoff.Error { + return backoff.NewError(err, Retriable(err)) +} + // NewRetry returns a new backoff.Retry for piped API caller. // 0s 997.867435ms 2.015381172s 3.485134345s 4.389600179s 18.118099328s 48.73058264s func NewRetry(maxRetries int) backoff.Retry { diff --git a/pkg/backoff/backoff_test.go b/pkg/backoff/backoff_test.go index 36bfbfc9d4..ad8f55a54c 100644 --- a/pkg/backoff/backoff_test.go +++ b/pkg/backoff/backoff_test.go @@ -138,6 +138,8 @@ func TestDo(t *testing.T) { cancel() } + defer cancel() + data, err := r.Do(ctx, tc.operation) assert.Equal(t, tc.expected, data) assert.Equal(t, tc.expectedErr, err) diff --git a/pkg/configv1/application.go b/pkg/configv1/application.go index 703673a7d6..4341eb5487 100644 --- a/pkg/configv1/application.go +++ b/pkg/configv1/application.go @@ -17,6 +17,8 @@ package config import ( "encoding/json" "fmt" + "os" + "path/filepath" "strings" "github.com/pipe-cd/pipecd/pkg/model" @@ -165,6 +167,18 @@ func (s GenericApplicationSpec) GetStage(index int32) (PipelineStage, bool) { return s.Pipeline.Stages[index], true } +func (s GenericApplicationSpec) GetStageByte(index int32) ([]byte, bool) { + stage, ok := s.GetStage(index) + if !ok { + return nil, false + } + b, err := json.Marshal(stage) + if err != nil { + return nil, false + } + return b, true +} + // HasStage checks if the given stage is included in the pipeline. 
func (s GenericApplicationSpec) HasStage(stage model.Stage) bool { if s.Pipeline == nil { @@ -552,3 +566,21 @@ func (dd *DriftDetection) Validate() error { } return nil } + +func LoadApplication(repoPath, configRelPath string) (*GenericApplicationSpec, error) { + absPath := filepath.Join(repoPath, configRelPath) + + cfg, err := LoadFromYAML[*GenericApplicationSpec](absPath) + if err != nil { + if os.IsNotExist(err) { + return nil, fmt.Errorf("application config file %s was not found in Git", configRelPath) + } + return nil, err + } + + if !cfg.Kind.IsApplicationKind() { + return nil, fmt.Errorf("invalid application kind in the application config file, got: %s", cfg.Kind) + } + + return cfg.Spec, nil +} diff --git a/pkg/configv1/config.go b/pkg/configv1/config.go index 700f405433..aa77978872 100644 --- a/pkg/configv1/config.go +++ b/pkg/configv1/config.go @@ -154,3 +154,12 @@ func DecodeYAML[T Spec[RT], RT any](data []byte) (*Config[T, RT], error) { } return c, nil } + +// This logic to ensure the file app.pipecd.yaml is valid with kind defined by pipedv0. +func (k Kind) IsApplicationKind() bool { + switch k { + case KindKubernetesApp, KindTerraformApp, KindLambdaApp, KindCloudRunApp, KindECSApp, KindApplication: + return true + } + return false +} diff --git a/pkg/configv1/piped.go b/pkg/configv1/piped.go index 7a48d732ad..b38a2e6dd4 100644 --- a/pkg/configv1/piped.go +++ b/pkg/configv1/piped.go @@ -19,6 +19,7 @@ import ( "encoding/json" "errors" "fmt" + "net/url" "os" "strings" @@ -70,6 +71,8 @@ type PipedSpec struct { CloudProviders []PipedPlatformProvider `json:"cloudProviders,omitempty"` // List of platform providers can be used by this piped. PlatformProviders []PipedPlatformProvider `json:"platformProviders,omitempty"` + // List of plugiin configs + Plugins []PipedPlugin `json:"plugins,omitempty"` // List of analysis providers can be used by this piped. 
AnalysisProviders []PipedAnalysisProvider `json:"analysisProviders,omitempty"` // Sending notification to Slack, Webhook… @@ -1288,3 +1291,52 @@ type PipedEventWatcherGitRepo struct { // This is prioritized if both includes and this one are given. Excludes []string `json:"excludes,omitempty"` } + +// PipedPlugin defines the plugin configuration for the piped. +type PipedPlugin struct { + // The name of the plugin. + Name string `json:"name"` + // Source to download the plugin binary. + URL string `json:"url"` + // The port which the plugin listens to. + Port int `json:"port"` + // The deploy target names. + DeployTargets []PipedDeployTarget `json:"deployTargets,omitempty"` +} + +// PipedDeployTarget defines the deploy target configuration for the piped. +type PipedDeployTarget struct { + // The name of the deploy target. + Name string `json:"name"` + // The labes of the deploy target. + Labels map[string]string `json:"labels,omitempty"` + // The configuration of the deploy target. + Config json.RawMessage `json:"config"` +} + +func (p *PipedPlugin) Validate() error { + if p.Name == "" { + return errors.New("name must be set") + } + if p.URL == "" { + return errors.New("url must be set") + } + u, err := url.Parse(p.URL) + if err != nil { + return fmt.Errorf("invalid plugin url: %w", err) + } + if u.Scheme != "file" && u.Scheme != "https" { + return errors.New("only file and https schemes are supported") + } + return nil +} + +// FindDeployTarget finds the deploy target by the given name. 
+func (p *PipedPlugin) FindDeployTarget(name string) *PipedDeployTarget { + for _, dt := range p.DeployTargets { + if dt.Name == name { + return &dt + } + } + return nil +} diff --git a/pkg/lifecycle/binary.go b/pkg/lifecycle/binary.go index e4f7ef2980..1087f5d449 100644 --- a/pkg/lifecycle/binary.go +++ b/pkg/lifecycle/binary.go @@ -70,8 +70,8 @@ func (c *Command) GracefulStop(period time.Duration) error { } } -func RunBinary(execPath string, args []string) (*Command, error) { - cmd, err := backoff.NewRetry(runBinaryRetryCount, backoff.NewConstant(5*time.Second)).Do(context.Background(), func() (interface{}, error) { +func RunBinary(ctx context.Context, execPath string, args []string) (*Command, error) { + cmd, err := backoff.NewRetry(runBinaryRetryCount, backoff.NewConstant(5*time.Second)).Do(ctx, func() (interface{}, error) { cmd := exec.Command(execPath, args...) cmd.Stdin = os.Stdin cmd.Stdout = os.Stdout diff --git a/pkg/lifecycle/binary_test.go b/pkg/lifecycle/binary_test.go index 892d08495d..28d8bdc958 100644 --- a/pkg/lifecycle/binary_test.go +++ b/pkg/lifecycle/binary_test.go @@ -15,12 +15,16 @@ package lifecycle import ( + "context" + "net/http" + "net/http/httptest" "strconv" "testing" "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "go.uber.org/zap/zaptest" ) func TestGracefulStopCommand(t *testing.T) { @@ -40,7 +44,7 @@ func TestGracefulStopCommand(t *testing.T) { for _, tc := range testcases { t.Run(tc.name, func(t *testing.T) { - cmd, err := RunBinary("sh", []string{"sleep", "1m"}) + cmd, err := RunBinary(context.TODO(), "sh", []string{"sleep", "1m"}) require.NoError(t, err) require.NotNil(t, cmd) @@ -71,7 +75,7 @@ func TestGracefulStopCommandResult(t *testing.T) { for _, tc := range testcases { t.Run(tc.name, func(t *testing.T) { - cmd, err := RunBinary("sh", []string{"-c", "exit " + strconv.Itoa(tc.exitCode)}) + cmd, err := RunBinary(context.TODO(), "sh", []string{"-c", "exit " + strconv.Itoa(tc.exitCode)}) 
require.NoError(t, err) require.NotNil(t, cmd) @@ -81,3 +85,42 @@ func TestGracefulStopCommandResult(t *testing.T) { }) } } + +func TestDownloadBinary(t *testing.T) { + server := httpTestServer() + defer server.Close() + + logger := zaptest.NewLogger(t) + destDir := t.TempDir() + destFile := "test-binary" + + t.Run("successful download", func(t *testing.T) { + url := server.URL + "/binary" + path, err := DownloadBinary(url, destDir, destFile, logger) + require.NoError(t, err) + assert.FileExists(t, path) + }) + + t.Run("file already exists", func(t *testing.T) { + url := server.URL + "/binary" + path, err := DownloadBinary(url, destDir, destFile, logger) + require.NoError(t, err) + assert.FileExists(t, path) + + // Try downloading again, should not error and file should still exist + path, err = DownloadBinary(url, destDir, destFile, logger) + require.NoError(t, err) + assert.FileExists(t, path) + }) +} + +func httpTestServer() *httptest.Server { + return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.URL.Path == "/binary" { + w.WriteHeader(http.StatusOK) + w.Write([]byte("test binary content")) + } else { + w.WriteHeader(http.StatusNotFound) + } + })) +} diff --git a/pkg/model/application.pb.go b/pkg/model/application.pb.go index 650192a142..340f0d12a6 100644 --- a/pkg/model/application.pb.go +++ b/pkg/model/application.pb.go @@ -117,6 +117,8 @@ type Application struct { // This must be one of the provider names registered in the piped. // TODO: Add validation for this field. PlatformProvider string `protobuf:"bytes,15,opt,name=platform_provider,json=platformProvider,proto3" json:"platform_provider,omitempty"` + // The names of deploy taget where to deploy this application. + DeployTargets []string `protobuf:"bytes,16,rep,name=deploy_targets,json=deployTargets,proto3" json:"deploy_targets,omitempty"` // Additional description about application. 
Description string `protobuf:"bytes,9,opt,name=description,proto3" json:"description,omitempty"` // Custom attributes to identify applications. @@ -235,6 +237,13 @@ func (x *Application) GetPlatformProvider() string { return "" } +func (x *Application) GetDeployTargets() []string { + if x != nil { + return x.DeployTargets + } + return nil +} + func (x *Application) GetDescription() string { if x != nil { return x.Description @@ -506,7 +515,7 @@ var file_pkg_model_application_proto_rawDesc = []byte{ 0x6b, 0x67, 0x2f, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1a, 0x70, 0x6b, 0x67, 0x2f, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2f, 0x64, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x22, 0xea, 0x07, 0x0a, 0x0b, 0x41, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6f, 0x22, 0x91, 0x08, 0x0a, 0x0b, 0x41, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x17, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, @@ -528,94 +537,97 @@ var file_pkg_model_application_proto_rawDesc = []byte{ 0x69, 0x64, 0x65, 0x72, 0x12, 0x2b, 0x0a, 0x11, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x5f, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, - 0x72, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0x36, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x0a, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x6d, 0x6f, 0x64, 0x65, 
0x6c, 0x2e, 0x41, 0x70, 0x70, 0x6c, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, - 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x74, 0x0a, 0x23, 0x6d, - 0x6f, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x6c, 0x79, 0x5f, 0x73, 0x75, 0x63, - 0x63, 0x65, 0x73, 0x73, 0x66, 0x75, 0x6c, 0x5f, 0x64, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, - 0x6e, 0x74, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x6d, 0x6f, 0x64, 0x65, 0x6c, - 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x70, 0x6c, - 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, - 0x20, 0x6d, 0x6f, 0x73, 0x74, 0x52, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x6c, 0x79, 0x53, 0x75, 0x63, - 0x63, 0x65, 0x73, 0x73, 0x66, 0x75, 0x6c, 0x44, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, - 0x74, 0x12, 0x72, 0x0a, 0x22, 0x6d, 0x6f, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x63, 0x65, 0x6e, 0x74, - 0x6c, 0x79, 0x5f, 0x74, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x65, 0x64, 0x5f, 0x64, 0x65, 0x70, - 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, - 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x44, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x66, 0x65, 0x72, - 0x65, 0x6e, 0x63, 0x65, 0x52, 0x1f, 0x6d, 0x6f, 0x73, 0x74, 0x52, 0x65, 0x63, 0x65, 0x6e, 0x74, - 0x6c, 0x79, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x65, 0x64, 0x44, 0x65, 0x70, 0x6c, 0x6f, - 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x3a, 0x0a, 0x0a, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x73, 0x74, - 0x61, 0x74, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6d, 0x6f, 0x64, 0x65, - 0x6c, 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x79, 0x6e, - 0x63, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x09, 0x73, 0x79, 0x6e, 0x63, 0x53, 0x74, 0x61, 0x74, - 
0x65, 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x69, 0x6e, 0x67, 0x18, 0x0e, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x64, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x69, 0x6e, 0x67, 0x12, - 0x26, 0x0a, 0x0a, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x62, 0x20, - 0x01, 0x28, 0x03, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x22, 0x02, 0x28, 0x00, 0x52, 0x09, 0x64, 0x65, - 0x6c, 0x65, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x64, 0x65, 0x6c, 0x65, 0x74, - 0x65, 0x64, 0x18, 0x63, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, - 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x64, 0x20, - 0x01, 0x28, 0x08, 0x52, 0x08, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x26, 0x0a, - 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x65, 0x20, 0x01, 0x28, - 0x03, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x22, 0x02, 0x20, 0x00, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, - 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x26, 0x0a, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, - 0x5f, 0x61, 0x74, 0x18, 0x66, 0x20, 0x01, 0x28, 0x03, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x22, 0x02, - 0x20, 0x00, 0x52, 0x09, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x1a, 0x39, 0x0a, - 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, - 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, - 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x4a, 0x04, 0x08, 0x03, 0x10, 0x04, 0x22, 0xe6, - 0x01, 0x0a, 0x14, 0x41, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x79, - 0x6e, 0x63, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x3e, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, 0x2e, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2e, - 0x41, 0x70, 0x70, 0x6c, 0x69, 0x63, 
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x79, 0x6e, 0x63, 0x53, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, - 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x68, 0x6f, 0x72, 0x74, - 0x5f, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, - 0x68, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, - 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x72, 0x65, 0x61, 0x73, - 0x6f, 0x6e, 0x12, 0x2c, 0x0a, 0x12, 0x68, 0x65, 0x61, 0x64, 0x5f, 0x64, 0x65, 0x70, 0x6c, 0x6f, - 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, - 0x68, 0x65, 0x61, 0x64, 0x44, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x49, 0x64, - 0x12, 0x25, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x03, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x22, 0x02, 0x20, 0x00, 0x52, 0x09, 0x74, 0x69, - 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0xf1, 0x02, 0x0a, 0x1e, 0x41, 0x70, 0x70, 0x6c, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, - 0x74, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x2c, 0x0a, 0x0d, 0x64, 0x65, - 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x0c, 0x64, 0x65, 0x70, 0x6c, - 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x3c, 0x0a, 0x07, 0x74, 0x72, 0x69, 0x67, - 0x67, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6d, 0x6f, 0x64, 0x65, - 0x6c, 0x2e, 0x44, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x54, 0x72, 0x69, 0x67, - 0x67, 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x07, 0x74, - 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 
0x6d, 0x6d, 0x61, 0x72, - 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, - 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x27, 0x0a, 0x0f, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0e, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x46, 0x69, 0x6c, 0x65, 0x6e, - 0x61, 0x6d, 0x65, 0x12, 0x32, 0x0a, 0x08, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, - 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2e, 0x41, 0x72, - 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x76, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x26, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, - 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x03, 0x42, 0x07, 0xfa, 0x42, 0x04, - 0x22, 0x02, 0x20, 0x00, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, - 0x2a, 0x0a, 0x0c, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, - 0x0f, 0x20, 0x01, 0x28, 0x03, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x22, 0x02, 0x28, 0x00, 0x52, 0x0b, - 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x41, 0x74, 0x2a, 0x64, 0x0a, 0x15, 0x41, - 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, - 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x59, 0x4e, 0x43, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, - 0x09, 0x44, 0x45, 0x50, 0x4c, 0x4f, 0x59, 0x49, 0x4e, 0x47, 0x10, 0x02, 0x12, 0x0f, 0x0a, 0x0b, - 0x4f, 0x55, 0x54, 0x5f, 0x4f, 0x46, 0x5f, 0x53, 0x59, 0x4e, 0x43, 0x10, 0x03, 0x12, 0x12, 0x0a, - 0x0e, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, 0x43, 0x4f, 0x4e, 0x46, 0x49, 0x47, 0x10, - 0x04, 0x42, 
0x25, 0x5a, 0x23, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, - 0x70, 0x69, 0x70, 0x65, 0x2d, 0x63, 0x64, 0x2f, 0x70, 0x69, 0x70, 0x65, 0x63, 0x64, 0x2f, 0x70, - 0x6b, 0x67, 0x2f, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x72, 0x12, 0x25, 0x0a, 0x0e, 0x64, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x5f, 0x74, 0x61, 0x72, 0x67, + 0x65, 0x74, 0x73, 0x18, 0x10, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x64, 0x65, 0x70, 0x6c, 0x6f, + 0x79, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x36, 0x0a, 0x06, 0x6c, 0x61, + 0x62, 0x65, 0x6c, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x6d, 0x6f, 0x64, + 0x65, 0x6c, 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4c, + 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, + 0x6c, 0x73, 0x12, 0x74, 0x0a, 0x23, 0x6d, 0x6f, 0x73, 0x74, 0x5f, 0x72, 0x65, 0x63, 0x65, 0x6e, + 0x74, 0x6c, 0x79, 0x5f, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x66, 0x75, 0x6c, 0x5f, 0x64, + 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x25, 0x2e, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x66, + 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x20, 0x6d, 0x6f, 0x73, 0x74, 0x52, 0x65, 0x63, 0x65, + 0x6e, 0x74, 0x6c, 0x79, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x66, 0x75, 0x6c, 0x44, 0x65, + 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x72, 0x0a, 0x22, 0x6d, 0x6f, 0x73, 0x74, + 0x5f, 0x72, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x6c, 0x79, 0x5f, 0x74, 0x72, 0x69, 0x67, 0x67, 0x65, + 0x72, 0x65, 0x64, 0x5f, 0x64, 0x65, 0x70, 0x6c, 
0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x0c, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2e, 0x41, 0x70, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, + 0x6e, 0x74, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x1f, 0x6d, 0x6f, 0x73, + 0x74, 0x52, 0x65, 0x63, 0x65, 0x6e, 0x74, 0x6c, 0x79, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, + 0x65, 0x64, 0x44, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x3a, 0x0a, 0x0a, + 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1b, 0x2e, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x09, 0x73, + 0x79, 0x6e, 0x63, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x65, 0x70, 0x6c, + 0x6f, 0x79, 0x69, 0x6e, 0x67, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x64, 0x65, 0x70, + 0x6c, 0x6f, 0x79, 0x69, 0x6e, 0x67, 0x12, 0x26, 0x0a, 0x0a, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x64, 0x5f, 0x61, 0x74, 0x18, 0x62, 0x20, 0x01, 0x28, 0x03, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x22, + 0x02, 0x28, 0x00, 0x52, 0x09, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x18, + 0x0a, 0x07, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x18, 0x63, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x07, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x69, 0x73, 0x61, + 0x62, 0x6c, 0x65, 0x64, 0x18, 0x64, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x64, 0x69, 0x73, 0x61, + 0x62, 0x6c, 0x65, 0x64, 0x12, 0x26, 0x0a, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, + 0x61, 0x74, 0x18, 0x65, 0x20, 0x01, 0x28, 0x03, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x22, 0x02, 0x20, + 0x00, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x26, 0x0a, 0x0a, + 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x66, 0x20, 0x01, 
0x28, 0x03, + 0x42, 0x07, 0xfa, 0x42, 0x04, 0x22, 0x02, 0x20, 0x00, 0x52, 0x09, 0x75, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x64, 0x41, 0x74, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x4a, + 0x04, 0x08, 0x03, 0x10, 0x04, 0x22, 0xe6, 0x01, 0x0a, 0x14, 0x41, 0x70, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x3e, + 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1c, + 0x2e, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x42, 0x08, 0xfa, 0x42, + 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x21, + 0x0a, 0x0c, 0x73, 0x68, 0x6f, 0x72, 0x74, 0x5f, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x68, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x61, 0x73, 0x6f, + 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x2c, 0x0a, 0x12, 0x68, 0x65, 0x61, + 0x64, 0x5f, 0x64, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x68, 0x65, 0x61, 0x64, 0x44, 0x65, 0x70, 0x6c, 0x6f, + 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x22, + 0x02, 0x20, 0x00, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0xf1, + 0x02, 0x0a, 0x1e, 0x41, 
0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x65, + 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, + 0x65, 0x12, 0x2c, 0x0a, 0x0d, 0x64, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x5f, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, + 0x01, 0x52, 0x0c, 0x64, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, + 0x3c, 0x0a, 0x07, 0x74, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x18, 0x2e, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2e, 0x44, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, + 0x65, 0x6e, 0x74, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, + 0x01, 0x02, 0x10, 0x01, 0x52, 0x07, 0x74, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x12, 0x18, 0x0a, + 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, + 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x12, 0x27, 0x0a, 0x0f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x66, 0x69, 0x6c, 0x65, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x46, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x32, 0x0a, 0x08, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6d, + 0x6f, 0x64, 0x65, 0x6c, 0x2e, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x56, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x26, + 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x0e, 0x20, 0x01, + 0x28, 0x03, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x22, 0x02, 0x20, 0x00, 0x52, 0x09, 0x73, 0x74, 0x61, + 0x72, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x2a, 0x0a, 0x0c, 
0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, + 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x03, 0x42, 0x07, 0xfa, 0x42, + 0x04, 0x22, 0x02, 0x28, 0x00, 0x52, 0x0b, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, + 0x41, 0x74, 0x2a, 0x64, 0x0a, 0x15, 0x41, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x55, + 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x59, 0x4e, 0x43, + 0x45, 0x44, 0x10, 0x01, 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x45, 0x50, 0x4c, 0x4f, 0x59, 0x49, 0x4e, + 0x47, 0x10, 0x02, 0x12, 0x0f, 0x0a, 0x0b, 0x4f, 0x55, 0x54, 0x5f, 0x4f, 0x46, 0x5f, 0x53, 0x59, + 0x4e, 0x43, 0x10, 0x03, 0x12, 0x12, 0x0a, 0x0e, 0x49, 0x4e, 0x56, 0x41, 0x4c, 0x49, 0x44, 0x5f, + 0x43, 0x4f, 0x4e, 0x46, 0x49, 0x47, 0x10, 0x04, 0x42, 0x25, 0x5a, 0x23, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x69, 0x70, 0x65, 0x2d, 0x63, 0x64, 0x2f, 0x70, + 0x69, 0x70, 0x65, 0x63, 0x64, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/pkg/model/application.proto b/pkg/model/application.proto index 1d0d5c0394..9184f23de7 100644 --- a/pkg/model/application.proto +++ b/pkg/model/application.proto @@ -43,6 +43,8 @@ message Application { // This must be one of the provider names registered in the piped. // TODO: Add validation for this field. string platform_provider = 15; + // The names of deploy taget where to deploy this application. + repeated string deploy_targets = 16; // Additional description about application. string description = 9; // Custom attributes to identify applications. 
diff --git a/pkg/model/deployment.pb.go b/pkg/model/deployment.pb.go index 52e7c48f0a..cadb2db58e 100644 --- a/pkg/model/deployment.pb.go +++ b/pkg/model/deployment.pb.go @@ -247,6 +247,8 @@ type Deployment struct { // The name of platform provider where to deploy this application. // This must be one of the provider names registered in the piped. PlatformProvider string `protobuf:"bytes,11,opt,name=platform_provider,json=platformProvider,proto3" json:"platform_provider,omitempty"` + // The names of deploy taget where to deploy this application. + DeployTargets []string `protobuf:"bytes,12,rep,name=deploy_targets,json=deployTargets,proto3" json:"deploy_targets,omitempty"` // Custom attributes to identify applications. Labels map[string]string `protobuf:"bytes,10,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` Trigger *DeploymentTrigger `protobuf:"bytes,20,opt,name=trigger,proto3" json:"trigger,omitempty"` @@ -372,6 +374,13 @@ func (x *Deployment) GetPlatformProvider() string { return "" } +func (x *Deployment) GetDeployTargets() []string { + if x != nil { + return x.DeployTargets + } + return nil +} + func (x *Deployment) GetLabels() map[string]string { if x != nil { return x.Labels @@ -836,7 +845,7 @@ var file_pkg_model_deployment_proto_rawDesc = []byte{ 0x64, 0x65, 0x6c, 0x1a, 0x17, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x16, 0x70, 0x6b, 0x67, 0x2f, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x9e, 0x0a, 0x0a, 0x0a, 0x44, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, + 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc5, 0x0a, 0x0a, 0x0a, 0x44, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x17, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 
0x02, 0x69, 0x64, 0x12, 0x2e, 0x0a, 0x0e, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x02, @@ -862,161 +871,163 @@ var file_pkg_model_deployment_proto_rawDesc = []byte{ 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x2b, 0x0a, 0x11, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x5f, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, - 0x72, 0x6d, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x35, 0x0a, 0x06, 0x6c, 0x61, - 0x62, 0x65, 0x6c, 0x73, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x6d, 0x6f, 0x64, - 0x65, 0x6c, 0x2e, 0x44, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x4c, 0x61, - 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, - 0x73, 0x12, 0x3c, 0x0a, 0x07, 0x74, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x18, 0x14, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2e, 0x44, 0x65, 0x70, 0x6c, 0x6f, - 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, - 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x07, 0x74, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x12, - 0x18, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x16, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, - 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x17, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x12, 0x32, 0x0a, 0x08, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, - 0x18, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2e, 0x41, 0x72, - 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x76, - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x2e, 0x0a, 0x13, 0x72, 0x75, 0x6e, 0x6e, 0x69, - 0x6e, 0x67, 0x5f, 0x63, 
0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x15, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, - 0x6d, 0x69, 0x74, 0x48, 0x61, 0x73, 0x68, 0x12, 0x36, 0x0a, 0x17, 0x72, 0x75, 0x6e, 0x6e, 0x69, - 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x3c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x15, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, - 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x46, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x12, - 0x39, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x1e, 0x20, 0x01, 0x28, 0x0e, 0x32, - 0x17, 0x2e, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2e, 0x44, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, - 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, - 0x10, 0x01, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x5f, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x1f, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0c, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, - 0x2c, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x67, 0x65, 0x73, 0x18, 0x20, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x14, 0x2e, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2e, 0x50, 0x69, 0x70, 0x65, 0x6c, 0x69, 0x6e, 0x65, - 0x53, 0x74, 0x61, 0x67, 0x65, 0x52, 0x06, 0x73, 0x74, 0x61, 0x67, 0x65, 0x73, 0x12, 0x3b, 0x0a, - 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x21, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x1f, 0x2e, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2e, 0x44, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, - 0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, - 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x2e, 0x0a, 0x13, 0x64, 0x65, - 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, - 0x64, 0x18, 0x28, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x64, 
0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, - 0x65, 0x6e, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x49, 0x64, 0x12, 0x3f, 0x0a, 0x1c, 0x64, 0x65, - 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x62, - 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x29, 0x20, 0x01, 0x28, 0x0d, - 0x52, 0x19, 0x64, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x43, 0x68, 0x61, 0x69, - 0x6e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x2a, 0x0a, 0x0c, 0x63, - 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x64, 0x20, 0x01, 0x28, - 0x03, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x22, 0x02, 0x28, 0x00, 0x52, 0x0b, 0x63, 0x6f, 0x6d, 0x70, - 0x6c, 0x65, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x26, 0x0a, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, - 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x65, 0x20, 0x01, 0x28, 0x03, 0x42, 0x07, 0xfa, 0x42, 0x04, - 0x22, 0x02, 0x28, 0x00, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, - 0x26, 0x0a, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x66, 0x20, - 0x01, 0x28, 0x03, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x22, 0x02, 0x28, 0x00, 0x52, 0x09, 0x75, 0x70, - 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, - 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, - 0x38, 0x01, 0x1a, 0x3b, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, - 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x4a, - 
0x04, 0x08, 0x04, 0x10, 0x05, 0x22, 0xee, 0x01, 0x0a, 0x11, 0x44, 0x65, 0x70, 0x6c, 0x6f, 0x79, - 0x6d, 0x65, 0x6e, 0x74, 0x54, 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x12, 0x2f, 0x0a, 0x06, 0x63, - 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x6d, 0x6f, - 0x64, 0x65, 0x6c, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, - 0x01, 0x02, 0x10, 0x01, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x12, 0x1c, 0x0a, 0x09, - 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x09, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x65, 0x72, 0x12, 0x25, 0x0a, 0x09, 0x74, 0x69, - 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x42, 0x07, 0xfa, - 0x42, 0x04, 0x22, 0x02, 0x20, 0x00, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, - 0x70, 0x12, 0x38, 0x0a, 0x0d, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x73, 0x74, 0x72, 0x61, 0x74, 0x65, - 0x67, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x13, 0x2e, 0x6d, 0x6f, 0x64, 0x65, 0x6c, - 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x52, 0x0c, 0x73, - 0x79, 0x6e, 0x63, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x12, 0x29, 0x0a, 0x10, 0x73, - 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x5f, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x73, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x53, - 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x22, 0xe2, 0x04, 0x0a, 0x0d, 0x50, 0x69, 0x70, 0x65, 0x6c, - 0x69, 0x6e, 0x65, 0x53, 0x74, 0x61, 0x67, 0x65, 0x12, 0x17, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x02, 0x69, - 0x64, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, - 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, - 0x0a, 0x04, 0x64, 0x65, 0x73, 0x63, 
0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x64, 0x65, - 0x73, 0x63, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x05, 0x52, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x22, 0x0a, 0x0a, 0x70, 0x72, 0x65, 0x64, - 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, - 0x52, 0x0a, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, - 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, - 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x73, 0x12, 0x1c, 0x0a, 0x07, 0x76, 0x69, 0x73, 0x69, - 0x62, 0x6c, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x07, 0x76, - 0x69, 0x73, 0x69, 0x62, 0x6c, 0x65, 0x12, 0x34, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x18, 0x08, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x12, 0x2e, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2e, 0x53, - 0x74, 0x61, 0x67, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, - 0x01, 0x02, 0x10, 0x01, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x23, 0x0a, 0x0d, - 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x09, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x61, 0x73, 0x6f, - 0x6e, 0x12, 0x3e, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x0a, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2e, 0x50, 0x69, 0x70, 0x65, - 0x6c, 0x69, 0x6e, 0x65, 0x53, 0x74, 0x61, 0x67, 0x65, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x72, 0x6d, 0x50, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x12, 0x25, 0x0a, 0x0e, 0x64, 0x65, + 0x70, 0x6c, 0x6f, 0x79, 0x5f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x0c, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x0d, 0x64, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, + 0x73, 0x12, 0x35, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 
0x0a, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x1d, 0x2e, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2e, 0x44, 0x65, 0x70, 0x6c, 0x6f, 0x79, + 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x3c, 0x0a, 0x07, 0x74, 0x72, 0x69, 0x67, + 0x67, 0x65, 0x72, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6d, 0x6f, 0x64, 0x65, + 0x6c, 0x2e, 0x44, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x54, 0x72, 0x69, 0x67, + 0x67, 0x65, 0x72, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x07, 0x74, + 0x72, 0x69, 0x67, 0x67, 0x65, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, + 0x79, 0x18, 0x16, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, + 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x17, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x32, 0x0a, 0x08, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x18, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6d, + 0x6f, 0x64, 0x65, 0x6c, 0x2e, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x56, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x2e, + 0x0a, 0x13, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, + 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x15, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x72, 0x75, 0x6e, + 0x6e, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x48, 0x61, 0x73, 0x68, 0x12, 0x36, + 0x0a, 0x17, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x3c, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x15, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x46, 0x69, + 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x18, 0x1e, 
0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2e, 0x44, + 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x42, + 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x72, 0x65, 0x61, 0x73, + 0x6f, 0x6e, 0x18, 0x1f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x2c, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x67, 0x65, 0x73, + 0x18, 0x20, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2e, 0x50, + 0x69, 0x70, 0x65, 0x6c, 0x69, 0x6e, 0x65, 0x53, 0x74, 0x61, 0x67, 0x65, 0x52, 0x06, 0x73, 0x74, + 0x61, 0x67, 0x65, 0x73, 0x12, 0x3b, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x18, 0x21, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2e, 0x44, + 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, - 0x61, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x74, 0x72, 0x69, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x75, - 0x6e, 0x74, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0c, 0x72, 0x65, 0x74, 0x72, 0x69, 0x65, - 0x64, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x6f, 0x6c, 0x6c, 0x62, 0x61, - 0x63, 0x6b, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x6f, 0x6c, 0x6c, 0x62, 0x61, - 0x63, 0x6b, 0x12, 0x2a, 0x0a, 0x0c, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x5f, - 0x61, 0x74, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x03, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x22, 0x02, 0x28, + 0x61, 0x12, 0x2e, 0x0a, 0x13, 0x64, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x5f, + 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x28, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, + 0x64, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, 
0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x49, + 0x64, 0x12, 0x3f, 0x0a, 0x1c, 0x64, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x5f, + 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x69, 0x6e, 0x64, 0x65, + 0x78, 0x18, 0x29, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x19, 0x64, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, + 0x65, 0x6e, 0x74, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x49, 0x6e, 0x64, + 0x65, 0x78, 0x12, 0x2a, 0x0a, 0x0c, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x5f, + 0x61, 0x74, 0x18, 0x64, 0x20, 0x01, 0x28, 0x03, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x22, 0x02, 0x28, 0x00, 0x52, 0x0b, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x26, - 0x0a, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x0e, 0x20, 0x01, - 0x28, 0x03, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x22, 0x02, 0x20, 0x00, 0x52, 0x09, 0x63, 0x72, 0x65, + 0x0a, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x65, 0x20, 0x01, + 0x28, 0x03, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x22, 0x02, 0x28, 0x00, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x26, 0x0a, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, - 0x64, 0x5f, 0x61, 0x74, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x03, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x22, - 0x02, 0x20, 0x00, 0x52, 0x09, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x1a, 0x3b, - 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, - 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, - 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xe7, 0x01, 0x0a, 0x06, - 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x12, 0x1b, 0x0a, 0x04, 0x68, 0x61, 0x73, 0x68, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x68, - 
0x61, 0x73, 0x68, 0x12, 0x21, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x07, 0x6d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x1f, 0x0a, 0x06, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, - 0x06, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x12, 0x1f, 0x0a, 0x06, 0x62, 0x72, 0x61, 0x6e, 0x63, - 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, - 0x52, 0x06, 0x62, 0x72, 0x61, 0x6e, 0x63, 0x68, 0x12, 0x21, 0x0a, 0x0c, 0x70, 0x75, 0x6c, 0x6c, - 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, - 0x70, 0x75, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x75, - 0x72, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x12, 0x26, 0x0a, - 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x0e, 0x20, 0x01, 0x28, - 0x03, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x22, 0x02, 0x20, 0x00, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, - 0x74, 0x65, 0x64, 0x41, 0x74, 0x2a, 0xc1, 0x01, 0x0a, 0x10, 0x44, 0x65, 0x70, 0x6c, 0x6f, 0x79, - 0x6d, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x16, 0x0a, 0x12, 0x44, 0x45, - 0x50, 0x4c, 0x4f, 0x59, 0x4d, 0x45, 0x4e, 0x54, 0x5f, 0x50, 0x45, 0x4e, 0x44, 0x49, 0x4e, 0x47, - 0x10, 0x00, 0x12, 0x16, 0x0a, 0x12, 0x44, 0x45, 0x50, 0x4c, 0x4f, 0x59, 0x4d, 0x45, 0x4e, 0x54, - 0x5f, 0x50, 0x4c, 0x41, 0x4e, 0x4e, 0x45, 0x44, 0x10, 0x01, 0x12, 0x16, 0x0a, 0x12, 0x44, 0x45, - 0x50, 0x4c, 0x4f, 0x59, 0x4d, 0x45, 0x4e, 0x54, 0x5f, 0x52, 0x55, 0x4e, 0x4e, 0x49, 0x4e, 0x47, - 0x10, 0x02, 0x12, 0x1b, 0x0a, 0x17, 0x44, 0x45, 0x50, 0x4c, 0x4f, 0x59, 0x4d, 0x45, 0x4e, 0x54, - 0x5f, 0x52, 0x4f, 0x4c, 0x4c, 0x49, 0x4e, 0x47, 0x5f, 0x42, 0x41, 0x43, 0x4b, 0x10, 0x03, 0x12, - 0x16, 0x0a, 0x12, 0x44, 0x45, 0x50, 
0x4c, 0x4f, 0x59, 0x4d, 0x45, 0x4e, 0x54, 0x5f, 0x53, 0x55, - 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x04, 0x12, 0x16, 0x0a, 0x12, 0x44, 0x45, 0x50, 0x4c, 0x4f, - 0x59, 0x4d, 0x45, 0x4e, 0x54, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x55, 0x52, 0x45, 0x10, 0x05, 0x12, - 0x18, 0x0a, 0x14, 0x44, 0x45, 0x50, 0x4c, 0x4f, 0x59, 0x4d, 0x45, 0x4e, 0x54, 0x5f, 0x43, 0x41, - 0x4e, 0x43, 0x45, 0x4c, 0x4c, 0x45, 0x44, 0x10, 0x06, 0x2a, 0x9b, 0x01, 0x0a, 0x0b, 0x53, 0x74, - 0x61, 0x67, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x19, 0x0a, 0x15, 0x53, 0x54, 0x41, - 0x47, 0x45, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x53, 0x54, 0x41, 0x52, 0x54, 0x45, 0x44, 0x5f, 0x59, - 0x45, 0x54, 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x53, 0x54, 0x41, 0x47, 0x45, 0x5f, 0x52, 0x55, - 0x4e, 0x4e, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x11, 0x0a, 0x0d, 0x53, 0x54, 0x41, 0x47, 0x45, - 0x5f, 0x53, 0x55, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x02, 0x12, 0x11, 0x0a, 0x0d, 0x53, 0x54, - 0x41, 0x47, 0x45, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x55, 0x52, 0x45, 0x10, 0x03, 0x12, 0x13, 0x0a, - 0x0f, 0x53, 0x54, 0x41, 0x47, 0x45, 0x5f, 0x43, 0x41, 0x4e, 0x43, 0x45, 0x4c, 0x4c, 0x45, 0x44, - 0x10, 0x04, 0x12, 0x11, 0x0a, 0x0d, 0x53, 0x54, 0x41, 0x47, 0x45, 0x5f, 0x53, 0x4b, 0x49, 0x50, - 0x50, 0x45, 0x44, 0x10, 0x05, 0x12, 0x10, 0x0a, 0x0c, 0x53, 0x54, 0x41, 0x47, 0x45, 0x5f, 0x45, - 0x58, 0x49, 0x54, 0x45, 0x44, 0x10, 0x06, 0x2a, 0x4e, 0x0a, 0x0b, 0x54, 0x72, 0x69, 0x67, 0x67, - 0x65, 0x72, 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x0d, 0x0a, 0x09, 0x4f, 0x4e, 0x5f, 0x43, 0x4f, 0x4d, - 0x4d, 0x49, 0x54, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x4f, 0x4e, 0x5f, 0x43, 0x4f, 0x4d, 0x4d, - 0x41, 0x4e, 0x44, 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, 0x4f, 0x4e, 0x5f, 0x4f, 0x55, 0x54, 0x5f, - 0x4f, 0x46, 0x5f, 0x53, 0x59, 0x4e, 0x43, 0x10, 0x02, 0x12, 0x0c, 0x0a, 0x08, 0x4f, 0x4e, 0x5f, - 0x43, 0x48, 0x41, 0x49, 0x4e, 0x10, 0x03, 0x42, 0x25, 0x5a, 0x23, 0x67, 0x69, 0x74, 0x68, 0x75, - 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x69, 0x70, 0x65, 0x2d, 0x63, 
0x64, 0x2f, 0x70, 0x69, - 0x70, 0x65, 0x63, 0x64, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x62, 0x06, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x64, 0x5f, 0x61, 0x74, 0x18, 0x66, 0x20, 0x01, 0x28, 0x03, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x22, + 0x02, 0x28, 0x00, 0x52, 0x09, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x1a, 0x39, + 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3b, 0x0a, 0x0d, 0x4d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x4a, 0x04, 0x08, 0x04, 0x10, 0x05, 0x22, 0xee, 0x01, 0x0a, + 0x11, 0x44, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x54, 0x72, 0x69, 0x67, 0x67, + 0x65, 0x72, 0x12, 0x2f, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x69, + 0x74, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x06, 0x63, 0x6f, 0x6d, + 0x6d, 0x69, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x65, 0x72, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x65, + 0x72, 0x12, 0x25, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x03, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x22, 0x02, 0x20, 0x00, 0x52, 0x09, 0x74, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x38, 0x0a, 0x0d, 
0x73, 0x79, 0x6e, 0x63, + 0x5f, 0x73, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x13, 0x2e, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2e, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x74, 0x72, 0x61, + 0x74, 0x65, 0x67, 0x79, 0x52, 0x0c, 0x73, 0x79, 0x6e, 0x63, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, + 0x67, 0x79, 0x12, 0x29, 0x0a, 0x10, 0x73, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x5f, 0x73, + 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x73, 0x74, + 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x22, 0xe2, 0x04, + 0x0a, 0x0d, 0x50, 0x69, 0x70, 0x65, 0x6c, 0x69, 0x6e, 0x65, 0x53, 0x74, 0x61, 0x67, 0x65, 0x12, + 0x17, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, + 0x72, 0x02, 0x10, 0x01, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x65, 0x73, 0x63, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x64, 0x65, 0x73, 0x63, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e, 0x64, + 0x65, 0x78, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x12, + 0x22, 0x0a, 0x0a, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x08, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0a, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, + 0x6e, 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x73, 0x18, + 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x73, 0x12, + 0x1c, 0x0a, 0x07, 0x76, 0x69, 0x73, 0x69, 0x62, 0x6c, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, + 0x42, 0x02, 0x18, 0x01, 0x52, 0x07, 0x76, 0x69, 0x73, 0x69, 0x62, 0x6c, 0x65, 0x12, 0x34, 0x0a, + 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x12, 0x2e, + 0x6d, 0x6f, 
0x64, 0x65, 0x6c, 0x2e, 0x53, 0x74, 0x61, 0x67, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x82, 0x01, 0x02, 0x10, 0x01, 0x52, 0x06, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x5f, 0x72, 0x65, + 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x3e, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x6d, 0x6f, 0x64, + 0x65, 0x6c, 0x2e, 0x50, 0x69, 0x70, 0x65, 0x6c, 0x69, 0x6e, 0x65, 0x53, 0x74, 0x61, 0x67, 0x65, + 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, + 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x74, 0x72, + 0x69, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x0c, 0x72, 0x65, 0x74, 0x72, 0x69, 0x65, 0x64, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1a, 0x0a, + 0x08, 0x72, 0x6f, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x08, 0x72, 0x6f, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x12, 0x2a, 0x0a, 0x0c, 0x63, 0x6f, 0x6d, + 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x03, 0x42, + 0x07, 0xfa, 0x42, 0x04, 0x22, 0x02, 0x28, 0x00, 0x52, 0x0b, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, + 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x26, 0x0a, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, + 0x5f, 0x61, 0x74, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x03, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x22, 0x02, + 0x20, 0x00, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x26, 0x0a, + 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x0f, 0x20, 0x01, 0x28, + 0x03, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x22, 0x02, 0x20, 0x00, 0x52, 0x09, 0x75, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x64, 0x41, 0x74, 0x1a, 0x3b, 0x0a, 
0x0d, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, + 0x38, 0x01, 0x22, 0xe7, 0x01, 0x0a, 0x06, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x12, 0x1b, 0x0a, + 0x04, 0x68, 0x61, 0x73, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, + 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x68, 0x61, 0x73, 0x68, 0x12, 0x21, 0x0a, 0x07, 0x6d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, 0x42, 0x04, + 0x72, 0x02, 0x10, 0x01, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x1f, 0x0a, + 0x06, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xfa, + 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x06, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x12, 0x1f, + 0x0a, 0x06, 0x62, 0x72, 0x61, 0x6e, 0x63, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, + 0xfa, 0x42, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x06, 0x62, 0x72, 0x61, 0x6e, 0x63, 0x68, 0x12, + 0x21, 0x0a, 0x0c, 0x70, 0x75, 0x6c, 0x6c, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x70, 0x75, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x75, 0x72, 0x6c, 0x12, 0x26, 0x0a, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, + 0x61, 0x74, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x03, 0x42, 0x07, 0xfa, 0x42, 0x04, 0x22, 0x02, 0x20, + 0x00, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x2a, 0xc1, 0x01, 0x0a, + 0x10, 0x44, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x12, 0x16, 0x0a, 0x12, 0x44, 0x45, 0x50, 0x4c, 0x4f, 0x59, 0x4d, 0x45, 0x4e, 
0x54, 0x5f, + 0x50, 0x45, 0x4e, 0x44, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x16, 0x0a, 0x12, 0x44, 0x45, 0x50, + 0x4c, 0x4f, 0x59, 0x4d, 0x45, 0x4e, 0x54, 0x5f, 0x50, 0x4c, 0x41, 0x4e, 0x4e, 0x45, 0x44, 0x10, + 0x01, 0x12, 0x16, 0x0a, 0x12, 0x44, 0x45, 0x50, 0x4c, 0x4f, 0x59, 0x4d, 0x45, 0x4e, 0x54, 0x5f, + 0x52, 0x55, 0x4e, 0x4e, 0x49, 0x4e, 0x47, 0x10, 0x02, 0x12, 0x1b, 0x0a, 0x17, 0x44, 0x45, 0x50, + 0x4c, 0x4f, 0x59, 0x4d, 0x45, 0x4e, 0x54, 0x5f, 0x52, 0x4f, 0x4c, 0x4c, 0x49, 0x4e, 0x47, 0x5f, + 0x42, 0x41, 0x43, 0x4b, 0x10, 0x03, 0x12, 0x16, 0x0a, 0x12, 0x44, 0x45, 0x50, 0x4c, 0x4f, 0x59, + 0x4d, 0x45, 0x4e, 0x54, 0x5f, 0x53, 0x55, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x04, 0x12, 0x16, + 0x0a, 0x12, 0x44, 0x45, 0x50, 0x4c, 0x4f, 0x59, 0x4d, 0x45, 0x4e, 0x54, 0x5f, 0x46, 0x41, 0x49, + 0x4c, 0x55, 0x52, 0x45, 0x10, 0x05, 0x12, 0x18, 0x0a, 0x14, 0x44, 0x45, 0x50, 0x4c, 0x4f, 0x59, + 0x4d, 0x45, 0x4e, 0x54, 0x5f, 0x43, 0x41, 0x4e, 0x43, 0x45, 0x4c, 0x4c, 0x45, 0x44, 0x10, 0x06, + 0x2a, 0x9b, 0x01, 0x0a, 0x0b, 0x53, 0x74, 0x61, 0x67, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x12, 0x19, 0x0a, 0x15, 0x53, 0x54, 0x41, 0x47, 0x45, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x53, 0x54, + 0x41, 0x52, 0x54, 0x45, 0x44, 0x5f, 0x59, 0x45, 0x54, 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x53, + 0x54, 0x41, 0x47, 0x45, 0x5f, 0x52, 0x55, 0x4e, 0x4e, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x11, + 0x0a, 0x0d, 0x53, 0x54, 0x41, 0x47, 0x45, 0x5f, 0x53, 0x55, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, + 0x02, 0x12, 0x11, 0x0a, 0x0d, 0x53, 0x54, 0x41, 0x47, 0x45, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x55, + 0x52, 0x45, 0x10, 0x03, 0x12, 0x13, 0x0a, 0x0f, 0x53, 0x54, 0x41, 0x47, 0x45, 0x5f, 0x43, 0x41, + 0x4e, 0x43, 0x45, 0x4c, 0x4c, 0x45, 0x44, 0x10, 0x04, 0x12, 0x11, 0x0a, 0x0d, 0x53, 0x54, 0x41, + 0x47, 0x45, 0x5f, 0x53, 0x4b, 0x49, 0x50, 0x50, 0x45, 0x44, 0x10, 0x05, 0x12, 0x10, 0x0a, 0x0c, + 0x53, 0x54, 0x41, 0x47, 0x45, 0x5f, 0x45, 0x58, 0x49, 0x54, 0x45, 0x44, 0x10, 0x06, 0x2a, 0x4e, + 0x0a, 0x0b, 0x54, 0x72, 
0x69, 0x67, 0x67, 0x65, 0x72, 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x0d, 0x0a, + 0x09, 0x4f, 0x4e, 0x5f, 0x43, 0x4f, 0x4d, 0x4d, 0x49, 0x54, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, + 0x4f, 0x4e, 0x5f, 0x43, 0x4f, 0x4d, 0x4d, 0x41, 0x4e, 0x44, 0x10, 0x01, 0x12, 0x12, 0x0a, 0x0e, + 0x4f, 0x4e, 0x5f, 0x4f, 0x55, 0x54, 0x5f, 0x4f, 0x46, 0x5f, 0x53, 0x59, 0x4e, 0x43, 0x10, 0x02, + 0x12, 0x0c, 0x0a, 0x08, 0x4f, 0x4e, 0x5f, 0x43, 0x48, 0x41, 0x49, 0x4e, 0x10, 0x03, 0x42, 0x25, + 0x5a, 0x23, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x69, 0x70, + 0x65, 0x2d, 0x63, 0x64, 0x2f, 0x70, 0x69, 0x70, 0x65, 0x63, 0x64, 0x2f, 0x70, 0x6b, 0x67, 0x2f, + 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/pkg/model/deployment.proto b/pkg/model/deployment.proto index ebc334612b..3fc8910014 100644 --- a/pkg/model/deployment.proto +++ b/pkg/model/deployment.proto @@ -74,7 +74,8 @@ message Deployment { // The name of platform provider where to deploy this application. // This must be one of the provider names registered in the piped. string platform_provider = 11; - + // The names of deploy taget where to deploy this application. + repeated string deploy_targets = 12; // Custom attributes to identify applications. map labels = 10; diff --git a/pkg/plugin/api/v1alpha1/deployment/api.pb.go b/pkg/plugin/api/v1alpha1/deployment/api.pb.go index b1a2fea2c6..cb32583344 100644 --- a/pkg/plugin/api/v1alpha1/deployment/api.pb.go +++ b/pkg/plugin/api/v1alpha1/deployment/api.pb.go @@ -630,12 +630,10 @@ type PlanPluginInput struct { // The deployment to build a plan for. Deployment *model.Deployment `protobuf:"bytes,1,opt,name=deployment,proto3" json:"deployment,omitempty"` - // The configuration of plugin that handles the deployment. - PluginConfig []byte `protobuf:"bytes,2,opt,name=plugin_config,json=pluginConfig,proto3" json:"plugin_config,omitempty"` // The running deployment source. 
- RunningDeploymentSource *DeploymentSource `protobuf:"bytes,3,opt,name=running_deployment_source,json=runningDeploymentSource,proto3" json:"running_deployment_source,omitempty"` + RunningDeploymentSource *DeploymentSource `protobuf:"bytes,2,opt,name=running_deployment_source,json=runningDeploymentSource,proto3" json:"running_deployment_source,omitempty"` // The target deployment source. - TargetDeploymentSource *DeploymentSource `protobuf:"bytes,4,opt,name=target_deployment_source,json=targetDeploymentSource,proto3" json:"target_deployment_source,omitempty"` + TargetDeploymentSource *DeploymentSource `protobuf:"bytes,3,opt,name=target_deployment_source,json=targetDeploymentSource,proto3" json:"target_deployment_source,omitempty"` } func (x *PlanPluginInput) Reset() { @@ -677,13 +675,6 @@ func (x *PlanPluginInput) GetDeployment() *model.Deployment { return nil } -func (x *PlanPluginInput) GetPluginConfig() []byte { - if x != nil { - return x.PluginConfig - } - return nil -} - func (x *PlanPluginInput) GetRunningDeploymentSource() *DeploymentSource { if x != nil { return x.RunningDeploymentSource @@ -707,8 +698,8 @@ type ExecutePluginInput struct { Deployment *model.Deployment `protobuf:"bytes,1,opt,name=deployment,proto3" json:"deployment,omitempty"` // The stage to execute. Stage *model.PipelineStage `protobuf:"bytes,2,opt,name=stage,proto3" json:"stage,omitempty"` - // The configuration of plugin that handles the deployment. - PluginConfig []byte `protobuf:"bytes,3,opt,name=plugin_config,json=pluginConfig,proto3" json:"plugin_config,omitempty"` + // The configuration of stage. + StageConfig []byte `protobuf:"bytes,3,opt,name=stage_config,json=stageConfig,proto3" json:"stage_config,omitempty"` // The running deployment source. RunningDeploymentSource *DeploymentSource `protobuf:"bytes,4,opt,name=running_deployment_source,json=runningDeploymentSource,proto3" json:"running_deployment_source,omitempty"` // The target deployment source. 
@@ -761,9 +752,9 @@ func (x *ExecutePluginInput) GetStage() *model.PipelineStage { return nil } -func (x *ExecutePluginInput) GetPluginConfig() []byte { +func (x *ExecutePluginInput) GetStageConfig() []byte { if x != nil { - return x.PluginConfig + return x.StageConfig } return nil } @@ -1045,129 +1036,127 @@ var file_pkg_plugin_api_v1alpha1_deployment_api_proto_rawDesc = []byte{ 0x32, 0x12, 0x2e, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2e, 0x53, 0x74, 0x61, 0x67, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0xd5, 0x02, 0x0a, 0x0f, 0x50, 0x6c, 0x61, 0x6e, 0x50, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0xb0, 0x02, 0x0a, 0x0f, 0x50, 0x6c, 0x61, 0x6e, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x3b, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2e, 0x44, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0a, 0x64, 0x65, 0x70, - 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x6c, 0x75, 0x67, 0x69, - 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, - 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x70, 0x0a, 0x19, - 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, - 0x6e, 0x74, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x34, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x64, 0x65, - 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, - 0x70, 0x68, 0x61, 0x31, 0x2e, 0x44, 0x65, 0x70, 0x6c, 0x6f, 
0x79, 0x6d, 0x65, 0x6e, 0x74, 0x53, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x17, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x44, 0x65, - 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x6e, - 0x0a, 0x18, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x64, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, - 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x34, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x64, - 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, - 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x44, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, - 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x16, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x44, 0x65, - 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x22, 0x8e, - 0x03, 0x0a, 0x12, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, - 0x49, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x3b, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, - 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x6d, 0x6f, 0x64, 0x65, - 0x6c, 0x2e, 0x44, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x42, 0x08, 0xfa, 0x42, - 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, - 0x6e, 0x74, 0x12, 0x34, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x14, 0x2e, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2e, 0x50, 0x69, 0x70, 0x65, 0x6c, 0x69, - 0x6e, 0x65, 0x53, 0x74, 0x61, 0x67, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, - 0x01, 0x52, 0x05, 0x73, 0x74, 0x61, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x6c, 0x75, 0x67, - 0x69, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, - 0x0c, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x70, 0x0a, - 
0x19, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x5f, 0x64, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, - 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x34, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x64, - 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, - 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x44, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, - 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x17, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x44, - 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, - 0x6e, 0x0a, 0x18, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x64, 0x65, 0x70, 0x6c, 0x6f, 0x79, - 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, - 0x64, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, - 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x44, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, - 0x74, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x16, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x44, - 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x22, - 0xd2, 0x01, 0x0a, 0x10, 0x44, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x53, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x12, 0x33, 0x0a, 0x15, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x14, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x76, - 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x72, 0x65, 0x76, - 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x12, 
0x2d, 0x0a, 0x12, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0c, 0x52, 0x11, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3e, 0x0a, 0x1b, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x6e, - 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x19, 0x61, 0x70, 0x70, 0x6c, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x46, 0x69, 0x6c, 0x65, - 0x6e, 0x61, 0x6d, 0x65, 0x32, 0xa0, 0x07, 0x0a, 0x11, 0x44, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, - 0x65, 0x6e, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x95, 0x01, 0x0a, 0x12, 0x46, + 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x70, 0x0a, 0x19, 0x72, 0x75, 0x6e, 0x6e, 0x69, + 0x6e, 0x67, 0x5f, 0x64, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x72, 0x70, + 0x63, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x64, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, + 0x65, 0x6e, 0x74, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, + 0x44, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x52, 0x17, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, + 0x65, 0x6e, 0x74, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x6e, 0x0a, 0x18, 0x74, 0x61, 0x72, + 0x67, 0x65, 0x74, 0x5f, 0x64, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x72, + 0x70, 0x63, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x64, 0x65, 0x70, 0x6c, 0x6f, 0x79, + 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 
0x70, 0x68, 0x61, 0x31, + 0x2e, 0x44, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x53, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x52, 0x16, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x44, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, + 0x65, 0x6e, 0x74, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x22, 0x8c, 0x03, 0x0a, 0x12, 0x45, 0x78, + 0x65, 0x63, 0x75, 0x74, 0x65, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x49, 0x6e, 0x70, 0x75, 0x74, + 0x12, 0x3b, 0x0a, 0x0a, 0x64, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x11, 0x2e, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2e, 0x44, 0x65, 0x70, + 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, + 0x01, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x34, 0x0a, + 0x05, 0x73, 0x74, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6d, + 0x6f, 0x64, 0x65, 0x6c, 0x2e, 0x50, 0x69, 0x70, 0x65, 0x6c, 0x69, 0x6e, 0x65, 0x53, 0x74, 0x61, + 0x67, 0x65, 0x42, 0x08, 0xfa, 0x42, 0x05, 0x8a, 0x01, 0x02, 0x10, 0x01, 0x52, 0x05, 0x73, 0x74, + 0x61, 0x67, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x74, 0x61, 0x67, 0x65, 0x5f, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0b, 0x73, 0x74, 0x61, 0x67, 0x65, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x70, 0x0a, 0x19, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, + 0x67, 0x5f, 0x64, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x72, 0x70, 0x63, + 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x64, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, + 0x6e, 0x74, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x44, + 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, + 0x17, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x44, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, + 0x6e, 0x74, 
0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x6e, 0x0a, 0x18, 0x74, 0x61, 0x72, 0x67, + 0x65, 0x74, 0x5f, 0x64, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x67, 0x72, 0x70, + 0x63, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x64, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, + 0x65, 0x6e, 0x74, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, + 0x44, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x52, 0x16, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x44, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, + 0x6e, 0x74, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x22, 0xd2, 0x01, 0x0a, 0x10, 0x44, 0x65, 0x70, + 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x33, 0x0a, + 0x15, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x69, 0x72, + 0x65, 0x63, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x14, 0x61, 0x70, + 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, + 0x72, 0x79, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2d, + 0x0a, 0x12, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x11, 0x61, 0x70, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x3e, 0x0a, + 0x1b, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x19, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x46, 0x69, 0x6c, 
0x65, 0x6e, 0x61, 0x6d, 0x65, 0x32, 0xa0, 0x07, + 0x0a, 0x11, 0x44, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x12, 0x95, 0x01, 0x0a, 0x12, 0x46, 0x65, 0x74, 0x63, 0x68, 0x44, 0x65, 0x66, + 0x69, 0x6e, 0x65, 0x64, 0x53, 0x74, 0x61, 0x67, 0x65, 0x73, 0x12, 0x3d, 0x2e, 0x67, 0x72, 0x70, + 0x63, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x64, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, + 0x65, 0x6e, 0x74, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, + 0x46, 0x65, 0x74, 0x63, 0x68, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x53, 0x74, 0x61, 0x67, + 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3e, 0x2e, 0x67, 0x72, 0x70, 0x63, + 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x64, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, + 0x6e, 0x74, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x46, 0x65, 0x74, 0x63, 0x68, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x53, 0x74, 0x61, 0x67, 0x65, - 0x73, 0x12, 0x3d, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x92, 0x01, 0x0a, 0x11, + 0x44, 0x65, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x73, 0x12, 0x3c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x64, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, - 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x46, 0x65, 0x74, 0x63, 0x68, 0x44, 0x65, 0x66, 0x69, - 0x6e, 0x65, 0x64, 0x53, 0x74, 0x61, 0x67, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x3e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x64, - 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, - 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x46, 0x65, 0x74, 0x63, 0x68, 0x44, 0x65, 0x66, 0x69, 
0x6e, - 0x65, 0x64, 0x53, 0x74, 0x61, 0x67, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x00, 0x12, 0x92, 0x01, 0x0a, 0x11, 0x44, 0x65, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x65, - 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, - 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x64, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, - 0x74, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x44, 0x65, - 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3d, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x70, 0x6c, + 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x44, 0x65, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x65, + 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x3d, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x64, 0x65, + 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, + 0x70, 0x68, 0x61, 0x31, 0x2e, 0x44, 0x65, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x65, 0x56, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, + 0x12, 0x92, 0x01, 0x0a, 0x11, 0x44, 0x65, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x65, 0x53, 0x74, + 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x12, 0x3c, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x64, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x44, 0x65, 0x74, 0x65, - 0x72, 0x6d, 0x69, 0x6e, 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x92, 0x01, 0x0a, 0x11, 0x44, 0x65, 0x74, 0x65, - 0x72, 0x6d, 0x69, 0x6e, 0x65, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x12, 0x3c, 0x2e, - 0x67, 0x72, 0x70, 0x63, 0x2e, 0x70, 
0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x64, 0x65, 0x70, 0x6c, - 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, - 0x61, 0x31, 0x2e, 0x44, 0x65, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x65, 0x53, 0x74, 0x72, 0x61, - 0x74, 0x65, 0x67, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3d, 0x2e, 0x67, 0x72, - 0x70, 0x63, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x64, 0x65, 0x70, 0x6c, 0x6f, 0x79, - 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, - 0x2e, 0x44, 0x65, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x65, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, - 0x67, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0xa4, 0x01, 0x0a, - 0x17, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x50, 0x69, 0x70, 0x65, 0x6c, 0x69, 0x6e, 0x65, 0x53, 0x79, - 0x6e, 0x63, 0x53, 0x74, 0x61, 0x67, 0x65, 0x73, 0x12, 0x42, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, - 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x64, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, - 0x74, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x42, 0x75, - 0x69, 0x6c, 0x64, 0x50, 0x69, 0x70, 0x65, 0x6c, 0x69, 0x6e, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x53, - 0x74, 0x61, 0x67, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x43, 0x2e, 0x67, - 0x72, 0x70, 0x63, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x64, 0x65, 0x70, 0x6c, 0x6f, - 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, - 0x31, 0x2e, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x50, 0x69, 0x70, 0x65, 0x6c, 0x69, 0x6e, 0x65, 0x53, - 0x79, 0x6e, 0x63, 0x53, 0x74, 0x61, 0x67, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x00, 0x12, 0x9b, 0x01, 0x0a, 0x14, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x51, 0x75, 0x69, - 0x63, 0x6b, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x74, 0x61, 0x67, 0x65, 0x73, 0x12, 0x3f, 0x2e, 0x67, - 0x72, 0x70, 0x63, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x64, 
0x65, 0x70, 0x6c, 0x6f, - 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, - 0x31, 0x2e, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x51, 0x75, 0x69, 0x63, 0x6b, 0x53, 0x79, 0x6e, 0x63, - 0x53, 0x74, 0x61, 0x67, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x40, 0x2e, - 0x67, 0x72, 0x70, 0x63, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x64, 0x65, 0x70, 0x6c, - 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, - 0x61, 0x31, 0x2e, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x51, 0x75, 0x69, 0x63, 0x6b, 0x53, 0x79, 0x6e, - 0x63, 0x53, 0x74, 0x61, 0x67, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x00, 0x12, 0x83, 0x01, 0x0a, 0x0c, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x53, 0x74, 0x61, - 0x67, 0x65, 0x12, 0x37, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, - 0x2e, 0x64, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x70, 0x69, 0x2e, 0x76, - 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x53, - 0x74, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x38, 0x2e, 0x67, 0x72, + 0x72, 0x6d, 0x69, 0x6e, 0x65, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3d, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x2e, 0x64, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x70, 0x69, + 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x44, 0x65, 0x74, 0x65, 0x72, 0x6d, + 0x69, 0x6e, 0x65, 0x53, 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0xa4, 0x01, 0x0a, 0x17, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x50, + 0x69, 0x70, 0x65, 0x6c, 0x69, 0x6e, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x74, 0x61, 0x67, 0x65, + 0x73, 0x12, 0x42, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, + 0x64, 0x65, 
0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, + 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x50, 0x69, 0x70, 0x65, + 0x6c, 0x69, 0x6e, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x74, 0x61, 0x67, 0x65, 0x73, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x43, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x2e, 0x64, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x70, + 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x42, 0x75, 0x69, 0x6c, 0x64, + 0x50, 0x69, 0x70, 0x65, 0x6c, 0x69, 0x6e, 0x65, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x74, 0x61, 0x67, + 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x9b, 0x01, 0x0a, + 0x14, 0x42, 0x75, 0x69, 0x6c, 0x64, 0x51, 0x75, 0x69, 0x63, 0x6b, 0x53, 0x79, 0x6e, 0x63, 0x53, + 0x74, 0x61, 0x67, 0x65, 0x73, 0x12, 0x3f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x2e, 0x64, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x70, + 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x42, 0x75, 0x69, 0x6c, 0x64, + 0x51, 0x75, 0x69, 0x63, 0x6b, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x74, 0x61, 0x67, 0x65, 0x73, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x40, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x64, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x42, 0x75, 0x69, 0x6c, + 0x64, 0x51, 0x75, 0x69, 0x63, 0x6b, 0x53, 0x79, 0x6e, 0x63, 0x53, 0x74, 0x61, 0x67, 0x65, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x83, 0x01, 0x0a, 0x0c, 0x45, + 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x53, 0x74, 0x61, 0x67, 0x65, 0x12, 0x37, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x64, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x70, 0x69, 0x2e, 0x76, 
0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, - 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x53, 0x74, 0x61, 0x67, 0x65, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x3e, 0x5a, 0x3c, 0x67, 0x69, 0x74, 0x68, 0x75, - 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x69, 0x70, 0x65, 0x2d, 0x63, 0x64, 0x2f, 0x70, 0x69, - 0x70, 0x65, 0x63, 0x64, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2f, - 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x64, 0x65, 0x70, - 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x53, 0x74, 0x61, 0x67, 0x65, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x38, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x2e, 0x64, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x70, 0x69, + 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, + 0x65, 0x53, 0x74, 0x61, 0x67, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, + 0x42, 0x3e, 0x5a, 0x3c, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, + 0x69, 0x70, 0x65, 0x2d, 0x63, 0x64, 0x2f, 0x70, 0x69, 0x70, 0x65, 0x63, 0x64, 0x2f, 0x70, 0x6b, + 0x67, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x76, 0x31, 0x61, + 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x64, 0x65, 0x70, 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/pkg/plugin/api/v1alpha1/deployment/api.pb.validate.go b/pkg/plugin/api/v1alpha1/deployment/api.pb.validate.go index 1325d0a454..eacbcbd332 100644 --- a/pkg/plugin/api/v1alpha1/deployment/api.pb.validate.go +++ b/pkg/plugin/api/v1alpha1/deployment/api.pb.validate.go @@ -1597,8 +1597,6 @@ func (m *PlanPluginInput) validate(all bool) error { } } - // no validation rules for PluginConfig - if all { switch v := 
interface{}(m.GetRunningDeploymentSource()).(type) { case interface{ ValidateAll() error }: @@ -1837,7 +1835,7 @@ func (m *ExecutePluginInput) validate(all bool) error { } } - // no validation rules for PluginConfig + // no validation rules for StageConfig if all { switch v := interface{}(m.GetRunningDeploymentSource()).(type) { diff --git a/pkg/plugin/api/v1alpha1/deployment/api.proto b/pkg/plugin/api/v1alpha1/deployment/api.proto index b5ecb21869..1e1e2c1ef2 100644 --- a/pkg/plugin/api/v1alpha1/deployment/api.proto +++ b/pkg/plugin/api/v1alpha1/deployment/api.proto @@ -115,13 +115,10 @@ message ExecuteStageResponse { message PlanPluginInput { // The deployment to build a plan for. model.Deployment deployment = 1 [(validate.rules).message.required = true]; - // The configuration of plugin that handles the deployment. - bytes plugin_config = 2; - // The running deployment source. - DeploymentSource running_deployment_source = 3; + DeploymentSource running_deployment_source = 2; // The target deployment source. - DeploymentSource target_deployment_source = 4; + DeploymentSource target_deployment_source = 3; } message ExecutePluginInput { @@ -130,8 +127,8 @@ message ExecutePluginInput { // The stage to execute. model.PipelineStage stage = 2 [(validate.rules).message.required = true]; - // The configuration of plugin that handles the deployment. - bytes plugin_config = 3; + // The configuration of stage. + bytes stage_config = 3; // The running deployment source. DeploymentSource running_deployment_source = 4; diff --git a/pkg/plugin/pipedapi/client.go b/pkg/plugin/pipedapi/client.go new file mode 100644 index 0000000000..e4635354c0 --- /dev/null +++ b/pkg/plugin/pipedapi/client.go @@ -0,0 +1,61 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package planner provides a piped component +// that decides the deployment pipeline of a deployment. +// The planner bases on the changes from git commits +// then builds the deployment manifests to know the behavior of the deployment. +// From that behavior the planner can decides which pipeline should be applied. + +package pipedapi + +import ( + "context" + "slices" + + "google.golang.org/grpc" + + "github.com/pipe-cd/pipecd/pkg/app/pipedv1/cmd/piped/service" + "github.com/pipe-cd/pipecd/pkg/rpc/rpcclient" +) + +type PipedServiceClient struct { + service.PluginServiceClient + conn *grpc.ClientConn +} + +func NewClient(ctx context.Context, address string, opts ...rpcclient.DialOption) (*PipedServiceClient, error) { + // Clone the opts to avoid modifying the original opts slice. + opts = slices.Clone(opts) + + // Append the required options. + // The WithBlock option is required to make the client wait until the connection is up. + // The WithInsecure option is required to disable the transport security. + // The piped service does not require transport security because it is only used in localhost. + opts = append(opts, rpcclient.WithBlock(), rpcclient.WithInsecure()) + + conn, err := rpcclient.DialContext(ctx, address, opts...) 
+ if err != nil { + return nil, err + } + + return &PipedServiceClient{ + PluginServiceClient: service.NewPluginServiceClient(conn), + conn: conn, + }, nil +} + +func (c *PipedServiceClient) Close() error { + return c.conn.Close() +} diff --git a/pkg/plugin/signalhandler/handler.go b/pkg/plugin/signalhandler/handler.go new file mode 100644 index 0000000000..0444002894 --- /dev/null +++ b/pkg/plugin/signalhandler/handler.go @@ -0,0 +1,52 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package planner provides a piped component +// that decides the deployment pipeline of a deployment. +// The planner bases on the changes from git commits +// then builds the deployment manifests to know the behavior of the deployment. +// From that behavior the planner can decides which pipeline should be applied. + +package signalhandler + +import ( + "context" + "os" + "os/signal" + "sync/atomic" + "syscall" +) + +var ( + terminated atomic.Bool + + signals = []os.Signal{syscall.SIGINT, syscall.SIGTERM} +) + +func init() { + // Listen for termination signals. + // When a termination signal is received, the signal handler will set the terminated flag to true. + ctx, cancel := signal.NotifyContext(context.Background(), signals...) + go func() { + defer cancel() + <-ctx.Done() + terminated.Store(true) + }() +} + +// Terminated returns true if the signal handler has received a termination signal. 
+// The termination signals are sent by the piped when it wants to stop running gracefully. +func Terminated() bool { + return terminated.Load() +} diff --git a/pkg/rpc/server.go b/pkg/rpc/server.go index f73e46c042..018813edb7 100644 --- a/pkg/rpc/server.go +++ b/pkg/rpc/server.go @@ -55,6 +55,7 @@ type Server struct { requestValidationUnaryInterceptor grpc.UnaryServerInterceptor logUnaryInterceptor grpc.UnaryServerInterceptor prometheusUnaryInterceptor grpc.UnaryServerInterceptor + signalHandlingUnaryInterceptor grpc.UnaryServerInterceptor } // Option defines a function to set configurable field of Server. @@ -116,6 +117,12 @@ func WithPrometheusUnaryInterceptor() Option { } } +func WithSignalHandlingUnaryInterceptor() Option { + return func(s *Server) { + s.signalHandlingUnaryInterceptor = SignalHandlingInterceptor + } +} + // WithTLS configures TLS files. func WithTLS(certFile, keyFile string) Option { return func(s *Server) { @@ -218,6 +225,9 @@ func (s *Server) init() error { if s.requestValidationUnaryInterceptor != nil { unaryInterceptors = append(unaryInterceptors, s.requestValidationUnaryInterceptor) } + if s.signalHandlingUnaryInterceptor != nil { + unaryInterceptors = append(unaryInterceptors, s.signalHandlingUnaryInterceptor) + } if s.prometheusUnaryInterceptor != nil { unaryInterceptors = append(unaryInterceptors, s.prometheusUnaryInterceptor) } diff --git a/pkg/rpc/signal_interceptor.go b/pkg/rpc/signal_interceptor.go new file mode 100644 index 0000000000..13a067e2fc --- /dev/null +++ b/pkg/rpc/signal_interceptor.go @@ -0,0 +1,31 @@ +// Copyright 2024 The PipeCD Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package rpc + +import ( + "context" + "os/signal" + "syscall" + + "google.golang.org/grpc" +) + +// SignalHandlingInterceptor is a gRPC interceptor that cancels the context when a termination signal is received. +func SignalHandlingInterceptor(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp any, err error) { + ctx, cancel := signal.NotifyContext(ctx, syscall.SIGINT, syscall.SIGTERM) + defer cancel() + + return handler(ctx, req) +} diff --git a/quickstart/manifests/control-plane.yaml b/quickstart/manifests/control-plane.yaml index 57bcdad3e2..7cd4e008e0 100644 --- a/quickstart/manifests/control-plane.yaml +++ b/quickstart/manifests/control-plane.yaml @@ -5,10 +5,10 @@ kind: Secret metadata: name: pipecd labels: - helm.sh/chart: pipecd-v0.49.4 + helm.sh/chart: pipecd-v0.50.0 app.kubernetes.io/name: pipecd app.kubernetes.io/instance: pipecd - app.kubernetes.io/version: "v0.49.4" + app.kubernetes.io/version: "v0.50.0" app.kubernetes.io/managed-by: Helm type: Opaque data: @@ -22,10 +22,10 @@ kind: ConfigMap metadata: name: pipecd labels: - helm.sh/chart: pipecd-v0.49.4 + helm.sh/chart: pipecd-v0.50.0 app.kubernetes.io/name: pipecd app.kubernetes.io/instance: pipecd - app.kubernetes.io/version: "v0.49.4" + app.kubernetes.io/version: "v0.50.0" app.kubernetes.io/managed-by: Helm data: control-plane-config.yaml: |- @@ -57,10 +57,10 @@ kind: ConfigMap metadata: name: pipecd-gateway-envoy-config labels: - helm.sh/chart: pipecd-v0.49.4 + helm.sh/chart: pipecd-v0.50.0 app.kubernetes.io/name: pipecd 
app.kubernetes.io/instance: pipecd - app.kubernetes.io/version: "v0.49.4" + app.kubernetes.io/version: "v0.50.0" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: gateway data: @@ -269,10 +269,10 @@ kind: Service metadata: name: pipecd labels: - helm.sh/chart: pipecd-v0.49.4 + helm.sh/chart: pipecd-v0.50.0 app.kubernetes.io/name: pipecd app.kubernetes.io/instance: pipecd - app.kubernetes.io/version: "v0.49.4" + app.kubernetes.io/version: "v0.50.0" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: ingress annotations: @@ -293,10 +293,10 @@ kind: Service metadata: name: pipecd-gateway labels: - helm.sh/chart: pipecd-v0.49.4 + helm.sh/chart: pipecd-v0.50.0 app.kubernetes.io/name: pipecd app.kubernetes.io/instance: pipecd - app.kubernetes.io/version: "v0.49.4" + app.kubernetes.io/version: "v0.50.0" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: gateway spec: @@ -316,10 +316,10 @@ kind: Service metadata: name: pipecd-server labels: - helm.sh/chart: pipecd-v0.49.4 + helm.sh/chart: pipecd-v0.50.0 app.kubernetes.io/name: pipecd app.kubernetes.io/instance: pipecd - app.kubernetes.io/version: "v0.49.4" + app.kubernetes.io/version: "v0.50.0" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: server spec: @@ -352,10 +352,10 @@ kind: Service metadata: name: pipecd-cache labels: - helm.sh/chart: pipecd-v0.49.4 + helm.sh/chart: pipecd-v0.50.0 app.kubernetes.io/name: pipecd app.kubernetes.io/instance: pipecd - app.kubernetes.io/version: "v0.49.4" + app.kubernetes.io/version: "v0.50.0" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: cache spec: @@ -375,10 +375,10 @@ kind: Service metadata: name: pipecd-ops labels: - helm.sh/chart: pipecd-v0.49.4 + helm.sh/chart: pipecd-v0.50.0 app.kubernetes.io/name: pipecd app.kubernetes.io/instance: pipecd - app.kubernetes.io/version: "v0.49.4" + app.kubernetes.io/version: "v0.50.0" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: ops spec: @@ -402,10 +402,10 
@@ kind: Service metadata: name: pipecd-mysql labels: - helm.sh/chart: pipecd-v0.49.4 + helm.sh/chart: pipecd-v0.50.0 app.kubernetes.io/name: pipecd app.kubernetes.io/instance: pipecd - app.kubernetes.io/version: "v0.49.4" + app.kubernetes.io/version: "v0.50.0" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: mysql spec: @@ -425,10 +425,10 @@ kind: Service metadata: name: pipecd-minio labels: - helm.sh/chart: pipecd-v0.49.4 + helm.sh/chart: pipecd-v0.50.0 app.kubernetes.io/name: pipecd app.kubernetes.io/instance: pipecd - app.kubernetes.io/version: "v0.49.4" + app.kubernetes.io/version: "v0.50.0" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: minio spec: @@ -449,10 +449,10 @@ kind: Deployment metadata: name: pipecd-gateway labels: - helm.sh/chart: pipecd-v0.49.4 + helm.sh/chart: pipecd-v0.50.0 app.kubernetes.io/name: pipecd app.kubernetes.io/instance: pipecd - app.kubernetes.io/version: "v0.49.4" + app.kubernetes.io/version: "v0.50.0" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: gateway spec: @@ -469,7 +469,7 @@ spec: app.kubernetes.io/instance: pipecd app.kubernetes.io/component: gateway annotations: - checksum/config: 92b5cb7c1ba299e1febe1b11fd8ca5e406938a9144c21231578c78d0105896d3 # ref; https://helm.sh/docs/howto/charts_tips_and_tricks/#automatically-roll-deployments + checksum/config: c5bffdd78ee7af2788d604a241d6aef279741345405d9b60d4882b15029976a0 # ref; https://helm.sh/docs/howto/charts_tips_and_tricks/#automatically-roll-deployments spec: containers: - name: envoy @@ -519,10 +519,10 @@ kind: Deployment metadata: name: pipecd-server labels: - helm.sh/chart: pipecd-v0.49.4 + helm.sh/chart: pipecd-v0.50.0 app.kubernetes.io/name: pipecd app.kubernetes.io/instance: pipecd - app.kubernetes.io/version: "v0.49.4" + app.kubernetes.io/version: "v0.50.0" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: server spec: @@ -551,7 +551,7 @@ spec: done; containers: - name: server - image: 
"ghcr.io/pipe-cd/pipecd:v0.49.4" + image: "ghcr.io/pipe-cd/pipecd:v0.50.0" imagePullPolicy: IfNotPresent args: - server @@ -614,10 +614,10 @@ kind: Deployment metadata: name: pipecd-cache labels: - helm.sh/chart: pipecd-v0.49.4 + helm.sh/chart: pipecd-v0.50.0 app.kubernetes.io/name: pipecd app.kubernetes.io/instance: pipecd - app.kubernetes.io/version: "v0.49.4" + app.kubernetes.io/version: "v0.50.0" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: cache spec: @@ -650,10 +650,10 @@ kind: Deployment metadata: name: pipecd-ops labels: - helm.sh/chart: pipecd-v0.49.4 + helm.sh/chart: pipecd-v0.50.0 app.kubernetes.io/name: pipecd app.kubernetes.io/instance: pipecd - app.kubernetes.io/version: "v0.49.4" + app.kubernetes.io/version: "v0.50.0" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: ops spec: @@ -684,7 +684,7 @@ spec: done; containers: - name: ops - image: "ghcr.io/pipe-cd/pipecd:v0.49.4" + image: "ghcr.io/pipe-cd/pipecd:v0.50.0" imagePullPolicy: IfNotPresent args: - ops @@ -730,10 +730,10 @@ kind: Deployment metadata: name: pipecd-mysql labels: - helm.sh/chart: pipecd-v0.49.4 + helm.sh/chart: pipecd-v0.50.0 app.kubernetes.io/name: pipecd app.kubernetes.io/instance: pipecd - app.kubernetes.io/version: "v0.49.4" + app.kubernetes.io/version: "v0.50.0" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: mysql spec: @@ -771,10 +771,10 @@ kind: Deployment metadata: name: pipecd-minio labels: - helm.sh/chart: pipecd-v0.49.4 + helm.sh/chart: pipecd-v0.50.0 app.kubernetes.io/name: pipecd app.kubernetes.io/instance: pipecd - app.kubernetes.io/version: "v0.49.4" + app.kubernetes.io/version: "v0.50.0" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: minio spec: diff --git a/quickstart/manifests/piped.yaml b/quickstart/manifests/piped.yaml index 40fe457f0b..2d5c3001d6 100644 --- a/quickstart/manifests/piped.yaml +++ b/quickstart/manifests/piped.yaml @@ -5,10 +5,10 @@ kind: ServiceAccount metadata: name: piped labels: - 
helm.sh/chart: piped-v0.49.4 + helm.sh/chart: piped-v0.50.0 app.kubernetes.io/name: piped app.kubernetes.io/instance: piped - app.kubernetes.io/version: "v0.49.4" + app.kubernetes.io/version: "v0.50.0" app.kubernetes.io/managed-by: Helm --- # Source: piped/templates/secret.yaml @@ -17,10 +17,10 @@ kind: Secret metadata: name: piped labels: - helm.sh/chart: piped-v0.49.4 + helm.sh/chart: piped-v0.50.0 app.kubernetes.io/name: piped app.kubernetes.io/instance: piped - app.kubernetes.io/version: "v0.49.4" + app.kubernetes.io/version: "v0.50.0" app.kubernetes.io/managed-by: Helm type: Opaque data: @@ -31,10 +31,10 @@ kind: ConfigMap metadata: name: piped labels: - helm.sh/chart: piped-v0.49.4 + helm.sh/chart: piped-v0.50.0 app.kubernetes.io/name: piped app.kubernetes.io/instance: piped - app.kubernetes.io/version: "v0.49.4" + app.kubernetes.io/version: "v0.50.0" app.kubernetes.io/managed-by: Helm data: piped-config.yaml: |- @@ -57,10 +57,10 @@ kind: ClusterRole metadata: name: piped labels: - helm.sh/chart: piped-v0.49.4 + helm.sh/chart: piped-v0.50.0 app.kubernetes.io/name: piped app.kubernetes.io/instance: piped - app.kubernetes.io/version: "v0.49.4" + app.kubernetes.io/version: "v0.50.0" app.kubernetes.io/managed-by: Helm rules: @@ -81,10 +81,10 @@ kind: ClusterRoleBinding metadata: name: piped labels: - helm.sh/chart: piped-v0.49.4 + helm.sh/chart: piped-v0.50.0 app.kubernetes.io/name: piped app.kubernetes.io/instance: piped - app.kubernetes.io/version: "v0.49.4" + app.kubernetes.io/version: "v0.50.0" app.kubernetes.io/managed-by: Helm roleRef: apiGroup: rbac.authorization.k8s.io @@ -101,10 +101,10 @@ kind: Service metadata: name: piped labels: - helm.sh/chart: piped-v0.49.4 + helm.sh/chart: piped-v0.50.0 app.kubernetes.io/name: piped app.kubernetes.io/instance: piped - app.kubernetes.io/version: "v0.49.4" + app.kubernetes.io/version: "v0.50.0" app.kubernetes.io/managed-by: Helm spec: type: ClusterIP @@ -122,10 +122,10 @@ kind: Deployment metadata: name: piped 
labels: - helm.sh/chart: piped-v0.49.4 + helm.sh/chart: piped-v0.50.0 app.kubernetes.io/name: piped app.kubernetes.io/instance: piped - app.kubernetes.io/version: "v0.49.4" + app.kubernetes.io/version: "v0.50.0" app.kubernetes.io/managed-by: Helm spec: replicas: 1 @@ -142,13 +142,13 @@ spec: app.kubernetes.io/instance: piped annotations: sidecar.istio.io/inject: "false" - rollme: "a17EN" + rollme: "9nhOh" spec: serviceAccountName: piped containers: - name: piped imagePullPolicy: IfNotPresent - image: "ghcr.io/pipe-cd/piped:v0.49.4" + image: "ghcr.io/pipe-cd/piped:v0.50.0" args: - piped - --config-file=/etc/piped-config/piped-config.yaml diff --git a/tool/actions-gh-release/Dockerfile b/tool/actions-gh-release/Dockerfile index 87c2d8f9bc..68918b67e5 100644 --- a/tool/actions-gh-release/Dockerfile +++ b/tool/actions-gh-release/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.22.5-alpine3.20 +FROM golang:1.23.3-alpine3.20 RUN apk update && apk add git diff --git a/tool/actions-gh-release/go.mod b/tool/actions-gh-release/go.mod index b773e80d34..8cbd5acbc0 100644 --- a/tool/actions-gh-release/go.mod +++ b/tool/actions-gh-release/go.mod @@ -1,6 +1,6 @@ module github.com/pipe-cd/actions-gh-release -go 1.22.4 +go 1.23.3 require ( github.com/creasty/defaults v1.5.2 diff --git a/web/model/application_pb.d.ts b/web/model/application_pb.d.ts index c456cb8267..e69cc74331 100644 --- a/web/model/application_pb.d.ts +++ b/web/model/application_pb.d.ts @@ -32,6 +32,11 @@ export class Application extends jspb.Message { getPlatformProvider(): string; setPlatformProvider(value: string): Application; + getDeployTargetsList(): Array; + setDeployTargetsList(value: Array): Application; + clearDeployTargetsList(): Application; + addDeployTargets(value: string, index?: number): Application; + getDescription(): string; setDescription(value: string): Application; @@ -89,6 +94,7 @@ export namespace Application { gitPath?: pkg_model_common_pb.ApplicationGitPath.AsObject, cloudProvider: string, 
platformProvider: string, + deployTargetsList: Array, description: string, labelsMap: Array<[string, string]>, mostRecentlySuccessfulDeployment?: ApplicationDeploymentReference.AsObject, diff --git a/web/model/application_pb.js b/web/model/application_pb.js index 11af3091d4..9dd8785e40 100644 --- a/web/model/application_pb.js +++ b/web/model/application_pb.js @@ -42,7 +42,7 @@ goog.exportSymbol('proto.model.ApplicationSyncStatus', null, global); * @constructor */ proto.model.Application = function(opt_data) { - jspb.Message.initialize(this, opt_data, 0, -1, null, null); + jspb.Message.initialize(this, opt_data, 0, -1, proto.model.Application.repeatedFields_, null); }; goog.inherits(proto.model.Application, jspb.Message); if (goog.DEBUG && !COMPILED) { @@ -95,6 +95,13 @@ if (goog.DEBUG && !COMPILED) { proto.model.ApplicationDeploymentReference.displayName = 'proto.model.ApplicationDeploymentReference'; } +/** + * List of repeated fields within this message type. + * @private {!Array} + * @const + */ +proto.model.Application.repeatedFields_ = [16]; + if (jspb.Message.GENERATE_TO_OBJECT) { @@ -134,6 +141,7 @@ proto.model.Application.toObject = function(includeInstance, msg) { gitPath: (f = msg.getGitPath()) && pkg_model_common_pb.ApplicationGitPath.toObject(includeInstance, f), cloudProvider: jspb.Message.getFieldWithDefault(msg, 8, ""), platformProvider: jspb.Message.getFieldWithDefault(msg, 15, ""), + deployTargetsList: (f = jspb.Message.getRepeatedField(msg, 16)) == null ? undefined : f, description: jspb.Message.getFieldWithDefault(msg, 9, ""), labelsMap: (f = msg.getLabelsMap()) ? 
f.toObject(includeInstance, undefined) : [], mostRecentlySuccessfulDeployment: (f = msg.getMostRecentlySuccessfulDeployment()) && proto.model.ApplicationDeploymentReference.toObject(includeInstance, f), @@ -214,6 +222,10 @@ proto.model.Application.deserializeBinaryFromReader = function(msg, reader) { var value = /** @type {string} */ (reader.readString()); msg.setPlatformProvider(value); break; + case 16: + var value = /** @type {string} */ (reader.readString()); + msg.addDeployTargets(value); + break; case 9: var value = /** @type {string} */ (reader.readString()); msg.setDescription(value); @@ -349,6 +361,13 @@ proto.model.Application.serializeBinaryToWriter = function(message, writer) { f ); } + f = message.getDeployTargetsList(); + if (f.length > 0) { + writer.writeRepeatedString( + 16, + f + ); + } f = message.getDescription(); if (f.length > 0) { writer.writeString( @@ -592,6 +611,43 @@ proto.model.Application.prototype.setPlatformProvider = function(value) { }; +/** + * repeated string deploy_targets = 16; + * @return {!Array} + */ +proto.model.Application.prototype.getDeployTargetsList = function() { + return /** @type {!Array} */ (jspb.Message.getRepeatedField(this, 16)); +}; + + +/** + * @param {!Array} value + * @return {!proto.model.Application} returns this + */ +proto.model.Application.prototype.setDeployTargetsList = function(value) { + return jspb.Message.setField(this, 16, value || []); +}; + + +/** + * @param {string} value + * @param {number=} opt_index + * @return {!proto.model.Application} returns this + */ +proto.model.Application.prototype.addDeployTargets = function(value, opt_index) { + return jspb.Message.addToRepeatedField(this, 16, value, opt_index); +}; + + +/** + * Clears the list making it empty but non-null. 
+ * @return {!proto.model.Application} returns this + */ +proto.model.Application.prototype.clearDeployTargetsList = function() { + return this.setDeployTargetsList([]); +}; + + /** * optional string description = 9; * @return {string} diff --git a/web/model/deployment_pb.d.ts b/web/model/deployment_pb.d.ts index b026a275c5..a3c13726b4 100644 --- a/web/model/deployment_pb.d.ts +++ b/web/model/deployment_pb.d.ts @@ -34,6 +34,11 @@ export class Deployment extends jspb.Message { getPlatformProvider(): string; setPlatformProvider(value: string): Deployment; + getDeployTargetsList(): Array; + setDeployTargetsList(value: Array): Deployment; + clearDeployTargetsList(): Deployment; + addDeployTargets(value: string, index?: number): Deployment; + getLabelsMap(): jspb.Map; clearLabelsMap(): Deployment; @@ -107,6 +112,7 @@ export namespace Deployment { gitPath?: pkg_model_common_pb.ApplicationGitPath.AsObject, cloudProvider: string, platformProvider: string, + deployTargetsList: Array, labelsMap: Array<[string, string]>, trigger?: DeploymentTrigger.AsObject, summary: string, diff --git a/web/model/deployment_pb.js b/web/model/deployment_pb.js index ef80e51d03..503a445b9a 100644 --- a/web/model/deployment_pb.js +++ b/web/model/deployment_pb.js @@ -122,7 +122,7 @@ if (goog.DEBUG && !COMPILED) { * @private {!Array} * @const */ -proto.model.Deployment.repeatedFields_ = [24,32]; +proto.model.Deployment.repeatedFields_ = [12,24,32]; @@ -164,6 +164,7 @@ proto.model.Deployment.toObject = function(includeInstance, msg) { gitPath: (f = msg.getGitPath()) && pkg_model_common_pb.ApplicationGitPath.toObject(includeInstance, f), cloudProvider: jspb.Message.getFieldWithDefault(msg, 9, ""), platformProvider: jspb.Message.getFieldWithDefault(msg, 11, ""), + deployTargetsList: (f = jspb.Message.getRepeatedField(msg, 12)) == null ? undefined : f, labelsMap: (f = msg.getLabelsMap()) ? 
f.toObject(includeInstance, undefined) : [], trigger: (f = msg.getTrigger()) && proto.model.DeploymentTrigger.toObject(includeInstance, f), summary: jspb.Message.getFieldWithDefault(msg, 22, ""), @@ -255,6 +256,10 @@ proto.model.Deployment.deserializeBinaryFromReader = function(msg, reader) { var value = /** @type {string} */ (reader.readString()); msg.setPlatformProvider(value); break; + case 12: + var value = /** @type {string} */ (reader.readString()); + msg.addDeployTargets(value); + break; case 10: var value = msg.getLabelsMap(); reader.readMessage(value, function(message, reader) { @@ -419,6 +424,13 @@ proto.model.Deployment.serializeBinaryToWriter = function(message, writer) { f ); } + f = message.getDeployTargetsList(); + if (f.length > 0) { + writer.writeRepeatedString( + 12, + f + ); + } f = message.getLabelsMap(true); if (f && f.getLength() > 0) { f.serializeBinary(10, writer, jspb.BinaryWriter.prototype.writeString, jspb.BinaryWriter.prototype.writeString); @@ -712,6 +724,43 @@ proto.model.Deployment.prototype.setPlatformProvider = function(value) { }; +/** + * repeated string deploy_targets = 12; + * @return {!Array} + */ +proto.model.Deployment.prototype.getDeployTargetsList = function() { + return /** @type {!Array} */ (jspb.Message.getRepeatedField(this, 12)); +}; + + +/** + * @param {!Array} value + * @return {!proto.model.Deployment} returns this + */ +proto.model.Deployment.prototype.setDeployTargetsList = function(value) { + return jspb.Message.setField(this, 12, value || []); +}; + + +/** + * @param {string} value + * @param {number=} opt_index + * @return {!proto.model.Deployment} returns this + */ +proto.model.Deployment.prototype.addDeployTargets = function(value, opt_index) { + return jspb.Message.addToRepeatedField(this, 12, value, opt_index); +}; + + +/** + * Clears the list making it empty but non-null. 
+ * @return {!proto.model.Deployment} returns this + */ +proto.model.Deployment.prototype.clearDeployTargetsList = function() { + return this.setDeployTargetsList([]); +}; + + /** * map labels = 10; * @param {boolean=} opt_noLazyCreate Do not create the map if diff --git a/web/src/__fixtures__/dummy-application.ts b/web/src/__fixtures__/dummy-application.ts index b02bb37d92..81d4bc4b42 100644 --- a/web/src/__fixtures__/dummy-application.ts +++ b/web/src/__fixtures__/dummy-application.ts @@ -25,6 +25,7 @@ export const dummyApplication: Application.AsObject = { id: randomUUID(), cloudProvider: "", platformProvider: "kubernetes-default", + deployTargetsList: ["kubernetes-default"], disabled: false, gitPath: { configFilename: "", @@ -131,6 +132,7 @@ export function createApplicationFromObject( const app = new Application(); app.setId(o.id); app.setPlatformProvider(o.platformProvider); + app.setDeployTargetsList(o.deployTargetsList); app.setDisabled(o.disabled); app.setKind(o.kind); app.setName(o.name); diff --git a/web/src/__fixtures__/dummy-deployment.ts b/web/src/__fixtures__/dummy-deployment.ts index 38a92b9b9b..691a37acb8 100644 --- a/web/src/__fixtures__/dummy-deployment.ts +++ b/web/src/__fixtures__/dummy-deployment.ts @@ -25,6 +25,7 @@ export const dummyDeployment: Deployment.AsObject = { versionsList: [], cloudProvider: "kube-1", platformProvider: "kube-1", + deployTargetsList: ["kube-1"], labelsMap: [], createdAt: createdAt.unix(), updatedAt: completedAt.unix(),