diff --git a/.github/bin/delete-namespace b/.github/bin/delete-namespace index 2a88060..aeb5489 100755 --- a/.github/bin/delete-namespace +++ b/.github/bin/delete-namespace @@ -1,6 +1,8 @@ #!/bin/bash -# Delete all resources in namespace +set -euo pipefail + +# Function to delete all resources in a namespace delete_resources_in_namespace() { local namespace=$1 echo "Deleting all resources in namespace: $namespace" @@ -8,14 +10,20 @@ delete_resources_in_namespace() { | xargs -n 1 kubectl delete --all -n "$namespace" --ignore-not-found --wait } -# Remove finalizers and delete stuck namespace -delete_namespace() { +# Function to remove finalizers from a namespace +remove_finalizers() { local namespace=$1 - echo "Removing finalizers and deleting namespace: $namespace" + echo "Removing finalizers from namespace: $namespace" kubectl get namespace "$namespace" -o json \ | jq 'del(.spec.finalizers)' \ | kubectl replace --raw "/api/v1/namespaces/$namespace/finalize" -f - - kubectl delete namespace "$namespace" +} + +# Function to delete a namespace +delete_namespace() { + local namespace=$1 + echo "Deleting namespace: $namespace" + kubectl delete namespace "$namespace" --ignore-not-found --wait echo "Waiting for namespace $namespace to be deleted..." while kubectl get namespace "$namespace" &>/dev/null; do sleep 1 @@ -23,14 +31,36 @@ delete_namespace() { echo "Namespace $namespace has been deleted." } -# Check for at least one namespace provided as argument -if [ $# -eq 0 ]; then - echo "Usage: $0 ... " - exit 1 -fi - -# Loop through and delete list of namespaces -for namespace in "$@"; do - delete_resources_in_namespace "$namespace" - delete_namespace "$namespace" -done +# Function to delete CustomResourceDefinitions (CRDs) +delete_crds() { + local crds=("certificaterequests.cert-manager.io" "certificates.cert-manager.io" "challenges.acme.cert-manager.io" \ + "orders.acme.cert-manager.io" "clusterissuers.cert-manager.io" "issuers.cert-manager.io" \ + "kubevirts.kubevirt.io") + + for crd in "${crds[@]}"; do + echo "Deleting CRD: $crd" + kubectl delete crd "$crd" --ignore-not-found --wait + done +} + +# Main deletion process +main() { + if [ $# -eq 0 ]; then + echo "Usage: $0 ... " + exit 1 + fi + + for namespace in "$@"; do + delete_resources_in_namespace "$namespace" + remove_finalizers "$namespace" || echo "Failed to remove finalizers for namespace: $namespace" + delete_namespace "$namespace" || { + echo "Namespace $namespace is stuck. Attempting to force removal." + remove_finalizers "$namespace" + delete_namespace "$namespace" + } + done + + delete_crds +} + +main "$@" diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md new file mode 100644 index 0000000..c7e6885 --- /dev/null +++ b/CODE_OF_CONDUCT.md @@ -0,0 +1,103 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +We as members, contributors, and leaders pledge to make participation in our community a harassment-free experience for everyone, regardless of age, body size, visible or invisible disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, religion, or sexual identity and orientation. + +We pledge to act and interact in ways that contribute to an open, welcoming, diverse, inclusive, and healthy community. 
+ +## Our Standards + +Examples of behavior that contributes to a positive environment for our community include: + +- Demonstrating empathy and kindness toward other people +- Being respectful of differing opinions, viewpoints, and experiences +- Giving and gracefully accepting constructive feedback +- Accepting responsibility and apologizing to those affected by our mistakes, and learning from the experience +- Focusing on what is best not just for us as individuals, but for the overall community + +Examples of unacceptable behavior include: + +- The use of sexualized language or imagery, and sexual attention or advances of any kind +- Trolling, insulting or derogatory comments, and personal or political attacks +- Public or private harassment +- Publishing others' private information, such as a physical or email address, without their explicit permission +- Other conduct which could reasonably be considered inappropriate in a professional setting + +## Enforcement Responsibilities + +Community leaders are responsible for clarifying and enforcing our standards of acceptable behavior and will take appropriate and fair corrective action in response to any behavior that they deem inappropriate, threatening, offensive, or harmful. + +Community leaders have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, and will communicate reasons for moderation decisions when appropriate. + +## Scope + +This Code of Conduct applies within all community spaces, and also applies when an individual is officially representing the community in public spaces. Examples of representing our community include using an official email address, posting via an official social media account, or acting as an appointed representative at an online or offline event. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be reported to the community leaders responsible for enforcement at [conduct@containercraft.org](mailto:conduct@containercraft.org). All complaints will be reviewed and investigated promptly and fairly. + +All community leaders are obligated to respect the privacy and security of the reporter of any incident. + +## Enforcement Guidelines + +Community leaders will follow these Community Impact Guidelines in determining the consequences for any action they deem in violation of this Code of Conduct: + +### 1. Correction + +**Community Impact**: Use of inappropriate language or other behavior deemed unprofessional or unwelcome in the community. + +**Consequence**: A private, written warning from community leaders, providing clarity around the nature of the violation and an explanation of why the behavior was inappropriate. A public apology may be requested. + +### 2. Warning + +**Community Impact**: A violation through a single incident or series of actions. + +**Consequence**: A warning with consequences for continued behavior. No interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, for a specified period of time. This includes avoiding interactions in community spaces as well as external channels like social media. Violating these terms may lead to a temporary or permanent ban. + +### 3. Temporary Ban + +**Community Impact**: A serious violation of community standards, including sustained inappropriate behavior. 
+ +**Consequence**: A temporary ban from any sort of interaction or public communication with the community for a specified period of time. No public or private interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, is allowed during this period. Violating these terms may lead to a permanent ban. + +### 4. Permanent Ban + +**Community Impact**: Demonstrating a pattern of violation of community standards, including sustained inappropriate behavior, harassment of an individual, or aggression towards or disparagement of classes of individuals. + +**Consequence**: A permanent ban from any sort of public interaction within the community. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant](https://www.contributor-covenant.org), version 2.1, available at https://www.contributor-covenant.org/version/2/1/code_of_conduct.html. + +Community Impact Guidelines were inspired by [Mozilla's code of conduct enforcement ladder](https://github.com/mozilla/diversity). + +For answers to common questions about this code of conduct, see the FAQ at https://www.contributor-covenant.org/faq. Translations are available at https://www.contributor-covenant.org/translations. + +--- + +## Reporting Guide + +To make sure everyone understands how to report a Code of Conduct violation, here are some detailed instructions: + +1. **Write down what happened.** + - Include the times and dates of incidents if possible. + - Remember, if you feel any aspect of this experience might help make things better for the community, it is worth reporting. + +2. **Submit your report.** + - You can report via email: [conduct@containercraft.org](mailto:conduct@containercraft.org). + +3. **Wait for response.** + - You should get a response from us within a few days. We take every report seriously and will inform you of any steps we will be taking as a result of your complaint. + +## Values and Principles + +At ContainerCraft, our core principles revolve around creating an inclusive, safe, and growth-oriented community. We believe that each individual has the potential to contribute meaningfully, and we strive to provide an environment that fosters learning, respect, and collaboration. We commit to: + +- Continually improve community health and inclusivity. +- Promote a culture where new ideas are welcome, and learning from mistakes is encouraged. +- Prioritize the safety and well-being of our members above all else. + +Thank you for helping to make this a welcoming and respectful community for all. diff --git a/CONTRIBUTOR.md b/CONTRIBUTOR.md new file mode 100644 index 0000000..a6e5cec --- /dev/null +++ b/CONTRIBUTOR.md @@ -0,0 +1,183 @@ +# Contributing to Kargo + +First off, thank you for your interest in Kargo! The Kargo project is designed to provide a delightful Developer Experience (DX) and User Experience (UX), and we welcome contributions from the community to help continue this mission. Whether you're fixing a bug, adding a new feature, or improving documentation, your contributions are greatly appreciated. + +--- + +## Table of Contents + +1. [Code of Conduct](#code-of-conduct) +2. [Getting Started](#getting-started) + - [Prerequisites](#prerequisites) + - [Setting up Development Environment](#setting-up-development-environment) +3. [How to Contribute](#how-to-contribute) + - [Reporting Bugs](#reporting-bugs) + - [Suggesting Enhancements](#suggesting-enhancements) + - [Submitting Changes](#submitting-changes) +4. 
[Style Guides](#style-guides)
+   - [Python Style Guide](#python-style-guide)
+   - [Commit Messages](#commit-messages)
+   - [Documentation](#documentation)
+5. [Testing](#testing)
+6. [Continuous Integration](#continuous-integration)
+7. [Communication](#communication)
+8. [Acknowledgements](#acknowledgements)
+9. [Additional Resources](#additional-resources)
+
+---
+
+## Code of Conduct
+
+This project adheres to the [Contributor Covenant Code of Conduct](CODE_OF_CONDUCT.md). By participating, you are expected to uphold this code. Please report unacceptable behavior to [emcee@braincraft.io](mailto:emcee@braincraft.io).
+
+---
+
+## Getting Started
+
+### Prerequisites
+
+- **GitHub Account**: Needed to fork the repository and open pull requests.
+- **Docker / Docker Desktop**: Required to run the devcontainer locally.
+- **VSCode**: The editor used for the devcontainer-based workflow.
+- **VSCode | Devcontainer Extension**: Lets VSCode reopen the repository inside the devcontainer.
+
+### Setting up Development Environment
+
+1. **Fork the Repository**:
+   - Navigate to the [Kargo GitHub repository](https://github.com/ContainerCraft/Kargo) and click "Fork".
+
+2. **Clone the Forked Repository**:
+   ```sh
+   git clone https://github.com/your-username/Kargo.git
+   cd Kargo
+   ```
+
+3. **Launch VSCode**:
+
+   > NOTE: When prompted, relaunch in the devcontainer, which will supply all dependencies.
+
+   ```sh
+   code .
+   ```
+
+4. **Login & Install Dependencies**:
+   ```sh
+   pulumi login
+   pulumi install
+   ```
+
+5. **Configure Pre-commit Hooks**:
+   ```sh
+   pip install pre-commit
+   pre-commit install
+   ```
+
+---
+
+## How to Contribute
+
+### Reporting Bugs
+
+If you've found a bug, please open an issue on GitHub. Fill out the provided template with as much detail as possible, including:
+- The version of Kargo you're using
+- Steps to reproduce the bug
+- Any relevant logs or screenshots
+
+### Suggesting Enhancements
+
+To suggest an enhancement, please open an issue with the "enhancement" label. Provide a clear description of the improvement and why it would be beneficial for the project.
+
+### Submitting Changes
+
+1. **Create a Branch**:
+   ```sh
+   git checkout -b feature/user/my-new-feature
+   ```
+
+2. **Make Your Changes**:
+   - Ensure your code adheres to the [Python Style Guide](#python-style-guide).
+   - Write tests to cover your changes.
+
+3. **Commit Your Changes**:
+   ```sh
+   git add .
+   git commit -m "feat: add new feature"
+   ```
+
+4. **Push to Your Fork**:
+   ```sh
+   git push origin feature/user/my-new-feature
+   ```
+
+5. **Open a Pull Request**:
+   - Navigate to the repository on GitHub and click "Compare & pull request". Fill in the template and submit.
+
+---
+
+## Style Guides
+
+### Python Style Guide
+
+Adhere to [PEP 8](https://www.python.org/dev/peps/pep-0008/) and aim for clean, readable code. We recommend using `flake8` and `black` to maintain code quality:
+```sh
+pip install flake8 black
+flake8 .
+black .
+```
+
+### Commit Messages
+
+- Follow the [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/) specification.
+- Example Commit Message:
+  ```
+  feat: add support for Kubernetes 1.24
+
+  This commit adds compatibility with Kubernetes 1.24, ensuring all API changes are accommodated.
+  ```
+
+### Documentation
+
+- Use clear and concise docstrings for functions and classes.
+- Update `README.md` and other relevant documentation files as applicable.
+
+---
+
+## Testing
+
+1. **Unit Tests**: Your code should be accompanied by unit tests to ensure robust coverage.
+2. **Running Tests**:
+   ```sh
+   pytest
+   ```
+
+---
+
+## Continuous Integration
+
+We use GitHub Actions for CI/CD. All pull requests will be automatically tested.
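+
+To catch problems before CI does, you can run the same checks locally. This is a minimal sketch that assumes `flake8`, `black`, and `pytest` are available in your environment:
+
+```sh
+flake8 .
+black --check .
+pytest
+```
+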
Please ensure your PR passes all checks before requesting a review. + +--- + +## Communication + +- **GitHub Issues**: For proposing features, reporting bugs, and suggesting improvements. +- **Email**: For sensitive or private communication, please contact [emcee@braincraft.io](mailto:emcee@braincraft.io). + +--- + +## Acknowledgements + +Thank you to all the contributors who make this project possible. Your time and effort are sincerely appreciated! + +--- + +## Additional Resources + +- [Pulumi Documentation](https://www.pulumi.com/docs/) +- [Kubernetes Documentation](https://kubernetes.io/docs/) +- [Contributor Covenant Code of Conduct](https://www.contributor-covenant.org/) +- [GitHub Actions Documentation](https://docs.github.com/en/actions) + +--- + +Thank you for contributing to Kargo! diff --git a/Pulumi.yaml b/Pulumi.yaml index 6b9ed04..dc40dd2 100644 --- a/Pulumi.yaml +++ b/Pulumi.yaml @@ -8,55 +8,55 @@ runtime: name: python options: virtualenv: venv -config: - pulumi:tags: - value: - pulumi:template: ccio-kargo-python - vm: - value: - enabled: false - namespace: default - instance_name: ubuntu - image_name: docker.io/containercraft/ubuntu:22.04 - node_port: 30590 - ssh_user: kc2 - ssh_password: kc2 - kubernetes: - value: - context: kargo - kubeconfig: .kube/config - cert_manager: - value: - enabled: true - version: 1.15.3 - kubevirt: - value: - enabled: true - version: 1.3.1 - hostpath_provisioner: - value: - enabled: true - version: 0.20.0 - default_storage_class: true - cdi: - value: - enabled: true - version: 1.60.2 - multus: - value: - enabled: true - cnao: - value: - enabled: false - version: 0.94.2 - prometheus: - value: - enabled: false - version: 61.3.2 - kubernetes_dashboard: - value: - enabled: false - version: 7.4.0 - cilium: - value: - enabled: false +#config: +# pulumi:tags: +# value: +# pulumi:template: ccio-kargo-python +# vm: +# value: +# enabled: false +# namespace: default +# instance_name: ubuntu +# image_name: docker.io/containercraft/ubuntu:22.04 +# node_port: 30590 +# ssh_user: kc2 +# ssh_password: kc2 +# kubernetes: +# value: +# context: kargo +# kubeconfig: .kube/config +# cert_manager: +# value: +# enabled: true +# version: 1.15.3 +# kubevirt: +# value: +# enabled: true +# version: 1.3.1 +# hostpath_provisioner: +# value: +# enabled: true +# version: 0.20.0 +# default_storage_class: true +# cdi: +# value: +# enabled: true +# version: 1.60.2 +# multus: +# value: +# enabled: true +# cnao: +# value: +# enabled: false +# version: 0.94.2 +# prometheus: +# value: +# enabled: false +# version: 61.3.2 +# kubernetes_dashboard: +# value: +# enabled: false +# version: 7.4.0 +# cilium: +# value: +# enabled: false diff --git a/docs/deploy-on-kind-in-codespaces.md b/docs/deploy-on-kind-in-codespaces.md deleted file mode 100644 index a8b7482..0000000 --- a/docs/deploy-on-kind-in-codespaces.md +++ /dev/null @@ -1,67 +0,0 @@ -# How To: deploy on Kind in Codespaces - -1. launch codespaces from Kargo repository -2. open a terminal -3. 
run the following commands for minimum viable deployment of Kargo IaC: - -```bash -# Login to pulumi -pulumi login - -# Create Kind K8s Cluster -make kind - -# Configure Kargo -pulumi config set kubernetes.kubeconfig $PATH -pulumi config set cilium.enabled true - -# Deploy Kargo -pulumi up -``` - ---- - -## Additional Modules - -### Cilium - -```bash -# Enable Cilium -pulumi config set cilium.enabled true - -# Set Cilium version -pulumi config set cilium.version 1.14.7 -``` - -### Cert Manager - -```bash -# Enable Cert Manager -pulumi config set cert_manager.enabled true - -# Set Cert Manager version -pulumi config set cert-manager.version 1.15.1 -``` - -### Kubevirt - -```bash -# Enable Kubevirt -pulumi config set --path kubevirt.enabled true - -# Set Kubevirt version -pulumi config set --path kubevirt.version 1.2.2 -``` - -### Multus - -```bash -# Enable Multus -pulumi config set --path multus.enabled true - -# Set Multus version -pulumi config set --path multus.version master - -# Set Multus Default Bridge Name for Network Attachment Definition -pulumi config set --path multus.default_bridge br0 -``` diff --git a/pulumi/CALL_TO_ACTION.md b/pulumi/CALL_TO_ACTION.md new file mode 100644 index 0000000..89fa502 --- /dev/null +++ b/pulumi/CALL_TO_ACTION.md @@ -0,0 +1,157 @@ +### Refactoring Enhancements to Consider + +**Modular Design**: + - Core functionalities are segregated into distinct files/modules, such as `config.py`, `deployment.py`, `resource_helpers.py`, etc. + - Each module follows a clear pattern with separate `types.py` and `deploy.py` files. + +**Configuration Management**: + - Centralize configuration management using `config.py` to handle global settings. + - Use data classes for module configurations to ensure type safety and defaults. + +**Global Metadata Handling**: + - Implementation of a singleton pattern for managing global metadata (labels and annotations). + - Functions to generate and apply global metadata. + +**Consistency and Readability**: + - The existing TODO comments highlight areas needing reorganization and refactoring. + - Some modules including `kubevirt`, `cert_manager`, `hostpath_provisioner` and others deploy differently in terms of resource creation and dependency management, look for ways to improve consistency. + +**Centralized Configuration Loading**: + - Configuration loading and merging logic vary across modules. + - There is redundancy in fetching the latest versions for modules (e.g., `kubevirt`, `cert_manager`). Look for ways to reduce version fetching redundancy. + +**Exception Handling**: + - Exception handling is partially implemented in some places, consistent and detailed error handling across all modules will improve reliability. + +**Resource Helper Centralization**: + - Several helper functions like `create_namespace`, `create_custom_resource`, etc., provide common functionality but could be standardized further. + - Handling dependencies and resource transformations could be more DRY (Don't Repeat Yourself). + +**Standardize Configuration Management**: + - Refactor configuration management to ensure consistency across all modules. + - Implement a common pattern for fetching the latest versions and configuration merging. + +**Refactor `initialize_pulumi` Function**: + - Use data classes or named tuples instead of dictionaries for initialization components. + - Centralize and streamline initialization logic to reduce redundancy. 
+ +**Enhance Error Handling and Logging**: + - Implement structured logging and consistent error handling across all the modules. + - Ensure all relevant operations are logged, and errors are informative. + +**Simplify Function Signatures and Improve Type Safety**: + - Refactor function signatures to use data classes and named tuples. This will improve readability and maintainability. + +**Centralize Shared Logic**: + - Standardize and centralize shared logic like version fetching, resource transformation, and compliance metadata generation. + - Use utility functions from `utils.py` and refactor repetitive logic across `deploy.py` files. + +### Implementation Examples + +#### Centralize Configuration Handling + +Refactor the `load_default_versions` function and adopt it across all modules: + +```python +# centralize logic in core/config.py +def load_default_versions(config: pulumi.Config, force_refresh=False) -> dict: + ... + # reuse this function for fetching specific versions in modules + return default_versions + +# example usage in kubevirt/types.py +@staticmethod +def get_latest_version() -> str: + return load_default_versions(pulumi.Config()).get('kubevirt', 'latest') +``` + +#### Standardize Initialization Method + +Refactor `initialize_pulumi` in `deployment.py`: + +```python +from typing import NamedTuple + +class PulumiInit(NamedTuple): + config: pulumi.Config + stack_name: str + project_name: str + default_versions: Dict[str, str] + versions: Dict[str, str] + configurations: Dict[str, Dict[str, Any]] + global_depends_on: List[pulumi.Resource] + k8s_provider: k8s.Provider + git_info: Dict[str, str] + compliance_config: ComplianceConfig + global_labels: Dict[str, str] + global_annotations: Dict[str, str] + +def initialize_pulumi() -> PulumiInit: + ... + # use PulumiInit named tuple for returning components + return PulumiInit(...) +``` + +Update main entry (`__main__.py`) to use the tuple: + +```python +def main(): + try: + init = initialize_pulumi() + + # Use named tuple instead of dictionary + config = init.config + k8s_provider = init.k8s_provider + versions = init.versions + configurations = init.configurations + default_versions = init.default_versions + global_depends_on = init.global_depends_on + + ... + except Exception as e: + log.error(f"Deployment failed: {str(e)}") + raise +``` + +#### Enhance Logging and Error Handling + +Standardize logging across the modules: + +```python +import logging + +logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') + +def deploy_module(...): + try: + ... + except ValueError as ve: + log.error(f"Value error during deployment: {ve}") + raise + except Exception as e: + log.error(f"General error during deployment: {e}") + raise +``` + +#### Improve Reusability of Helper Functions + +Refactor `resource_helpers.py` to adopt utility functions for setting metadata and transformations: + +```python +def universal_resource_transform(resource_args: pulumi.ResourceTransformationArgs): + props = resource_args.props + set_resource_metadata(props.get('metadata', {}), get_global_labels(), get_global_annotations()) + return pulumi.ResourceTransformationResult(props, resource_args.opts) +``` + +### Adopt universal transforms in more places: + +```python +def create_custom_resource(..., transformations: Optional[List] = None, ...): + ... + opts = pulumi.ResourceOptions.merge( + ... 
# include universal transformations + transformations=[universal_resource_transform] + (transformations or []) + ) +``` + diff --git a/pulumi/README.md b/pulumi/README.md new file mode 100644 index 0000000..a9daf92 --- /dev/null +++ b/pulumi/README.md @@ -0,0 +1,482 @@ +# Kargo KubeVirt Kubernetes PaaS - Pulumi Python Infrastructure as Code (IaC) + +Welcome to the **Kargo KubeVirt Kubernetes PaaS Pulumi Infrastructure as Code (IaC) project**! This guide is designed to help both newcomers to DevOps and experienced module developers navigate, contribute to, and get the most out of the Kargo platform. Whether you're setting up your environment for the first time or looking to develop new modules, this guide provides comprehensive instructions and best practices. + +--- + +## Table of Contents + +- [Introduction](#introduction) +- [Developer & Architecture Ethos](#developer--architecture-ethos) + - [Prime Directive](#prime-directive) + - [Developer Directives](#developer-directives) +- [Getting Started](#getting-started) + - [Prerequisites](#prerequisites) + - [Setting Up Your Environment](#setting-up-your-environment) +- [Developer Imperatives](#developer-imperatives) + - [Detailed Breakdown](#detailed-breakdown) +- [Developing New Modules](#developing-new-modules) + - [Directory Structure](#directory-structure) + - [Creating a New Module](#creating-a-new-module) +- [Common Utilities](#common-utilities) +- [Version Control](#version-control) +- [Contributing to the Project](#contributing-to-the-project) +- [Additional Resources](#additional-resources) +- [Conclusion](#conclusion) + +--- + +## Introduction + +The Kargo KubeVirt Kubernetes PaaS project leverages Pulumi and Python to manage your Kubernetes infrastructure as code. Our goal is to provide an enjoyable developer experience (DX) and user experience (UX) by simplifying the deployment and management of Kubernetes resources, including KubeVirt virtual machines and other essential components. + +This guide aims to make core concepts accessible to everyone, regardless of their experience level in DevOps. + +--- + +## Developer & Architecture Ethos + +### Prime Directive + +> **"Features are nice. Quality is paramount."** + +Quality is not just about the product or code; it's about creating an enjoyable developer and user experience. At ContainerCraft, we believe that the success of open-source projects depends on the happiness and satisfaction of the community developers and users. + +### Developer Directives + +1. **Improve Code Maintainability**: Write code that is structured, organized, and easy to understand. Prioritize readability, reusability, and extensibility. + +2. **Optimize Performance**: Ensure that the code performs efficiently and respects configurations. Avoid executing inactive or unnecessary code. + +3. **Establish Standard Practices**: Develop consistent approaches to configuration handling, module deployment, and code organization to guide future development. + +--- + +## Getting Started + +### Prerequisites + +Before you begin, make sure you have the following installed: + +- **Pulumi CLI**: [Install Pulumi](https://www.pulumi.com/docs/get-started/) +- **Python 3.6+**: Ensure you have Python installed on your system. +- **Python Dependencies**: Install required Python packages using `pip install -r requirements.txt` +- **Kubernetes Cluster**: Access to a Kubernetes cluster with `kubectl` configured. +- **Helm CLI**: [Install Helm](https://helm.sh/docs/intro/install/) if you plan to work with Helm charts. 
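+
+As an optional sanity check, assuming the tools listed above are already installed and on your PATH, you can confirm that each one responds before proceeding:
+
+```bash
+pulumi version
+python3 --version
+kubectl version --client
+helm version
+```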
+ +### Setting Up Your Environment + +Follow these steps to set up your environment: + +1. **Clone the Repository** + + ```bash + git clone https://github.com/ContainerCraft/Kargo.git + cd Kargo/pulumi + ``` + +2. **Install Python Dependencies** + + ```bash + pip install -r requirements.txt + ``` + +3. **Initialize Pulumi Stack** + + ```bash + pulumi stack init dev + ``` + +4. **Configure Pulumi** + + Set your Kubernetes context and any necessary configuration options. + + ```bash + pulumi config set --path kubernetes.kubeconfig + # Set other configuration options as needed + ``` + +5. **Deploy the Stack** + + Preview and deploy your changes. + + ```bash + pulumi up + ``` + + Follow the prompts to confirm the deployment. + +--- + +## Developer Imperatives + +### Detailed Breakdown + +1. **User Experience (UX)** + + - **Clear Error Messages**: Provide meaningful error messages to help users resolve issues. + - **Uniform Logging**: Use consistent logging practices to make debugging easier. + + ```python + pulumi.log.info(f"Deploying module: {module_name}") + ``` + +2. **Developer Experience (DX)** + + - **Documentation**: Include comprehensive docstrings and comments in your code. + + ```python + def deploy_module(...): + """ + Deploys a module based on configuration. + + Args: + module_name (str): Name of the module. + config (pulumi.Config): Pulumi configuration object. + ... + + Returns: + None + """ + ``` + + - **Examples**: Provide example configurations and usage in the documentation to help others understand how to use your code. + +3. **Configurable Modules** + + - **Pulumi Stack Configuration**: Use the Pulumi config object to allow users to customize module configurations. + + ```python + module_config = config.get_object("module_name") or {} + ``` + +4. **Module Data Classes** + + - **Typed Data Classes**: Use `dataclass` to encapsulate configurations clearly. + + ```python + from dataclasses import dataclass + + @dataclass + class KubeVirtConfig: + namespace: str = "default" + ``` + +5. **Sane Defaults in Data Classes** + + - **Sensible Defaults**: Set reasonable default values to minimize the need for user configuration. + + ```python + @dataclass + class CertManagerConfig: + namespace: str = "cert-manager" + install_crds: bool = True + ``` + +6. **User Configuration Handling** + + - **Merge Configurations**: Combine user-provided configurations with defaults to ensure all necessary parameters are set. + + ```python + @staticmethod + def merge(user_config: Dict[str, Any]) -> 'CertManagerConfig': + default_config = CertManagerConfig() + for key, value in user_config.items(): + if hasattr(default_config, key): + setattr(default_config, key, value) + else: + pulumi.log.warn(f"Unknown configuration key '{key}' in cert_manager config.") + return default_config + ``` + +7. **Simple Function Signatures** + + - **Reduce Parameters**: Keep function signatures minimal by encapsulating configurations within data classes. + + ```python + def deploy_module(config_module: ModuleConfig, ...) + ``` + +8. **Type Annotations** + + - **Enhance Readability**: Use type annotations to clarify expected parameter types and return values. + + ```python + def deploy_module(module_name: str, config: pulumi.Config) -> None: + ``` + +9. **Safe Function Signatures** + + - **Type Safety**: Use consistent type checks and raise meaningful errors when types don't match expectations. + + ```python + if not isinstance(module_name, str): + raise TypeError("module_name must be a string") + ``` + +10. 
**Streamlined Entrypoint** + + - **Encapsulate Logic**: Keep the top-level code minimal and encapsulate logic within functions. + + ```python + if __name__ == "__main__": + main() + ``` + +11. **Reuse and Deduplicate Code** + + - **Central Utilities**: Place reusable code in the `core` module to maintain consistency and reduce duplication. + + ```python + from core.utils import sanitize_label_value, extract_repo_name + ``` + +12. **Version Control Dependencies** + + - **Manage Versions**: Control component versions within configuration files to maintain consistency across deployments. + + ```python + default_versions = load_default_versions(config) + ``` + +13. **Transparency** + + - **Informative Outputs**: Export configuration and version information for visibility and auditing. + + ```python + pulumi.export("versions", versions) + ``` + +14. **Conditional Execution** + + - **Avoid Unnecessary Execution**: Only load and execute modules that are enabled in the configuration. + + ```python + if module_enabled: + deploy_func(...) + ``` + +15. **Remove Deprecated Code** + + - **Maintain a Clean Codebase**: Remove obsolete features and update code to align with current best practices. + +--- + +## Developing New Modules + +### Directory Structure + +Maintain a consistent directory structure for new modules: + +``` +kargo/ + pulumi/ + __main__.py + requirements.txt + core/ + __init__.py + utils.py + ... + modules/ + / + __init__.py + deploy.py + types.py + README.md + ... +``` + +### Creating a New Module + +1. **Define Configuration** + + Create a `types.py` file in your module directory to define the configuration data class: + + ```python + from dataclasses import dataclass, field + from typing import Optional, Dict, Any + + @dataclass + class NewModuleConfig: + version: Optional[str] = None + namespace: str = "default" + labels: Dict[str, str] = field(default_factory=dict) + annotations: Dict[str, Any] = field(default_factory=dict) + + @staticmethod + def merge(user_config: Dict[str, Any]) -> 'NewModuleConfig': + default_config = NewModuleConfig() + for key, value in user_config.items(): + if hasattr(default_config, key): + setattr(default_config, key, value) + else: + pulumi.log.warn(f"Unknown configuration key '{key}' in new_module config.") + return default_config + ``` + +2. **Implement Deployment Logic** + + Define the deployment logic in `deploy.py`: + + ```python + import pulumi + import pulumi_kubernetes as k8s + from typing import List, Dict, Any, Tuple, Optional + + from core.metadata import get_global_labels, get_global_annotations + from core.resource_helpers import create_namespace + from .types import NewModuleConfig + + def deploy_new_module( + config_new_module: NewModuleConfig, + global_depends_on: List[pulumi.Resource], + k8s_provider: k8s.Provider, + ) -> Tuple[Optional[str], Optional[pulumi.Resource]]: + # Create Namespace + namespace_resource = create_namespace( + name=config_new_module.namespace, + labels=config_new_module.labels, + annotations=config_new_module.annotations, + k8s_provider=k8s_provider, + depends_on=global_depends_on, + ) + + # Implement specific resource creation logic + # ... + + return config_new_module.version, namespace_resource + ``` + +3. 
**Update `__main__.py`** + + Include your module in the main deployment script: + + ```python + from typing import List, Dict, Any + import pulumi + from pulumi_kubernetes import Provider + + from core.deployment import initialize_pulumi, deploy_module + from core.config import export_results + + def main(): + try: + init = initialize_pulumi() + + config = init["config"] + k8s_provider = init["k8s_provider"] + versions = init["versions"] + configurations = init["configurations"] + default_versions = init["default_versions"] + global_depends_on = init["global_depends_on"] + + modules_to_deploy = ["cert_manager", "kubevirt", "new_module"] # Add your module here + + deploy_modules( + modules=modules_to_deploy, + config=config, + default_versions=default_versions, + global_depends_on=global_depends_on, + k8s_provider=k8s_provider, + versions=versions, + configurations=configurations, + ) + + compliance_config = init.get("compliance_config", {}) + export_results(versions, configurations, compliance_config) + + except Exception as e: + pulumi.log.error(f"Deployment failed: {str(e)}") + raise + + if __name__ == "__main__": + main() + ``` + +4. **Document Your Module** + + Create a `README.md` file in your module directory to document its purpose, configuration options, and usage instructions. + + ```markdown + # New Module + + Description of your module. + + ## Configuration + + - **version** *(string)*: The version of the module to deploy. + - **namespace** *(string)*: The Kubernetes namespace where the module will be deployed. + - **labels** *(dict)*: Custom labels to apply to resources. + - **annotations** *(dict)*: Custom annotations to apply to resources. + + ## Usage + + Example of how to configure and deploy the module. + + ## Additional Information + + Any additional details or resources. + ``` + +--- + +## Common Utilities + +Refer to `core/utils.py` for common helper functions, such as applying global labels and annotations to resources. + +```python +import re +import pulumi +import pulumi_kubernetes as k8s +from typing import Dict, Any + +def set_resource_metadata(metadata: Any, global_labels: Dict[str, str], global_annotations: Dict[str, str]): + if isinstance(metadata, dict): + metadata.setdefault('labels', {}).update(global_labels) + metadata.setdefault('annotations', {}).update(global_annotations) + elif isinstance(metadata, k8s.meta.v1.ObjectMetaArgs): + metadata.labels = {**metadata.labels or {}, **global_labels} + metadata.annotations = {**metadata.annotations or {}, **global_annotations} + +def sanitize_label_value(value: str) -> str: + value = value.lower() + sanitized = re.sub(r'[^a-z0-9_.-]', '-', value) + sanitized = re.sub(r'^[^a-z0-9]+', '', sanitized) + sanitized = re.sub(r'[^a-z0-9]+$', '', sanitized) + return sanitized[:63] +``` + +--- + +## Version Control + +Manage module versions and dependencies within configuration files, such as `default_versions.json`, to ensure consistency across deployments. + +```json +{ + "cert_manager": "1.15.3", + "kubevirt": "1.3.1", + "new_module": "0.1.0" +} +``` + +--- + +## Contributing to the Project + +We welcome contributions from the community! Here's how you can help: + +- **Report Issues**: If you encounter any bugs or have feature requests, please open an issue on GitHub. + +- **Submit Pull Requests**: If you'd like to contribute code, fork the repository and submit a pull request. + +- **Improve Documentation**: Help us enhance this guide and other documentation to make it more accessible. 
+ +--- + +## Additional Resources + +- **Kargo Project Repository**: [ContainerCraft Kargo on GitHub](https://github.com/ContainerCraft/Kargo) +- **Pulumi Documentation**: [Pulumi Official Docs](https://www.pulumi.com/docs/) +- **Kubernetes Documentation**: [Kubernetes Official Docs](https://kubernetes.io/docs/home/) +- **KubeVirt Documentation**: [KubeVirt Official Docs](https://kubevirt.io/docs/) diff --git a/pulumi/__main__.py b/pulumi/__main__.py index 1866716..20f1746 100644 --- a/pulumi/__main__.py +++ b/pulumi/__main__.py @@ -1,529 +1,62 @@ -import os -import requests -import pulumi -import pulumi_kubernetes as k8s -from pulumi_kubernetes import Provider - -from src.lib.kubernetes_api_endpoint import KubernetesApiEndpointIp -from src.cilium.deploy import deploy_cilium -from src.cert_manager.deploy import deploy_cert_manager -from src.kubevirt.deploy import deploy_kubevirt -from src.containerized_data_importer.deploy import deploy_cdi -from src.cluster_network_addons.deploy import deploy_cnao -from src.multus.deploy import deploy_multus -from src.hostpath_provisioner.deploy import deploy as deploy_hostpath_provisioner -from src.openunison.deploy import deploy_openunison -from src.prometheus.deploy import deploy_prometheus -from src.kubernetes_dashboard.deploy import deploy_kubernetes_dashboard -from src.kv_manager.deploy import deploy_ui_for_kubevirt -from src.ceph.deploy import deploy_rook_operator -from src.vm.ubuntu import deploy_ubuntu_vm -from src.vm.talos import deploy_talos_cluster - -################################################################################## -# Load the Pulumi Config -config = pulumi.Config() - -# Get pulumi stack name -stack_name = pulumi.get_stack() - -# Get the pulumi project name -project_name = pulumi.get_project() - -################################################################################## -# Get the Kubernetes configuration -kubernetes_config = config.get_object("kubernetes") or {} - -# Get Kubeconfig from Pulumi ESC Config -kubeconfig = kubernetes_config.get("kubeconfig") - -# Require Kubernetes context set explicitly -kubernetes_context = kubernetes_config.get("context") - -# Get the Kubernetes distribution (supports: kind, talos) -kubernetes_distribution = kubernetes_config.get("distribution") or "talos" - -# Create a Kubernetes provider instance -k8s_provider = Provider( - "k8sProvider", - kubeconfig=kubeconfig, - context=kubernetes_context -) - -versions = {} - -################################################################################## -## Enable/Disable Kargo Kubevirt PaaS Infrastructure Modules -################################################################################## - -# Utility function to handle config with default "enabled" flag -def get_module_config(module_name): - module_config = config.get_object(module_name) or {"enabled": "false"} - module_enabled = str(module_config.get('enabled')).lower() == "true" - return module_config, module_enabled - -# Get configurations and enabled flags -config_cilium, cilium_enabled = get_module_config('cilium') -config_cert_manager, cert_manager_enabled = get_module_config('cert_manager') -config_kubevirt, kubevirt_enabled = get_module_config('kubevirt') -config_cdi, cdi_enabled = get_module_config('cdi') -config_multus, multus_enabled = get_module_config('multus') -config_prometheus, prometheus_enabled = get_module_config('prometheus') -config_openunison, openunison_enabled = get_module_config('openunison') -config_hostpath_provisioner, hostpath_provisioner_enabled = 
get_module_config('hostpath_provisioner') -config_cnao, cnao_enabled = get_module_config('cnao') -config_kubernetes_dashboard, kubernetes_dashboard_enabled = get_module_config('kubernetes_dashboard') -config_kubevirt_manager, kubevirt_manager_enabled = get_module_config('kubevirt_manager') -config_vm, vm_enabled = get_module_config('vm') -config_talos, talos_cluster_enabled = get_module_config('talos') - -################################################################################## -## Core Kargo Kubevirt PaaS Infrastructure -################################################################################## - -depends = [] - -def safe_append(depends, resource): - if resource: - depends.append(resource) - -################################################################################## -# Fetch the Cilium Version -# Deploy Cilium -def run_cilium(): - if cilium_enabled: - namespace = "kube-system" - l2announcements = config_cilium.get('l2announcements') or "192.168.1.70/28" - l2_bridge_name = config_cilium.get('l2_bridge_name') or "br0" - cilium_version = config_cilium.get('version') # or "1.14.7" - - cilium = deploy_cilium( - "cilium-cni", - k8s_provider, - kubernetes_distribution, - project_name, - kubernetes_endpoint_service_address, - namespace, - cilium_version, - l2_bridge_name, - l2announcements, - ) - cilium_version = cilium[0] - cilium_release = cilium[1] - - safe_append(depends, cilium_release) - - versions["cilium"] = {"enabled": cilium_enabled, "version": cilium_version} - - return cilium_version, cilium_release - - return None, None - -cilium_version, cilium_release = run_cilium() - -################################################################################## -# Fetch the Cert Manager Version -# Deploy Cert Manager -def run_cert_manager(): - if cert_manager_enabled: - ns_name = "cert-manager" - cert_manager_version = config_cert_manager.get('version') or None - - cert_manager = deploy_cert_manager( - ns_name, - cert_manager_version, - kubernetes_distribution, - depends, - k8s_provider - ) - - versions["cert_manager"] = {"enabled": cert_manager_enabled, "version": cert_manager[0]} - cert_manager_release = cert_manager[1] - cert_manager_selfsigned_cert = cert_manager[2] - - pulumi.export("cert_manager_selfsigned_cert", cert_manager_selfsigned_cert) - - safe_append(depends, cert_manager_release) - - return cert_manager, cert_manager_release, cert_manager_selfsigned_cert - return None, None, None - -cert_manager, cert_manager_release, cert_manager_selfsigned_cert = run_cert_manager() - -################################################################################## -# Deploy KubeVirt -def run_kubevirt(): - if kubevirt_enabled: - ns_name = "kubevirt" - kubevirt_version = config_kubevirt.get('version') or None - kubevirt_emulation = config_kubevirt.get('emulation') or False - - custom_depends = [] - safe_append(custom_depends, cilium_release) - safe_append(custom_depends, cert_manager_release) - - kubevirt = deploy_kubevirt( - custom_depends, - ns_name, - kubevirt_version, - kubevirt_emulation, - k8s_provider, - kubernetes_distribution, - ) - - versions["kubevirt"] = {"enabled": kubevirt_enabled, "version": kubevirt[0]} - kubevirt_operator = kubevirt[1] - - safe_append(depends, kubevirt_operator) - - return kubevirt, kubevirt_operator - return None, None - -kubevirt, kubevirt_operator = run_kubevirt() - -################################################################################## -# Deploy Multus -def run_multus(): - if multus_enabled: - ns_name = "multus" - 
multus_version = config_multus.get('version') or "master" - bridge_name = config_multus.get('bridge_name') or "br0" - - custom_depends = [] - - if cilium_enabled: - safe_append(custom_depends, cilium_release) - if cert_manager_enabled: - safe_append(custom_depends, cert_manager_release) - - multus = deploy_multus( - custom_depends, - multus_version, - bridge_name, - k8s_provider - ) - - versions["multus"] = {"enabled": multus_enabled, "version": multus[0]} - multus_release = multus[1] - - safe_append(depends, multus_release) - - return multus, multus_release - return None, None - -multus, multus_release = run_multus() - -################################################################################## -# Deploy Cluster Network Addons Operator (CNAO) -def run_cnao(): - if cnao_enabled: - ns_name = "cluster-network-addons" - cnao_version = config_cnao.get('version') or None - - custom_depends = [] - - if cilium_enabled: - safe_append(custom_depends, cilium_release) - - if cert_manager_enabled: - safe_append(custom_depends, cert_manager_release) - - cnao = deploy_cnao( - custom_depends, - cnao_version, - k8s_provider - ) - - versions["cnao"] = {"enabled": cnao_enabled, "version": cnao[0]} - cnao_release = cnao[1] - - safe_append(depends, cnao_release) - - return cnao, cnao_release - return None, None - -cnao, cnao_release = run_cnao() - -################################################################################## -# Deploy Hostpath Provisioner -def run_hostpath_provisioner(): - if hostpath_provisioner_enabled: - if not cert_manager_enabled: - msg = "HPP requires Cert Manager. Please enable Cert Manager and try again." - pulumi.log.error(msg) - return None, None - - hostpath_default_path = config_hostpath_provisioner.get('default_path') or "/var/mnt/hostpath-provisioner" - hostpath_default_storage_class = config_hostpath_provisioner.get('default_storage_class') or False - ns_name = "hostpath-provisioner" - hostpath_provisioner_version = config_hostpath_provisioner.get('version') or None - - custom_depends = [] - - if cilium_enabled: - safe_append(custom_depends, cilium_release) - if cert_manager_enabled: - safe_append(custom_depends, cert_manager_release) - if kubevirt_enabled: - safe_append(custom_depends, kubevirt_operator) - - hostpath_provisioner = deploy_hostpath_provisioner( - custom_depends, - hostpath_provisioner_version, - ns_name, - hostpath_default_path, - hostpath_default_storage_class, - k8s_provider, - ) - - versions["hostpath_provisioner"] = {"enabled": hostpath_provisioner_enabled, "version": hostpath_provisioner[0]} - hostpath_provisioner_release = hostpath_provisioner[1] - - safe_append(depends, hostpath_provisioner_release) - - return hostpath_provisioner, hostpath_provisioner_release - return None, None - -hostpath_provisioner, hostpath_provisioner_release = run_hostpath_provisioner() - -################################################################################## -# Deploy Containerized Data Importer (CDI) -def run_cdi(): - if cdi_enabled: - ns_name = "cdi" - cdi_version = config_cdi.get('version') or None - - cdi = deploy_cdi( - depends, - cdi_version, - k8s_provider - ) - - versions["cdi"] = {"enabled": cdi_enabled, "version": cdi[0]} - cdi_release = cdi[1] - - safe_append(depends, cdi_release) - - return cdi, cdi_release - return None, None - -cdi, cdi_release = run_cdi() - -################################################################################## -# Deploy Prometheus -def run_prometheus(): - if prometheus_enabled: - ns_name = "monitoring" - 
prometheus_version = config_prometheus.get('version') or None - - prometheus = deploy_prometheus( - depends, - ns_name, - prometheus_version, - k8s_provider, - openunison_enabled - ) - - versions["prometheus"] = {"enabled": prometheus_enabled, "version": prometheus[0]} - prometheus_release = prometheus[1] - - safe_append(depends, prometheus_release) - - return prometheus, prometheus_release - return None, None - -prometheus, prometheus_release = run_prometheus() - -################################################################################## -# Deploy Kubernetes Dashboard -def run_kubernetes_dashboard(): - if kubernetes_dashboard_enabled: - ns_name = "kubernetes-dashboard" - kubernetes_dashboard_version = config_kubernetes_dashboard.get('version') or None - - if cilium_enabled: - safe_append(depends, cilium_release) - - kubernetes_dashboard = deploy_kubernetes_dashboard( - depends, - ns_name, - kubernetes_dashboard_version, - k8s_provider - ) - - versions["kubernetes_dashboard"] = {"enabled": kubernetes_dashboard_enabled, "version": kubernetes_dashboard[0]} - kubernetes_dashboard_release = kubernetes_dashboard[1] - - safe_append(depends, kubernetes_dashboard_release) - - return kubernetes_dashboard, kubernetes_dashboard_release - return None, None - -kubernetes_dashboard, kubernetes_dashboard_release = run_kubernetes_dashboard() - -################################################################################## -def run_openunison(): - if openunison_enabled: - ns_name = "openunison" - openunison_version = config_openunison.get('version') or None - domain_suffix = config_openunison.get('dns_suffix') or "kargo.arpa" - cluster_issuer = config_openunison.get('cluster_issuer') or "cluster-selfsigned-issuer-ca" - - config_openunison_github = config_openunison.get_object('github') or {} - openunison_github_teams = config_openunison_github.get('teams') - openunison_github_client_id = config_openunison_github.get('client_id') - openunison_github_client_secret = config_openunison_github.get('client_secret') - - enabled = {} - - if kubevirt_enabled: - enabled["kubevirt"] = {"enabled": kubevirt_enabled} - - if prometheus_enabled: - enabled["prometheus"] = {"enabled": prometheus_enabled} - - pulumi.export("enabled", enabled) - - openunison = deploy_openunison( - depends, - ns_name, - openunison_version, - k8s_provider, - domain_suffix, - cluster_issuer, - cert_manager_selfsigned_cert, - kubernetes_dashboard_release, - openunison_github_client_id, - openunison_github_client_secret, - openunison_github_teams, - enabled, - ) - - versions["openunison"] = {"enabled": openunison_enabled, "version": openunison[0]} - openunison_release = openunison[1] - - safe_append(depends, openunison_release) - - return openunison, openunison_release - return None, None - -openunison, openunison_release = run_openunison() - -################################################################################## -# Deploy Rook Ceph -def run_rook_ceph(): - deploy_ceph = config.get_bool('ceph.enabled') or False - if deploy_ceph: - rook_operator = deploy_rook_operator( - "kargo", - k8s_provider, - kubernetes_distribution, - "kargo", - "rook-ceph" - ) - return rook_operator - return None - -rook_operator = run_rook_ceph() - -################################################################################## -# Deploy Kubevirt Manager -def run_kubevirt_manager(): - kubevirt_manager_enabled = config.get_bool('kubevirt_manager.enabled') or False - if kubevirt_manager_enabled: - kubevirt_manager = deploy_ui_for_kubevirt( - "kargo", 
- k8s_provider, - kubernetes_distribution, - "kargo", - "kubevirt_manager" - ) - pulumi.export('kubevirt_manager', kubevirt_manager) - return kubevirt_manager - return None - -kubevirt_manager = run_kubevirt_manager() - -################################################################################## -# Deploy Ubuntu VM -def run_ubuntu_vm(): - if vm_enabled: - # Get the SSH Public Key string from Pulumi Config if it exists - ssh_pub_key = config.get("ssh_pub_key") - if not ssh_pub_key: - # Get the SSH public key from the local filesystem - with open(f"{os.environ['HOME']}/.ssh/id_rsa.pub", "r") as f: - ssh_pub_key = f.read().strip() - - # Define the default values - default_vm_config = { - "namespace": "default", - "instance_name": "ubuntu", - "image_name": "docker.io/containercraft/ubuntu:22.04", - "node_port": 30590, - "ssh_user": "kc2", - "ssh_password": "kc2", - "ssh_pub_key": ssh_pub_key - } - - # Merge the default values with the existing config_vm values - config_vm_merged = {**default_vm_config, **{k: v for k, v in config_vm.items() if v is not None}} - - # Pass the merged configuration to the deploy_ubuntu_vm function - ubuntu_vm, ubuntu_ssh_service = deploy_ubuntu_vm( - config_vm_merged, - k8s_provider, - depends - ) - - versions["ubuntu_vm"] = { - "enabled": vm_enabled, - "name": ubuntu_vm.metadata["name"] - } - - safe_append(depends, ubuntu_ssh_service) - - return ubuntu_vm, ubuntu_ssh_service - else: - return None, None - -ubuntu_vm, ubuntu_ssh_service = run_ubuntu_vm() - -################################################################################## -# Deploy Kargo-on-Kargo Development Cluster (Controlplane + Worker VirtualMachinePools) -def run_talos_cluster(): - if talos_cluster_enabled: - # Append the resources to the `depends` list - custom_depends = [] - - # depends on cert manager, multus - safe_append(custom_depends, cert_manager_release) - safe_append(custom_depends, multus_release) - if cdi_enabled: - safe_append(custom_depends, cdi_release) - - # Deploy the Talos cluster (controlplane and workers) - controlplane_vm_pool, worker_vm_pool = deploy_talos_cluster( - config_talos=config_talos, - k8s_provider=k8s_provider, - depends_on=custom_depends, - parent=kubevirt_operator, - ) - - # Export the Talos configuration and versions - versions["talos_cluster"] = { - "enabled": talos_cluster_enabled, - "running": config_talos.get("running", True), - "controlplane": config_talos.get("controlplane", {}), - "workers": config_talos.get("workers", {}) - } - - return controlplane_vm_pool, worker_vm_pool - else: - return None, None - -# Run the Talos cluster deployment -talos_controlplane_vm_pool, talos_worker_vm_pool = run_talos_cluster() - -# Export the component versions -pulumi.export("versions", versions) +# pulumi/__main__.py + +from pulumi import log +from core.config import export_results +from core.deployment import initialize_pulumi, deploy_modules + +def main(): + try: + # Initialize Pulumi + init = initialize_pulumi() + + # Extract the components from the initialization dictionary. + # TODO: + # - Refactor this to use dataclasses. + # - Relocate the dataclasses to a shared location. + # - Relocate module specific initialization logic into the pulumi/core/deployment.py module. 
+        config = init["config"]
+        k8s_provider = init["k8s_provider"]
+        versions = init["versions"]
+        configurations = init["configurations"]
+        default_versions = init["default_versions"]
+        global_depends_on = init["global_depends_on"]
+        compliance_config = init.get("compliance_config", {})
+
+        # Map of modules to deploy with default boolean value.
+        # TODO:
+        # - Refactor this as a map of module names and default enabled booleans.
+        # - Map of module:enabled pairs will deprecate the DEFAULT_ENABLED_CONFIG list in config.py.
+        modules_to_deploy = [
+            "cert_manager",
+            "kubevirt",
+            "multus",
+            "hostpath_provisioner",
+            "containerized_data_importer",
+            "prometheus"
+        ]
+
+        # Deploy modules
+        # TODO:
+        # - Simplify deploy_modules signature after relocating the module:enabled map and init dictionary location.
+        deploy_modules(
+            modules_to_deploy,
+            config,
+            default_versions,
+            global_depends_on,
+            k8s_provider,
+            versions,
+            configurations,
+        )
+
+        # Export stack outputs.
+        export_results(versions, configurations, compliance_config)
+
+    except Exception as e:
+        log.error(f"Deployment failed: {str(e)}")
+        raise
+
+# Entry point for the Pulumi program.
+# TODO:
+# - Re-evaluate structure and best location for export_results function call.
+if __name__ == "__main__":
+    main()
diff --git a/pulumi/core/README.md b/pulumi/core/README.md
new file mode 100644
index 0000000..2af0724
--- /dev/null
+++ b/pulumi/core/README.md
@@ -0,0 +1,257 @@
+# Core Module Developer Guide
+
+Welcome to the **Core Module** of the Kargo KubeVirt Kubernetes PaaS project! This guide is designed to help both newcomers to DevOps and experienced module developers navigate and contribute to the core functionalities of the Kargo platform. Whether you're looking to understand the basics or dive deep into module development, this guide has got you covered.
+
+---
+
+## Table of Contents
+
+- [Introduction](#introduction)
+- [Getting Started](#getting-started)
+- [Core Module Overview](#core-module-overview)
+  - [Module Structure](#module-structure)
+  - [Key Components](#key-components)
+- [Detailed Explanation of Core Files](#detailed-explanation-of-core-files)
+  - [config.py](#configpy)
+  - [deployment.py](#deploymentpy)
+  - [metadata.py](#metadatapy)
+  - [resource_helpers.py](#resource_helperspy)
+  - [types.py](#typespy)
+  - [utils.py](#utilspy)
+- [Best Practices](#best-practices)
+- [Troubleshooting and FAQs](#troubleshooting-and-faqs)
+- [Contributing to the Core Module](#contributing-to-the-core-module)
+- [Additional Resources](#additional-resources)
+
+---
+
+## Introduction
+
+The Core Module is the heart of the Kargo KubeVirt Kubernetes PaaS project. It provides essential functionalities that facilitate the development, deployment, and management of modules within the Kargo ecosystem. This guide aims to make core concepts accessible to everyone, regardless of their experience level in DevOps.
+
+---
+
+## Getting Started
+
+If you're new to Kargo or DevOps, start here!
+
+- **Prerequisites**:
+  - Basic understanding of Python and Kubernetes.
+  - [Pulumi CLI](https://www.pulumi.com/docs/get-started/) installed.
+  - Access to a Kubernetes cluster (minikube, kind, or cloud-based).
+
+- **Setup Steps**:
+  1. **Clone the Repository**:
+     ```bash
+     git clone https://github.com/ContainerCraft/Kargo.git
+     cd Kargo/pulumi
+     ```
+  2. **Install Dependencies**:
+     ```bash
+     pip install -r requirements.txt
+     ```
+  3.
**Configure Pulumi**: + ```bash + pulumi login + pulumi stack init dev + ``` + +--- + +## Core Module Overview + +### Module Structure + +The Core Module is organized as follows: + +``` +pulumi/core/ +├── __init__.py +├── README.md +├── config.py +├── deployment.py +├── metadata.py +├── resource_helpers.py +├── types.py +└── utils.py +``` + +### Key Components + +- **Configuration Management**: Handles loading and merging of user configurations. +- **Deployment Orchestration**: Manages the deployment of modules and resources. +- **Metadata Management**: Generates and applies global labels and annotations. +- **Utility Functions**: Provides helper functions for common tasks. +- **Type Definitions**: Contains shared data structures used across modules. + +--- + +## Detailed Explanation of Core Files + +### config.py + +**Purpose**: Manages configuration settings for modules, including loading defaults and exporting deployment results. + +**Key Functions**: + +- `get_module_config(module_name, config, default_versions)`: Retrieves and merges the configuration for a specific module. +- `load_default_versions(config, force_refresh=False)`: Loads default module versions, prioritizing user-specified sources. +- `export_results(versions, configurations, compliance)`: Exports deployment outputs for reporting and auditing. + +**Usage Example**: + +```python +from core.config import get_module_config + +module_config, is_enabled = get_module_config('cert_manager', config, default_versions) +if is_enabled: + # Proceed with deployment +``` + +--- + +### deployment.py + +**Purpose**: Orchestrates the deployment of modules, initializing providers and handling dependencies. + +**Key Functions**: + +- `initialize_pulumi()`: Sets up Pulumi configurations and Kubernetes provider. +- `deploy_module(module_name, config, ...)`: Deploys a specified module, handling its configuration and dependencies. + +**Usage Example**: + +```python +from core.deployment import initialize_pulumi, deploy_module + +init = initialize_pulumi() +deploy_module('kubevirt', init['config'], ...) +``` + +--- + +### metadata.py + +**Purpose**: Manages global metadata, such as labels and annotations, ensuring consistency across resources. + +**Key Components**: + +- **Singleton Pattern**: Ensures a single source of truth for metadata. +- **Metadata Functions**: + - `set_global_labels(labels)` + - `set_global_annotations(annotations)` + - `get_global_labels()` + - `get_global_annotations()` + +**Usage Example**: + +```python +from core.metadata import set_global_labels + +set_global_labels({'app': 'kargo', 'env': 'production'}) +``` + +--- + +### resource_helpers.py + +**Purpose**: Provides helper functions for creating Kubernetes resources with consistent metadata. + +**Key Functions**: + +- `create_namespace(name, labels, annotations, ...)` +- `create_custom_resource(name, args, ...)` +- `create_helm_release(name, args, ...)` + +**Usage Example**: + +```python +from core.resource_helpers import create_namespace + +namespace = create_namespace('kargo-system', labels={'app': 'kargo'}) +``` + +--- + +### types.py + +**Purpose**: Defines shared data structures and configurations used across modules. 
+ +**Key Data Classes**: + +- `NamespaceConfig` +- `FismaConfig` +- `NistConfig` +- `ScipConfig` +- `ComplianceConfig` + +**Usage Example**: + +```python +from core.types import ComplianceConfig + +compliance_settings = ComplianceConfig(fisma=FismaConfig(enabled=True)) +``` + +--- + +### utils.py + +**Purpose**: Contains utility functions for common tasks such as version checking and resource transformations. + +**Key Functions**: + +- `set_resource_metadata(metadata, global_labels, global_annotations)` +- `get_latest_helm_chart_version(url, chart_name)` +- `is_stable_version(version_str)` + +**Usage Example**: + +```python +from core.utils import get_latest_helm_chart_version + +latest_version = get_latest_helm_chart_version('https://charts.jetstack.io', 'cert-manager') +``` + +--- + +## Best Practices + +- **Consistency**: Use the core functions and types to ensure consistency across modules. +- **Modularity**: Keep module-specific logic separate from core functionalities. +- **Documentation**: Document your code and configurations to aid future developers. +- **Error Handling**: Use appropriate error handling and logging for better debugging. + +--- + +## Troubleshooting and FAQs + +**Q1: I get a `ConnectionError` when deploying modules. What should I do?** + +- **A**: Ensure your Kubernetes context is correctly configured and that you have network access to the cluster. + +**Q2: How do I add a new module?** + +- **A**: Create a new directory under `pulumi/modules/`, define your `deploy.py` and `types.py`, and update the main deployment script. + +**Q3: The deployment hangs during resource creation.** + +- **A**: Check for resource conflicts or namespace issues. Use `kubectl` to inspect the current state. + +--- + +## Contributing to the Core Module + +We welcome contributions from the community! + +- **Reporting Issues**: Use the GitHub issues page to report bugs or request features. +- **Submitting Pull Requests**: Follow the project's coding standards and ensure all tests pass. +- **Code Reviews**: Participate in reviews to maintain high code quality. + +--- + +## Additional Resources + +- **Kargo Project Documentation**: [Kargo GitHub Repository](https://github.com/ContainerCraft/Kargo) +- **Pulumi Documentation**: [Pulumi Official Docs](https://www.pulumi.com/docs/) +- **Kubernetes API Reference**: [Kubernetes API](https://kubernetes.io/docs/reference/generated/kubernetes-api/) diff --git a/pulumi/src/ceph/__init__.py b/pulumi/core/__init__.py similarity index 100% rename from pulumi/src/ceph/__init__.py rename to pulumi/core/__init__.py diff --git a/pulumi/core/config.py b/pulumi/core/config.py new file mode 100644 index 0000000..26bb601 --- /dev/null +++ b/pulumi/core/config.py @@ -0,0 +1,176 @@ +# pulumi/core/config.py + +""" +Configuration Management Module + +This module handles the retrieval and preparation of configurations for different modules +within the Kargo Pulumi IaC program. It centralizes configuration logic to promote reuse +and maintainability. 
+""" + +import json +import os +import pulumi +import requests +from typing import Any, Dict, Tuple +from .types import ComplianceConfig + +# Default versions URL template +DEFAULT_VERSIONS_URL_TEMPLATE = 'https://raw.githubusercontent.com/ContainerCraft/Kargo/rerefactor/pulumi/' + +# Module enabled defaults: Setting a module to True enables the module by default +# TODO: relocate to pulumi/__main__.py for better visibility +DEFAULT_ENABLED_CONFIG = { + "cert_manager": True, + "kubevirt": True, + "multus": True, + "hostpath_provisioner": True, + "containerized_data_importer": True, + "prometheus": True, +} + +# Centralized Pulumi Config Retrieval +# Fetches the configuration for a module and determines if the module is enabled. +def get_module_config( + module_name: str, + config: pulumi.Config, + default_versions: Dict[str, Any], + ) -> Tuple[Dict[str, Any], bool]: + """ + Retrieves and prepares the configuration for a module. + + Args: + module_name (str): The name of the module to configure. + config (pulumi.Config): The Pulumi configuration object. + default_versions (Dict[str, Any]): A dictionary of default versions for modules. + + Returns: + Tuple[Dict[str, Any], bool]: A tuple containing the module's configuration dictionary and a boolean indicating if the module is enabled. + """ + module_config = config.get_object(module_name) or {} + + # Retrieve enabled status from configuration or defaults to defined default setting + module_enabled = str(module_config.pop('enabled', DEFAULT_ENABLED_CONFIG.get(module_name, False))).lower() == "true" + + module_config['version'] = module_config.get('version', default_versions.get(module_name)) + return module_config, module_enabled + + +# Retrieve Module Component Version Control Configuration from external local or remote json file. +# Supports centralized component version control configuration via local or remote json objects including: +# - `latest` for dynamic retrieval and utilization of the latest version. +# - `v0.00.0` hard coded version in Pulumi Config for overrid-ing version control. +# - `{lts,stable,edge,latest}` for subscribing to remote version control channels. +# TODO: +# - Refactor function to use more obvious prescedence ordering and configuration loading. +# - Refactor function for easier module maintainer adoption. +# - Adopt remote stable channel as first default version source after first github releases are published. +# - Adopt local stable channel as exception fallback for centralized version configuration. +# - Adopt `latest` as default version for modules without remote or local or pulumi config version configuration. +def load_default_versions(config: pulumi.Config, force_refresh=False) -> dict: + """ + Loads the default versions for modules based on the specified configuration settings. + + This function attempts to load version information from multiple sources in order of precedence: + 1. User-specified source via Pulumi config (`default_versions.source`). + 2. Stack-specific versions file (`./versions/$STACK_NAME.json`) if `versions.stack_name` is set to true. + 3. Local default versions file (`./default_versions.json`). + 4. Remote versions based on the specified channel (`versions.channel`). + + Args: + config: The Pulumi configuration object. + + Returns: + A dictionary containing the default versions for modules. + + Raises: + Exception: If default versions cannot be loaded from any source. 
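+
+    Example:
+        An explicit source can be set in stack configuration (illustrative snippet;
+        assumes the Pulumi project is named `kargo`):
+
+            # Pulumi.<stack>.yaml
+            config:
+              kargo:default_versions.source: ./versions/dev.json
+              kargo:versions.channel: stable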
+ """ + cache_file = '/tmp/default_versions.json' + if not force_refresh and os.path.exists(cache_file): + try: + with open(cache_file) as f: + return json.load(f) + except Exception as e: + pulumi.log.warn(f"Error reading cache file: {e}") + + stack_name = pulumi.get_stack() + default_versions_source = config.get('default_versions.source') + versions_channel = config.get('versions.channel') or 'stable' + versions_stack_name = config.get_bool('versions.stack_name') or False + default_versions = {} + + # Function to try loading default versions from file + # TODO: + # - Adopt standardized local file storage location `./pulumi/versions/{filename}.json` + # - Adopt file naming convention `default.json` `lts.json` `stable.json` `edge.json` `latest.json` + def load_versions_from_file(file_path: str) -> dict: + try: + with open(file_path, 'r') as f: + versions = json.load(f) + pulumi.log.info(f"Loaded default versions from file: {file_path}") + return versions + except (FileNotFoundError, json.JSONDecodeError) as e: + pulumi.log.warn(f"Error loading versions from file {file_path}: {e}") + return {} + + def load_versions_from_url(url: str) -> dict: + try: + response = requests.get(url) + response.raise_for_status() + versions = response.json() + pulumi.log.info(f"Loaded default versions from URL: {url}") + return versions + except (requests.RequestException, json.JSONDecodeError) as e: + pulumi.log.warn(f"Error loading versions from URL {url}: {e}") + return {} + + if default_versions_source: + if default_versions_source.startswith(('http://', 'https://')): + default_versions = load_versions_from_url(default_versions_source) + else: + default_versions = load_versions_from_file(default_versions_source) + + if not default_versions: + raise Exception(f"Failed to load default versions from specified source: {default_versions_source}") + + else: + if versions_stack_name: + current_dir = os.path.dirname(os.path.abspath(__file__)) + stack_versions_path = os.path.join(current_dir, '..', 'versions', f'{stack_name}.json') + default_versions = load_versions_from_file(stack_versions_path) + + if not default_versions: + current_dir = os.path.dirname(os.path.abspath(__file__)) + default_versions_path = os.path.join(current_dir, '..', 'default_versions.json') + default_versions = load_versions_from_file(default_versions_path) + + if not default_versions: + versions_url = f'{DEFAULT_VERSIONS_URL_TEMPLATE}{versions_channel}_versions.json' + default_versions = load_versions_from_url(versions_url) + + if not default_versions: + raise Exception("Cannot proceed without default versions.") + + with open(cache_file, 'w') as f: + json.dump(default_versions, f) + + return default_versions + +# Function to export global deployment stack metadata +def export_results( + versions: Dict[str, str], + configurations: Dict[str, Dict[str, Any]], + compliance: Dict[str, Any] + ): + """ + Exports the results of the deployment processes including versions, configurations, and compliance information. + + Args: + versions (Dict[str, str]): A dictionary containing the versions of the deployed modules. + configurations (Dict[str, Dict[str, Any]]): A dictionary containing the configurations of the deployed modules. + compliance (Dict[str, Any]): A dictionary containing the compliance information. 
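+
+    Example:
+        Exported values can be inspected after deployment with the Pulumi CLI,
+        e.g. `pulumi stack output versions` or `pulumi stack output compliance`.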
+ """ + pulumi.export("versions", versions) + pulumi.export("configuration", configurations) + pulumi.export("compliance", compliance) diff --git a/pulumi/core/deployment.py b/pulumi/core/deployment.py new file mode 100644 index 0000000..2dc8812 --- /dev/null +++ b/pulumi/core/deployment.py @@ -0,0 +1,286 @@ +# pulumi/core/deployment.py + +""" +Deployment Management Module + +This module manages the deployment orchestration of modules, +initializes Pulumi and Kubernetes providers, and handles module deployments. +""" + +import os +import inspect +import importlib +from typing import Dict, Any, List, Type, Callable + +import pulumi +import pulumi_kubernetes as k8s +from pulumi import log +from pulumi_kubernetes import Provider + +from .config import get_module_config, load_default_versions +from .metadata import ( + collect_git_info, + generate_git_labels, + generate_git_annotations, + set_global_labels, + set_global_annotations, + generate_compliance_labels, + generate_compliance_annotations +) +from .utils import generate_global_transformations +from .types import ComplianceConfig + +# Function to perform all prerequisite configuration retrieval and variable initialization. +# TODO: Evaluate all code to further improve centralized configuration and variable init. +def initialize_pulumi() -> Dict[str, Any]: + """ + Initializes Pulumi configuration, Kubernetes provider, and global resources. + + Returns: + Dict[str, Any]: A dictionary containing initialized components. + """ + config = pulumi.Config() + stack_name = pulumi.get_stack() + project_name = pulumi.get_project() + + try: + # Load global default versions and initialize variables from configuration. + # TODO: + # - Refactor this to utilize a dataclass for type safety and better organization. + default_versions = load_default_versions(config) + versions: Dict[str, str] = {} + + # Initialize empty global configuration and dependency list variables. + configurations: Dict[str, Dict[str, Any]] = {} + global_depends_on: List[pulumi.Resource] = [] + + # Initialize the Kubernetes provider. + kubernetes_config = config.get_object("kubernetes") or {} + kubernetes_context = kubernetes_config.get("context") + + # TODO: refactor to export kubeconfig as a secret for use by k8s_provider. + kubeconfig = kubernetes_config.get("kubeconfig") or os.getenv('KUBECONFIG') + + # Initialize the Kubernetes provider. + # TODO: + # - Refactor to utilized kubeconfig from pulumi secret export object to reduce risk of loss or exposure. + k8s_provider = Provider( + "k8sProvider", + kubeconfig=kubeconfig, + context=kubernetes_context, + ) + + # Export k8s_provider as secret for use in other pulumi stacks. + k8s_provider_secret = pulumi.Output.secret(k8s_provider) + pulumi.export("k8s_provider", k8s_provider_secret) + + log.info(f"Kubeconfig: {kubeconfig}") + log.info(f"Kubernetes context: {kubernetes_context}") + + # Collect and store git information in the global configuration. + # Global Compliance Metadata for global transformations to propagate as resource tag / label / annotations + git_info = collect_git_info() + configurations["source_repository"] = { + "remote": git_info["remote"], + "branch": git_info["branch"], + "commit": git_info["commit"] + } + + # Retrieve compliance metadata from pulumi configuration and generate global tags, labels, and annotations. + # TODO: + # - Evaluate for provider-specific structure and transformation to support Kubernetes, AWS, Azure, GCP, OpenStack, etc. 
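+        # Illustrative stack-config shape consumed below (keys mirror core/types.py;
+        # the values shown are examples only):
+        #   compliance:
+        #     fisma: { enabled: true, level: "moderate" }
+        #     nist: { enabled: true, controls: ["AC-2", "AU-6"] }
+        #     scip: { environment: "dev" }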
+ compliance_config_dict = config.get_object('compliance') or {} + compliance_config = ComplianceConfig.merge(compliance_config_dict) + compliance_labels = generate_compliance_labels(compliance_config) + compliance_annotations = generate_compliance_annotations(compliance_config) + + git_labels = generate_git_labels(git_info) + git_annotations = generate_git_annotations(git_info) + global_labels = {**compliance_labels, **git_labels} + global_annotations = {**compliance_annotations, **git_annotations} + + set_global_labels(global_labels) + set_global_annotations(global_annotations) + generate_global_transformations(global_labels, global_annotations) + + # Return the initialized components. + # TODO: Refactor this as a dataclas, namedtuple, or similar. + return { + "config": config, + "stack_name": stack_name, + "project_name": project_name, + "default_versions": default_versions, + "versions": versions, + "configurations": configurations, + "global_depends_on": global_depends_on, + "k8s_provider": k8s_provider, + "git_info": git_info, + "compliance_config": compliance_config, + "global_labels": global_labels, + "global_annotations": global_annotations, + } + except Exception as e: + log.error(f"Initialization error: {str(e)}") + raise + +# Reusable function to deploy any IaC module with dynamic configuration and versioning. +def deploy_module( + module_name: str, + config: pulumi.Config, + default_versions: Dict[str, Any], + global_depends_on: List[pulumi.Resource], + k8s_provider: k8s.Provider, + versions: Dict[str, str], + configurations: Dict[str, Dict[str, Any]] +) -> None: + """ + Helper function to deploy a module based on configuration. + + Args: + module_name (str): Name of the module. + config (pulumi.Config): Pulumi configuration object. + default_versions (Dict[str, Any]): Default versions for modules. + global_depends_on (List[pulumi.Resource]): Global dependencies. + k8s_provider (k8s.Provider): Kubernetes provider. + versions (Dict[str, str]): Dictionary to store versions of deployed modules. + configurations (Dict[str, Dict[str, Any]]): Dictionary to store configurations of deployed modules. + + Raises: + TypeError: If any arguments have incorrect types. + ValueError: If any module-specific errors occur. + """ + + # Validate input types. + # TODO: Evalute for better approach to type checking. + if not isinstance(module_name, str): + raise TypeError("module_name must be a string") + if not isinstance(config, pulumi.Config): + raise TypeError("config must be an instance of pulumi.Config") + if not isinstance(default_versions, dict): + raise TypeError("default_versions must be a dictionary") + if not isinstance(global_depends_on, list): + raise TypeError("global_depends_on must be a list") + if not isinstance(k8s_provider, k8s.Provider): + raise TypeError("k8s_provider must be an instance of pulumi_kubernetes.Provider") + if not isinstance(versions, dict): + raise TypeError("versions must be a dictionary") + if not isinstance(configurations, dict): + raise TypeError("configurations must be a dictionary") + + # Retrieve module configuration and enabled status. + module_config_dict, module_enabled = get_module_config(module_name, config, default_versions) + + # Deploy the module if enabled. + if module_enabled: + + # Discover module configuration class and deploy function. + ModuleConfigClass = discover_config_class(module_name) + deploy_func = discover_deploy_function(module_name) + + # Merge module's default and user supplied configuration values. 
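+        # e.g. (illustrative): merge({"version": "1.15.3"}) returns the module's config
+        # dataclass with that version applied and dataclass defaults for all other fields.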
+ config_obj = ModuleConfigClass.merge(module_config_dict) + + # Validate the configuration object. + deploy_func_args = inspect.signature(deploy_func).parameters.keys() + config_arg_name = list(deploy_func_args)[0] + + # Deploy the module. + try: + + # Execute the module's deploy function. + result = deploy_func( + **{config_arg_name: config_obj}, + global_depends_on=global_depends_on, + k8s_provider=k8s_provider, + ) + + # Parse the result for version and release information. + # Accommodate for optional exported value. + # TODO: + # - Refactor this to be more robust and less restrictive. + # - Refactor to inherit value names from name string of returned key arguments. + if isinstance(result, tuple) and len(result) == 3: + version, release, module_aux_meta = result + elif isinstance(result, tuple) and len(result) == 2: + version, release = result + module_aux_meta = None + else: + raise ValueError(f"Unexpected return value structure from {module_name} deploy function") + + # Append the module's version and configuration to the global dictionaries. + versions[module_name] = version + configurations[module_name] = {"enabled": module_enabled} + + # adopt value export names from the returned key name argument string if possible, else solve for naming positional args. + if module_aux_meta: + pulumi.export(f"meta_{module_name}", module_aux_meta) + + # Append the module to the global dependencies. + # TODO: Reevaluate optimization for global_depends_on and best location for appending each module's primary resource.' + global_depends_on.append(release) + + except Exception as e: + log.error(f"Deployment failed for module {module_name}: {str(e)}") + raise + + # Report to log if module is not enabled. + else: + log.info(f"Module {module_name} is not enabled.") + +def discover_config_class(module_name: str) -> Type: + """ + Discovers and returns the configuration class from the module's types.py. + + Args: + module_name (str): The name of the module. + + Returns: + Type: The configuration class. + """ + types_module = importlib.import_module(f"modules.{module_name}.types") + for name, obj in inspect.getmembers(types_module): + if inspect.isclass(obj) and hasattr(obj, "__dataclass_fields__"): + return obj + raise ValueError(f"No dataclass found in modules.{module_name}.types") + +def discover_deploy_function(module_name: str) -> Callable: + """ + Discovers and returns the deploy function from the module's deploy.py. + + Args: + module_name (str): The name of the module. + + Returns: + Callable: The deploy function. + """ + deploy_module = importlib.import_module(f"modules.{module_name}.deploy") + function_name = f"deploy_{module_name}_module" + deploy_function = getattr(deploy_module, function_name, None) + if not deploy_function: + raise ValueError(f"No deploy function named '{function_name}' found in modules.{module_name}.deploy") + return deploy_function + +# TODO: +# - Refactor to use a more robust approach to type checking. +# - Reevaluate deploy_module signature and optimize on existing argument encapsulation and passing for simpler module developer experience. 
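+# Illustrative module layout expected by the discovery helpers above ("cert_manager" is
+# only an example module name):
+#   modules/cert_manager/types.py  -> a @dataclass config class exposing a merge() staticmethod
+#   modules/cert_manager/deploy.py -> deploy_cert_manager_module(config, global_depends_on, k8s_provider)
+#                                     returning (version, release) or (version, release, aux_metadata)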
+def deploy_modules( + modules: List[str], + config: pulumi.Config, + default_versions: Dict[str, Any], + global_depends_on: List[pulumi.Resource], + k8s_provider: Provider, + versions: Dict[str, str], + configurations: Dict[str, Dict[str, Any]], + ) -> None: + + for module_name in modules: + log.info(f"Deploying module: {module_name}") + deploy_module( + module_name=module_name, + config=config, + default_versions=default_versions, + global_depends_on=global_depends_on, + k8s_provider=k8s_provider, + versions=versions, + configurations=configurations, + ) diff --git a/pulumi/core/metadata.py b/pulumi/core/metadata.py new file mode 100644 index 0000000..adc137a --- /dev/null +++ b/pulumi/core/metadata.py @@ -0,0 +1,188 @@ +# pulumi/core/metadata.py +# TODO: +# - enhance with support for propagation of labels annotations on AWS resources +# - enhance by adding additional data to global tag / label / annotation metadata +# - support adding git release semver to global tag / label / annotation metadata + +""" +Metadata Management Module + +This module manages global metadata, labels, and annotations. +It includes functions to generate compliance and Git-related metadata. +""" + +import re +import json +import threading +import subprocess +from typing import Dict, Any + +import pulumi +from pulumi import log + +from .types import ComplianceConfig + +# Singleton class to manage global metadata +# Globals are correctly chosen to enforce consistency across all modules and resources +# This class is thread-safe and used to store global labels and annotations +class MetadataSingleton: + _instance = None + __lock = threading.Lock() + + def __new__(cls, *args, **kwargs): + if not cls._instance: + with cls.__lock: + if not cls._instance: + cls._instance = super(MetadataSingleton, cls).__new__(cls) + cls._instance._data = {"_global_labels": {}, "_global_annotations": {}} + return cls._instance + +def set_global_labels(labels: Dict[str, str]): + """ + Sets global labels. + + Args: + labels (Dict[str, str]): The global labels. + """ + MetadataSingleton()._data["_global_labels"] = labels + +def set_global_annotations(annotations: Dict[str, str]): + """ + Sets global annotations. + + Args: + annotations (Dict[str, str]): The global annotations. + """ + MetadataSingleton()._data["_global_annotations"] = annotations + +def get_global_labels() -> Dict[str, str]: + """ + Retrieves global labels. + + Returns: + Dict[str, str]: The global labels. + """ + return MetadataSingleton()._data["_global_labels"] + +def get_global_annotations() -> Dict[str, str]: + """ + Retrieves global annotations. + + Returns: + Dict[str, str]: The global annotations. + """ + return MetadataSingleton()._data["_global_annotations"] + +# Function to collect Git repository information +# TODO: re-implement this function to use the GitPython library or other more pythonic approach +# TODO: add support for fetching and returning the latest git release semver +def collect_git_info() -> Dict[str, str]: + """ + Collects Git repository information. + + Returns: + Dict[str, str]: The Git information. 
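+
+    Example return value (illustrative):
+        {'remote': 'https://github.com/ContainerCraft/Kargo.git',
+         'branch': 'main',
+         'commit': '<full commit sha>'}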
+ """ + try: + remote = subprocess.check_output(['git', 'config', '--get', 'remote.origin.url'], stderr=subprocess.STDOUT).strip().decode('utf-8') + branch = subprocess.check_output(['git', 'rev-parse', '--abbrev-ref', 'HEAD'], stderr=subprocess.STDOUT).strip().decode('utf-8') + commit = subprocess.check_output(['git', 'rev-parse', 'HEAD'], stderr=subprocess.STDOUT).strip().decode('utf-8') + return {'remote': remote, 'branch': branch, 'commit': commit} + except subprocess.CalledProcessError as e: + log.error(f"Error fetching git information: {e}") + return {'remote': 'N/A', 'branch': 'N/A', 'commit': 'N/A'} + +def generate_git_labels(git_info: Dict[str, str]) -> Dict[str, str]: + """ + Generates git-related labels. + + Args: + git_info (Dict[str, str]): The Git information. + + Returns: + Dict[str, str]: The git-related labels. + """ + return { + "git.branch": git_info.get("branch", ""), + "git.commit": git_info.get("commit", "")[:7], + } + +def generate_git_annotations(git_info: Dict[str, str]) -> Dict[str, str]: + """ + Generates git-related annotations. + + Args: + git_info (Dict[str, str]): The Git information. + + Returns: + Dict[str, str]: The git-related annotations. + """ + return { + "git.remote": git_info.get("remote", ""), + "git.commit.full": git_info.get("commit", ""), + "git.branch": git_info.get("branch", "") + } + +def generate_compliance_labels(compliance_config: ComplianceConfig) -> Dict[str, str]: + """ + Generates compliance labels based on the given compliance configuration. + + Args: + compliance_config (ComplianceConfig): The compliance configuration object. + + Returns: + Dict[str, str]: A dictionary of compliance labels. + """ + labels = {} + if compliance_config.fisma.enabled: + labels['compliance.fisma.enabled'] = 'true' + if compliance_config.nist.enabled: + labels['compliance.nist.enabled'] = 'true' + if compliance_config.scip.environment: + labels['compliance.scip.environment'] = sanitize_label_value(compliance_config.scip.environment) + return labels + +def generate_compliance_annotations(compliance_config: ComplianceConfig) -> Dict[str, str]: + """ + Generates compliance annotations based on the given compliance configuration. + + Args: + compliance_config (ComplianceConfig): The compliance configuration object. + + Returns: + Dict[str, str]: A dictionary of compliance annotations. + """ + + # TODO: enhance if logic to improve efficiency, DRY, readability and maintainability + annotations = {} + if compliance_config.fisma.level: + annotations['compliance.fisma.level'] = compliance_config.fisma.level + if compliance_config.fisma.ato: + annotations['compliance.fisma.ato'] = json.dumps(compliance_config.fisma.ato) + if compliance_config.nist.controls: + annotations['compliance.nist.controls'] = json.dumps(compliance_config.nist.controls) + if compliance_config.nist.auxiliary: + annotations['compliance.nist.auxiliary'] = json.dumps(compliance_config.nist.auxiliary) + if compliance_config.nist.exceptions: + annotations['compliance.nist.exceptions'] = json.dumps(compliance_config.nist.exceptions) + return annotations + +# Function to sanitize a label value to comply with Kubernetes `label` naming conventions +# https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set +# TODO: +# - retool this feature as a more efficient implementation in `collect_git_info()` and related functions. +def sanitize_label_value(value: str) -> str: + """ + Sanitizes a label value to comply with Kubernetes naming conventions. 
+ + Args: + value (str): The value to sanitize. + + Returns: + str: The sanitized value. + """ + value = value.lower() + sanitized = re.sub(r'[^a-z0-9_.-]', '-', value) + sanitized = re.sub(r'^[^a-z0-9]+', '', sanitized) + sanitized = re.sub(r'[^a-z0-9]+$', '', sanitized) + return sanitized[:63] diff --git a/pulumi/core/resource_helpers.py b/pulumi/core/resource_helpers.py new file mode 100644 index 0000000..2310392 --- /dev/null +++ b/pulumi/core/resource_helpers.py @@ -0,0 +1,354 @@ +# pulumi/core/resource_helpers.py + +import pulumi +import pulumi_kubernetes as k8s +from typing import Optional, Dict, Any, List, Callable +from .metadata import get_global_labels, get_global_annotations +from .utils import set_resource_metadata + +def create_namespace( + name: str, + labels: Optional[Dict[str, str]] = None, + annotations: Optional[Dict[str, str]] = None, + finalizers: Optional[List[str]] = None, + custom_timeouts: Optional[Dict[str, str]] = None, + opts: Optional[pulumi.ResourceOptions] = None, + k8s_provider: Optional[k8s.Provider] = None, + parent: Optional[pulumi.Resource] = None, + depends_on: Optional[List[pulumi.Resource]] = None, +) -> k8s.core.v1.Namespace: + """ + Creates a Kubernetes Namespace with global labels and annotations. + + Args: + name (str): The name of the namespace. + labels (Optional[Dict[str, str]]): Additional labels to apply. + annotations (Optional[Dict[str, str]]): Additional annotations to apply. + finalizers (Optional[List[str]]): Finalizers for the namespace. + custom_timeouts (Optional[Dict[str, str]]): Custom timeouts for resource operations. + opts (Optional[pulumi.ResourceOptions]): Pulumi resource options. + k8s_provider (Optional[k8s.Provider]): Kubernetes provider. + depends_on (Optional[List[pulumi.Resource]]): Resources this resource depends on. + + Returns: + k8s.core.v1.Namespace: The created Namespace resource. + """ + + # If the optional arguments are not provided, set them to default values. 
+    # TODO:
+    # - refactor/simplify for better readability and maintainability
+    if opts is None:
+        opts = pulumi.ResourceOptions()
+    if labels is None:
+        labels = {}
+    if annotations is None:
+        annotations = {}
+    if custom_timeouts is None:
+        custom_timeouts = {}
+    if depends_on is None:
+        depends_on = []
+    # parent may remain None; pulumi.ResourceOptions accepts an optional parent resource.
+
+    global_labels = get_global_labels()
+    global_annotations = get_global_annotations()
+    labels.update(global_labels)
+    annotations.update(global_annotations)
+
+    metadata = {
+        "name": name,
+        "labels": labels,
+        "annotations": annotations,
+    }
+
+    spec = {}
+    if finalizers:
+        spec["finalizers"] = finalizers
+
+    # Set Global Pulumi Resource Options
+    # TODO:
+    # - Enhance core/config.py with a centralized default pulumi `opts` configuration
+    # - Support merging with custom opts
+    # - Adopt across project resources to improve consistency and DRYness
+    opts = pulumi.ResourceOptions.merge(
+        opts,
+        pulumi.ResourceOptions(
+            provider=k8s_provider,
+            depends_on=depends_on,
+            parent=parent,
+            custom_timeouts=pulumi.CustomTimeouts(
+                create=custom_timeouts.get("create", "5m"),
+                update=custom_timeouts.get("update", "10m"),
+                delete=custom_timeouts.get("delete", "10m"),
+            ),
+        ),
+    )
+
+    return k8s.core.v1.Namespace(
+        name,
+        metadata=metadata,
+        spec=spec,
+        opts=opts,
+    )
+
+def create_custom_resource(
+    name: str,
+    args: Dict[str, Any],
+    opts: Optional[pulumi.ResourceOptions] = None,
+    k8s_provider: Optional[k8s.Provider] = None,
+    depends_on: Optional[List[pulumi.Resource]] = None,
+) -> k8s.apiextensions.CustomResource:
+    """
+    Creates a Kubernetes CustomResource with global labels and annotations.
+
+    Args:
+        name (str): The name of the custom resource.
+        args (Dict[str, Any]): Arguments for creating the custom resource.
+        opts (Optional[pulumi.ResourceOptions]): Pulumi resource options.
+        k8s_provider (Optional[k8s.Provider]): Kubernetes provider.
+        depends_on (Optional[List[pulumi.Resource]]): Resources this custom resource depends on.
+
+    Returns:
+        k8s.apiextensions.CustomResource: The created CustomResource.
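+
+    Example args (illustrative; the cert-manager resource shown is only an example):
+        {
+            "apiVersion": "cert-manager.io/v1",
+            "kind": "ClusterIssuer",
+            "metadata": {"name": "selfsigned-issuer"},
+            "spec": {"selfSigned": {}},
+        }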
+ """ + try: + if 'kind' not in args or 'apiVersion' not in args: + raise ValueError("The 'args' dictionary must include 'kind' and 'apiVersion' keys.") + + if opts is None: + opts = pulumi.ResourceOptions() + if depends_on is None: + depends_on = [] + + global_labels = get_global_labels() + global_annotations = get_global_annotations() + + def custom_resource_transform(resource_args: pulumi.ResourceTransformationArgs): + props = resource_args.props + if 'metadata' in props: + set_resource_metadata(props['metadata'], global_labels, global_annotations) + return pulumi.ResourceTransformationResult(props, resource_args.opts) + + opts = pulumi.ResourceOptions.merge( + opts, + pulumi.ResourceOptions( + provider=k8s_provider, + depends_on=depends_on, + transformations=[custom_resource_transform], + ), + ) + + # Ensure metadata and spec are included if specified + metadata = args.get('metadata', {}) + spec = args.get('spec', {}) + + return k8s.apiextensions.CustomResource( + resource_name=name, + api_version=args['apiVersion'], + kind=args['kind'], + metadata=metadata, + spec=spec, + opts=opts, + ) + + except Exception as e: + pulumi.log.error(f"Failed to create custom resource '{name}': {e}") + raise + +def create_helm_release( + name: str, + args: k8s.helm.v3.ReleaseArgs, + opts: Optional[pulumi.ResourceOptions] = None, + transformations: Optional[List[Callable[[pulumi.ResourceTransformationArgs], Optional[pulumi.ResourceTransformationResult]]]] = None, + k8s_provider: Optional[k8s.Provider] = None, + depends_on: Optional[List[pulumi.Resource]] = None, +) -> k8s.helm.v3.Release: + """ + Creates a Helm Release with global labels and annotations. + + Args: + name (str): The release name. + args (k8s.helm.v3.ReleaseArgs): Arguments for the Helm release. + opts (Optional[pulumi.ResourceOptions]): Pulumi resource options. + transformations (Optional[List[Callable]]): Additional transformations. + k8s_provider (Optional[k8s.Provider]): Kubernetes provider. + depends_on (Optional[List[pulumi.Resource]]): Resources this release depends on. + + Returns: + k8s.helm.v3.Release: The created Helm release. + """ + if opts is None: + opts = pulumi.ResourceOptions() + if transformations is None: + transformations = [] + if depends_on is None: + depends_on = [] + + global_labels = get_global_labels() + global_annotations = get_global_annotations() + + def helm_resource_transform(resource_args: pulumi.ResourceTransformationArgs): + props = resource_args.props + if 'metadata' in props: + set_resource_metadata(props['metadata'], global_labels, global_annotations) + elif 'spec' in props and isinstance(props['spec'], dict): + if 'metadata' in props['spec']: + set_resource_metadata(props['spec']['metadata'], global_labels, global_annotations) + return pulumi.ResourceTransformationResult(props, resource_args.opts) + + transformations.append(helm_resource_transform) + + opts = pulumi.ResourceOptions.merge( + opts, + pulumi.ResourceOptions( + provider=k8s_provider, + depends_on=depends_on, + transformations=transformations, + ), + ) + + return k8s.helm.v3.Release(name, args, opts=opts) + +def create_secret( + name: str, + args: Dict[str, Any], + opts: Optional[pulumi.ResourceOptions] = None, + k8s_provider: Optional[k8s.Provider] = None, + depends_on: Optional[List[pulumi.Resource]] = None, +) -> k8s.core.v1.Secret: + """ + Creates a Kubernetes Secret with global labels and annotations. + + Args: + name (str): The name of the secret. + args (Dict[str, Any]): Arguments for creating the secret. 
+ opts (Optional[pulumi.ResourceOptions]): Pulumi resource options. + k8s_provider (Optional[k8s.Provider]): Kubernetes provider. + depends_on (Optional[List[pulumi.Resource]]): Resources this secret depends on. + + Returns: + k8s.core.v1.Secret: The created Secret. + """ + if opts is None: + opts = pulumi.ResourceOptions() + if depends_on is None: + depends_on = [] + + # Merge global labels and annotations (if any) + global_labels = get_global_labels() + global_annotations = get_global_annotations() + + def secret_resource_transform(resource_args: pulumi.ResourceTransformationArgs): + props = resource_args.props + if 'metadata' in props: + set_resource_metadata(props['metadata'], global_labels, global_annotations) + return pulumi.ResourceTransformationResult(props, resource_args.opts) + + # Merge resource options + opts = pulumi.ResourceOptions.merge( + opts, + pulumi.ResourceOptions( + provider=k8s_provider, + depends_on=depends_on, + transformations=[secret_resource_transform], + ), + ) + + # Constructor call + return k8s.core.v1.Secret(name, opts, **args) + +def create_config_file( + name: str, + file: str, + opts: Optional[pulumi.ResourceOptions] = None, + transformations: Optional[List[Callable[[pulumi.ResourceTransformationArgs], Optional[pulumi.ResourceTransformationResult]]]] = None, + k8s_provider: Optional[k8s.Provider] = None, + depends_on: Optional[List[pulumi.Resource]] = None, +) -> k8s.yaml.ConfigFile: + """ + Creates Kubernetes resources from a YAML config file with global labels and annotations. + + Args: + name (str): The resource name. + file (str): The path to the YAML file. + opts (Optional[pulumi.ResourceOptions]): Pulumi resource options. + transformations (Optional[List[Callable]]): Additional transformations. + k8s_provider (Optional[k8s.Provider]): Kubernetes provider. + depends_on (Optional[List[pulumi.Resource]]): Resources these resources depend on. + + Returns: + k8s.yaml.ConfigFile: The created resources. + """ + if opts is None: + opts = pulumi.ResourceOptions() + if transformations is None: + transformations = [] + if depends_on is None: + depends_on = [] + + global_labels = get_global_labels() + global_annotations = get_global_annotations() + + def config_file_transform(resource_args: pulumi.ResourceTransformationArgs): + props = resource_args.props + if 'metadata' in props: + set_resource_metadata(props['metadata'], global_labels, global_annotations) + elif 'spec' in props and isinstance(props['spec'], dict): + if 'metadata' in props['spec']: + set_resource_metadata(props['spec']['metadata'], global_labels, global_annotations) + return pulumi.ResourceTransformationResult(props, resource_args.opts) + + transformations.append(config_file_transform) + + opts = pulumi.ResourceOptions.merge( + opts, + pulumi.ResourceOptions( + provider=k8s_provider, + depends_on=depends_on, + transformations=transformations, + ), + ) + + return k8s.yaml.ConfigFile(name, file, opts=opts) + +# ------------------------------------------------------------------------------ +# Metadata +# ------------------------------------------------------------------------------ +# TODO: +# - Evaluate full codebase for wider utilization of create_meta_objectmeta() +def create_meta_objectmeta( + name: str, + labels: Optional[Dict[str, str]] = None, + annotations: Optional[Dict[str, str]] = None, + namespace: Optional[str] = None, + **kwargs, +) -> k8s.meta.v1.ObjectMetaArgs: + """ + Creates a Kubernetes ObjectMetaArgs with global labels and annotations. 
+ + Args: + name (str): The name of the resource. + labels (Optional[Dict[str, str]]): Additional labels to apply. + annotations (Optional[Dict[str, str]]): Additional annotations to apply. + namespace (Optional[str]): The namespace of the resource. + + Returns: + k8s.meta.v1.ObjectMetaArgs: The metadata arguments. + """ + if labels is None: + labels = {} + if annotations is None: + annotations = {} + + global_labels = get_global_labels() + global_annotations = get_global_annotations() + labels.update(global_labels) + annotations.update(global_annotations) + + return k8s.meta.v1.ObjectMetaArgs( + name=name, + labels=labels, + annotations=annotations, + namespace=namespace, + **kwargs, + ) diff --git a/pulumi/core/types.py b/pulumi/core/types.py new file mode 100644 index 0000000..7b9b44c --- /dev/null +++ b/pulumi/core/types.py @@ -0,0 +1,77 @@ +# pulumi/core/types.py + +""" +Types and Data Structures Module + +This module defines all shared data classes and types used across modules +within the Kargo PaaS platform. +""" + +from typing import Optional, List, Dict, Any + +import pulumi +from dataclasses import dataclass, field + +@dataclass +class NamespaceConfig: + name: str + labels: Dict[str, str] = field(default_factory=lambda: {"ccio.v1/app": "kargo"}) + annotations: Dict[str, str] = field(default_factory=dict) + finalizers: List[str] = field(default_factory=lambda: ["kubernetes"]) + protect: bool = False + retain_on_delete: bool = False + ignore_changes: List[str] = field(default_factory=lambda: ["metadata", "spec"]) + custom_timeouts: Dict[str, str] = field(default_factory=lambda: { + "create": "5m", + "update": "10m", + "delete": "10m" + }) + +@dataclass +class FismaConfig: + enabled: bool = False + level: Optional[str] = None + ato: Dict[str, str] = field(default_factory=dict) + +@dataclass +class NistConfig: + enabled: bool = False + controls: List[str] = field(default_factory=list) + auxiliary: List[str] = field(default_factory=list) + exceptions: List[str] = field(default_factory=list) + +@dataclass +class ScipConfig: + environment: Optional[str] = None + ownership: Dict[str, Any] = field(default_factory=dict) + provider: Dict[str, Any] = field(default_factory=dict) + +@dataclass +class ComplianceConfig: + fisma: FismaConfig = field(default_factory=FismaConfig) + nist: NistConfig = field(default_factory=NistConfig) + scip: ScipConfig = field(default_factory=ScipConfig) + + @staticmethod + def merge(user_config: Dict[str, Any]) -> 'ComplianceConfig': + """ + Merges user-provided compliance configuration with default configuration. + + Args: + user_config (Dict[str, Any]): The user-provided compliance configuration. + + Returns: + ComplianceConfig: The merged compliance configuration object. + """ + default_config = ComplianceConfig() + for key, value in user_config.items(): + if hasattr(default_config, key): + nested_config = getattr(default_config, key) + for nested_key, nested_value in value.items(): + if hasattr(nested_config, nested_key): + setattr(nested_config, nested_key, nested_value) + else: + pulumi.log.warn(f"Unknown key '{nested_key}' in compliance.{key}") + else: + pulumi.log.warn(f"Unknown compliance configuration key: {key}") + return default_config diff --git a/pulumi/core/utils.py b/pulumi/core/utils.py new file mode 100644 index 0000000..28c9a9a --- /dev/null +++ b/pulumi/core/utils.py @@ -0,0 +1,231 @@ +# pulumi/core/utils.py + +""" +Utility Functions Module + +This module provides generic, reusable utility functions. 
+It includes resource transformations, Helm interactions, and miscellaneous helpers. +""" + +import re +import os +import tempfile +import pulumi +import pulumi_kubernetes as k8s +from typing import Optional, Dict, Any, List +import requests +import logging +import yaml +from packaging.version import parse as parse_version, InvalidVersion, Version + + +# Set up basic logging +logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') + +# Function to update global resource tags, labels, and annotations from compliance config spec +def set_resource_metadata(metadata: Any, global_labels: Dict[str, str], global_annotations: Dict[str, str]): + """ + Updates resource metadata with global labels and annotations. + """ + if isinstance(metadata, dict): + metadata.setdefault('labels', {}).update(global_labels) + metadata.setdefault('annotations', {}).update(global_annotations) + elif isinstance(metadata, k8s.meta.v1.ObjectMetaArgs): + if metadata.labels is None: + metadata.labels = {} + metadata.labels.update(global_labels) + if metadata.annotations is None: + metadata.annotations = {} + metadata.annotations.update(global_annotations) + +# Function to apply global resource tags, labels, and annotations to all yaml objects +def generate_global_transformations(global_labels: Dict[str, str], global_annotations: Dict[str, str]): + """ + Generates global transformations for resources. + """ + def global_transform(args: pulumi.ResourceTransformationArgs) -> Optional[pulumi.ResourceTransformationResult]: + props = args.props + + if 'metadata' in props: + set_resource_metadata(props['metadata'], global_labels, global_annotations) + elif 'spec' in props and isinstance(props['spec'], dict): + if 'metadata' in props['spec']: + set_resource_metadata(props['spec']['metadata'], global_labels, global_annotations) + + return pulumi.ResourceTransformationResult(props, args.opts) + + pulumi.runtime.register_stack_transformation(global_transform) + +# Function to fetch the latest stable version of a Helm chart from a helm chart index.yaml url +def get_latest_helm_chart_version(repo_url: str, chart_name: str) -> str: + """ + Fetches the latest stable version of a Helm chart from the given repository URL. + + Args: + repo_url (str): The base URL of the Helm repository. + chart_name (str): The name of the Helm chart. + + Returns: + str: The latest stable version of the chart. 
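+
+    Example (illustrative):
+        get_latest_helm_chart_version('https://charts.jetstack.io', 'cert-manager')
+        # -> e.g. '1.15.3'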
+ """ + try: + index_url = repo_url.rstrip('/') + '/index.yaml' + + logging.info(f"Fetching Helm repository index from URL: {index_url}") + response = requests.get(index_url) + response.raise_for_status() + + index = yaml.safe_load(response.content) + if chart_name in index['entries']: + chart_versions = index['entries'][chart_name] + stable_versions = [v for v in chart_versions if is_stable_version(v['version'])] + if not stable_versions: + logging.info(f"No stable versions found for chart '{chart_name}'.") + return "Chart not found" + latest_chart = max(stable_versions, key=lambda x: parse_version(x['version'])) + return latest_chart['version'].lstrip('v') + else: + logging.info(f"No chart named '{chart_name}' found in repository.") + return "Chart not found" + + except requests.RequestException as e: + logging.error(f"Error fetching Helm repository index: {e}") + return f"Error fetching data: {e}" + except yaml.YAMLError as e: + logging.error(f"Error parsing Helm repository index YAML: {e}") + return f"Error parsing YAML: {e}" + +# Sanity check Helm chart versions for stable releases +def is_stable_version(version_str: str) -> bool: + """ + Determines if a version string represents a stable version. + + Args: + version_str (str): The version string to check. + + Returns: + bool: True if the version is stable, False otherwise. + """ + try: + parsed_version = parse_version(version_str) + return isinstance(parsed_version, Version) and not parsed_version.is_prerelease and not parsed_version.is_devrelease + except InvalidVersion: + return False + +# Function to extract the repository name from a Git remote URL +def extract_repo_name(remote_url: str) -> str: + """ + Extracts the repository name from a Git remote URL. + + Args: + remote_url (str): The Git remote URL. + + Returns: + str: The repository name. + """ + match = re.search(r'[:/]([^/:]+/[^/\.]+)(\.git)?$', remote_url) + if match: + return match.group(1) + return remote_url + + +# Function to wait for a list of CRDs to be present +def wait_for_crds(crd_names: List[str], k8s_provider: k8s.Provider, depends_on: List[pulumi.Resource], parent: pulumi.Resource) -> List[pulumi.Resource]: + """ + Waits for the specified CRDs to be present and ensures dependencies. + + Args: + crd_names (List[str]): A list of CRD names. + k8s_provider (k8s.Provider): The Kubernetes provider. + depends_on (List[pulumi.Resource]): A list of dependencies. + parent (pulumi.Resource): The parent resource. + + Returns: + List[pulumi.Resource]: The CRD resources or an empty list during preview. + """ + + # Instantiate crds list to store retrieved CRD resources with enforced type safety for k8s.apiextensions.v1.CustomResourceDefinition + crds: List[pulumi.Resource] = [] + + for crd_name in crd_names: + try: + crd = k8s.apiextensions.v1.CustomResourceDefinition.get( + resource_name=f"crd-{crd_name}", + id=crd_name, + opts=pulumi.ResourceOptions( + provider=k8s_provider, + depends_on=depends_on, + parent=parent, + ), + ) + crds.append(crd) + except Exception: + if pulumi.runtime.is_dry_run(): + pulumi.log.info(f"CRD {crd_name} not found, creating dummy CRD.") + dummy_crd = create_dummy_crd(crd_name, k8s_provider, depends_on, parent) + if dummy_crd: + crds.append(dummy_crd) + + return crds + +# HACK: Create a dummy CRD definition to use during pulumi dry_run / preview runs if CRDs are not found. +# TODO: Solve this in a more elegant way. 
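+# Illustrative decomposition performed below (the CRD name is only an example):
+#   "certificates.cert-manager.io" -> plural "certificates", group "cert-manager.io",
+#   kind "Certificates" (title-cased plural, sufficient for a preview-only placeholder).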
+def create_dummy_crd(crd_name: str, k8s_provider: k8s.Provider, depends_on: List[pulumi.Resource], parent: pulumi.Resource) -> Optional[k8s.yaml.ConfigFile]: + """ + Create a dummy CRD definition to use during preview runs. + + Args: + crd_name (str): The name of the CRD. + k8s_provider (k8s.Provider): The Kubernetes provider. + depends_on (List[pulumi.Resource]): A list of dependencies. + parent (pulumi.Resource): The parent resource. + + Returns: + Optional[k8s.yaml.ConfigFile]: The dummy CRD resource. + """ + parts = crd_name.split('.') + plural = parts[0] + group = '.'.join(parts[1:]) + kind = ''.join(word.title() for word in plural.split('_')) + + dummy_crd_yaml_template = """ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: {metadata_name} +spec: + group: {group} + names: + plural: {plural} + kind: {kind} + scope: Namespaced + versions: + - name: v1 + served: true + storage: true +""" + + dummy_crd_yaml = dummy_crd_yaml_template.format( + metadata_name=f"{plural}.{group}", + group=group, + plural=plural, + kind=kind, + ) + + try: + with tempfile.NamedTemporaryFile(delete=False, mode='w') as temp_file: + temp_file.write(dummy_crd_yaml) + temp_file_path = temp_file.name + + dummy_crd = k8s.yaml.ConfigFile( + "dummy-crd-{}".format(crd_name), + file=temp_file_path, + opts=pulumi.ResourceOptions( + parent=parent, + depends_on=depends_on, + provider=k8s_provider, + ) + ) + return dummy_crd + finally: + os.unlink(temp_file_path) diff --git a/pulumi/default_versions.json b/pulumi/default_versions.json new file mode 100644 index 0000000..0a26831 --- /dev/null +++ b/pulumi/default_versions.json @@ -0,0 +1,8 @@ +{ + "cert_manager": "1.15.3", + "containerized_data_importer": "1.60.3", + "hostpath_provisioner": "0.20.0", + "kubevirt": "1.3.1", + "multus": "master", + "prometheus": "62.7.0" +} diff --git a/pulumi/modules/README.md b/pulumi/modules/README.md new file mode 100644 index 0000000..a0c91f8 --- /dev/null +++ b/pulumi/modules/README.md @@ -0,0 +1,249 @@ +# Kargo Modules Development Guide + +Welcome to the Kargo Kubevirt PaaS IaC module developer guide. This document provides an overview of the design principles, code structure, and best practices for developing and maintaining modules within the Kargo IaC codebase. It is intended for developers and AI language models (like ChatGPT) to quickly understand and contribute to the project. + +## Table of Contents + +- [Introduction](#introduction) +- [Design Principles](#design-principles) +- [Code Structure](#code-structure) +- [Version Management](#version-management) +- [Module Development Guide](#module-development-guide) + - [1. Module Configuration](#1-module-configuration) + - [2. Defining Configuration Types](#2-defining-configuration-types) + - [3. Module Deployment Logic](#3-module-deployment-logic) + - [4. Updating `__main__.py`](#4-updating-__main__py) +- [Best Practices](#best-practices) +- [Example Module: Cert Manager](#example-module-cert-manager) +- [Conclusion](#conclusion) + +--- + +## Introduction + +Kargo is a Kubernetes & Kubevirt based Platform Engineering IaC development & deployment framework that leverages Pulumi for infrastructure as code (IaC). This guide aims to standardize module development by centralizing version handling, simplifying module code, and promoting consistency across the codebase. + +--- + +## Design Principles + +- **Centralization of Common Logic**: Shared functionality, such as version handling, is centralized to reduce duplication and simplify maintenance. 
+- **Simplification of Module Code**: Modules focus solely on their specific deployment logic, relying on centralized utilities for configuration and version management. +- **Consistency**: Establish clear patterns and standards for module development to ensure uniformity across the codebase. +- **Maintainability**: Write clean, readable code with proper documentation and type annotations to facilitate ease of maintenance and contribution. +- **Flexibility**: Allow users to override configurations and versions as needed, while providing sensible defaults. + +--- + +## IaC Module Structure + +- **`__main__.py`**: The entry point of the Pulumi program. Handles global configurations, Kubernetes provider setup, version loading, and module deployments. +- **`src/lib/`**: Contains shared utilities and libraries, such as version management (`versions.py`) and shared types (`types.py`). +- **`src//`**: Each module resides in its own directory under `src/`, containing its specific types (`types.py`) and deployment logic (`deploy.py`). +- **`src//types.py`**: Defines data classes for module configurations with default values and merging logic. +- **`src//deploy.py`**: Contains the module-specific deployment logic, taking in the merged configuration and returning relevant outputs. +- **`src//README.md`**: Module-specific documentation with configuration options, features, and usage instructions. +- **`src//*.py`**: Additional utility files or scripts specific to the module. + +--- + +## Version Management + +### Centralized Version Handling + +Version management is centralized in `src/lib/versions.py`. The `load_default_versions` function loads versions based on the following precedence: + +1. **User-Specified Source**: Via Pulumi config `default_versions.source`. +2. **Stack-Specific Versions**: If `versions.stack_name` is `true`, loads from `./versions/$STACK_NAME.json`. +3. **Local Default Versions**: Loads from `./default_versions.json`. +4. **Remote Versions**: Fetches from a remote URL based on the specified `versions.channel`. + +### Injecting Versions into Modules + +In `__main__.py`, the `get_module_config` function handles module configuration loading and version injection. Modules receive configurations with versions already set, eliminating the need for individual modules to handle version logic. + +--- + +## Module Development Guide + +Follow these steps to develop or enhance a module in the Kargo codebase. + +### 1. Module Configuration + +- **Purpose**: Retrieve and prepare the module's configuration, including version information. +- **Implementation**: Use the `get_module_config` function in `__main__.py`. + +```python +# __main__.py + +config_module_dict, module_enabled = get_module_config('module_name', config, default_versions) +``` + +- **Parameters**: + - `module_name`: The name of the module as defined in Pulumi config. + - `config`: The global Pulumi config object. + - `default_versions`: The dictionary containing default versions. + +### 2. Defining Configuration Types + +- **Purpose**: Define a data class for the module's configuration with default values. +- **Implementation**: Create a `types.py` in the module's directory. + +```python +# src/module_name/types.py + +from dataclasses import dataclass +from typing import Optional, Dict, Any +import pulumi + +@dataclass +class ModuleNameConfig: + version: Optional[str] = None # Version will be injected + # ... other configuration fields ... 
+ + @staticmethod + def merge(user_config: Dict[str, Any]) -> 'ModuleNameConfig': + default_config = ModuleNameConfig() + merged_config = default_config.__dict__.copy() + for key, value in user_config.items(): + if hasattr(default_config, key): + merged_config[key] = value + else: + pulumi.log.warn(f"Unknown configuration key '{key}' in module_name config.") + return ModuleNameConfig(**merged_config) +``` + +### 3. Module Deployment Logic + +- **Purpose**: Implement the module's deployment logic using the merged configuration. +- **Implementation**: Create a `deploy.py` in the module's directory. + +```python +# src/module_name/deploy.py + +def deploy_module_name( + config_module_name: ModuleNameConfig, + global_depends_on: List[pulumi.Resource], + k8s_provider: k8s.Provider, +) -> Tuple[Optional[str], Optional[pulumi.Resource]]: + # Module-specific deployment logic + # Use config_module_name.version as needed + # Return version and any relevant resources +``` + +### 4. Updating `__main__.py` + +- **Purpose**: Integrate the module into the main Pulumi program. +- **Implementation**: + +```python +# __main__.py + +if module_enabled: + from src.module_name.types import ModuleNameConfig + config_module_name = ModuleNameConfig.merge(config_module_dict) + + from src.module_name.deploy import deploy_module_name + + module_version, module_resource = deploy_module_name( + config_module_name=config_module_name, + global_depends_on=global_depends_on, + k8s_provider=k8s_provider, + ) + + # Update versions and configurations dictionaries + versions["module_name"] = module_version + configurations["module_name"] = { + "enabled": module_enabled, + } +``` + +--- + +## Best Practices + +- **Centralize Common Logic**: Use shared utilities from `src/lib/` to avoid duplication. +- **Type Annotations**: Use type hints throughout the code for better readability and tooling support. +- **Documentation**: Include docstrings and comments to explain complex logic. +- **Consistent Coding Style**: Follow the project's coding conventions for formatting and naming. +- **Error Handling**: Implement robust error handling and logging for easier debugging. +- **Avoid Global Variables**: Pass necessary objects as arguments to functions and methods. + +--- + +## Example Module: Cert Manager + +### Configuration + +```python +# Configurable by either of: +# - Pulumi Stack Config `Pulumi.stack.yaml` +# - Pulumi CLI `--config` flag +# - example: pulumi config set --path cert_manager.enabled true +# - example: pulumi config set --path cert_manager.version "1.15.3" +# - example: pulumi config set --path cert_manager.version "latest" +config: + cert_manager: + enabled: true + version: "1.15.3" +``` + +### Types Definition + +```python +# src/cert_manager/types.py + +from dataclasses import dataclass +from typing import Optional, Dict, Any +import pulumi + +@dataclass +class CertManagerConfig: + version: Optional[str] = None + # ... other fields ... 
+ + @staticmethod + def merge(user_config: Dict[str, Any]) -> 'CertManagerConfig': + # Merging logic as shown above +``` + +### Deployment Logic + +```python +# src/cert_manager/deploy.py + +def deploy_cert_manager_module( + config_cert_manager: CertManagerConfig, + global_depends_on: List[pulumi.Resource], + k8s_provider: k8s.Provider, +) -> Tuple[Optional[str], Optional[pulumi.Resource], Optional[str]]: + # Deployment logic for Cert Manager + # Return version, release resource, and any additional outputs +``` + +### Integration in `__main__.py` + +```python +# __main__.py + +config_cert_manager_dict, cert_manager_enabled = get_module_config('cert_manager', config, default_versions) + +if cert_manager_enabled: + from src.cert_manager.types import CertManagerConfig + config_cert_manager = CertManagerConfig.merge(config_cert_manager_dict) + + from src.cert_manager.deploy import deploy_cert_manager_module + + cert_manager_version, cert_manager_release, cert_manager_selfsigned_cert = deploy_cert_manager_module( + config_cert_manager=config_cert_manager, + global_depends_on=global_depends_on, + k8s_provider=k8s_provider, + ) + + versions["cert_manager"] = cert_manager_version + configurations["cert_manager"] = { + "enabled": cert_manager_enabled, + } + + pulumi.export("cert_manager_selfsigned_cert", cert_manager_selfsigned_cert) +``` diff --git a/pulumi/src/cert_manager/__init__.py b/pulumi/modules/aws/__init__.py similarity index 100% rename from pulumi/src/cert_manager/__init__.py rename to pulumi/modules/aws/__init__.py diff --git a/pulumi/modules/aws/deploy.py b/pulumi/modules/aws/deploy.py new file mode 100644 index 0000000..03a987f --- /dev/null +++ b/pulumi/modules/aws/deploy.py @@ -0,0 +1,131 @@ +# src/aws/deploy.py +# Description: generic boilerplate code not currently active in the project. +# This code is provided as a reference for future implementation. +# Key features include utilization of generate_compliance_tags, generate_compliance_labels, and generate_compliance_annotations functions from src/compliance/utils.py. 
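+# NOTE: the compliance helper functions referenced above are not imported or called in
+# the boilerplate below; they mark the intended integration point for a future
+# implementation of this module.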
+ +import pulumi +from pulumi import ResourceOptions +import pulumi_aws as aws +import pulumi_eks as eks +import pulumi_kubernetes as k8s + +# Global Pulumi settings +stack_tags = { + "project": pulumi.get_project(), + "stack": pulumi.get_stack(), + "owner": "pulumi-user", +} + +stack_labels = { + "environment": "testing", +} + +pulumi.runtime.set_all_project_tags(stack_tags) +pulumi.runtime.set_all_project_labels(stack_labels) + +# AWS S3 Bucket with global tags +s3_bucket = aws.s3.Bucket("nginxStorageBucket", + tags={ + **stack_tags, + "Name": "nginxStorageBucket", + "Environment": "Dev", + } +) + +# AWS EKS Cluster with global tags +eks_cluster = eks.Cluster("exampleCluster", + tags={ + **stack_tags, + "Name": "exampleCluster", + "Environment": "Dev", + } +) + +# Kubernetes Persistent Volume +persistent_volume = k8s.core.v1.PersistentVolume("nginxPv", + metadata=k8s.meta.v1.ObjectMetaArgs( + name="nginx-pv", + labels={ + **stack_labels, + "type": "local", + } + ), + spec=k8s.core.v1.PersistentVolumeSpecArgs( + capacity={"storage": "1Gi"}, + access_modes=["ReadWriteOnce"], + aws_elastic_block_store=k8s.core.v1.AWSElasticBlockStoreVolumeSourceArgs( + volume_id=s3_bucket.id.apply(lambda id: f"aws://{aws.region}/{id}"), + fs_type="ext4", + ), + ), + opts=ResourceOptions(parent=eks_cluster) +) + +# Kubernetes Persistent Volume Claim +persistent_volume_claim = k8s.core.v1.PersistentVolumeClaim("nginxPvc", + metadata=k8s.meta.v1.ObjectMetaArgs( + name="nginx-pvc", + labels=stack_labels, + ), + spec=k8s.core.v1.PersistentVolumeClaimSpecArgs( + access_modes=["ReadWriteOnce"], + resources=k8s.core.v1.ResourceRequirementsArgs( + requests={"storage": "1Gi"}, + ), + ), + opts=ResourceOptions(parent=eks_cluster) +) + +# Kubernetes Nginx Deployment with Persistent Storage +nginx_deployment = k8s.apps.v1.Deployment("nginxDeployment", + metadata=k8s.meta.v1.ObjectMetaArgs( + name="nginx-deployment", + labels={ + **stack_labels, + "app": "nginx", + } + ), + spec=k8s.apps.v1.DeploymentSpecArgs( + replicas=1, + selector=k8s.meta.v1.LabelSelectorArgs( + match_labels={ + "app": "nginx", + } + ), + template=k8s.core.v1.PodTemplateSpecArgs( + metadata=k8s.meta.v1.ObjectMetaArgs( + labels={ + **stack_labels, + "app": "nginx", + } + ), + spec=k8s.core.v1.PodSpecArgs( + containers=[ + k8s.core.v1.ContainerArgs( + name="nginx", + image="nginx:1.14.2", + ports=[k8s.core.v1.ContainerPortArgs(container_port=80)], + volume_mounts=[ + k8s.core.v1.VolumeMountArgs( + name="nginx-storage", + mount_path="/usr/share/nginx/html", + ) + ], + ) + ], + volumes=[ + k8s.core.v1.VolumeArgs( + name="nginx-storage", + persistent_volume_claim=k8s.core.v1.PersistentVolumeClaimVolumeSourceArgs( + claim_name=persistent_volume_claim.metadata.name, + ) + ) + ] + ) + ), + ), + opts=ResourceOptions(parent=eks_cluster) +) + +pulumi.export("s3BucketName", s3_bucket.bucket) +pulumi.export("eksClusterName", eks_cluster.core.apply(lambda core: core.endpoint)) diff --git a/pulumi/src/cilium/__init__.py b/pulumi/modules/azure/__init__.py similarity index 100% rename from pulumi/src/cilium/__init__.py rename to pulumi/modules/azure/__init__.py diff --git a/pulumi/src/cluster_network_addons/__init__.py b/pulumi/modules/ceph/__init__.py similarity index 100% rename from pulumi/src/cluster_network_addons/__init__.py rename to pulumi/modules/ceph/__init__.py diff --git a/pulumi/src/ceph/deploy.py b/pulumi/modules/ceph/deploy.py similarity index 100% rename from pulumi/src/ceph/deploy.py rename to pulumi/modules/ceph/deploy.py diff --git 
a/pulumi/modules/cert_manager/README.md b/pulumi/modules/cert_manager/README.md new file mode 100644 index 0000000..f3caa66 --- /dev/null +++ b/pulumi/modules/cert_manager/README.md @@ -0,0 +1,188 @@ +# Cert Manager Module Guide + +Welcome to the **Cert Manager Module** for the Kargo KubeVirt Kubernetes PaaS! This guide is tailored for both newcomers to DevOps and experienced developers, providing a comprehensive overview of how to deploy and configure the Cert Manager module within the Kargo platform. + +--- + +## Table of Contents + +- [Introduction](#introduction) +- [Why Use Cert Manager?](#why-use-cert-manager) +- [Getting Started](#getting-started) +- [Enabling the Module](#enabling-the-module) +- [Configuration Options](#configuration-options) + - [Default Settings](#default-settings) + - [Customizing Your Deployment](#customizing-your-deployment) +- [Module Components Explained](#module-components-explained) + - [Namespace Creation](#namespace-creation) + - [Helm Chart Deployment](#helm-chart-deployment) + - [Self-Signed Cluster Issuer Setup](#self-signed-cluster-issuer-setup) +- [Using the Module](#using-the-module) + - [Example Usage](#example-usage) +- [Troubleshooting and FAQs](#troubleshooting-and-faqs) +- [Additional Resources](#additional-resources) +- [Conclusion](#conclusion) + +--- + +## Introduction + +The Cert Manager module automates the management of SSL/TLS certificates in your Kubernetes cluster using [cert-manager](https://cert-manager.io/). It simplifies the process of obtaining, renewing, and managing certificates, enhancing the security of your applications without manual intervention. + +--- + +## Why Use Cert Manager? + +- **Automation**: Automatically provisions and renews certificates. +- **Integration**: Works seamlessly with Kubernetes Ingress resources and other services. +- **Security**: Enhances security by ensuring certificates are always up-to-date. +- **Compliance**: Helps meet compliance requirements by managing PKI effectively. + +--- + +## Getting Started + +### Prerequisites + +- **Kubernetes Cluster**: Ensure you have access to a Kubernetes cluster. +- **Pulumi CLI**: Install the Pulumi CLI and configure it. +- **Kubeconfig**: Your kubeconfig file should be properly set up. + +### Setup Steps + +1. **Navigate to the Kargo Pulumi Directory**: + ```bash + cd Kargo/pulumi + ``` +2. **Install Dependencies**: + ```bash + pip install -r requirements.txt + ``` +3. **Initialize Pulumi Stack**: + ```bash + pulumi stack init dev + ``` + +--- + +## Enabling the Module + +The Cert Manager module is enabled by default. To verify or modify its enabled status, adjust your Pulumi configuration. + +### Verifying Module Enablement + +```yaml +# Pulumi..yaml + +config: + cert_manager: + enabled: true # Set to false to disable +``` + +Alternatively, use the Pulumi CLI: + +```bash +pulumi config set --path cert_manager.enabled true +``` + +--- + +## Configuration Options + +### Default Settings + +The module is designed to work out-of-the-box with default settings: + +- **Namespace**: `cert-manager` +- **Version**: Defined in `default_versions.json` +- **Cluster Issuer Name**: `cluster-selfsigned-issuer` +- **Install CRDs**: `true` + +### Customizing Your Deployment + +You can tailor the module to fit your specific needs by customizing its configuration. + +#### Available Configuration Parameters + +- **enabled** *(bool)*: Enable or disable the module. +- **namespace** *(string)*: Kubernetes namespace for cert-manager. 
+- **version** *(string)*: Helm chart version to deploy. Use `'latest'` to fetch the most recent stable version. +- **cluster_issuer** *(string)*: Name of the ClusterIssuer resource. +- **install_crds** *(bool)*: Whether to install Custom Resource Definitions. + +#### Example Custom Configuration + +```yaml +config: + cert_manager: + enabled: true + namespace: "my-cert-manager" + version: "1.15.3" + cluster_issuer: "my-cluster-issuer" + install_crds: true +``` + +--- + +## Module Components Explained + +### Namespace Creation + +A dedicated namespace is created to isolate cert-manager resources. + +- **Why?**: Ensures better organization and avoids conflicts. +- **Customizable**: Change the namespace using the `namespace` parameter. + +### Helm Chart Deployment + +Deploys cert-manager using Helm. + +- **Chart Repository**: `https://charts.jetstack.io` +- **Version Management**: Specify a version or use `'latest'`. +- **Custom Values**: Resource requests and limits are set for optimal performance. + +### Self-Signed Cluster Issuer Setup + +Sets up a self-signed ClusterIssuer for certificate provisioning. + +- **Root ClusterIssuer**: Creates a root issuer. +- **CA Certificate**: Generates a CA certificate stored in a Kubernetes Secret. +- **Primary ClusterIssuer**: Issues certificates for your applications using the CA certificate. +- **Exported Values**: CA certificate data is exported for use in other modules. + +--- + +## Using the Module + +### Example Usage + +After enabling and configuring the module, deploy it using Pulumi: + +```bash +pulumi up +``` + +--- + +## Troubleshooting and FAQs + +**Q1: Cert-manager pods are not running.** + +- **A**: Check the namespace and ensure that CRDs are installed. Verify the Kubernetes version compatibility. + +**Q2: Certificates are not being issued.** + +- **A**: Ensure that the ClusterIssuer is correctly configured and that your Ingress resources reference it. + +**Q3: How do I update cert-manager to a newer version?** + +- **A**: Update the `version` parameter in your configuration and run `pulumi up`. + +--- + +## Additional Resources + +- **cert-manager Documentation**: [cert-manager.io/docs](https://cert-manager.io/docs/) +- **Kargo Project**: [Kargo GitHub Repository](https://github.com/ContainerCraft/Kargo) +- **Pulumi Kubernetes Provider**: [Pulumi Kubernetes Docs](https://www.pulumi.com/docs/reference/pkg/kubernetes/) +- **Helm Charts Repository**: [Artifact Hub - cert-manager](https://artifacthub.io/packages/helm/cert-manager/cert-manager) diff --git a/pulumi/src/containerized_data_importer/__init__.py b/pulumi/modules/cert_manager/__init__.py similarity index 100% rename from pulumi/src/containerized_data_importer/__init__.py rename to pulumi/modules/cert_manager/__init__.py diff --git a/pulumi/modules/cert_manager/deploy.py b/pulumi/modules/cert_manager/deploy.py new file mode 100644 index 0000000..ae59efd --- /dev/null +++ b/pulumi/modules/cert_manager/deploy.py @@ -0,0 +1,275 @@ +# pulumi/modules/cert_manager/deploy.py + +""" +Deploys the cert-manager module with proper dependency management. 
+""" + +from typing import List, Dict, Any, Tuple, Optional, cast + +import pulumi +import pulumi_kubernetes as k8s +from pulumi import log + +from core.types import NamespaceConfig +from core.utils import get_latest_helm_chart_version, wait_for_crds +from core.resource_helpers import ( + create_namespace, + create_helm_release, + create_custom_resource, + create_secret, +) + +from .types import CertManagerConfig + + +def deploy_cert_manager_module( + config_cert_manager: CertManagerConfig, + global_depends_on: List[pulumi.Resource], + k8s_provider: k8s.Provider, + ) -> Tuple[str, k8s.helm.v3.Release, str]: + """ + Deploys the cert-manager module and returns the version, release resource, and CA certificate. + """ + # TODO: Create module specific dependencies object to avoid blocking global resources on k8s_provider or other module specific dependencies + + # Deploy cert-manager + cert_manager_version, release, ca_cert_b64 = deploy_cert_manager( + config_cert_manager=config_cert_manager, + depends_on=global_depends_on, # Correctly pass the global dependencies + k8s_provider=k8s_provider, + ) + + # Update global dependencies + global_depends_on.append(release) + + return cert_manager_version, release, ca_cert_b64 + + +def deploy_cert_manager( + config_cert_manager: CertManagerConfig, + depends_on: List[pulumi.Resource], + k8s_provider: k8s.Provider, + ) -> Tuple[str, k8s.helm.v3.Release, str]: + """ + Deploys cert-manager using Helm and sets up cluster issuers, + ensuring that CRDs are available before creating custom resources. + """ + namespace = config_cert_manager.namespace + version = config_cert_manager.version + cluster_issuer_name = config_cert_manager.cluster_issuer + install_crds = config_cert_manager.install_crds + + # Create Namespace using the helper function + namespace_resource = create_namespace( + name=namespace, + k8s_provider=k8s_provider, + parent=k8s_provider, + depends_on=depends_on, + ) + # TODO: consider adding k8s_provider to module_depends_on dependencies + + # Get Helm Chart Version + # TODO: set the chart name and repo URL as variables in the CertManagerConfig class to allow for user configuration + chart_name = "cert-manager" + chart_repo_url = "https://charts.jetstack.io" + + # TODO: re-implement into the get_module_config function and adopt across all modules to reduce code duplication + if version == 'latest' or version is None: + version = get_latest_helm_chart_version(chart_repo_url, chart_name) + log.info(f"Setting cert-manager chart version to latest: {version}") + else: + log.info(f"Using cert-manager chart version: {version}") + + # Generate Helm values + helm_values = generate_helm_values(config_cert_manager) + + # Create Helm Release using the helper function + release = create_helm_release( + name=chart_name, + args=k8s.helm.v3.ReleaseArgs( + chart=chart_name, + version=version, + namespace=namespace, + skip_await=False, + repository_opts=k8s.helm.v3.RepositoryOptsArgs(repo=chart_repo_url), + values=helm_values, + ), + opts=pulumi.ResourceOptions( + parent=namespace_resource, + custom_timeouts=pulumi.CustomTimeouts(create="8m", update="4m", delete="4m"), + ), + k8s_provider=k8s_provider, + depends_on=[namespace_resource] + depends_on, + ) + + # Wait for the CRDs to be registered + # TODO: re-evaluate effectiveness of approach to wait for CRDs and complete the wait_for_crds implementation until it's effective. + # The current implementation fails to wait for the CRDs to be registered before continuing with child and dependent resources. 
+ crds = wait_for_crds( + crd_names=[ + "certificaterequests.cert-manager.io", + "certificates.cert-manager.io", + "challenges.acme.cert-manager.io", + "clusterissuers.cert-manager.io", + "issuers.cert-manager.io", + "orders.acme.cert-manager.io", + ], + k8s_provider=k8s_provider, + depends_on=[release], + parent=release + ) + + # Create Cluster Issuers using the helper function + # TODO: + # - make self-signed-issuer configurable enabled/disabled from boolean set in cert_manager/types.py CertManagerConfig class, default to enabled. + cluster_issuer_root, cluster_issuer_ca_certificate, cluster_issuer, ca_secret = create_cluster_issuers( + cluster_issuer_name, namespace, release, crds, k8s_provider + ) + + # Extract the CA certificate from the secret + # TODO: + # - re-evaluate relevance. IIRC this is used to return unwrapped secret values as b64 encoded strings for OpenUnison configuration. + # - consider maintaining the secret object as a return value for future use in other modules without exporting the secret values. + # - if user need requires the CA for client secret trust then consider documenting the use case and user instructions for utilization. + if ca_secret: + ca_data_tls_crt_b64 = ca_secret.data.apply(lambda data: data["tls.crt"]) + else: + ca_data_tls_crt_b64 = "" + + return version, release, ca_data_tls_crt_b64 + +def create_cluster_issuers( + cluster_issuer_name: str, + namespace: str, + release: k8s.helm.v3.Release, + crds: List[pulumi.Resource], + k8s_provider: k8s.Provider, +) -> Tuple[ + Optional[k8s.apiextensions.CustomResource], + Optional[k8s.apiextensions.CustomResource], + Optional[k8s.apiextensions.CustomResource], + Optional[k8s.core.v1.Secret], +]: + """ + Creates cluster issuers required for cert-manager, ensuring dependencies on CRDs. + + Args: + cluster_issuer_name (str): The name of the cluster issuer. + namespace (str): The Kubernetes namespace. + release (k8s.helm.v3.Release): The Helm release resource. + crds (List[pulumi.Resource]): List of CRDs. + k8s_provider (k8s.Provider): Kubernetes provider. + + Returns: + Tuple containing: + - ClusterIssuer for the self-signed root. + - ClusterIssuer's CA certificate. + - Primary ClusterIssuer. + - The secret resource containing the CA certificate. 
+ """ + try: + # SelfSigned Root Issuer + cluster_issuer_root = create_custom_resource( + name="cluster-selfsigned-issuer-root", + args={ + "apiVersion": "cert-manager.io/v1", + "kind": "ClusterIssuer", + "metadata": { + "name": "cluster-selfsigned-issuer-root", + }, + "spec": {"selfSigned": {}}, + }, + opts=pulumi.ResourceOptions( + parent=release, + provider=k8s_provider, + depends_on=crds, + custom_timeouts=pulumi.CustomTimeouts(create="5m", update="5m", delete="5m"), + ), + ) + + # CA Certificate Issuer + cluster_issuer_ca_certificate = create_custom_resource( + name="cluster-selfsigned-issuer-ca", + args={ + "apiVersion": "cert-manager.io/v1", + "kind": "Certificate", + "metadata": { + "name": "cluster-selfsigned-issuer-ca", + "namespace": namespace, + }, + "spec": { + "commonName": "cluster-selfsigned-issuer-ca", + "duration": "2160h0m0s", + "isCA": True, + "issuerRef": { + "group": "cert-manager.io", + "kind": "ClusterIssuer", + "name": "cluster-selfsigned-issuer-root", + }, + "privateKey": {"algorithm": "RSA", "size": 2048}, + "renewBefore": "360h0m0s", + "secretName": "cluster-selfsigned-issuer-ca", + }, + }, + opts=pulumi.ResourceOptions( + parent=cluster_issuer_root, + provider=k8s_provider, + depends_on=[cluster_issuer_root], + custom_timeouts=pulumi.CustomTimeouts(create="5m", update="5m", delete="10m"), + ), + ) + + # Main Cluster Issuer + cluster_issuer = create_custom_resource( + name=cluster_issuer_name, + args={ + "apiVersion": "cert-manager.io/v1", + "kind": "ClusterIssuer", + "metadata": { + "name": cluster_issuer_name, + }, + "spec": { + "ca": {"secretName": "cluster-selfsigned-issuer-ca"}, + }, + }, + opts=pulumi.ResourceOptions( + parent=cluster_issuer_ca_certificate, + provider=k8s_provider, + depends_on=[cluster_issuer_ca_certificate], + custom_timeouts=pulumi.CustomTimeouts(create="5m", update="5m", delete="5m"), + ), + ) + + # Fetch CA Secret if not in dry-run + if not pulumi.runtime.is_dry_run(): + ca_secret = k8s.core.v1.Secret.get( + resource_name="cluster-selfsigned-issuer-ca", + id=f"{namespace}/cluster-selfsigned-issuer-ca", + opts=pulumi.ResourceOptions( + parent=cluster_issuer_ca_certificate, + provider=k8s_provider, + depends_on=[cluster_issuer_ca_certificate], + ) + ) + else: + ca_secret = None + + return cluster_issuer_root, cluster_issuer_ca_certificate, cluster_issuer, ca_secret + + except Exception as e: + log.error(f"Error during the creation of cluster issuers: {str(e)}") + return None, None, None, None + + +def generate_helm_values(config_cert_manager: CertManagerConfig) -> Dict[str, Any]: + """ + Generates Helm values for the CertManager deployment. + """ + return { + 'replicaCount': 1, + 'installCRDs': config_cert_manager.install_crds, + 'resources': { + 'limits': {'cpu': '500m', 'memory': '1024Mi'}, + 'requests': {'cpu': '250m', 'memory': '512Mi'}, + }, + } diff --git a/pulumi/modules/cert_manager/types.py b/pulumi/modules/cert_manager/types.py new file mode 100644 index 0000000..c6ebd38 --- /dev/null +++ b/pulumi/modules/cert_manager/types.py @@ -0,0 +1,31 @@ +# ./pulumi/modules/cert_manager/types.py +""" +Merges user-provided configuration with default configuration. + +Args: + user_config (Dict[str, Any]): The user-provided configuration. + +Returns: + CertManagerConfig: The merged configuration object. 
+""" + +from dataclasses import dataclass +from typing import Optional, Dict, Any +import pulumi + +@dataclass +class CertManagerConfig: + version: Optional[str] = "latest" + namespace: str = "cert-manager" + cluster_issuer: str = "cluster-selfsigned-issuer" + install_crds: bool = True + + @staticmethod + def merge(user_config: Dict[str, Any]) -> 'CertManagerConfig': + default_config = CertManagerConfig() + for key, value in user_config.items(): + if hasattr(default_config, key): + setattr(default_config, key, value) + else: + pulumi.log.warn(f"Unknown configuration key '{key}' in cert_manager config.") + return default_config diff --git a/pulumi/src/hostpath_provisioner/__init__.py b/pulumi/modules/cilium/__init__.py similarity index 100% rename from pulumi/src/hostpath_provisioner/__init__.py rename to pulumi/modules/cilium/__init__.py diff --git a/pulumi/src/cilium/deploy.py b/pulumi/modules/cilium/deploy.py similarity index 100% rename from pulumi/src/cilium/deploy.py rename to pulumi/modules/cilium/deploy.py diff --git a/pulumi/src/kubernetes_dashboard/__init__.py b/pulumi/modules/cluster_network_addons/__init__.py similarity index 100% rename from pulumi/src/kubernetes_dashboard/__init__.py rename to pulumi/modules/cluster_network_addons/__init__.py diff --git a/pulumi/src/cluster_network_addons/deploy.py b/pulumi/modules/cluster_network_addons/deploy.py similarity index 100% rename from pulumi/src/cluster_network_addons/deploy.py rename to pulumi/modules/cluster_network_addons/deploy.py diff --git a/pulumi/src/kubevirt/__init__.py b/pulumi/modules/containerized_data_importer/__init__.py similarity index 100% rename from pulumi/src/kubevirt/__init__.py rename to pulumi/modules/containerized_data_importer/__init__.py diff --git a/pulumi/modules/containerized_data_importer/deploy.py b/pulumi/modules/containerized_data_importer/deploy.py new file mode 100644 index 0000000..e861f42 --- /dev/null +++ b/pulumi/modules/containerized_data_importer/deploy.py @@ -0,0 +1,138 @@ +# pulumi/modules/containerized_data_importer/deploy.py + +""" +Enhanced deployment script for the Containerized Data Importer (CDI) module. +""" + +import requests +from typing import List, Dict, Any, Tuple, Optional + +import pulumi +import pulumi_kubernetes as k8s +from pulumi import log + +from core.resource_helpers import create_namespace, create_custom_resource + +from .types import CdiConfig + +def deploy_containerized_data_importer_module( + config_cdi: CdiConfig, + global_depends_on: List[pulumi.Resource], + k8s_provider: k8s.Provider, + ) -> Tuple[Optional[str], Optional[pulumi.Resource]]: + """ + Deploys the Containerized Data Importer (CDI) module and returns the version and the deployed resource. + + Args: + config_cdi (CdiConfig): Configuration for the CDI module. + global_depends_on (List[pulumi.Resource]): Global dependencies for all modules. + k8s_provider (k8s.Provider): The Kubernetes provider. + + Returns: + Tuple[Optional[str], Optional[pulumi.Resource]]: The version deployed and the deployed resource. 
+ """ + try: + log.info("Starting deployment of CDI module") + + version = config_cdi.version if config_cdi.version and config_cdi.version != "latest" else fetch_latest_version() + log.info(f"Using CDI version: {version}") + + # Create namespace + log.info(f"Creating namespace: {config_cdi.namespace}") + namespace_resource = create_namespace( + name=config_cdi.namespace, + labels=config_cdi.labels, + annotations=config_cdi.annotations, + k8s_provider=k8s_provider, + parent=k8s_provider, + depends_on=global_depends_on, + ) + + # Deploy CDI operator + # TODO: consider moving url variable to config via new value in ContainerizedDataImporterConfig class with default value, may require helper function in /types.py to support `latest` + operator_url = f"https://github.com/kubevirt/containerized-data-importer/releases/download/v{version}/cdi-operator.yaml" + log.info(f"Deploying CDI operator from URL: {operator_url}") + + operator_resource = k8s.yaml.ConfigFile( + "cdi-operator", + file=operator_url, + opts=pulumi.ResourceOptions( + provider=k8s_provider, + parent=namespace_resource, + ) + ) + + # Ensure dependencies on operator and namespace + # TODO: re-evaluate implementing module specific module_depends_on and consistently adopting across all modules + depends_on = global_depends_on + [operator_resource] + + # Create CDI custom resource + log.info("Creating CDI custom resource") + cdi_resource = create_custom_resource( + name="cdi", + args={ + "apiVersion": "cdi.kubevirt.io/v1beta1", + "kind": "CDI", + "metadata": { + "name": "cdi", + "namespace": config_cdi.namespace, + }, + "spec": { + "config": { + "featureGates": [ + "HonorWaitForFirstConsumer", + ], + }, + "imagePullPolicy": "IfNotPresent", + "infra": { + "nodeSelector": { + "kubernetes.io/os": "linux", + }, + "tolerations": [ + { + "key": "CriticalAddonsOnly", + "operator": "Exists", + }, + ], + }, + "workload": { + "nodeSelector": { + "kubernetes.io/os": "linux", + }, + }, + }, + }, + opts=pulumi.ResourceOptions( + parent=operator_resource, + depends_on=namespace_resource, + provider=k8s_provider, + custom_timeouts=pulumi.CustomTimeouts(create="1m", update="1m", delete="1m"), + ) + ) + + log.info("CDI module deployment complete") + return version, operator_resource + + except Exception as e: + log.error(f"Deployment of CDI module failed: {str(e)}") + raise + +# Function to fetch the latest stable semantic version from GitHub releases +# TODO: consider making github latest release version fetching a shared utility function & adopting across all modules to reduce code duplication +def fetch_latest_version() -> str: + """ + Fetches the latest stable version of CDI from GitHub releases. + + Returns: + str: Latest stable version string. 
+ """ + try: + latest_release_url = 'https://github.com/kubevirt/containerized-data-importer/releases/latest' + tag = requests.get(latest_release_url, allow_redirects=False).headers.get('location') + version = tag.split('/')[-1] + version = version.lstrip('v') + log.info(f"Fetched latest CDI version: {version}") + return version + except Exception as e: + log.error(f"Error fetching the latest version: {e}") + return "latest" diff --git a/pulumi/modules/containerized_data_importer/types.py b/pulumi/modules/containerized_data_importer/types.py new file mode 100644 index 0000000..1643f37 --- /dev/null +++ b/pulumi/modules/containerized_data_importer/types.py @@ -0,0 +1,25 @@ +# pulumi/modules/containerized_data_importer/types.py + +""" +Defines the data structure for the Containerized Data Importer (CDI) module configuration. +""" + +from dataclasses import dataclass, field +from typing import Optional, Dict, Any + +@dataclass +class CdiConfig: + version: Optional[str] = "latest" + namespace: str = "cdi" + labels: Dict[str, str] = field(default_factory=lambda: {"app": "cdi"}) + annotations: Dict[str, Any] = field(default_factory=dict) + + @staticmethod + def merge(user_config: Dict[str, Any]) -> 'CdiConfig': + default_config = CdiConfig() + for key, value in user_config.items(): + if hasattr(default_config, key): + setattr(default_config, key, value) + else: + pulumi.log.warn(f"Unknown configuration key '{key}' in cdi config.") + return default_config diff --git a/pulumi/src/kv_manager/__init__.py b/pulumi/modules/hostpath_provisioner/__init__.py similarity index 100% rename from pulumi/src/kv_manager/__init__.py rename to pulumi/modules/hostpath_provisioner/__init__.py diff --git a/pulumi/modules/hostpath_provisioner/deploy.py b/pulumi/modules/hostpath_provisioner/deploy.py new file mode 100644 index 0000000..5e610e2 --- /dev/null +++ b/pulumi/modules/hostpath_provisioner/deploy.py @@ -0,0 +1,272 @@ +# pulumi/modules/hostpath_provisioner/deploy.py + +import requests +from typing import List, Dict, Any, Tuple, Optional + +import pulumi +import pulumi_kubernetes as k8s +from pulumi import log + +from core.resource_helpers import create_namespace, create_custom_resource, create_config_file +from core.utils import wait_for_crds +from .types import HostPathProvisionerConfig + + +# Function to call the deploy_hostpath_provisioner function and encapsulate any auxiliary logic like updating global dependencies +# TODO: standardize function signatures and common function names across all modules for deploy functions including adopting common naming conventions like using `config` parameter name instead of `config_` format. +# TODO: adopt a consistent naming convention for common function names across all modules. +def deploy_hostpath_provisioner_module( + config_hostpath_provisioner: HostPathProvisionerConfig, + global_depends_on: List[pulumi.Resource], + k8s_provider: k8s.Provider, +) -> Tuple[str, k8s.yaml.ConfigFile]: + """ + Deploys the HostPath Provisioner module and returns the version and the deployed resource. + + Args: + config_hostpath_provisioner (HostPathProvisionerConfig): Configuration for the HostPath Provisioner. + global_depends_on (List[pulumi.Resource]): Global dependencies for all modules. + k8s_provider (k8s.Provider): The Kubernetes provider. + + Returns: + Tuple[str, k8s.yaml.ConfigFile]: The version deployed and the configured webhook resource. 
+ """ + hostpath_version, hostpath_resource = deploy_hostpath_provisioner( + config_hostpath_provisioner=config_hostpath_provisioner, + depends_on=global_depends_on, + k8s_provider=k8s_provider, + ) + + # Update global dependencies + # TODO: re-evaluate global_depends_on usage, implementation, and hygene, and document strategy. Then adopt a consistent approach across all modules. + global_depends_on.append(hostpath_resource) + + return hostpath_version, hostpath_resource + + +# Function to deploy the HostPath Provisioner +# TODO: standardize function signatures and common function names across all modules for deploy functions including adopting common naming conventions like using `config` parameter name instead of `config_` format. +def deploy_hostpath_provisioner( + config_hostpath_provisioner: HostPathProvisionerConfig, + depends_on: List[pulumi.Resource], + k8s_provider: k8s.Provider, +) -> Tuple[str, k8s.yaml.ConfigFile]: + """ + Deploys the HostPath Provisioner and related resources. + + Args: + config_hostpath_provisioner (HostPathProvisionerConfig): Configuration for the HostPath Provisioner. + depends_on (List[pulumi.Resource]): Dependencies for this deployment. + k8s_provider (k8s.Provider): The Kubernetes provider. + + Returns: + Tuple[str, k8s.yaml.ConfigFile]: The version deployed and the configured webhook resource. + """ + name = "hostpath-provisioner" + namespace = config_hostpath_provisioner.namespace + + namespace_resource = create_namespace( + name=namespace, + labels=config_hostpath_provisioner.labels, + annotations=config_hostpath_provisioner.annotations, + k8s_provider=k8s_provider, + parent=k8s_provider, + depends_on=depends_on, + ) + + # Determine version to use + version = get_latest_version() if config_hostpath_provisioner.version == "latest" else config_hostpath_provisioner.version + + # Transformation function to enforce namespace override on all resources + # TODO: consider implementing as a utility or resource helper function and adopting directly in core/resource_helpers.py in applicable functions. + def enforce_namespace(resource_args: pulumi.ResourceTransformationArgs) -> pulumi.ResourceTransformationResult: + """ + Transformation function to enforce namespace on all resources. + """ + props = resource_args.props + namespace_conflict = False + + # Handle ObjectMetaArgs case + if isinstance(props.get('metadata'), k8s.meta.v1.ObjectMetaArgs): + meta = props['metadata'] + if meta.namespace and meta.namespace != namespace: + namespace_conflict = True + updated_meta = k8s.meta.v1.ObjectMetaArgs( + name=meta.name, + namespace=namespace, + labels=meta.labels, + annotations=meta.annotations + ) + props['metadata'] = updated_meta + + # Handle dictionary style metadata + elif isinstance(props.get('metadata'), dict): + meta = props['metadata'] + if 'namespace' in meta and meta['namespace'] != namespace: + namespace_conflict = True + meta['namespace'] = namespace + + # TODO: document when/if this case is applicable and why this approach is used. + if namespace_conflict: + raise ValueError("Resource namespace conflict detected.") + + return pulumi.ResourceTransformationResult(props, resource_args.opts) + + # Deploy the webhook + # TODO: consider relocating url variable into the HostpathProvisionerConfig class as a property for better user configuration. + # TODO: consider supporting remote and local path webhook.yaml sources. 
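+    # The release-pinned webhook.yaml is applied first; the operator manifest further
+    # below is parented to this webhook resource, and both are forced into the module
+    # namespace by the enforce_namespace transformation defined above.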
+ webhook_url = f'https://github.com/kubevirt/hostpath-provisioner-operator/releases/download/v{version}/webhook.yaml' + webhook = create_config_file( + name="hostpath-provisioner-webhook", + file=webhook_url, + opts=pulumi.ResourceOptions( + provider=k8s_provider, + parent=namespace_resource, + depends_on=depends_on, + custom_timeouts=pulumi.CustomTimeouts(create="10m", update="5m", delete="5m"), + transformations=[enforce_namespace] + ), + ) + + # Deploy the operator + # TODO: consider relocating url variable into the HostpathProvisionerConfig class as a property for better user configuration. + # TODO: consider supporting remote and local path operator.yaml sources. + operator_url = f'https://github.com/kubevirt/hostpath-provisioner-operator/releases/download/v{version}/operator.yaml' + operator = create_config_file( + name="hostpath-provisioner-operator", + file=operator_url, + opts=pulumi.ResourceOptions( + provider=k8s_provider, + parent=webhook, + depends_on=depends_on, + custom_timeouts=pulumi.CustomTimeouts(create="10m", update="5m", delete="5m"), + transformations=[enforce_namespace] + ), + ) + + # Ensure CRDs are created before HostPathProvisioner resource + # TODO: re-evaluate if this is functional and finish the implementation to ensure pulumi waits for CRDs to be created before creating the HostPathProvisioner resource. + crds = wait_for_crds( + crd_names=["hostpathprovisioners.hostpathprovisioner.kubevirt.io"], + k8s_provider=k8s_provider, + depends_on=depends_on, + parent=operator, + ) + + # Create HostPathProvisioner resource + hostpath_provisioner = create_custom_resource( + name="hostpath-provisioner", + args={ + "apiVersion": "hostpathprovisioner.kubevirt.io/v1beta1", + "kind": "HostPathProvisioner", + "metadata": { + "name": "hostpath-provisioner", + "namespace": namespace, + }, + "spec": { + "imagePullPolicy": "IfNotPresent", + "storagePools": [{ + "name": "ssd", + "path": config_hostpath_provisioner.hostpath, + }], + "workload": { + "nodeSelector": { + "kubernetes.io/os": "linux" + } + } + }, + }, + opts=pulumi.ResourceOptions( + parent=operator, + depends_on=depends_on + crds, + provider=k8s_provider, + custom_timeouts=pulumi.CustomTimeouts(create="10m", update="5m", delete="5m") + ), + ) + + # Define the StorageClass + # TODO: make more user configurable and consider supporting multiple storage pools from a configuration map or array. + storage_class = create_storage_class( + name="hostpath-storage-class-ssd", + provisioner="kubevirt.io.hostpath-provisioner", + namespace=namespace, + default=config_hostpath_provisioner.default, + storage_pool="ssd", + parent=hostpath_provisioner, + k8s_provider=k8s_provider, + ) + + return version, webhook + + +# Function to retrieve the latest version of HostPath Provisioner from GitHub Releases +# TODO: consider relocating this function to a utility or resource helper module to reduce code duplication. +def get_latest_version() -> str: + """ + Retrieves the latest stable version of HostPath Provisioner. + + Returns: + str: The latest version number. 
+ """ + try: + tag_url = 'https://github.com/kubevirt/hostpath-provisioner-operator/releases/latest' + response = requests.get(tag_url, allow_redirects=False) + final_url = response.headers.get('location') + version = final_url.split('/')[-1].lstrip('v') + return version + except Exception as e: + log.error(f"Error fetching the latest version: {e}") + return "0.17.0" + + +# Function to create a StorageClass resource +# TODO: consider supporting iterating over multiple storage pools from a configuration map or array. +def create_storage_class( + name: str, + provisioner: str, + namespace: str, + default: bool, + storage_pool: str, + parent: pulumi.Resource, + k8s_provider: k8s.Provider, +) -> k8s.storage.v1.StorageClass: + """ + Creates a StorageClass resource specific to HostPath Provisioner. + + Args: + name (str): The name of the storage class. + provisioner (str): The provisioner to use. + namespace (str): The namespace to deploy into. + default (bool): Whether this storage class should be the default. + storage_pool (str): The name of the storage pool. + parent (pulumi.Resource): The parent resource. + k8s_provider (k8s.Provider): The Kubernetes provider. + + Returns: + k8s.storage.v1.StorageClass: The created StorageClass resource. + """ + if default: + is_default_storage_class = "true" + else: + is_default_storage_class = "false" + + return k8s.storage.v1.StorageClass( + resource_name=name, + metadata=k8s.meta.v1.ObjectMetaArgs( + name=name, + annotations={ + "storageclass.kubernetes.io/is-default-class": is_default_storage_class, + }, + ), + provisioner=provisioner, + reclaim_policy="Delete", + volume_binding_mode="WaitForFirstConsumer", + parameters={ + "storagePool": storage_pool, + }, + opts=pulumi.ResourceOptions( + parent=parent, + provider=k8s_provider, + custom_timeouts=pulumi.CustomTimeouts(create="5m", update="5m", delete="5m") + ), + ) diff --git a/pulumi/modules/hostpath_provisioner/types.py b/pulumi/modules/hostpath_provisioner/types.py new file mode 100644 index 0000000..4761027 --- /dev/null +++ b/pulumi/modules/hostpath_provisioner/types.py @@ -0,0 +1,28 @@ +# pulumi/modules/hostpath_provisioner/types.py + +""" +Defines the data structure for the HostPath Provisioner module configuration. 
+""" + +from dataclasses import dataclass, field +from typing import Optional, Dict, Any +import pulumi + +@dataclass +class HostPathProvisionerConfig: + version: Optional[str] = "latest" + namespace: str = "hostpath-provisioner" + hostpath: str = "/var/lib/hostpath-provisioner" + default: bool = True + labels: Dict[str, str] = field(default_factory=dict) + annotations: Dict[str, Any] = field(default_factory=dict) + + @staticmethod + def merge(user_config: Dict[str, Any]) -> 'HostPathProvisionerConfig': + default_config = HostPathProvisionerConfig() + for key, value in user_config.items(): + if hasattr(default_config, key): + setattr(default_config, key, value) + else: + pulumi.log.warn(f"Unknown configuration key '{key}' in hostpath_provisioner config.") + return default_config diff --git a/pulumi/src/lib/__init__.py b/pulumi/modules/kubernetes/__init__.py similarity index 100% rename from pulumi/src/lib/__init__.py rename to pulumi/modules/kubernetes/__init__.py diff --git a/pulumi/src/local_path_storage/__init__.py b/pulumi/modules/kubernetes_dashboard/__init__.py similarity index 100% rename from pulumi/src/local_path_storage/__init__.py rename to pulumi/modules/kubernetes_dashboard/__init__.py diff --git a/pulumi/src/kubernetes_dashboard/deploy.py b/pulumi/modules/kubernetes_dashboard/deploy.py similarity index 100% rename from pulumi/src/kubernetes_dashboard/deploy.py rename to pulumi/modules/kubernetes_dashboard/deploy.py diff --git a/pulumi/modules/kubevirt/README.md b/pulumi/modules/kubevirt/README.md new file mode 100644 index 0000000..2869321 --- /dev/null +++ b/pulumi/modules/kubevirt/README.md @@ -0,0 +1,186 @@ +# KubeVirt Module Guide + +Welcome to the **KubeVirt Module** for the Kargo KubeVirt Kubernetes PaaS! This guide is intended to help both newcomers and experienced developers understand, deploy, and customize the KubeVirt module within the Kargo platform. + +--- + +## Table of Contents + +- [Introduction](#introduction) +- [Why Use KubeVirt?](#why-use-kubevirt) +- [Getting Started](#getting-started) +- [Enabling the Module](#enabling-the-module) +- [Configuration Options](#configuration-options) + - [Default Settings](#default-settings) + - [Customizing Your Deployment](#customizing-your-deployment) +- [Module Components Explained](#module-components-explained) + - [Namespace Creation](#namespace-creation) + - [Operator Deployment](#operator-deployment) + - [Custom Resource Configuration](#custom-resource-configuration) +- [Using the Module](#using-the-module) + - [Example Usage](#example-usage) +- [Troubleshooting and FAQs](#troubleshooting-and-faqs) +- [Additional Resources](#additional-resources) +- [Conclusion](#conclusion) + +--- + +## Introduction + +The KubeVirt module enables you to run virtual machines (VMs) within your Kubernetes cluster using [KubeVirt](https://kubevirt.io/). It bridges the gap between containerized applications and traditional VM workloads, providing a unified platform for all your infrastructure needs. + +--- + +## Why Use KubeVirt? + +- **Unified Platform**: Manage containers and VMs in a single Kubernetes cluster. +- **Flexibility**: Run legacy applications alongside cloud-native ones. +- **Scalability**: Leverage Kubernetes scaling features for VMs. +- **Ecosystem Integration**: Use Kubernetes tools and practices for VM management. + +--- + +## Getting Started + +### Prerequisites + +- **Kubernetes Cluster**: Access to a cluster with appropriate resources. +- **Pulumi CLI**: Installed and configured. 
+- **Kubeconfig**: Properly set up for cluster access. + +### Setup Steps + +1. **Navigate to the Kargo Pulumi Directory**: + ```bash + cd Kargo/pulumi + ``` +2. **Install Dependencies**: + ```bash + pip install -r requirements.txt + ``` +3. **Initialize Pulumi Stack**: + ```bash + pulumi stack init dev + ``` + +--- + +## Enabling the Module + +The KubeVirt module is enabled by default. To confirm or adjust its status, modify your Pulumi configuration. + +### Verifying Module Enablement + +```yaml +# Pulumi..yaml + +config: + kubevirt: + enabled: true # Set to false to disable +``` + +Alternatively, use the Pulumi CLI: + +```bash +pulumi config set --path kubevirt.enabled true +``` + +--- + +## Configuration Options + +### Default Settings + +- **Namespace**: `kubevirt` +- **Version**: Defined in `default_versions.json` +- **Use Emulation**: `false` (suitable for bare-metal environments) + +### Customizing Your Deployment + +#### Available Configuration Parameters + +- **enabled** *(bool)*: Enable or disable the module. +- **namespace** *(string)*: Kubernetes namespace for KubeVirt. +- **version** *(string)*: Specific version to deploy. Use `'latest'` for the most recent stable version. +- **use_emulation** *(bool)*: Enable if running in a nested virtualization environment. +- **labels** *(dict)*: Custom labels for resources. +- **annotations** *(dict)*: Custom annotations for resources. + +#### Example Custom Configuration + +```yaml +config: + kubevirt: + enabled: true + namespace: "kubevirt" + version: "1.3.1" + use_emulation: true + labels: + app: "kubevirt" + annotations: + owner: "dev-team" +``` + +--- + +## Module Components Explained + +### Namespace Creation + +A dedicated namespace is created for KubeVirt. + +- **Purpose**: Isolates KubeVirt resources for better management. +- **Customization**: Change using the `namespace` parameter. + +### Operator Deployment + +Deploys the KubeVirt operator. + +- **Source**: Official KubeVirt operator YAML. +- **Version Management**: Specify a version or use `'latest'`. +- **Transformation**: YAML is adjusted to fit the specified namespace. + +### Custom Resource Configuration + +Defines the KubeVirt CustomResource to configure KubeVirt settings. + +- **Emulation Mode**: Controlled by `use_emulation`. +- **Feature Gates**: Enables additional features like `HostDevices` and `ExpandDisks`. +- **SMBIOS Configuration**: Sets metadata for virtual machines. + +--- + +## Using the Module + +### Example Usage + +Deploy the module with your custom configuration: + +```bash +pulumi up +``` + +--- + +## Troubleshooting and FAQs + +**Q1: Virtual machines are not starting.** + +- **A**: Ensure that your nodes support virtualization. If running in a VM without the `/dev/kvm` device, set `use_emulation` to `true`. + +**Q2: Deployment fails with version errors.** + +- **A**: Verify that the specified version exists. Use `'latest'` to automatically fetch the latest stable version. + +**Q3: How do I enable additional feature gates?** + +- **A**: Modify the `featureGates` section in the `deploy.py` or submit a feature request to expose this via configuration. 
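+
+For orientation, here is a minimal excerpt of how this module's `deploy.py` currently shapes the KubeVirt CustomResource (the exact gate list may vary between releases):
+
+```yaml
+spec:
+  configuration:
+    developerConfiguration:
+      useEmulation: false      # driven by the use_emulation setting
+      featureGates:
+        - HostDevices
+        - ExpandDisks
+```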
+ +--- + +## Additional Resources + +- **KubeVirt Documentation**: [kubevirt.io/docs](https://kubevirt.io/docs/) +- **Kargo Project**: [Kargo GitHub Repository](https://github.com/ContainerCraft/Kargo) +- **Pulumi Kubernetes Provider**: [Pulumi Kubernetes Docs](https://www.pulumi.com/docs/reference/pkg/kubernetes/) +- **KubeVirt Releases**: [KubeVirt GitHub Releases](https://github.com/kubevirt/kubevirt/releases) diff --git a/pulumi/src/multus/__init__.py b/pulumi/modules/kubevirt/__init__.py similarity index 100% rename from pulumi/src/multus/__init__.py rename to pulumi/modules/kubevirt/__init__.py diff --git a/pulumi/modules/kubevirt/deploy.py b/pulumi/modules/kubevirt/deploy.py new file mode 100644 index 0000000..af3e34b --- /dev/null +++ b/pulumi/modules/kubevirt/deploy.py @@ -0,0 +1,225 @@ +# pulumi/modules/kubevirt/deploy.py + +""" +Deploys the KubeVirt module. + +This module is responsible for deploying KubeVirt on the Kubernetes cluster. + +The configuration options are: + + namespace: str - The namespace where KubeVirt will be deployed. Default is 'kubevirt'. + version: Optional[str] - The version of KubeVirt to deploy. Default is None. + use_emulation: bool - Whether to use emulation or not. Default is False. + labels: Dict[str, str] - The labels to apply to the KubeVirt resources. Default is {}. + annotations: Dict[str, Any] - The annotations to apply to the KubeVirt resources. Default is {}. + global_depends_on: List[pulumi.Resource] - The list of resources that the KubeVirt module depends on. Default is []. + k8s_provider: k8s.Provider - The Kubernetes provider. Default is None. + + Returns: + Tuple[Optional[str], k8s.apiextensions.CustomResource] - The version of KubeVirt deployed and the deployed resource. + + Raises: + Exception: If the KubeVirt CRDs are not available. +""" + +# Import necessary modules +import requests +import yaml +import tempfile +import os +from typing import Optional, List, Tuple, Dict, Any + +import pulumi +import pulumi_kubernetes as k8s +from pulumi import log + +from core.utils import wait_for_crds +from core.metadata import get_global_labels, get_global_annotations +from core.resource_helpers import ( + create_namespace, + create_custom_resource, + create_config_file, +) +from .types import KubeVirtConfig + +def deploy_kubevirt_module( + config_kubevirt: KubeVirtConfig, + global_depends_on: List[pulumi.Resource], + k8s_provider: k8s.Provider, + ) -> Tuple[Optional[str], k8s.apiextensions.CustomResource]: + """ + Deploys the KubeVirt module and returns the version and the deployed resource. + """ + # Deploy KubeVirt + kubevirt_version, kubevirt_resource = deploy_kubevirt( + config_kubevirt=config_kubevirt, + depends_on=global_depends_on, + k8s_provider=k8s_provider, + ) + + # Update global dependencies if not None + # TODO: re-evaluate if global_depends_on should be updated here or in the calling function + # TODO: regardless, the if statement is not necessary as this code will not be executed if kubevirt module is not enabled + if kubevirt_resource: + global_depends_on.append(kubevirt_resource) + + return kubevirt_version, kubevirt_resource + +def deploy_kubevirt( + config_kubevirt: KubeVirtConfig, + depends_on: List[pulumi.Resource], + k8s_provider: k8s.Provider, + ) -> Tuple[str, Optional[pulumi.Resource]]: + """ + Deploys KubeVirt operator and creates the KubeVirt CustomResource, + ensuring that the CRD is available before creating the CustomResource. 
+ """ + # Create Namespace using the helper function from core/resource_helpers.py + namespace_resource = create_namespace( + name=config_kubevirt.namespace, + k8s_provider=k8s_provider, + parent=k8s_provider, + depends_on=depends_on, + ) + + # Add the namespace to the dependencies + # TODO: reevaluate if this is necessary, helpful, and if a module scoped `module_depends_on` pattern should be adopted across modules + depends_on = depends_on + [namespace_resource] + + # Extract config objects from config dictionary + version = config_kubevirt.version + namespace = config_kubevirt.namespace + use_emulation = config_kubevirt.use_emulation + + # Determine latest version release from GitHub Releases + # TODO: reimplement into the get_module_config function and adopt across all modules to reduce code duplication + if version == 'latest' or version is None: + version = get_latest_kubevirt_version() + log.info(f"Setting KubeVirt release version to latest: {version}") + else: + log.info(f"Using KubeVirt version: {version}") + + # Download and transform KubeVirt operator YAML + kubevirt_operator_yaml = download_kubevirt_operator_yaml(version) + transformed_yaml = _transform_yaml(kubevirt_operator_yaml, namespace) + + # Write transformed YAML to a temporary file + with tempfile.NamedTemporaryFile(delete=False, mode='w') as temp_file: + yaml.dump_all(transformed_yaml, temp_file) + temp_file_path = temp_file.name + + operator = None + try: + # Deploy KubeVirt operator using the helper function + operator = create_config_file( + name='kubevirt-operator', + file=temp_file_path, + opts=pulumi.ResourceOptions( + parent=namespace_resource, + custom_timeouts=pulumi.CustomTimeouts( + create="10m", + update="5m", + delete="5m", + ), + ), + k8s_provider=k8s_provider, + depends_on=depends_on, + ) + finally: + os.unlink(temp_file_path) + + # Wait for the CRDs to be registered + crds = wait_for_crds( + crd_names=[ + "kubevirts.kubevirt.io", + # Add other required CRD names here if needed + ], + k8s_provider=k8s_provider, + depends_on=depends_on, + parent=operator + ) + + # Create the KubeVirt resource always + kubevirt_resource = create_custom_resource( + name="kubevirt", + args={ + "apiVersion": "kubevirt.io/v1", + "kind": "KubeVirt", + "metadata": { + "name": "kubevirt", + "namespace": namespace, + }, + "spec": { + "configuration": { + "developerConfiguration": { + "useEmulation": use_emulation, + "featureGates": [ + "HostDevices", + "ExpandDisks", + "AutoResourceLimitsGate", + ], + }, + "smbios": { + "sku": "kargo-kc2", + "version": version, + "manufacturer": "ContainerCraft", + "product": "Kargo", + "family": "CCIO", + }, + }, + }, + }, + opts=pulumi.ResourceOptions( + provider=k8s_provider, + parent=operator, + depends_on=depends_on + crds, + custom_timeouts=pulumi.CustomTimeouts( + create="5m", + update="5m", + delete="5m", + ), + ), + ) + + return version, kubevirt_resource + + +# Function to get the latest KubeVirt version if the version is set to 'latest' or no version configuration is supplied +def get_latest_kubevirt_version() -> str: + """ + Retrieves the latest stable version of KubeVirt. 
+ """ + + # TODO: relocate this URL to a default in the KubevirtConfig class and allow for an override + url = 'https://storage.googleapis.com/kubevirt-prow/release/kubevirt/kubevirt/stable.txt' + response = requests.get(url) + if response.status_code != 200: + raise Exception(f"Failed to fetch latest KubeVirt version from {url}") + return response.text.strip().lstrip("v") + +def download_kubevirt_operator_yaml(version: str) -> Any: + """ + Downloads the KubeVirt operator YAML for the specified version. + """ + + # TODO: relocate this URL to a default in the KubevirtConfig class and allow for an override + # TODO: support remote or local kubevirt-operator.yaml file + url = f'https://github.com/kubevirt/kubevirt/releases/download/v{version}/kubevirt-operator.yaml' + response = requests.get(url) + if response.status_code != 200: + raise Exception(f"Failed to download KubeVirt operator YAML from {url}") + return list(yaml.safe_load_all(response.text)) + +# Function to remove Namespace resources from the YAML data and replace other object namespaces with the specified namespace value as an override +def _transform_yaml(yaml_data: Any, namespace: str) -> List[Dict[str, Any]]: + """ + Transforms the YAML data to set the namespace and exclude Namespace resources. + """ + transformed = [] + for resource in yaml_data: + if resource.get('kind') == 'Namespace': + continue + if 'metadata' in resource: + resource['metadata']['namespace'] = namespace + transformed.append(resource) + return transformed diff --git a/pulumi/modules/kubevirt/types.py b/pulumi/modules/kubevirt/types.py new file mode 100644 index 0000000..a3fc64f --- /dev/null +++ b/pulumi/modules/kubevirt/types.py @@ -0,0 +1,51 @@ +# ./pulumi/modules/kubevirt/types.py +# TODO: +# - add the missing docstrings +# - re-evaluate type enforcement techniques + +""" +Defines the data structure for the KubeVirt module configuration. + +This module is responsible for deploying KubeVirt on the Kubernetes cluster. + +The configuration options are: + + namespace: str - The namespace where KubeVirt will be deployed. Default is 'kubevirt'. + version: Optional[str] - The version of KubeVirt to deploy. Default is None. + use_emulation: bool - Whether to use emulation or not. Default is False. + labels: Dict[str, str] - The labels to apply to the KubeVirt resources. Default is {}. + annotations: Dict[str, Any] - The annotations to apply to the KubeVirt resources. Default is {}. 
+""" + +from dataclasses import dataclass, field +from typing import Optional, Dict, Any + +import pulumi +from core.metadata import get_global_labels, get_global_annotations + +@dataclass +class KubeVirtConfig: + namespace: str = "kubevirt" + version: Optional[str] = "latest" + use_emulation: bool = False + labels: Dict[str, str] = field(default_factory=dict) + annotations: Dict[str, Any] = field(default_factory=dict) + + @classmethod + def merge(cls, user_config: Dict[str, Any]) -> 'KubeVirtConfig': + default_config = cls() + merged_config = default_config.__dict__.copy() + + for key, value in user_config.items(): + if hasattr(default_config, key): + merged_config[key] = value + else: + pulumi.log.warn(f"Unknown configuration key '{key}' in kubevirt config.") + + global_labels = get_global_labels() + global_annotations = get_global_annotations() + + merged_config['labels'].update(global_labels) + merged_config['annotations'].update(global_annotations) + + return cls(**merged_config) diff --git a/pulumi/src/openunison/__init__.py b/pulumi/modules/kv_manager/__init__.py similarity index 100% rename from pulumi/src/openunison/__init__.py rename to pulumi/modules/kv_manager/__init__.py diff --git a/pulumi/src/kv_manager/deploy.py b/pulumi/modules/kv_manager/deploy.py similarity index 100% rename from pulumi/src/kv_manager/deploy.py rename to pulumi/modules/kv_manager/deploy.py diff --git a/pulumi/src/prometheus/__init__.py b/pulumi/modules/local_path_storage/__init__.py similarity index 100% rename from pulumi/src/prometheus/__init__.py rename to pulumi/modules/local_path_storage/__init__.py diff --git a/pulumi/src/local_path_storage/deploy.py b/pulumi/modules/local_path_storage/deploy.py similarity index 100% rename from pulumi/src/local_path_storage/deploy.py rename to pulumi/modules/local_path_storage/deploy.py diff --git a/pulumi/src/vm/__init__.py b/pulumi/modules/multus/__init__.py similarity index 100% rename from pulumi/src/vm/__init__.py rename to pulumi/modules/multus/__init__.py diff --git a/pulumi/modules/multus/deploy.py b/pulumi/modules/multus/deploy.py new file mode 100644 index 0000000..b34f2a7 --- /dev/null +++ b/pulumi/modules/multus/deploy.py @@ -0,0 +1,146 @@ +# pulumi/modules/multus/deploy.py +# TODO: enhance logging and error handling consistent with documented best practices and other modules. + +""" +Deploys the Multus module. +""" + +from typing import List, Dict, Any, Tuple, Optional + +import pulumi +import pulumi_kubernetes as k8s + +from core.utils import get_latest_helm_chart_version +from core.metadata import get_global_labels, get_global_annotations +from core.resource_helpers import create_namespace, create_custom_resource + +from .types import MultusConfig + +def deploy_multus_module( + config_multus: MultusConfig, + global_depends_on: List[pulumi.Resource], + k8s_provider: k8s.Provider, + ) -> Tuple[str, Optional[pulumi.Resource]]: + """ + Deploys the Multus module and returns the version and the deployed resource. 
+ """ + multus_version, multus_resource = deploy_multus( + config_multus=config_multus, + depends_on=global_depends_on, + k8s_provider=k8s_provider, + ) + + # Update global dependencies + global_depends_on.append(multus_resource) + + return multus_version, multus_resource + +def deploy_multus( + config_multus: MultusConfig, + depends_on: List[pulumi.Resource], + k8s_provider: k8s.Provider, + ) -> Tuple[str, Optional[pulumi.Resource]]: + """ + Deploys Multus using YAML manifest and creates a NetworkAttachmentDefinition, + ensuring proper paths for host mounts. + """ + namespace_resource = create_namespace( + name=config_multus.namespace, + labels=config_multus.labels, + annotations=config_multus.annotations, + k8s_provider=k8s_provider, + parent=k8s_provider, + ) + + # Deploy Multus DaemonSet + resource_name = f"multus-daemonset" + version = config_multus.version or "master" + # TODO: consider moving url variable to config via new value in MultusConfig class with default value, may require helper function in multus/types.py to support `latest` + manifest_url = f"https://raw.githubusercontent.com/k8snetworkplumbingwg/multus-cni/{version}/deployments/multus-daemonset-thick.yml" + + multus = k8s.yaml.ConfigFile( + resource_name, + file=manifest_url, + opts=pulumi.ResourceOptions( + provider=k8s_provider, + parent=namespace_resource, + transformations=[transform_host_path], + custom_timeouts=pulumi.CustomTimeouts( + create="8m", + update="8m", + delete="2m" + ) + ) + ) + + # Create NetworkAttachmentDefinition + # TODO: document the dependency on `br0` bridge interface, also consider making this configurable + network_attachment_definition = create_custom_resource( + name=config_multus.bridge_name, + args={ + "apiVersion": "k8s.cni.cncf.io/v1", + "kind": "NetworkAttachmentDefinition", + "metadata": { + "name": config_multus.bridge_name, + "namespace": config_multus.namespace, + }, + "spec": { + "config": pulumi.Output.all(config_multus.bridge_name, config_multus.bridge_name).apply(lambda args: f''' + {{ + "cniVersion": "0.3.1", + "name": "{args[0]}", + "plugins": [ + {{ + "type": "bridge", + "bridge": "{args[1]}", + "ipam": {{}} + }}, + {{ + "type": "tuning" + }} + ] + }}''') + }, + }, + opts=pulumi.ResourceOptions( + provider=k8s_provider, + parent=multus, + depends_on=namespace_resource, + custom_timeouts=pulumi.CustomTimeouts( + create="5m", + update="5m", + delete="5m", + ), + ), + ) + + pulumi.export('network_attachment_definition', network_attachment_definition.metadata['name']) + + return version, multus + +def transform_host_path(args: pulumi.ResourceTransformationArgs) -> pulumi.ResourceTransformationResult: + """ + Transforms the host paths in the Multus DaemonSet. + + The Multus DaemonSet mounts the host path `/run/netns` to kubelet pod at `/var/run/netns`. + This transformation ensures the unique path required is utilized for Talos Linux compatibility. 
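+
+    Illustrative effect (applies to both the containers' volumeMounts and the
+    pod-level hostPath volumes):
+
+        path: /run/netns   becomes   path: /var/run/netns
+
+    Only the DaemonSet named 'kube-multus-ds' is modified; every other resource
+    in the manifest passes through unchanged.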
+ """ + obj = args.props + + if obj.get('kind', '') == 'DaemonSet' and obj.get('metadata', {}).get('name', '') == 'kube-multus-ds': + containers = obj['spec']['template']['spec'].get('containers', []) + for container in containers: + volume_mounts = container.get('volumeMounts', []) + for vm in volume_mounts: + current_path = vm.get('mountPath', '').rstrip('/') + if current_path == '/run/netns': + vm['mountPath'] = '/var/run/netns' + + volumes = obj['spec']['template']['spec'].get('volumes', []) + for vol in volumes: + if 'hostPath' in vol: + current_path = vol['hostPath'].get('path', '').rstrip('/') + if current_path == '/run/netns': + vol['hostPath']['path'] = '/var/run/netns' + + return pulumi.ResourceTransformationResult(props=obj, opts=args.opts) diff --git a/pulumi/modules/multus/types.py b/pulumi/modules/multus/types.py new file mode 100644 index 0000000..413d3e1 --- /dev/null +++ b/pulumi/modules/multus/types.py @@ -0,0 +1,27 @@ +# pulumi/modules/multus/types.py + +""" +Defines the data structure for the Multus module configuration. +""" + +from dataclasses import dataclass, field +from typing import Optional, Dict, Any +import pulumi + +@dataclass +class MultusConfig: + version: str = "master" + namespace: str = "multus" + bridge_name: str = "br0" + labels: Dict[str, str] = field(default_factory=dict) + annotations: Dict[str, Any] = field(default_factory=dict) + + @staticmethod + def merge(user_config: Dict[str, Any]) -> 'MultusConfig': + default_config = MultusConfig() + for key, value in user_config.items(): + if hasattr(default_config, key): + setattr(default_config, key, value) + else: + pulumi.log.warn(f"Unknown configuration key '{key}' in multus config.") + return default_config diff --git a/pulumi/modules/openunison/__init__.py b/pulumi/modules/openunison/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/pulumi/src/openunison/assets/alertmanager.png b/pulumi/modules/openunison/assets/alertmanager.png similarity index 100% rename from pulumi/src/openunison/assets/alertmanager.png rename to pulumi/modules/openunison/assets/alertmanager.png diff --git a/pulumi/src/openunison/assets/grafana.png b/pulumi/modules/openunison/assets/grafana.png similarity index 100% rename from pulumi/src/openunison/assets/grafana.png rename to pulumi/modules/openunison/assets/grafana.png diff --git a/pulumi/src/openunison/assets/kubevirt.png b/pulumi/modules/openunison/assets/kubevirt.png similarity index 100% rename from pulumi/src/openunison/assets/kubevirt.png rename to pulumi/modules/openunison/assets/kubevirt.png diff --git a/pulumi/src/openunison/assets/prometheus.png b/pulumi/modules/openunison/assets/prometheus.png similarity index 100% rename from pulumi/src/openunison/assets/prometheus.png rename to pulumi/modules/openunison/assets/prometheus.png diff --git a/pulumi/src/openunison/deploy.py b/pulumi/modules/openunison/deploy.py similarity index 100% rename from pulumi/src/openunison/deploy.py rename to pulumi/modules/openunison/deploy.py diff --git a/pulumi/src/openunison/encoded_assets.py b/pulumi/modules/openunison/encoded_assets.py similarity index 100% rename from pulumi/src/openunison/encoded_assets.py rename to pulumi/modules/openunison/encoded_assets.py diff --git a/pulumi/modules/prometheus/__init__.py b/pulumi/modules/prometheus/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/pulumi/modules/prometheus/deploy.py b/pulumi/modules/prometheus/deploy.py new file mode 100644 index 0000000..a38a60c --- /dev/null +++ 
b/pulumi/modules/prometheus/deploy.py @@ -0,0 +1,202 @@ +# pulumi/modules/prometheus/deploy.py + +""" +Deploys the Prometheus module following the shared design patterns. +""" + +from typing import List, Dict, Any, Tuple, Optional + +import pulumi +import pulumi_kubernetes as k8s +from pulumi import log + +from core.resource_helpers import create_namespace, create_helm_release +from core.utils import get_latest_helm_chart_version + +from .types import PrometheusConfig + +def deploy_prometheus_module( + config_prometheus: PrometheusConfig, + global_depends_on: List[pulumi.Resource], + k8s_provider: k8s.Provider, + ) -> Tuple[str, Optional[pulumi.Resource]]: + """ + Deploys the Prometheus module and returns the version and the deployed resource. + + Args: + config_prometheus (PrometheusConfig): Configuration for the Prometheus module. + global_depends_on (List[pulumi.Resource]): Global dependencies for all modules. + k8s_provider (k8s.Provider): The Kubernetes provider. + + Returns: + Tuple[str, Optional[pulumi.Resource]]: The version deployed and the deployed resource. + """ + prometheus_version, prometheus_resource = deploy_prometheus( + config_prometheus=config_prometheus, + depends_on=global_depends_on, + k8s_provider=k8s_provider, + ) + + # Update global dependencies + if prometheus_resource: + global_depends_on.append(prometheus_resource) + + return prometheus_version, prometheus_resource + +def deploy_prometheus( + config_prometheus: PrometheusConfig, + depends_on: List[pulumi.Resource], + k8s_provider: k8s.Provider, + ) -> Tuple[str, Optional[pulumi.Resource]]: + """ + Deploys Prometheus using Helm and sets up necessary services. + """ + namespace = config_prometheus.namespace + version = config_prometheus.version + openunison_enabled = config_prometheus.openunison_enabled + + # Create Namespace using the helper function + namespace_resource = create_namespace( + name=namespace, + labels=config_prometheus.labels, + annotations=config_prometheus.annotations, + k8s_provider=k8s_provider, + parent=k8s_provider, + depends_on=depends_on, + ) + + chart_name = "kube-prometheus-stack" + chart_url = "https://prometheus-community.github.io/helm-charts" + + # Get the latest version of the Helm chart + # TODO: re-implement into the get_module_config function and adopt across all modules to reduce code duplication + if version is None or version == "latest": + version = get_latest_helm_chart_version(chart_url, chart_name) + log.info(f"Setting Prometheus helm chart version to latest release: {version}") + else: + log.info(f"Using Prometheus helm release version: {version}") + + # Helm values customization based on OpenUnison integration + if openunison_enabled: + prometheus_helm_values = { + "grafana": { + "grafana.ini": { + "users": { + "allow_sign_up": False, + "auto_assign_org": True, + "auto_assign_org_role": "Admin" + }, + "auth.proxy": { + "enabled": True, + "header_name": "X-WEBAUTH-USER", + "auto_sign_up": True, + "headers": "Groups:X-WEBAUTH-GROUPS" + } + } + } + } + else : + prometheus_helm_values = {} + + # Create the Helm Release + release = create_helm_release( + name=chart_name, + args=k8s.helm.v3.ReleaseArgs( + chart=chart_name, + version=version, + namespace=namespace, + skip_await=False, + repository_opts=k8s.helm.v3.RepositoryOptsArgs(repo=chart_url), + values=prometheus_helm_values, + ), + opts=pulumi.ResourceOptions( + parent=namespace_resource, + ), + k8s_provider=k8s_provider, + depends_on=depends_on, + ) + + # Create Services with predictable names + services = 
create_prometheus_services( + config_prometheus=config_prometheus, + k8s_provider=k8s_provider, + namespace=namespace, + parent=namespace_resource, + depends_on=[release], + ) + + return version, release + +def create_prometheus_services( + config_prometheus: PrometheusConfig, + k8s_provider: k8s.Provider, + namespace: str, + parent: pulumi.Resource, + depends_on: List[pulumi.Resource], + ) -> List[k8s.core.v1.Service]: + """ + Creates Prometheus, Grafana, and Alertmanager services. + + Args: + config_prometheus (PrometheusConfig): Configuration for the Prometheus module. + k8s_provider (k8s.Provider): The Kubernetes provider. + namespace (str): The namespace to deploy services in. + parent (pulumi.Resource): The parent resource. + depends_on (List[pulumi.Resource]): Dependencies for the services. + + Returns: + List[k8s.core.v1.Service]: The created services. + """ + services = [] + + service_definitions = [ + { + "name": "grafana", + "port": 80, + "targetPort": 3000, + "selector": "app.kubernetes.io/name", + }, + { + "name": "alertmanager", + "port": 9093, + "targetPort": 9093, + "selector": "app.kubernetes.io/name", + }, + { + "name": "prometheus", + "port": 9090, + "targetPort": 9090, + "selector": "app.kubernetes.io/name", + } + ] + + # Create services from list of service definitions + # TODO: re-evaluate if this should be centralized into a pulumi/core/utils.py helper function + for service_def in service_definitions: + service = k8s.core.v1.Service( + f"service-{service_def['name']}", + metadata=k8s.meta.v1.ObjectMetaArgs( + name=service_def["name"], + namespace=namespace, + labels=config_prometheus.labels, + annotations=config_prometheus.annotations, + ), + spec=k8s.core.v1.ServiceSpecArgs( + type="ClusterIP", + ports=[k8s.core.v1.ServicePortArgs( + name="http-web", + port=service_def["port"], + protocol="TCP", + target_port=service_def["targetPort"], + )], + selector={service_def["selector"]: service_def["name"]}, + ), + opts=pulumi.ResourceOptions( + provider=k8s_provider, + parent=parent, + depends_on=depends_on, + ) + ) + services.append(service) + + return services diff --git a/pulumi/modules/prometheus/types.py b/pulumi/modules/prometheus/types.py new file mode 100644 index 0000000..72a3956 --- /dev/null +++ b/pulumi/modules/prometheus/types.py @@ -0,0 +1,26 @@ +# pulumi/modules/prometheus/types.py + +""" +Defines the data structure for the Prometheus module configuration. 
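+
+The configuration options are:
+
+    version: Optional[str] - The kube-prometheus-stack chart version to deploy. Default is None (resolved to the latest stable release at deploy time).
+    namespace: str - The namespace where Prometheus will be deployed. Default is 'monitoring'.
+    openunison_enabled: bool - Whether to enable the OpenUnison/Grafana auth-proxy integration. Default is False.
+    labels: Dict[str, str] - The labels to apply to the Prometheus resources. Default is {}.
+    annotations: Dict[str, Any] - The annotations to apply to the Prometheus resources. Default is {}.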
+""" + +from dataclasses import dataclass, field +from typing import Optional, Dict, Any + +@dataclass +class PrometheusConfig: + version: Optional[str] = None + namespace: str = "monitoring" + openunison_enabled: bool = False + labels: Dict[str, str] = field(default_factory=dict) + annotations: Dict[str, Any] = field(default_factory=dict) + + @staticmethod + def merge(user_config: Dict[str, Any]) -> 'PrometheusConfig': + default_config = PrometheusConfig() + for key, value in user_config.items(): + if hasattr(default_config, key): + setattr(default_config, key, value) + else: + pulumi.log.warn(f"Unknown configuration key '{key}' in prometheus config.") + return default_config diff --git a/pulumi/modules/vm/__init__.py b/pulumi/modules/vm/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/pulumi/src/vm/talos.py b/pulumi/modules/vm/talos.py similarity index 99% rename from pulumi/src/vm/talos.py rename to pulumi/modules/vm/talos.py index a99ed84..cd4ffb5 100644 --- a/pulumi/src/vm/talos.py +++ b/pulumi/modules/vm/talos.py @@ -4,8 +4,8 @@ def deploy_talos_cluster( config_talos: dict, k8s_provider: k8s.Provider, + parent, depends_on: pulumi.Output[list], - parent ): """ Deploy the Talos controlplane and worker VirtualMachinePools based on the provided configuration. diff --git a/pulumi/src/vm/ubuntu.py b/pulumi/modules/vm/ubuntu.py similarity index 100% rename from pulumi/src/vm/ubuntu.py rename to pulumi/modules/vm/ubuntu.py diff --git a/pulumi/mypy.ini b/pulumi/mypy.ini new file mode 100644 index 0000000..d0a1d65 --- /dev/null +++ b/pulumi/mypy.ini @@ -0,0 +1,7 @@ +[mypy] +python_version = 3.9 +disallow_untyped_calls = True +disallow_untyped_defs = True +check_untyped_defs = True +warn_unused_ignores = True + diff --git a/pulumi/src/cert_manager/deploy.py b/pulumi/src/cert_manager/deploy.py deleted file mode 100644 index b4cf261..0000000 --- a/pulumi/src/cert_manager/deploy.py +++ /dev/null @@ -1,205 +0,0 @@ -import pulumi -import pulumi_kubernetes as k8s -from pulumi_kubernetes.apiextensions.CustomResource import CustomResource -from src.lib.namespace import create_namespace -from src.lib.helm_chart_versions import get_latest_helm_chart_version - -def deploy_cert_manager( - ns_name: str, - version: str, - kubernetes_distribution: str, - depends: pulumi.Resource, - k8s_provider: k8s.Provider - ): - - # Create namespace - ns_retain = False - ns_protect = False - ns_annotations = {} - ns_labels = {} - namespace = create_namespace( - depends, - ns_name, - ns_retain, - ns_protect, - k8s_provider, - custom_labels=ns_labels, - custom_annotations=ns_annotations - ) - - chart_name = "cert-manager" - chart_index_path = "index.yaml" - chart_url = "https://charts.jetstack.io" - chart_index_url = f"{chart_url}/{chart_index_path}" - - # Fetch the latest version from the helm chart index - if version is None: - version = get_latest_helm_chart_version(chart_index_url, chart_name) - version = version.lstrip("v") - pulumi.log.info(f"Setting helm release version to latest: {chart_name}/{version}") - else: - # Log the version override - pulumi.log.info(f"Using helm release version: {chart_name}/{version}") - - # Deploy cert-manager using the Helm release with updated custom values - helm_values = gen_helm_values(kubernetes_distribution) - - # Deploy cert-manager using the Helm release with custom values - release = k8s.helm.v3.Release( - chart_name, - k8s.helm.v3.ReleaseArgs( - chart=chart_name, - version=version, - namespace=ns_name, - skip_await=False, - repository_opts= 
k8s.helm.v3.RepositoryOptsArgs( - repo=chart_url - ), - values=helm_values, - ), - opts=pulumi.ResourceOptions( - provider = k8s_provider, - parent=namespace, - depends_on=[], - custom_timeouts=pulumi.CustomTimeouts( - create="8m", - update="4m", - delete="4m" - ) - ) - ) - - # Create a self-signed ClusterIssuer resource - cluster_issuer_root = CustomResource( - "cluster-selfsigned-issuer-root", - api_version="cert-manager.io/v1", - kind="ClusterIssuer", - metadata={ - "name": "cluster-selfsigned-issuer-root", - "namespace": ns_name - }, - spec={ - "selfSigned": {} - }, - opts=pulumi.ResourceOptions( - provider = k8s_provider, - parent=release, - depends_on=[namespace], - custom_timeouts=pulumi.CustomTimeouts( - create="5m", - update="10m", - delete="10m" - ) - ) - ) - - cluster_issuer_ca_certificate = CustomResource( - "cluster-selfsigned-issuer-ca", - api_version="cert-manager.io/v1", - kind="Certificate", - metadata={ - "name": "cluster-selfsigned-issuer-ca", - "namespace": ns_name - }, - spec={ - "commonName": "cluster-selfsigned-issuer-ca", - "duration": "2160h0m0s", - "isCA": True, - "issuerRef": { - "group": "cert-manager.io", - "kind": "ClusterIssuer", - "name": cluster_issuer_root.metadata["name"], - }, - "privateKey": { - "algorithm": "ECDSA", - "size": 256 - }, - "renewBefore": "360h0m0s", - "secretName": "cluster-selfsigned-issuer-ca" - }, - opts=pulumi.ResourceOptions( - provider = k8s_provider, - parent=cluster_issuer_root, - depends_on=[namespace], - custom_timeouts=pulumi.CustomTimeouts( - create="5m", - update="10m", - delete="10m" - ) - ) - ) - - cluster_issuer = CustomResource( - "cluster-selfsigned-issuer", - api_version="cert-manager.io/v1", - kind="ClusterIssuer", - metadata={ - "name": "cluster-selfsigned-issuer", - "namespace": ns_name - }, - spec={ - "ca": { - "secretName": cluster_issuer_ca_certificate.spec["secretName"], - } - }, - opts=pulumi.ResourceOptions( - provider=k8s_provider, - parent=cluster_issuer_ca_certificate, - depends_on=[namespace], - custom_timeouts=pulumi.CustomTimeouts( - create="4m", - update="4m", - delete="4m" - ) - ) - ) - - # Retrieve the CA certificate secret - ca_secret = k8s.core.v1.Secret( - "cluster-selfsigned-issuer-ca-secret", - metadata={ - "namespace": ns_name, - "name": cluster_issuer_ca_certificate.spec["secretName"] - }, - opts=pulumi.ResourceOptions( - provider=k8s_provider, - parent=cluster_issuer, - depends_on=[cluster_issuer] - ) - ) - - # Extract the tls.crt value from the secret - ca_data_tls_crt_b64 = ca_secret.data.apply(lambda data: data["tls.crt"]) - - return version, release, ca_data_tls_crt_b64, ca_secret - -def gen_helm_values(kubernetes_distribution: str): - - # Define custom values for the cert-manager Helm chart - common_values = { - 'replicaCount': 1, - 'installCRDs': True, - 'resources': { - 'limits': { - 'cpu': '500m', - 'memory': '1024Mi' - }, - 'requests': { - 'cpu': '250m', - 'memory': '512Mi' - } - } - } - - if kubernetes_distribution == 'kind': - # Kind-specific Helm values - return { - **common_values, - } - elif kubernetes_distribution == 'talos': - # Talos-specific Helm values per the Talos Docs - return { - **common_values, - } - else: - raise ValueError(f"Unsupported Kubernetes distribution: {kubernetes_distribution}") diff --git a/pulumi/src/containerized_data_importer/deploy.py b/pulumi/src/containerized_data_importer/deploy.py deleted file mode 100644 index 3b7d587..0000000 --- a/pulumi/src/containerized_data_importer/deploy.py +++ /dev/null @@ -1,74 +0,0 @@ -import requests -import pulumi 
-import pulumi_kubernetes as k8s -from pulumi_kubernetes.apiextensions.CustomResource import CustomResource -from pulumi_kubernetes.meta.v1 import ObjectMetaArgs - -def deploy_cdi( - depends, - version: str, - k8s_provider: k8s.Provider - ): - - # Fetch the latest stable version of CDI - if version is None: - tag_url = 'https://github.com/kubevirt/containerized-data-importer/releases/latest' - tag = requests.get(tag_url, allow_redirects=False).headers.get('location') - version = tag.split('/')[-1] - version = version.lstrip('v') - pulumi.log.info(f"Setting helm release version to latest stable: cdi/{version}") - else: - # Log the version override - pulumi.log.info(f"Using helm release version: cdi/{version}") - - # Deploy the CDI operator - cdi_operator_url = f'https://github.com/kubevirt/containerized-data-importer/releases/download/v{version}/cdi-operator.yaml' - operator = k8s.yaml.ConfigFile( - 'cdi-operator', - file=cdi_operator_url, - opts=pulumi.ResourceOptions( - provider=k8s_provider - ) - ) - - # Deploy the default CDI custom resource - cdi_resource = CustomResource( - "cdi", - api_version="cdi.kubevirt.io/v1beta1", - kind="CDI", - metadata={ - "name": "cdi", - "namespace": "cdi", - }, - spec={ - "config": { - "featureGates": [ - "HonorWaitForFirstConsumer", - ], - }, - "imagePullPolicy": "IfNotPresent", - "infra": { - "nodeSelector": { - "kubernetes.io/os": "linux", - }, - "tolerations": [ - { - "key": "CriticalAddonsOnly", - "operator": "Exists", - }, - ], - }, - "workload": { - "nodeSelector": { - "kubernetes.io/os": "linux", - }, - }, - }, - opts=pulumi.ResourceOptions( - provider=k8s_provider, - parent=operator, - depends_on=depends - ) - ) - - return version, operator diff --git a/pulumi/src/hostpath_provisioner/deploy.py b/pulumi/src/hostpath_provisioner/deploy.py deleted file mode 100644 index 3b27c0d..0000000 --- a/pulumi/src/hostpath_provisioner/deploy.py +++ /dev/null @@ -1,181 +0,0 @@ -import requests -import pulumi -from pulumi import ResourceOptions -import pulumi_kubernetes as k8s -from pulumi_kubernetes.apiextensions import CustomResource -from pulumi_kubernetes.meta.v1 import ObjectMetaArgs -from pulumi_kubernetes.storage.v1 import StorageClass -from src.lib.namespace import create_namespace - -def deploy( - depends: pulumi.Output[list], - version: str, - ns_name: str, - hostpath: str, - default: bool, - k8s_provider: k8s.Provider, - ): - - # If version is not supplied, fetch the latest stable version - if version is None: - tag_url = 'https://github.com/kubevirt/hostpath-provisioner-operator/releases/latest' - tag = requests.get(tag_url, allow_redirects=False).headers.get('location') - version = tag.split('/')[-1] if tag else '0.17.0' - version = version.lstrip('v') - pulumi.log.info(f"Setting helm release version to latest stable: hostpath-provisioner/{version}") - else: - pulumi.log.info(f"Using helm release version: hostpath-provisioner/{version}") - - # Create namespace - ns_retain = True - ns_protect = False - ns_annotations = {} - ns_labels = { - "kubevirt.io": "", - "kubernetes.io/metadata.name": ns_name, - "pod-security.kubernetes.io/enforce": "privileged" - } - namespace = create_namespace( - depends, - ns_name, - ns_retain, - ns_protect, - k8s_provider, - ns_labels, - ns_annotations - ) - - # Function to add namespace to resource if not set - def add_namespace(args): - obj = args.props - - if 'metadata' in obj: - if isinstance(obj['metadata'], ObjectMetaArgs): - if not obj['metadata'].namespace: - obj['metadata'].namespace = ns_name - else: - if 
obj['metadata'] is None: - obj['metadata'] = {} - if not obj['metadata'].get('namespace'): - obj['metadata']['namespace'] = ns_name - else: - obj['metadata'] = {'namespace': ns_name} - - # Return the modified object wrapped in ResourceTransformationResult - return pulumi.ResourceTransformationResult(props=obj, opts=args.opts) - - # Deploy the webhook - url_webhook = f'https://github.com/kubevirt/hostpath-provisioner-operator/releases/download/v{version}/webhook.yaml' - webhook = k8s.yaml.ConfigFile( - "hostpath-provisioner-webhook", - file=url_webhook, - opts=ResourceOptions( - parent=namespace, - depends_on=depends, - provider=k8s_provider, - transformations=[add_namespace], - custom_timeouts=pulumi.CustomTimeouts( - create="1m", - update="1m", - delete="1m" - ) - ) - ) - - # Deploy the operator with a namespace transformation - url_operator = f'https://github.com/kubevirt/hostpath-provisioner-operator/releases/download/v{version}/operator.yaml' - operator = k8s.yaml.ConfigFile( - "hostpath-provisioner-operator", - file=url_operator, - opts=ResourceOptions( - parent=webhook, - depends_on=depends, - provider=k8s_provider, - transformations=[add_namespace], - custom_timeouts=pulumi.CustomTimeouts( - create="8m", - update="8m", - delete="2m" - ) - ) - ) - - # Ensure the CRDs are created before the HostPathProvisioner resource - # TODO: solve for the case where child resources are created before parent exists - crd = k8s.apiextensions.v1.CustomResourceDefinition.get( - "hostpathprovisioners", - id="hostpathprovisioners.hostpathprovisioner.kubevirt.io", - opts=ResourceOptions( - parent=operator, - depends_on=depends, - provider=k8s_provider, - custom_timeouts=pulumi.CustomTimeouts( - create="9m", - update="9m", - delete="2m" - ) - ) - ) - - # Create a HostPathProvisioner resource - hostpath_provisioner = CustomResource( - "hostpath-provisioner-hpp", - api_version="hostpathprovisioner.kubevirt.io/v1beta1", - kind="HostPathProvisioner", - metadata={ - "name": "hostpath-provisioner-class-ssd", - "namespace": ns_name - }, - spec={ - "imagePullPolicy": "IfNotPresent", - "storagePools": [{ - "name": "ssd", - "path": hostpath - }], - "workload": { - "nodeSelector": { - "kubernetes.io/os": "linux" - } - } - }, - opts=pulumi.ResourceOptions( - parent=operator, - depends_on=crd, - provider=k8s_provider, - ignore_changes=["status"], - custom_timeouts=pulumi.CustomTimeouts( - create="8m", - update="8m", - delete="2m" - ) - ) - ) - - # Define the StorageClass - storage_class = StorageClass( - "hostpath-storage-class-ssd", - metadata=ObjectMetaArgs( - name="ssd", - annotations={ - "storageclass.kubernetes.io/is-default-class": "true" if default else "false" - } - ), - reclaim_policy="Delete", - provisioner="kubevirt.io.hostpath-provisioner", - volume_binding_mode="WaitForFirstConsumer", - parameters={ - "storagePool": "ssd", - }, - opts=ResourceOptions( - parent=hostpath_provisioner, - #depends_on=hostpath_provisioner, - provider=k8s_provider, - custom_timeouts=pulumi.CustomTimeouts( - create="8m", - update="8m", - delete="2m" - ) - ) - ) - - return version, webhook # operator diff --git a/pulumi/src/kubevirt/deploy.py b/pulumi/src/kubevirt/deploy.py deleted file mode 100644 index fc2f0a3..0000000 --- a/pulumi/src/kubevirt/deploy.py +++ /dev/null @@ -1,140 +0,0 @@ -import requests -import yaml -import tempfile -import os -import pulumi -import pulumi_kubernetes as k8s -from pulumi_kubernetes.apiextensions.CustomResource import CustomResource -from pulumi_kubernetes.meta.v1 import ObjectMetaArgs -from 
src.lib.namespace import create_namespace - -def deploy_kubevirt( - depends, - ns_name: str, - version: str, - use_emulation: bool, - k8s_provider: k8s.Provider, - kubernetes_distribution: str - ): - - # Create namespace - ns_retain = True - ns_protect = False - ns_annotations = {} - ns_labels = { - "kubevirt.io": "", - "kubernetes.io/metadata.name": ns_name, - "openshift.io/cluster-monitoring": "true", - "pod-security.kubernetes.io/enforce": "privileged" - } - namespace = create_namespace( - depends, - ns_name, - ns_retain, - ns_protect, - k8s_provider, - custom_labels=ns_labels, - custom_annotations=ns_annotations - ) - - # Fetch the latest stable version of KubeVirt - if version is None: - kubevirt_stable_version_url = 'https://storage.googleapis.com/kubevirt-prow/release/kubevirt/kubevirt/stable.txt' - version = requests.get(kubevirt_stable_version_url).text.strip() - version = version.lstrip("v") - pulumi.log.info(f"Setting version to latest stable: kubevirt/{version}") - else: - # Log the version override - pulumi.log.info(f"Using helm release version: kubevirt/{version}") - - # Download the KubeVirt operator YAML - kubevirt_operator_url = f'https://github.com/kubevirt/kubevirt/releases/download/v{version}/kubevirt-operator.yaml' - response = requests.get(kubevirt_operator_url) - kubevirt_yaml = yaml.safe_load_all(response.text) - - # Edit the YAML in memory to remove the Namespace and adjust other resources - transformed_yaml = [] - for resource in kubevirt_yaml: - if resource and resource.get('kind') == 'Namespace': - pulumi.log.debug(f"Transforming Namespace resource: {resource['metadata']['name']}") - continue # Skip adding this namespace to the edited YAML - if resource and 'metadata' in resource: - resource['metadata']['namespace'] = ns_name - pulumi.log.debug(f"Setting namespace for {resource['kind']} to {ns_name}") - transformed_yaml.append(resource) - - # Write the edited YAML to a temporary file - with tempfile.NamedTemporaryFile(delete=False, mode='w') as temp_file: - yaml.dump_all(transformed_yaml, temp_file) - temp_file_path = temp_file.name - #pulumi.log.info(f"Deploying KubeVirt from local file path: {temp_file_path}") - - # Ensure the tempfile is closed before passing it to ConfigFile - temp_file.close() - - # Pass the edited YAML directly to ConfigFile - operator = k8s.yaml.ConfigFile( - 'kubevirt-operator', - file=temp_file_path, - opts=pulumi.ResourceOptions( - parent=namespace, - depends_on=depends, - provider=k8s_provider, - ) - ) - - # Ensure the temporary file is deleted after Pulumi uses it - pulumi.Output.all().apply(lambda _: os.unlink(temp_file_path)) - - # Determine useEmulation based on the kubernetes_distribution - use_emulation = True if kubernetes_distribution == "kind" else use_emulation - if use_emulation: - pulumi.log.info("KVM Emulation configured for KubeVirt in development.") - - # Create the KubeVirt custom resource object - kubevirt_custom_resource_spec = { - "customizeComponents": {}, - "workloadUpdateStrategy": {}, - "certificateRotateStrategy": {}, - "imagePullPolicy": "IfNotPresent", - "configuration": { - "smbios": { - "sku": "kargo-kc2", - "version": version, - "manufacturer": "ContainerCraft", - "product": "Kargo", - "family": "CCIO" - }, - "developerConfiguration": { - "useEmulation": use_emulation, - "featureGates": [ - "HostDevices", - "ExpandDisks", - "AutoResourceLimitsGate" - ] - }, - "permittedHostDevices": { - "pciHostDevices": [ - ] - } - } - } - - # Create the KubeVirt custom resource - kubevirt = CustomResource( - "kubevirt", 
- api_version="kubevirt.io/v1", - kind="KubeVirt", - metadata=ObjectMetaArgs( - name="kubevirt", - namespace=ns_name, - ), - spec=kubevirt_custom_resource_spec, - opts=pulumi.ResourceOptions( - provider=k8s_provider, - parent=operator, - depends_on=depends - ) - ) - - return version, operator diff --git a/pulumi/src/lib/helm_chart_versions.py b/pulumi/src/lib/helm_chart_versions.py deleted file mode 100644 index 1aa006b..0000000 --- a/pulumi/src/lib/helm_chart_versions.py +++ /dev/null @@ -1,61 +0,0 @@ -import requests -import logging -import yaml -from packaging.version import parse as parse_version, InvalidVersion, Version - -# Set up basic logging -logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') - -def is_stable_version(version_str): - """Check if the version string is a valid and stable semantic version.""" - try: - parsed_version = parse_version(version_str) - # Check if it's a stable version (no pre-release or dev metadata) - return isinstance(parsed_version, Version) and not parsed_version.is_prerelease and not parsed_version.is_devrelease - except InvalidVersion: - return False - -def get_latest_helm_chart_version(url, chart_name): - """ - Fetches the latest stable version of a Helm chart from a given URL. - - Args: - url (str): The URL of the Helm chart repository. - chart_name (str): The name of the Helm chart. - - Returns: - str: The latest stable version of the Helm chart, or an error message if the chart is not found or an error occurs during fetching. - - Raises: - requests.RequestException: If an error occurs during the HTTP request. - - """ - try: - logging.info(f"Fetching URL: {url}") - response = requests.get(url) - response.raise_for_status() - - # Parse the YAML content - index = yaml.safe_load(response.content) - if chart_name in index['entries']: - chart_versions = index['entries'][chart_name] - # Filter out non-stable versions and sort - stable_versions = [v for v in chart_versions if is_stable_version(v['version'])] - if not stable_versions: - logging.info(f"No stable versions found for chart '{chart_name}'.") - return "No stable version found" - latest_chart = max(stable_versions, key=lambda x: parse_version(x['version'])) - return latest_chart['version'] - else: - logging.info(f"No chart named '{chart_name}' found in repository.") - return "Chart not found" - - except requests.RequestException as e: - logging.error(f"Error fetching data: {e}") - return f"Error fetching data: {e}" - -## Example usage -#url = "https://raw.githubusercontent.com/cilium/charts/master/index.yaml" -#chart = "cilium" -#latest_version = get_latest_helm_chart_version(url, chart) -#print(f"The latest version of {chart} is: {latest_version}") diff --git a/pulumi/src/lib/kubernetes_api_endpoint.py b/pulumi/src/lib/kubernetes_api_endpoint.py deleted file mode 100644 index e165039..0000000 --- a/pulumi/src/lib/kubernetes_api_endpoint.py +++ /dev/null @@ -1,30 +0,0 @@ -import pulumi -from pulumi_kubernetes import core, meta, Provider - -class KubernetesApiEndpointIp(pulumi.ComponentResource): - """ - Represents a Kubernetes API endpoint IP address. - - Args: - name (str): The name of the resource. - k8s_provider (Provider): The Kubernetes provider. - - Attributes: - endpoint (core.v1.Endpoints): The Kubernetes endpoint. - ips (pulumi.Output[str]): The comma-separated string of IP addresses. 
- - """ - def __init__(self, name: str, k8s_provider: Provider): - super().__init__('custom:x:KubernetesApiEndpointIp', name, {}, opts=pulumi.ResourceOptions(provider=k8s_provider)) - - self.endpoint = core.v1.Endpoints.get( - "kubernetes", - "kubernetes", - opts=pulumi.ResourceOptions(provider=k8s_provider) - ) - - self.ips = self.endpoint.subsets.apply( - lambda subsets: ','.join([address.ip for subset in subsets for address in subset.addresses]) if subsets else '' - ) - - self.register_outputs({"ip_string": self.ips}) diff --git a/pulumi/src/lib/namespace.py b/pulumi/src/lib/namespace.py deleted file mode 100644 index f4838aa..0000000 --- a/pulumi/src/lib/namespace.py +++ /dev/null @@ -1,62 +0,0 @@ -import pulumi -import pulumi_kubernetes as k8s - -def create_namespace( - depends, - ns_name: str, - ns_retain, - ns_protect, - k8s_provider: k8s.Provider, - custom_labels=None, - custom_annotations=None, - finalizers=None - ): - - # Default labels and annotations - default_labels = { - "ccio.v1/app": "kargo" - } - - default_annotations = {} - - # Merge custom labels and annotations with defaults - complete_labels = {**default_labels, **(custom_labels or {})} - complete_annotations = {**default_annotations, **(custom_annotations or {})} - - # Use default finalizers if none are provided - if finalizers is None: - finalizers = ["kubernetes"] - - # If `depends` is None, set it to an empty list - if depends == (None, None): - depends = [] - - # Create the namespace with merged labels, annotations, and finalizers - namespace_resource = k8s.core.v1.Namespace( - ns_name, - metadata=k8s.meta.v1.ObjectMetaArgs( - name=ns_name, - annotations=complete_annotations, - labels=complete_labels - ), - spec=k8s.core.v1.NamespaceSpecArgs( - finalizers=finalizers, - ), - opts=pulumi.ResourceOptions( - protect=ns_protect, - retain_on_delete=ns_retain, - provider=k8s_provider, - depends_on=depends, - ignore_changes=[ - "metadata", - "spec" - ], - custom_timeouts=pulumi.CustomTimeouts( - create="5m", - update="10m", - delete="10m" - ) - ) - ) - - return namespace_resource diff --git a/pulumi/src/multus/deploy.py b/pulumi/src/multus/deploy.py deleted file mode 100644 index 014786d..0000000 --- a/pulumi/src/multus/deploy.py +++ /dev/null @@ -1,100 +0,0 @@ -import pulumi -import pulumi_kubernetes as k8s - -def transform_host_path(args): - - # Get the object from the arguments - obj = args.props - pulumi.log.debug(f"Object keys: {list(obj.keys())}") - - # Transform DaemonSet named 'kube-multus-ds' - if obj.get('kind', '') == 'DaemonSet' and obj.get('metadata', {}).get('name', '') == 'kube-multus-ds': - # Ensure spec is present - if 'spec' in obj: - # Transform paths in containers - containers = obj['spec']['template']['spec'].get('containers', []) - for container in containers: - volume_mounts = container.get('volumeMounts', []) - for vm in volume_mounts: - # Normalize path before checking to handle potential trailing slash - current_path = vm.get('mountPath', '').rstrip('/') - if current_path == '/run/netns': - vm['mountPath'] = '/var/run/netns' - - # Transform paths in volumes - volumes = obj['spec']['template']['spec'].get('volumes', []) - for vol in volumes: - if 'hostPath' in vol: - # Normalize path before checking to handle potential trailing slash - current_path = vol['hostPath'].get('path', '').rstrip('/') - if current_path == '/run/netns': - vol['hostPath']['path'] = '/var/run/netns' - - # Return the modified object - return pulumi.ResourceTransformationResult(props=obj, opts=args.opts) - -def deploy_multus( 
- depends: pulumi.Input[list], - version: str, - bridge_name: str, - k8s_provider: k8s.Provider - ): - - resource_name = f"k8snetworkplumbingwg-multus-daemonset-thick" - manifest_url = f"https://raw.githubusercontent.com/k8snetworkplumbingwg/multus-cni/{version}/deployments/multus-daemonset-thick.yml" - - multus = k8s.yaml.ConfigFile( - resource_name, - file=manifest_url, - opts=pulumi.ResourceOptions( - provider=k8s_provider, - depends_on=depends, - transformations=[transform_host_path], - custom_timeouts=pulumi.CustomTimeouts( - create="8m", - update="8m", - delete="2m" - ) - ) - ) - - # Pulumi Kubernetes resource for NetworkAttachmentDefinition - network_attachment_definition = k8s.apiextensions.CustomResource( - "kargo-net-attach-def", - api_version="k8s.cni.cncf.io/v1", - kind="NetworkAttachmentDefinition", - metadata={ - "name": f"{bridge_name}", - "namespace": "default" - }, - spec={ - "config": pulumi.Output.all(bridge_name, bridge_name).apply(lambda args: f''' - {{ - "cniVersion": "0.3.1", - "name": "{bridge_name}", - "plugins": [ - {{ - "type": "bridge", - "bridge": "{bridge_name}", - "ipam": {{}} - }}, - {{ - "type": "tuning" - }} - ] - }}''') - }, - opts=pulumi.ResourceOptions( - depends_on=multus, - provider=k8s_provider, - custom_timeouts=pulumi.CustomTimeouts( - create="5m", - update="5m", - delete="5m" - ) - )) - - # Export the name of the resource - pulumi.export('network_attachment_definition', network_attachment_definition.metadata['name']) - - return "master", multus diff --git a/pulumi/src/prometheus/deploy.py b/pulumi/src/prometheus/deploy.py deleted file mode 100644 index c2becac..0000000 --- a/pulumi/src/prometheus/deploy.py +++ /dev/null @@ -1,193 +0,0 @@ -import pulumi -import pulumi_kubernetes as k8s -from src.lib.namespace import create_namespace -from src.lib.helm_chart_versions import get_latest_helm_chart_version - -def deploy_prometheus( - depends: pulumi.Input[list], - ns_name: str, - version: str, - k8s_provider: k8s.Provider, - openunison_enabled: bool - ): - - # Create the monitoring Namespace - ns_retain = True - ns_protect = False - ns_annotations = {} - ns_labels = { - "kubevirt.io": "", - "kubernetes.io/metadata.name": ns_name, - "openshift.io/cluster-monitoring": "true", - "pod-security.kubernetes.io/enforce": "privileged" - } - namespace = create_namespace( - depends, - ns_name, - ns_retain, - ns_protect, - k8s_provider, - ns_labels, - ns_annotations - ) - - prometheus_helm_values = {} - if openunison_enabled: - prometheus_helm_values = { - "grafana": { - "grafana.ini": { - "users": { - "allow_sign_up": False, - "auto_assign_org": True, - "auto_assign_org_role": "Admin" - }, - "auth.proxy": { - "enabled": True, - "header_name": "X-WEBAUTH-USER", - "auto_sign_up": True, - "headers": "Groups:X-WEBAUTH-GROUPS" - } - } - } - } - - # Fetch the latest version from the helm chart index - chart_name = "kube-prometheus-stack" - chart_index_path = "index.yaml" - chart_url = "https://prometheus-community.github.io/helm-charts" - if version is None: - chart_index_url = f"{chart_url}/{chart_index_path}" - version = get_latest_helm_chart_version(chart_index_url, chart_name) - pulumi.log.info(f"Setting helm release version to latest stable: {chart_name}/{version}") - else: - pulumi.log.info(f"Using helm release version: {chart_name}/{version}") - - release = k8s.helm.v3.Release( - 'helm-release-prometheus', - k8s.helm.v3.ReleaseArgs( - chart=chart_name, - version=version, - values=prometheus_helm_values, - namespace='monitoring', - skip_await=False, - 
repository_opts= k8s.helm.v3.RepositoryOptsArgs( - repo=chart_url - ), - ), - opts=pulumi.ResourceOptions( - provider = k8s_provider, - parent=namespace, - depends_on=depends, - custom_timeouts=pulumi.CustomTimeouts( - create="30m", - update="30m", - delete="30m" - ) - ) - ) - depends.append(release) - - # create services with predictable names - service_grafana = k8s.core.v1.Service( - "service-grafana", - metadata=k8s.meta.v1.ObjectMetaArgs( - name="grafana", - namespace=ns_name - ), - spec={ - "type":"ClusterIP", - "ports":[ - { - "name":"http-web", - "port": 80, - "protocol": "TCP", - "targetPort": 3000 - - } - ], - "selector":{ - "app.kubernetes.io/name":"grafana" - } - }, - opts=pulumi.ResourceOptions( - parent=namespace, - depends_on=depends, - retain_on_delete=False, - provider = k8s_provider, - custom_timeouts=pulumi.CustomTimeouts( - create="3m", - update="3m", - delete="3m" - ) - ) - ) - - service_alertmanager = k8s.core.v1.Service( - "service-alertmanager", - metadata=k8s.meta.v1.ObjectMetaArgs( - name="alertmanager", - namespace="monitoring" - ), - spec={ - "type":"ClusterIP", - "ports":[ - { - "name":"http-web", - "port": 9093, - "protocol": "TCP", - "targetPort": 9093 - - } - ], - "selector":{ - "app.kubernetes.io/name":"alertmanager" - } - }, - opts=pulumi.ResourceOptions( - parent=namespace, - depends_on=depends, - provider = k8s_provider, - retain_on_delete=False, - custom_timeouts=pulumi.CustomTimeouts( - create="3m", - update="3m", - delete="3m" - ) - ) - ) - - service_prometheus = k8s.core.v1.Service( - "service-prometheus", - metadata=k8s.meta.v1.ObjectMetaArgs( - name="prometheus", - namespace="monitoring" - ), - spec={ - "type":"ClusterIP", - "ports":[ - { - "name":"http-web", - "port": 9090, - "protocol": "TCP", - "targetPort": 9090 - - } - ], - "selector":{ - "app.kubernetes.io/name":"prometheus" - } - }, - opts=pulumi.ResourceOptions( - parent=namespace, - depends_on=depends, - provider = k8s_provider, - retain_on_delete=False, - custom_timeouts=pulumi.CustomTimeouts( - create="3m", - update="3m", - delete="3m" - ) - ) - ) - - return version, release diff --git a/pulumi/stacks/Pulumi.Kargo.yaml b/pulumi/stacks/Pulumi.Kargo.yaml new file mode 100644 index 0000000..8e937b8 --- /dev/null +++ b/pulumi/stacks/Pulumi.Kargo.yaml @@ -0,0 +1,6 @@ +config: + kargo:cert_manager: + enabled: true + kargo:kubernetes: + context: admin@talos-kargo-docker + kubeconfig: /workspaces/Kargo/.kube/config diff --git a/pulumi/stacks/Pulumi.ci.yaml b/pulumi/stacks/Pulumi.ci.yaml new file mode 100644 index 0000000..ef43a3b --- /dev/null +++ b/pulumi/stacks/Pulumi.ci.yaml @@ -0,0 +1,56 @@ +# Pulumi.ci.yaml +config: + kargo:cert_manager: + enabled: true + namespace: cert-manager + version: latest + kargo:cilium: + enabled: false + kargo:compliance: + fisma: + ato: + authorized: "2025-03-27T00:00:00Z" + renew: "2026-03-27T00:00:00Z" + review: "2028-03-27T00:00:00Z" + enabled: true + level: moderate + nist: + auxiliary: + - ac-6.1 + - ac-2.13 + controls: + - ac-1 + - ac-2 + enabled: true + exceptions: + - ca-7.1 + - ma-2.2 + - si-12 + scip: + environment: prod + ownership: + operator: + contacts: + - seti2@nasa.gov + - alien51@nasa.gov + name: science-team-seti2-obs2819 + provider: + contacts: + - scip@nasa.gov + - bobert@nasa.gov + name: scip-team-xyz + provider: + name: kubevirt + regions: + - scip-west-1 + - scip-east-1 + - scip-lunar-2 + version: v2.87.11 + kargo:kubernetes: + context: admin@talos-kargo-docker + distribution: talos + kubeconfig: 
/Users/usrbinkat/drive/Git/ccio/kargo/.kube/config + kargo:multus: + enabled: false + kargo:vm: + enabled: false diff --git a/pulumi/stacks/Pulumi.optiplexprime.yaml b/pulumi/stacks/Pulumi.optiplexprime.yaml index 39b66a5..0ecac62 100644 --- a/pulumi/stacks/Pulumi.optiplexprime.yaml +++ b/pulumi/stacks/Pulumi.optiplexprime.yaml @@ -1,6 +1,54 @@ config: + kargo:cert_manager: + version: latest + kargo:compliance: + fisma: + ato: + authorized: "2025-03-27T00:00:00Z" + renew: "2026-03-27T00:00:00Z" + review: "2028-03-27T00:00:00Z" + enabled: true + enforcing: warn # accepts: strict, warn, disabled + level: moderate + nist: + auxiliary: + - ac-6.1 + - ac-2.13 + controls: + - ac-1 + - ac-2 + enabled: true + exceptions: + - ca-7.1 + - ma-2.2 + - si-12 + scip: + environment: prod + ownership: + operator: + contacts: + - seti2@nasa.gov + - alien51@nasa.gov + name: science-team-seti2-obs2819 + provider: + contacts: + - scip@nasa.gov + - bobert@nasa.gov + name: scip-team-xyz + provider: + name: kubevirt + regions: + - scip-west-1 + - scip-east-1 + - scip-lunar-2 + version: v2.87.11 + kargo:hostpath_provisioner: + enabled: true + version: latest kargo:kubernetes: context: usrbinkat-optiplexprime # Kubernetes context for the stack + kargo:kubevirt: + version: latest kargo:talos: controlplane: cpu_cores: 1 # Controlplane CPU cores