From 5c027b532e6884ebf00ec5495c9aecfa8cc400be Mon Sep 17 00:00:00 2001 From: AJ Danelz Date: Tue, 5 Nov 2024 08:38:14 -0500 Subject: [PATCH 1/4] Zilla plus schema changes (#269) * update schema with zilla-plus * fix: QoL changes to zilla plus fargate instructions --- .check-schema/zilla-schema.json | 400 +++++++++++++++++++++++++++++++- 1 file changed, 390 insertions(+), 10 deletions(-) diff --git a/.check-schema/zilla-schema.json b/.check-schema/zilla-schema.json index f1b69406..90b3032a 100644 --- a/.check-schema/zilla-schema.json +++ b/.check-schema/zilla-schema.json @@ -93,6 +93,9 @@ "title": "Type", "type": "string", "enum": [ + "aws-acm", + "aws-secrets", + "aws", "filesystem" ] }, @@ -106,6 +109,46 @@ "type" ], "allOf": [ + { + "if": { + "properties": { + "type": { + "const": "aws-acm" + } + } + }, + "then": { + "properties": { + "type": { + "const": "aws-acm" + }, + "options": false + } + } + }, + { + "if": { + "properties": { + "type": { + "enum": [ + "aws-secrets", + "aws" + ] + } + } + }, + "then": { + "properties": { + "type": { + "enum": [ + "aws-secrets", + "aws" + ] + }, + "options": false + } + } + }, { "if": { "properties": { @@ -258,6 +301,11 @@ "challenge": { "title": "Challenge", "type": "integer" + }, + "identity": { + "title": "Identity", + "type": "string", + "default": "sub" } }, "additionalProperties": false @@ -307,9 +355,11 @@ "title": "Type", "type": "string", "enum": [ + "aws-cloudwatch", "otlp", "prometheus", - "stdout" + "stdout", + "syslog" ] } }, @@ -317,6 +367,77 @@ "type" ], "allOf": [ + { + "if": { + "properties": { + "type": { + "const": "aws-cloudwatch" + } + } + }, + "then": { + "properties": { + "type": { + "const": "aws-cloudwatch" + }, + "options": { + "type": "object", + "properties": { + "metrics": { + "type": "object", + "properties": { + "namespace": { + "type": "string" + }, + "interval": { + "title": "Interval", + "type": "number", + "default": 30 + } + }, + "required": [ + "namespace" + ], + "additionalProperties": false + }, + "logs": { + "type": "object", + "properties": { + "group": { + "type": "string" + }, + "stream": { + "type": "string" + } + }, + "required": [ + "group", + "stream" + ], + "additionalProperties": false + } + }, + "anyOf": [ + { + "required": [ + "metrics" + ] + }, + { + "required": [ + "logs" + ] + } + ], + "additionalProperties": false + }, + "additionalProperties": false + }, + "required": [ + "options" + ] + } + }, { "if": { "properties": { @@ -456,6 +577,66 @@ } } } + }, + { + "if": { + "properties": { + "type": { + "const": "syslog" + } + } + }, + "then": { + "properties": { + "type": { + "const": "syslog" + }, + "options": { + "type": "object", + "properties": { + "host": { + "type": "string" + }, + "port": { + "type": "integer" + }, + "protocol": { + "type": "string", + "enum": [ + "udp", + "tcp", + "tls" + ] + } + }, + "if": { + "properties": { + "protocol": { + "const": "tls" + } + } + }, + "then": { + "properties": { + "trustcacerts": { + "type": "boolean" + }, + "trust": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "required": [ + "host", + "port", + "protocol" + ] + } + } + } } ] } @@ -470,6 +651,8 @@ "enum": [ "apicurio-registry", "apicurio", + "aws-glue", + "confluent-schema-registry", "filesystem", "inline", "karapace-schema-registry", @@ -543,6 +726,62 @@ "additionalProperties": false } }, + { + "if": { + "properties": { + "type": { + "const": "aws-glue" + } + } + }, + "then": { + "properties": { + "type": { + "const": "aws-glue" + }, + "options": { + "properties": { + 
"registry": { + "type": "string" + }, + "max-age": { + "title": "Max Age", + "type": "number", + "default": 300 + }, + "compression": { + "enum": [ + "none", + "zlib" + ] + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false + } + }, + { + "if": { + "properties": { + "type": { + "const": "confluent-schema-registry" + } + } + }, + "then": { + "properties": { + "type": { + "const": "confluent-schema-registry" + }, + "options": { + "$ref": "#/$defs/options/catalog/schema-registry" + } + }, + "additionalProperties": false + } + }, { "if": { "properties": { @@ -945,7 +1184,7 @@ "extract-headers": { "type": "object", "patternProperties": { - "^[a-zA-Z]+[a-zA-Z0-9\\._\\-]*$": { + "^[a-zA-Z:]+[a-zA-Z0-9\\._\\-:]*$": { "type": "string", "pattern": "^\\$\\{message\\.(key|value)\\.([A-Za-z_][A-Za-z0-9_]*)\\}$" } @@ -967,7 +1206,7 @@ "extract-headers": { "type": "object", "patternProperties": { - "^[a-zA-Z]+[a-zA-Z0-9\\._\\-]*$": { + "^[a-zA-Z:]+[a-zA-Z0-9\\._\\-:]*$": { "type": "string", "pattern": "^\\$\\{message\\.(key|value)\\.([A-Za-z_][A-Za-z0-9_]*)\\}$" } @@ -1236,11 +1475,13 @@ "http-kafka", "kafka", "kafka-grpc", + "kafka-proxy", "mqtt", "mqtt-kafka", "openapi", "openapi-asyncapi", "pgsql", + "pgsql-kafka", "proxy", "risingwave", "sse", @@ -3094,6 +3335,85 @@ ] } }, + { + "if": { + "properties": { + "type": { + "const": "kafka-proxy" + } + } + }, + "then": { + "properties": { + "type": { + "const": "kafka-proxy" + }, + "kind": { + "enum": [ + "proxy" + ] + }, + "vault": false, + "options": { + "properties": { + "external": { + "type": "object", + "properties": { + "host": { + "title": "Host", + "type": "string", + "pattern": "^[^:]+(?::(\\d+)\\+)?$" + }, + "port": { + "title": "Port", + "type": "integer" + } + }, + "required": [ + "host", + "port" + ], + "additionalProperties": false + }, + "internal": { + "type": "object", + "properties": { + "host": { + "title": "Host", + "type": "string", + "pattern": "^[^:]+$" + }, + "port": { + "title": "Port", + "type": "integer" + }, + "default": { + "title": "Default", + "type": "string", + "pattern": "^[^:]+$" + } + }, + "required": [ + "host", + "port" + ], + "additionalProperties": false + } + }, + "additionalProperties": false, + "required": [ + "internal", + "external" + ] + }, + "routes": false + }, + "required": [ + "options", + "exit" + ] + } + }, { "if": { "properties": { @@ -3151,7 +3471,8 @@ "properties": { "topic": { "title": "Topic", - "type": "string" + "type": "string", + "pattern": "^(\\/?([\\w{}-]*|\\+)(\\/((?![-_])[\\w{}-]*|\\+))*(\\/#)?|#|\\/|\\$SYS(\\/((?![-_])[\\w{}-]*|\\+))*(\\/#)?)$" } } } @@ -3165,7 +3486,8 @@ "properties": { "topic": { "title": "Topic", - "type": "string" + "type": "string", + "pattern": "^(\\/?([\\w{}-]*|\\+)(\\/((?![-_])[\\w{}-]*|\\+))*(\\/#)?|#|\\/|\\$SYS(\\/((?![-_])[\\w{}-]*|\\+))*(\\/#)?)$" } } } @@ -3366,13 +3688,15 @@ "properties": { "topic": { "title": "Topic", - "type": "string" + "type": "string", + "pattern": "^(\\/?([\\w{}-]*|\\+)(\\/((?![-_])[\\w{}-]*|\\+))*(\\/#)?|#|\\/|\\$SYS(\\/((?![-_])[\\w{}-]*|\\+))*(\\/#)?)$" } }, "additionalProperties": false } } - } + }, + "additionalProperties": false }, { "properties": { @@ -3384,13 +3708,15 @@ "properties": { "topic": { "title": "Topic", - "type": "string" + "type": "string", + "pattern": "^(\\/?([\\w{}-]*|\\+)(\\/((?![-_])[\\w{}-]*|\\+))*(\\/#)?|#|\\/|\\$SYS(\\/((?![-_])[\\w{}-]*|\\+))*(\\/#)?)$" } }, "additionalProperties": false } } - } + }, + "additionalProperties": false } ] } @@ -3742,6 +4068,34 @@ ] } }, + { + "if": 
{ + "properties": { + "type": { + "const": "pgsql-kafka" + } + } + }, + "then": { + "properties": { + "type": { + "const": "pgsql-kafka" + }, + "kind": { + "enum": [ + "proxy" + ] + }, + "vault": false, + "options": false, + "routes": false, + "required": [ + "catalog", + "exit" + ] + } + } + }, { "if": { "properties": { @@ -3931,6 +4285,31 @@ "$ref": "#/$defs/converter" } } + }, + "udf": { + "title": "UDF", + "type": "array", + "items": { + "type": "object", + "properties": { + "server": { + "title": "Server", + "type": "string", + "pattern": "^([a-zA-Z0-9\\\\.-]+)(:(\\\\{[a-zA-Z_]+\\\\}|[0-9]+))?$" + }, + "language": { + "title": "Language", + "type": "string", + "default": "java", + "enum": [ + "java", + "python" + ] + } + }, + "additionalProperties": false + }, + "minItems": 1 } }, "additionalProperties": false @@ -3948,7 +4327,8 @@ "items": { "type": "string", "enum": [ - "CREATE TOPIC" + "CREATE TOPIC", + "DROP TOPIC" ] } } From 9b4412d9d6624ebcbc2b59aa941a9e41cb04cf84 Mon Sep 17 00:00:00 2001 From: AJ Danelz Date: Wed, 6 Nov 2024 17:59:01 -0500 Subject: [PATCH 2/4] Onboarding instructions (#271) * fix: update schema check instructions, add search instructions * fix: remove link checking failures for quickstart --- .check-schema/README.md | 17 +++++++++++------ .lycheeignore | 5 +---- README.md | 40 ++++++++++++++-------------------------- 3 files changed, 26 insertions(+), 36 deletions(-) diff --git a/.check-schema/README.md b/.check-schema/README.md index b46ba7cc..0497f1ba 100644 --- a/.check-schema/README.md +++ b/.check-schema/README.md @@ -2,7 +2,17 @@ This project compares the JSON Schema from the Zilla to the [Reference](../src/reference) section of the docs. -## Update schema +## Generate a the schema + +You can generate the Zilla schema on startup by using the `zilla start` command with the `-Pzilla.engine.verbose.schema.plain` option. The `schema.plain` option is needed because we don't need to check the schema with the extra string validation options that get injected. + +```bash +zilla start -v -Pzilla.engine.verbose.schema.plain +``` + +### Update schema from Docker + +You can generate the schema from the docker image and pull it from the logs. Then just remove the none JSON lines from the beginning and end of each file. In the repository root directory run: @@ -18,9 +28,4 @@ docker stop $CONTAINER_ID; gsed -i '1,2d' ./.check-schema/zilla-schema.json; gsed -i '$d' ./.check-schema/zilla-schema.json; - ``` - -Once the docker container has printed "started" it must be deleted for the command to complete. - -Remove the none JSON lines from the beginning and end of each file. diff --git a/.lycheeignore b/.lycheeignore index d21d1214..9fa769cc 100644 --- a/.lycheeignore +++ b/.lycheeignore @@ -3,13 +3,10 @@ fonts.googleapis.com fonts.gstatic.com github.com/.+/zilla-docs/edit docs.aklivity.io +https://quickstart.aklivity.io www.linkedin.com/company/aklivity www.twitter.com/aklivityinc .+algolia.net amazonaws.com hub.docker.com .+\.name - -# These should be removed once the docsearch plugin is fixed -.+/assets/style.+ -.+/assets/docsearch.+ diff --git a/README.md b/README.md index 731d98dd..ae78d732 100644 --- a/README.md +++ b/README.md @@ -42,6 +42,12 @@ pnpm i pnpm dev ``` +### Search + +This projects uses the free [Algolia Docsearch](https://docsearch.algolia.com/) tool to index the production site for all of the public versions. The crawler config is based on the recommended config from the [Docsearch Plugin](https://ecosystem.vuejs.press/plugins/search/docsearch.html). 
+ +To see the current search index or crawler config login to the [Algolia Dashboard](https://dashboard.algolia.com/users/sign_in). + ### Running Lints - Markdown linting @@ -78,6 +84,14 @@ pnpm dev pnpm link-checker && lychee --exclude-mail --base="src/.vuepress/dist" src/.vuepress/dist ``` +- Schema checking + + You can automatically check the reference docs against the Zilla json schema. More instructions are in the [.check-schema/README.md]() + + ```bash + pnpm check-schema > schema-edits.txt + ``` + ### Reference docs Structure Pages in the reference section describe, as briefly as possible and in an orderly way, the properties and interface of a feature. @@ -114,20 +128,6 @@ parentArray: ## Section -:::: note ToC - -- [topLevelProp\*](#toplevelprop) - - [topLevelProp.child\*](#toplevelprop-child) -- [array](#array) -- [parentArray](#parentarray) - - [parentArray\[\].child](#parentarray-child) - -::: right -\* required -::: - -:::: - ### topLevelProp\* > `object` @@ -176,18 +176,6 @@ parentArray: Description. ```` -### Generate schema asset - -capture the output and delete the first and last lines - -```bash -docker run -it --rm -e ZILLA_INCUBATOR_ENABLED=true ghcr.io/aklivity/zilla:latest start -v -Pzilla.engine.verbose.schema > src/.vuepress/public/assets/zilla-schema.json -``` - -```bash -pnpm check-schema > schema-edits.txt -``` - ## Provide feedback We’d love to hear your feedback. Please file documentation issues only in the docs GitHub repository. You can file a new issue to suggest improvements or if you see any errors in the existing documentation. From 880a902f69670cbb0254bba11ddf6ddd3631f969 Mon Sep 17 00:00:00 2001 From: AJ Danelz Date: Fri, 8 Nov 2024 16:30:13 -0500 Subject: [PATCH 3/4] Add cookbooks (#272) * add examples as cookbooks * add auto scale cookbook * rework quickstart and mqtt broker guides to use cookbooks * add cookbooks to release assets * add the autoscaling cookbook and guide * replace the example references with cookbook * update readmes and compose * fix: link formatting and spelling errors --- .../vocabularies}/Base/accept.txt | 1 + .../vocabularies}/Base/reject.txt | 0 .github/workflows/cookbook_artifacts.yaml | 27 ++ .github/workflows/gitflow-release.yaml | 4 +- .gitignore | 1 + src/.vuepress/sidebar/en.ts | 6 +- src/concepts/kafka-proxies/mqtt-proxy.md | 4 +- src/cookbooks/http.kafka.sasl.scram/README.md | 75 ++++ .../http.kafka.sasl.scram/compose.yaml | 88 ++++ .../http.kafka.sasl.scram/kafka_jaas.conf | 5 + src/cookbooks/http.kafka.sasl.scram/setup.sh | 9 + .../http.kafka.sasl.scram/teardown.sh | 5 + .../http.kafka.sasl.scram/zilla.yaml | 59 +++ .../http.redpanda.sasl.scram/README.md | 94 ++++ .../http.redpanda.sasl.scram/compose.yaml | 78 ++++ .../http.redpanda.sasl.scram/setup.sh | 9 + .../http.redpanda.sasl.scram/teardown.sh | 5 + .../http.redpanda.sasl.scram/zilla.yaml | 62 +++ .../kubernetes.prometheus.autoscale/README.md | 31 ++ .../chart/Chart.yaml | 6 + .../chart/files/prometheus-adapter.yaml | 67 +++ .../chart/files/prometheus.yaml | 28 ++ .../templates/service-prometheus-adapter.yaml | 202 +++++++++ .../chart/templates/service-prometheus.yaml | 118 +++++ .../check_hpa.sh | 18 + .../check_metric.sh | 14 + .../kubernetes.prometheus.autoscale/setup.sh | 22 + .../teardown.sh | 10 + .../values.yaml | 39 ++ .../zilla.yaml | 55 +++ src/cookbooks/mqtt.kafka.broker/README.md | 48 ++ src/cookbooks/mqtt.kafka.broker/compose.yaml | 158 +++++++ src/cookbooks/mqtt.kafka.broker/setup.sh | 13 + src/cookbooks/mqtt.kafka.broker/teardown.sh | 5 + 
.../mqtt.kafka.broker/zilla.yaml} | 0 src/cookbooks/quickstart/README.md | 56 +++ src/cookbooks/quickstart/compose.yaml | 178 ++++++++ .../quickstart/grpc-zilla.yaml | 6 +- .../quickstart/http-zilla.yaml | 4 + .../quickstart/mqtt-zilla.yaml | 4 + .../quickstart/mqtt_sim_settings.json | 119 +++++ .../quickstart/protos}/route_guide.proto | 14 + src/cookbooks/quickstart/setup.sh | 13 + src/cookbooks/quickstart/teardown.sh | 5 + .../quickstart/telem-export-zilla.yaml | 3 - src/cookbooks/quickstart/zilla.yaml | 415 ++++++++++++++++++ src/how-tos/catalogs/index.md | 4 +- src/how-tos/connecting-to-kafka/aiven.md | 6 +- src/how-tos/connecting-to-kafka/amazon-msk.md | 6 +- .../connecting-to-kafka/apache-kafka.md | 26 +- .../connecting-to-kafka/confluent-cloud.md | 4 +- src/how-tos/connecting-to-kafka/redpanda.md | 12 +- src/how-tos/deploy-operate/autoscale-k8s.md | 366 +++++++++++++++ .../index.md} | 30 +- src/how-tos/models/index.md | 2 +- src/how-tos/mqtt/mqtt.kafka.broker.md | 104 ++--- src/how-tos/quickstart/echo.proto | 13 - src/how-tos/quickstart/index.md | 42 +- .../telemetry/opentelemetry-protocol.md | 8 +- src/reference/config/bindings/amqp/README.md | 2 +- src/reference/config/bindings/amqp/server.md | 2 +- src/reference/config/bindings/ws/server.md | 2 +- src/reference/config/zilla-cli.md | 50 +-- src/reference/manager/zpm-cli.md | 20 +- .../verify-mqtt-client-connectivity.md | 4 +- .../secure-public-access/create-topic.md | 2 +- .../secure-public-access/send-message.md | 4 +- .../verify-kafka-connect.md | 4 +- .../verify-zilla-plus-proxy-service.md | 10 +- .../secure-public-access/development.md | 18 +- .../production-mutual-tls.md | 16 +- .../secure-public-access/production.md | 14 +- .../create-client-certificate-acm.md | 10 +- .../create-server-certificate-acm.md | 8 +- .../create-server-certificate-letsencrypt.md | 10 +- .../aws-services/launch-ec2-instance.md | 2 +- .../how-tos/aws-services/troubleshooting.md | 10 +- .../confluent-cloud/secure-public-access.md | 20 +- src/tutorials/grpc/grpc-intro.md | 6 +- src/tutorials/mqtt/mqtt-intro.md | 10 +- src/tutorials/rest/rest-intro.md | 9 +- src/tutorials/sse/sse-intro.md | 4 +- src/tutorials/telemetry/telemetry-intro.md | 14 +- 83 files changed, 2780 insertions(+), 277 deletions(-) rename .config/vale/styles/{Vocab => config/vocabularies}/Base/accept.txt (99%) rename .config/vale/styles/{Vocab => config/vocabularies}/Base/reject.txt (100%) create mode 100644 .github/workflows/cookbook_artifacts.yaml create mode 100644 src/cookbooks/http.kafka.sasl.scram/README.md create mode 100644 src/cookbooks/http.kafka.sasl.scram/compose.yaml create mode 100644 src/cookbooks/http.kafka.sasl.scram/kafka_jaas.conf create mode 100755 src/cookbooks/http.kafka.sasl.scram/setup.sh create mode 100755 src/cookbooks/http.kafka.sasl.scram/teardown.sh create mode 100644 src/cookbooks/http.kafka.sasl.scram/zilla.yaml create mode 100644 src/cookbooks/http.redpanda.sasl.scram/README.md create mode 100644 src/cookbooks/http.redpanda.sasl.scram/compose.yaml create mode 100755 src/cookbooks/http.redpanda.sasl.scram/setup.sh create mode 100755 src/cookbooks/http.redpanda.sasl.scram/teardown.sh create mode 100644 src/cookbooks/http.redpanda.sasl.scram/zilla.yaml create mode 100644 src/cookbooks/kubernetes.prometheus.autoscale/README.md create mode 100644 src/cookbooks/kubernetes.prometheus.autoscale/chart/Chart.yaml create mode 100644 src/cookbooks/kubernetes.prometheus.autoscale/chart/files/prometheus-adapter.yaml create mode 100644 
src/cookbooks/kubernetes.prometheus.autoscale/chart/files/prometheus.yaml create mode 100644 src/cookbooks/kubernetes.prometheus.autoscale/chart/templates/service-prometheus-adapter.yaml create mode 100644 src/cookbooks/kubernetes.prometheus.autoscale/chart/templates/service-prometheus.yaml create mode 100755 src/cookbooks/kubernetes.prometheus.autoscale/check_hpa.sh create mode 100755 src/cookbooks/kubernetes.prometheus.autoscale/check_metric.sh create mode 100755 src/cookbooks/kubernetes.prometheus.autoscale/setup.sh create mode 100755 src/cookbooks/kubernetes.prometheus.autoscale/teardown.sh create mode 100644 src/cookbooks/kubernetes.prometheus.autoscale/values.yaml create mode 100644 src/cookbooks/kubernetes.prometheus.autoscale/zilla.yaml create mode 100644 src/cookbooks/mqtt.kafka.broker/README.md create mode 100644 src/cookbooks/mqtt.kafka.broker/compose.yaml create mode 100644 src/cookbooks/mqtt.kafka.broker/setup.sh create mode 100644 src/cookbooks/mqtt.kafka.broker/teardown.sh rename src/{how-tos/mqtt/mqtt_kafka_broker_zilla.yaml => cookbooks/mqtt.kafka.broker/zilla.yaml} (100%) create mode 100644 src/cookbooks/quickstart/README.md create mode 100644 src/cookbooks/quickstart/compose.yaml rename src/{how-tos => cookbooks}/quickstart/grpc-zilla.yaml (96%) rename src/{how-tos => cookbooks}/quickstart/http-zilla.yaml (97%) rename src/{how-tos => cookbooks}/quickstart/mqtt-zilla.yaml (94%) create mode 100644 src/cookbooks/quickstart/mqtt_sim_settings.json rename src/{how-tos/quickstart => cookbooks/quickstart/protos}/route_guide.proto (83%) create mode 100755 src/cookbooks/quickstart/setup.sh create mode 100755 src/cookbooks/quickstart/teardown.sh rename src/{how-tos => cookbooks}/quickstart/telem-export-zilla.yaml (76%) create mode 100644 src/cookbooks/quickstart/zilla.yaml create mode 100644 src/how-tos/deploy-operate/autoscale-k8s.md rename src/how-tos/{deploy-operate.md => deploy-operate/index.md} (83%) delete mode 100644 src/how-tos/quickstart/echo.proto diff --git a/.config/vale/styles/Vocab/Base/accept.txt b/.config/vale/styles/config/vocabularies/Base/accept.txt similarity index 99% rename from .config/vale/styles/Vocab/Base/accept.txt rename to .config/vale/styles/config/vocabularies/Base/accept.txt index 9f698ea6..1b86d432 100644 --- a/.config/vale/styles/Vocab/Base/accept.txt +++ b/.config/vale/styles/config/vocabularies/Base/accept.txt @@ -79,6 +79,7 @@ EDAs enSidebar enum etag +Fargate fas gitea Grafana diff --git a/.config/vale/styles/Vocab/Base/reject.txt b/.config/vale/styles/config/vocabularies/Base/reject.txt similarity index 100% rename from .config/vale/styles/Vocab/Base/reject.txt rename to .config/vale/styles/config/vocabularies/Base/reject.txt diff --git a/.github/workflows/cookbook_artifacts.yaml b/.github/workflows/cookbook_artifacts.yaml new file mode 100644 index 00000000..d3b55d27 --- /dev/null +++ b/.github/workflows/cookbook_artifacts.yaml @@ -0,0 +1,27 @@ +name: Release Cookbook Artifacts + +on: + workflow_dispatch: + push: + tags: + - 'v[0-9]+.[0-9]+.[0-9]+' + +permissions: + contents: write + +jobs: + build: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - name: Tar all cookbooks + run: for i in src/cookbooks/*/; do tar -zcvf "${i%/}.tar.gz" "$i"; done + + - name: Release + uses: softprops/action-gh-release@v2 + if: startsWith(github.ref, 'refs/tags/') + with: + files: | + src/cookbooks/*.tar.gz + LICENSE diff --git a/.github/workflows/gitflow-release.yaml b/.github/workflows/gitflow-release.yaml index 0a2e7d8f..b0ed0246 100644 
--- a/.github/workflows/gitflow-release.yaml
+++ b/.github/workflows/gitflow-release.yaml
       - if: ${{ env.IS_CUSTOM_VERSION && steps.validate-custom-version.outputs.match != github.event.inputs.custom-version }}
         name: Custom Version must be "#.#.#"
-        run: echo "Custom Version must be #.#.#" exit 1;
+        run: echo "Custom Version must be \#.\#.\#"; exit 1;
 
   release:
     runs-on: ubuntu-latest
 
     steps:
       - uses: actions/checkout@v4
         with:
-          token: ${{secrets.GITFLOW_RELEASES_TOKEN}}
+          token: ${{secrets.GITFLOW_RELEASES_TOKEN}} # The PAT used to push changes to protected branches
           fetch-depth: "0"
 
       - name: get new version
diff --git a/.gitignore b/.gitignore
index 4fe5ecce..51c2b4b4 100644
--- a/.gitignore
+++ b/.gitignore
 dist
 src/.vuepress/.cache/
 src/.vuepress/.temp/
 .idea/
+src/cookbooks/quickstart/live-demo-deploy/.env
diff --git a/src/.vuepress/sidebar/en.ts b/src/.vuepress/sidebar/en.ts
index 18f146b7..bc8e12c8 100644
--- a/src/.vuepress/sidebar/en.ts
+++ b/src/.vuepress/sidebar/en.ts
       },
       {
         text: "Installing Zilla",
-        link: "how-tos/deploy-operate.md",
+        link: "how-tos/deploy-operate/index.md",
         children: [],
       },
       {
         {
           text: "Push to an OTLP Collector",
           link: "how-tos/telemetry/opentelemetry-protocol.md",
         },
+        {
+          text: "Auto scaling on K8s",
+          link: "how-tos/deploy-operate/autoscale-k8s.md",
+        },
       ],
     },
     {
diff --git a/src/concepts/kafka-proxies/mqtt-proxy.md b/src/concepts/kafka-proxies/mqtt-proxy.md
index 80a98993..1174fb66 100644
--- a/src/concepts/kafka-proxies/mqtt-proxy.md
+++ b/src/concepts/kafka-proxies/mqtt-proxy.md
 ### Session Management
 
-MQTT connect, disconnect, and other session messages are maintained on the the log compacted [sessions](../../reference/config/bindings/mqtt-kafka/proxy.md#topics-sessions) Kafka topic. A message keyed by the MQTT client ID on the topic is used to track client subscriptions across client reconnects.
+MQTT connect, disconnect, and other session messages are maintained on the log compacted [sessions](../../reference/config/bindings/mqtt-kafka/proxy.md#topics-sessions) Kafka topic. A message keyed by the MQTT client ID on the topic is used to track client subscriptions across client reconnects.
 
 #### Kafka Consumer Groups for MQTT sessions
 
-A consumer group is created for each unique client ID used by an MQTT session with the format `zilla:--`. Zilla minimizes the number of hearbeats required to approximately one per MQTT session expiry interval. When an MQTT session expires, perhaps because the MQTT client abruptly disconnected but did not reconnect, the corresponding consumer group also expires and the associated tracking state in the [sessions](../../reference/config/bindings/mqtt-kafka/proxy.md#topics-sessions) Kafka topic is cleaned up automatically.
+A consumer group is created for each unique client ID used by an MQTT session with the format `zilla:--`. Zilla minimizes the number of heartbeats required to approximately one per MQTT session expiry interval. When an MQTT session expires, perhaps because the MQTT client abruptly disconnected but did not reconnect, the corresponding consumer group also expires and the associated tracking state in the [sessions](../../reference/config/bindings/mqtt-kafka/proxy.md#topics-sessions) Kafka topic is cleaned up automatically.
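+
+For example, once a few MQTT clients have connected, the per-session consumer groups can be listed with the standard Kafka CLI tools. This is an illustrative check only; the broker address and tooling will vary with your deployment:
+
+```bash
+# List the consumer groups Zilla created for MQTT sessions (their names start with "zilla:")
+kafka-consumer-groups.sh --bootstrap-server localhost:9092 --list | grep '^zilla:'
+```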
## Authorizing clients

diff --git a/src/cookbooks/http.kafka.sasl.scram/README.md b/src/cookbooks/http.kafka.sasl.scram/README.md
new file mode 100644
index 00000000..928951cf
--- /dev/null
+++ b/src/cookbooks/http.kafka.sasl.scram/README.md
+# http.kafka.sasl.scram
+
+Listens on http port `7114` and will produce messages to the `events` topic in `SASL/SCRAM` enabled Kafka, synchronously.
+
+## Running locally
+
+This cookbook runs using Docker compose.
+
+### Setup
+
+The `setup.sh` script will start (or restart) the Docker Compose stack, which:
+
+- runs Zilla, a SASL/SCRAM enabled Kafka broker, a Kafka UI, and a kafkacat container
+- creates the `events` topic in Kafka
+- provides the SCRAM credential `user` to the broker (see `kafka_jaas.conf`)
+
+```bash
+./setup.sh
+```
+
+### Verify behavior
+
+Send a `POST` request with an event body.
+
+```bash
+curl -v \
+  -X "POST" http://localhost:7114/events \
+  -H "Content-Type: application/json" \
+  -d "{\"greeting\":\"Hello, world\"}"
+```
+
+output:
+
+```text
+...
+> POST /events HTTP/1.1
+> Content-Type: application/json
+...
+< HTTP/1.1 204 No Content
+```
+
+Verify that the event has been produced to the `events` Kafka topic.
+
+```bash
+docker compose -p zilla-http-kafka-sasl-scram exec kafkacat \
+kafkacat -C -b kafka:9092 -t events -J -u | jq .
+```
+
+output:
+
+```json
+{
+  "topic": "events",
+  "partition": 0,
+  "offset": 0,
+  "tstype": "create",
+  "ts": 1652465273281,
+  "broker": 1001,
+  "headers": [
+    "content-type",
+    "application/json"
+  ],
+  "payload": "{\"greeting\":\"Hello, world\"}"
+}
+% Reached end of topic events [0] at offset 1
+```
+
+### Teardown
+
+The `teardown.sh` script stops and removes the Docker Compose stack.
+
+```bash
+./teardown.sh
+```
diff --git a/src/cookbooks/http.kafka.sasl.scram/compose.yaml b/src/cookbooks/http.kafka.sasl.scram/compose.yaml
new file mode 100644
index 00000000..94aebaab
--- /dev/null
+++ b/src/cookbooks/http.kafka.sasl.scram/compose.yaml
+name: ${NAMESPACE:-zilla-http-kafka-sasl-scram}
+services:
+  zilla:
+    image: ghcr.io/aklivity/zilla:${ZILLA_VERSION:-latest}
+    pull_policy: always
+    restart: unless-stopped
+    ports:
+      - 7114:7114
+    healthcheck:
+      interval: 5s
+      timeout: 3s
+      retries: 5
+      test: ["CMD", "bash", "-c", "echo -n '' > /dev/tcp/127.0.0.1/7114"]
+    environment:
+      KAFKA_BOOTSTRAP_SERVER: kafka:29092
+      SASL_USERNAME: user
+      SASL_PASSWORD: bitnami
+    volumes:
+      - ./zilla.yaml:/etc/zilla/zilla.yaml
+    command: start -v -e
+
+  kafka:
+    image: bitnami/kafka:3.5
+    restart: unless-stopped
+    ports:
+      - 9092:9092
+    healthcheck:
+      test: /opt/bitnami/kafka/bin/kafka-cluster.sh cluster-id --bootstrap-server kafka:29092 || exit 1
+      interval: 1s
+      timeout: 60s
+      retries: 60
+    environment:
+      ALLOW_PLAINTEXT_LISTENER: "yes"
+      KAFKA_CFG_NODE_ID: "1"
+      KAFKA_CFG_BROKER_ID: "1"
+      KAFKA_CFG_GROUP_INITIAL_REBALANCE_DELAY_MS: "0"
+      KAFKA_CFG_CONTROLLER_QUORUM_VOTERS: "1@127.0.0.1:9093"
+      KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP: "CLIENT:PLAINTEXT,INTERNAL:SASL_PLAINTEXT,DOCKER:PLAINTEXT,CONTROLLER:PLAINTEXT"
+      KAFKA_CFG_CONTROLLER_LISTENER_NAMES: "CONTROLLER"
+      KAFKA_CFG_LOG_DIRS: "/tmp/logs"
+      KAFKA_CFG_PROCESS_ROLES: "broker,controller"
+      KAFKA_CFG_LISTENERS: "CLIENT://:9092,INTERNAL://:29092,CONTROLLER://:9093"
+      KAFKA_CFG_INTER_BROKER_LISTENER_NAME: "INTERNAL"
+      KAFKA_CFG_ADVERTISED_LISTENERS: "CLIENT://localhost:9092,INTERNAL://kafka:29092"
+      KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE: "true"
+
+  kafka-init:
+    image: bitnami/kafka:3.5
+    user: root
+    depends_on:
+      kafka:
+        condition: service_healthy
+        restart: true
+    deploy:
+      restart_policy:
+        condition: none
+        max_attempts: 0
+    entrypoint: ["/bin/sh", "-c"]
+    command:
+      - |
+        echo -e "Creating kafka topic";
+        /opt/bitnami/kafka/bin/kafka-topics.sh --bootstrap-server kafka:29092 --create --if-not-exists --topic events
+        echo -e "Successfully created the following topics:";
+        /opt/bitnami/kafka/bin/kafka-topics.sh --bootstrap-server kafka:29092 --list;
+
+  kafka-ui:
+    image: ghcr.io/kafbat/kafka-ui:latest
+    restart: unless-stopped
+    ports:
+      - 8080:8080
+    depends_on:
+      kafka:
+        condition: service_healthy
+        restart: true
+    environment:
+      KAFKA_CLUSTERS_0_NAME: local
+      KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka:29092
+
+  kafkacat:
+    image: confluentinc/cp-kafkacat:7.1.9
+    command: "bash"
+    stdin_open: true
+    tty: true
+
+networks:
+  default:
+    driver: bridge
diff --git a/src/cookbooks/http.kafka.sasl.scram/kafka_jaas.conf b/src/cookbooks/http.kafka.sasl.scram/kafka_jaas.conf
new file mode 100644
index 00000000..949c7b21
--- /dev/null
+++ b/src/cookbooks/http.kafka.sasl.scram/kafka_jaas.conf
+KafkaServer {
+   org.apache.kafka.common.security.scram.ScramLoginModule required
+   username="user"
+   password="bitnami";
+};
diff --git a/src/cookbooks/http.kafka.sasl.scram/setup.sh b/src/cookbooks/http.kafka.sasl.scram/setup.sh
new file mode 100755
index 00000000..80375039
--- /dev/null
+++ b/src/cookbooks/http.kafka.sasl.scram/setup.sh
+#!/bin/sh
+set -e
+
+# Start or restart Zilla
+if [ -z "$(docker compose ps -q zilla)" ]; then
+docker compose up -d
+else
+docker compose up -d --force-recreate --no-deps zilla
+fi
diff --git a/src/cookbooks/http.kafka.sasl.scram/teardown.sh b/src/cookbooks/http.kafka.sasl.scram/teardown.sh
new file mode 100755
index 00000000..9b505562
--- /dev/null
+++ b/src/cookbooks/http.kafka.sasl.scram/teardown.sh
+#!/bin/sh
+set -e
+
+docker compose -p "${NAMESPACE:-zilla-http-kafka-sasl-scram}" down --remove-orphans
+
diff --git a/src/cookbooks/http.kafka.sasl.scram/zilla.yaml b/src/cookbooks/http.kafka.sasl.scram/zilla.yaml
new file mode 100644
index 00000000..7d1006f6
--- /dev/null
+++ b/src/cookbooks/http.kafka.sasl.scram/zilla.yaml
+---
+name: example
+bindings:
+  north_tcp_server:
+    type: tcp
+    kind: server
+    options:
+      host: 0.0.0.0
+      port:
+        - 7114
+    routes:
+      - when:
+          - port: 7114
+        exit: north_http_server
+  north_http_server:
+    type: http
+    kind: server
+    routes:
+      - when:
+          - headers:
+              :scheme: http
+        exit: north_http_kafka_mapping
+  north_http_kafka_mapping:
+    type: http-kafka
+    kind: proxy
+    routes:
+      - when:
+          - path: /events
+        exit: north_kafka_cache_client
+        with:
+          capability: produce
+          topic: events
+  north_kafka_cache_client:
+    type: kafka
+    kind: cache_client
+    exit: south_kafka_cache_server
+  south_kafka_cache_server:
+    type: kafka
+    kind: cache_server
+    exit: south_kafka_client
+  south_kafka_client:
+    type: kafka
+    kind: client
+    options:
+      servers:
+        - ${{env.KAFKA_BOOTSTRAP_SERVER}}
+      sasl:
+        mechanism: scram-sha-256
+        username: ${{env.SASL_USERNAME}}
+        password: ${{env.SASL_PASSWORD}}
+    exit: south_tcp_client
+  south_tcp_client:
+    type: tcp
+    kind: client
+telemetry:
+  exporters:
+    stdout_logs_exporter:
+      type: stdout
diff --git a/src/cookbooks/http.redpanda.sasl.scram/README.md b/src/cookbooks/http.redpanda.sasl.scram/README.md
new file mode 100644
index 00000000..85ba0d2b
--- /dev/null
+++ b/src/cookbooks/http.redpanda.sasl.scram/README.md
+# http.redpanda.sasl.scram
+
+Listens on http port `7114` and will produce messages to the `events` topic in a `SASL/SCRAM`
+enabled Redpanda cluster, synchronously.
+
+## Running locally
+
+This cookbook runs using Docker compose.
+
+### Setup
+
+The `setup.sh` script will start (or restart) the Docker Compose stack, which runs:
+
+- Zilla
+- a Redpanda broker with SASL enabled and `user` configured as a superuser
+- a Redpanda console and a kafkacat container
+
+```bash
+./setup.sh
+```
+
+### Verify behavior
+
+Send a `POST` request with an event body.
+
+```bash
+curl -v \
+  -X "POST" http://localhost:7114/events \
+  -H "Content-Type: application/json" \
+  -d "{\"greeting\":\"Hello, world\"}"
+```
+
+output:
+
+```text
+...
+> POST /events HTTP/1.1
+> Content-Type: application/json
+...
+< HTTP/1.1 204 No Content
+```
+
+Verify that the event has been produced to the `events` topic in the Redpanda cluster.
+
+```bash
+docker compose -p zilla-http-redpanda-sasl-scram exec kafkacat \
+kafkacat -b redpanda:29092 -X security.protocol=SASL_PLAINTEXT \
+  -X sasl.mechanisms=SCRAM-SHA-256 \
+  -X sasl.username=user \
+  -X sasl.password=redpanda \
+  -t events -J -u | jq .
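+# -J wraps each record in a JSON envelope; -u disables output buffering so records appear immediately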
+```
+
+output:
+
+```json
+{
+  "topic": "events",
+  "partition": 0,
+  "offset": 0,
+  "tstype": "create",
+  "ts": 1652465273281,
+  "broker": 1001,
+  "headers": [
+    "content-type",
+    "application/json"
+  ],
+  "payload": "{\"greeting\":\"Hello, world\"}"
+}
+% Reached end of topic events [0] at offset 1
+```
+
+### Teardown
+
+The `teardown.sh` script stops and removes the Docker Compose stack.
+
+```bash
+./teardown.sh
+```
diff --git a/src/cookbooks/http.redpanda.sasl.scram/compose.yaml b/src/cookbooks/http.redpanda.sasl.scram/compose.yaml
new file mode 100644
index 00000000..49a6992d
--- /dev/null
+++ b/src/cookbooks/http.redpanda.sasl.scram/compose.yaml
+name: ${NAMESPACE:-zilla-http-redpanda-sasl-scram}
+services:
+  zilla:
+    image: ghcr.io/aklivity/zilla:${ZILLA_VERSION:-latest}
+    pull_policy: always
+    restart: unless-stopped
+    ports:
+      - 7114:7114
+    healthcheck:
+      interval: 5s
+      timeout: 3s
+      retries: 5
+      test: ["CMD", "bash", "-c", "echo -n '' > /dev/tcp/127.0.0.1/7114"]
+    environment:
+      KAFKA_BOOTSTRAP_SERVER: redpanda:29092
+      SASL_USERNAME: user
+      SASL_PASSWORD: redpanda
+    volumes:
+      - ./zilla.yaml:/etc/zilla/zilla.yaml
+    command: start -v -e
+
+  redpanda:
+    image: docker.redpanda.com/redpandadata/redpanda:v23.2.17
+    command:
+      - redpanda start
+      - --set redpanda.enable_sasl=true
+      - --set redpanda.superusers=["user"]
+      - --smp 1
+      - --overprovisioned
+      - --kafka-addr internal://0.0.0.0:29092,docker://0.0.0.0:19092,external://0.0.0.0:9092
+      - --advertise-kafka-addr internal://redpanda:29092,docker://host.docker.internal:19092,external://localhost:9092
+      - --pandaproxy-addr internal://0.0.0.0:8082,external://0.0.0.0:18082
+      - --advertise-pandaproxy-addr internal://redpanda:18082,external://localhost:8082
+      - --schema-registry-addr internal://0.0.0.0:8081,external://0.0.0.0:18081
+      - --rpc-addr redpanda:33145
+      - --advertise-rpc-addr redpanda:33145
+      - --mode dev-container
+    ports:
+      - 18081:18081
+      - 18082:18082
+      - 19092:19092
+      - 29092:29092
+      - 9092:9092
+      - 19644:9644
+    healthcheck:
+      test: ["CMD-SHELL", "rpk cluster health | grep -E 'Healthy:.+true' || exit 1"]
+      interval: 15s
+      timeout: 3s
+      retries: 5
+      start_period: 5s
+
+  redpanda-console:
+    image: docker.redpanda.com/redpandadata/console:v2.3.6
+    entrypoint: /bin/sh
+    command: -c "echo \"$$CONSOLE_CONFIG_FILE\" > /tmp/config.yml; /app/console"
+    environment:
+      CONFIG_FILEPATH: /tmp/config.yml
+      CONSOLE_CONFIG_FILE: |
+        kafka:
+          brokers: ["redpanda:29092"]
+          schemaRegistry:
+            enabled: true
+            urls: ["http://redpanda:8081"]
+        redpanda:
+          adminApi:
+            enabled: true
+            urls: ["http://redpanda:9644"]
+        connect:
+          enabled: true
+          clusters:
+            - name: local-connect-cluster
+              url: http://connect:8083
+    ports:
+      - 8080:8080
+
+  kafkacat:
+    image: confluentinc/cp-kafkacat:7.1.9
+    command: "bash"
+    stdin_open: true
+    tty: true
+
+networks:
+  default:
+    driver: bridge
diff --git a/src/cookbooks/http.redpanda.sasl.scram/setup.sh b/src/cookbooks/http.redpanda.sasl.scram/setup.sh
new file mode 100755
index 00000000..80375039
--- /dev/null
+++ b/src/cookbooks/http.redpanda.sasl.scram/setup.sh
+#!/bin/sh
+set -e
+
+# Start or restart Zilla
+if [ -z "$(docker compose ps -q zilla)" ]; then
+docker compose up -d
+else
+docker compose up -d --force-recreate --no-deps zilla
+fi
diff --git a/src/cookbooks/http.redpanda.sasl.scram/teardown.sh b/src/cookbooks/http.redpanda.sasl.scram/teardown.sh
new file mode 100755
index 00000000..2ca58577
--- /dev/null
+++ b/src/cookbooks/http.redpanda.sasl.scram/teardown.sh
+#!/bin/sh
+set -e
+
+docker compose -p "${NAMESPACE:-zilla-http-redpanda-sasl-scram}" down --remove-orphans
+
diff --git a/src/cookbooks/http.redpanda.sasl.scram/zilla.yaml b/src/cookbooks/http.redpanda.sasl.scram/zilla.yaml
new file mode 100644
index 00000000..776a98db
--- /dev/null
+++ b/src/cookbooks/http.redpanda.sasl.scram/zilla.yaml
+---
+name: example
+bindings:
+  north_tcp_server:
+    type: tcp
+    kind: server
+    options:
+      host: 0.0.0.0
+      port:
+        - 7114
+    routes:
+      - when:
+          - port: 7114
+        exit: north_http_server
+  north_http_server:
+    type: http
+    kind: server
+    routes:
+      - when:
+          - headers:
+              :scheme: http
+        exit: north_http_kafka_mapping
+  north_http_kafka_mapping:
+    type: http-kafka
+    kind: proxy
+    routes:
+      - when:
+          - path: /events
+        exit: north_kafka_cache_client
+        with:
+          capability: produce
+          topic: events
+  north_kafka_cache_client:
+    type: kafka
+    kind: cache_client
+    exit: south_kafka_cache_server
+  south_kafka_cache_server:
+    type: kafka
+    kind: cache_server
+    exit: south_kafka_client
+  south_kafka_client:
+    type: kafka
+    kind: client
+    options:
+      servers:
+        - ${{env.KAFKA_BOOTSTRAP_SERVER}}
+      sasl:
+        mechanism: scram-sha-256
+        username: ${{env.SASL_USERNAME}}
+        password: ${{env.SASL_PASSWORD}}
+    exit: south_tcp_client
+  south_tcp_client:
+    type: tcp
+    kind: client
+telemetry:
+  exporters:
+    stdout_logs_exporter:
+      type: stdout
diff --git a/src/cookbooks/kubernetes.prometheus.autoscale/README.md b/src/cookbooks/kubernetes.prometheus.autoscale/README.md
new file mode 100644
index 00000000..04426484
--- /dev/null
+++ b/src/cookbooks/kubernetes.prometheus.autoscale/README.md
+# kubernetes.prometheus.autoscale
+
+This is the resource folder for running the [Autoscale Zilla pods with Prometheus metrics guide](https://docs.aklivity.io/zilla/latest/how-tos/deploy-operate/autoscale-k8s.html) found in our docs.
+
+## Running locally
+
+This cookbook runs using Kubernetes.
+
+### Setup
+
+The `setup.sh` script will:
+
+- install Zilla, Prometheus, and the Prometheus Adapter to the Kubernetes cluster with helm and wait for the pods to start up
+- set up horizontal pod autoscaling
+- start port forwarding
+
+```bash
+./setup.sh
+```
+
+### Using this cookbook
+
+Follow the steps in our [Autoscale Zilla pods with Prometheus metrics guide](https://docs.aklivity.io/zilla/latest/how-tos/deploy-operate/autoscale-k8s.html).
+
+### Teardown
+
+The `teardown.sh` script will remove any resources created.
+ +```bash +./teardown.sh +``` diff --git a/src/cookbooks/kubernetes.prometheus.autoscale/chart/Chart.yaml b/src/cookbooks/kubernetes.prometheus.autoscale/chart/Chart.yaml new file mode 100644 index 00000000..ae236224 --- /dev/null +++ b/src/cookbooks/kubernetes.prometheus.autoscale/chart/Chart.yaml @@ -0,0 +1,6 @@ +apiVersion: v2 +name: zilla +description: zilla autoscaling example +type: application +version: 0.1.0 +appVersion: "latest" diff --git a/src/cookbooks/kubernetes.prometheus.autoscale/chart/files/prometheus-adapter.yaml b/src/cookbooks/kubernetes.prometheus.autoscale/chart/files/prometheus-adapter.yaml new file mode 100644 index 00000000..b574df86 --- /dev/null +++ b/src/cookbooks/kubernetes.prometheus.autoscale/chart/files/prometheus-adapter.yaml @@ -0,0 +1,67 @@ +rules: + - seriesQuery: '{__name__=~"^container_.*",container!="POD",namespace!="",pod!=""}' + seriesFilters: [] + resources: + overrides: + namespace: + resource: namespace + pod: + resource: pod + name: + matches: ^container_(.*)_seconds_total$ + as: "" + metricsQuery: sum(rate(<<.Series>>{<<.LabelMatchers>>,container!="POD"}[5m])) + by (<<.GroupBy>>) + - seriesQuery: '{__name__=~"^container_.*",container!="POD",namespace!="",pod!=""}' + seriesFilters: + - isNot: ^container_.*_seconds_total$ + resources: + overrides: + namespace: + resource: namespace + pod: + resource: pod + name: + matches: ^container_(.*)_total$ + as: "" + metricsQuery: sum(rate(<<.Series>>{<<.LabelMatchers>>,container!="POD"}[5m])) + by (<<.GroupBy>>) + - seriesQuery: '{__name__=~"^container_.*",container!="POD",namespace!="",pod!=""}' + seriesFilters: + - isNot: ^container_.*_total$ + resources: + overrides: + namespace: + resource: namespace + pod: + resource: pod + name: + matches: ^container_(.*)$ + as: "" + metricsQuery: sum(<<.Series>>{<<.LabelMatchers>>,container!="POD"}) by (<<.GroupBy>>) + - seriesQuery: '{namespace!="",__name__!~"^container_.*"}' + seriesFilters: + - isNot: .*_total$ + resources: + template: <<.Resource>> + name: + matches: "" + as: "" + metricsQuery: sum(<<.Series>>{<<.LabelMatchers>>}) by (<<.GroupBy>>) + - seriesQuery: '{namespace!="",__name__!~"^container_.*"}' + seriesFilters: + - isNot: .*_seconds_total + resources: + template: <<.Resource>> + name: + matches: ^(.*)_total$ + as: "" + metricsQuery: sum(rate(<<.Series>>{<<.LabelMatchers>>}[5m])) by (<<.GroupBy>>) + - seriesQuery: '{namespace!="",__name__!~"^container_.*"}' + seriesFilters: [] + resources: + template: <<.Resource>> + name: + matches: ^(.*)_seconds_total$ + as: "" + metricsQuery: sum(rate(<<.Series>>{<<.LabelMatchers>>}[5m])) by (<<.GroupBy>>) diff --git a/src/cookbooks/kubernetes.prometheus.autoscale/chart/files/prometheus.yaml b/src/cookbooks/kubernetes.prometheus.autoscale/chart/files/prometheus.yaml new file mode 100644 index 00000000..dcfc5d27 --- /dev/null +++ b/src/cookbooks/kubernetes.prometheus.autoscale/chart/files/prometheus.yaml @@ -0,0 +1,28 @@ +scrape_configs: + - job_name: zilla + scrape_interval: 10s + kubernetes_sd_configs: + - role: endpoints + namespaces: + names: + - zilla-kubernetes-prometheus-autoscale + relabel_configs: + - source_labels: + - __meta_kubernetes_pod_name + target_label: pod + - source_labels: + - __meta_kubernetes_service_name + target_label: service + - source_labels: + - namespace + target_label: exported_namespace + - source_labels: + - __meta_kubernetes_namespace + target_label: namespace + - target_label: endpoint + replacement: prometheus + - source_labels: [__address__] + action: replace + regex: 
([^:]+):.* + replacement: $1:7190 # port you want to use + target_label: __address__ diff --git a/src/cookbooks/kubernetes.prometheus.autoscale/chart/templates/service-prometheus-adapter.yaml b/src/cookbooks/kubernetes.prometheus.autoscale/chart/templates/service-prometheus-adapter.yaml new file mode 100644 index 00000000..b64b25da --- /dev/null +++ b/src/cookbooks/kubernetes.prometheus.autoscale/chart/templates/service-prometheus-adapter.yaml @@ -0,0 +1,202 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: prometheus-adapter + labels: + app.kubernetes.io/instance: prometheus-adapter +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/instance: prometheus-adapter + template: + metadata: + labels: + app.kubernetes.io/instance: prometheus-adapter + spec: + containers: + - name: prometheus-adapter + image: "registry.k8s.io/prometheus-adapter/prometheus-adapter:v0.10.1" + args: + - /adapter + - --secure-port=443 + - --cert-dir=/tmp/cert + - --logtostderr=true + - --prometheus-url=http://prometheus.zilla-kubernetes-prometheus-autoscale.svc.cluster.local:9090 + - --metrics-relist-interval=1m + - --v=4 + - --config=/etc/prometheus-adapter/config.yaml + ports: + - name: https + containerPort: 443 + protocol: TCP + livenessProbe: + failureThreshold: 3 + httpGet: + path: /healthz + port: https + scheme: HTTPS + initialDelaySeconds: 30 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + readinessProbe: + failureThreshold: 3 + httpGet: + path: /healthz + port: https + scheme: HTTPS + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 5 + volumeMounts: + - name: config + mountPath: /etc/prometheus-adapter/ + readOnly: true + - mountPath: /tmp + name: tmp + serviceAccountName: prometheus-adapter + volumes: + - name: config + configMap: + name: prometheus-adapter + - name: tmp + emptyDir: {} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: prometheus-adapter +data: + config.yaml: |- +{{ .Files.Get "files/prometheus-adapter.yaml" | indent 4 }} +--- +apiVersion: v1 +kind: Service +metadata: + name: prometheus-adapter +spec: + selector: + app.kubernetes.io/instance: prometheus-adapter + ports: + - name: https + port: 443 + targetPort: https + type: ClusterIP +--- +apiVersion: apiregistration.k8s.io/v1 +kind: APIService +metadata: + name: v1beta1.custom.metrics.k8s.io +spec: + service: + name: prometheus-adapter + namespace: zilla-kubernetes-prometheus-autoscale + port: 443 + group: custom.metrics.k8s.io + version: v1beta1 + insecureSkipTLSVerify: true + groupPriorityMinimum: 100 + versionPriority: 100 +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: prometheus-adapter + labels: + app.kubernetes.io/instance: prometheus-adapter +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: prometheus-adapter-resource-reader + labels: + app.kubernetes.io/instance: prometheus-adapter +rules: + - apiGroups: + - "" + resources: + - namespaces + - pods + - services + - configmaps + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/instance: prometheus-adapter + name: prometheus-adapter-server-resources +rules: + - apiGroups: + - custom.metrics.k8s.io + resources: + - '*' + verbs: + - '*' +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: prometheus-adapter-auth-reader + namespace: kube-system + labels: + app.kubernetes.io/instance: prometheus-adapter +roleRef: + apiGroup: 
rbac.authorization.k8s.io + kind: Role + name: extension-apiserver-authentication-reader +subjects: + - kind: ServiceAccount + name: prometheus-adapter + namespace: zilla-kubernetes-prometheus-autoscale +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: prometheus-adapter-system-auth-delegator + labels: + app.kubernetes.io/instance: prometheus-adapter +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:auth-delegator +subjects: + - kind: ServiceAccount + name: prometheus-adapter + namespace: zilla-kubernetes-prometheus-autoscale +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: prometheus-adapter-resource-reader + labels: + app.kubernetes.io/instance: prometheus-adapter +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: prometheus-adapter-resource-reader +subjects: + - kind: ServiceAccount + name: prometheus-adapter + namespace: zilla-kubernetes-prometheus-autoscale +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: prometheus-adapter-hpa-controller + labels: + app.kubernetes.io/instance: prometheus-adapter +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: prometheus-adapter-server-resources +subjects: + - kind: ServiceAccount + name: prometheus-adapter + namespace: zilla-kubernetes-prometheus-autoscale diff --git a/src/cookbooks/kubernetes.prometheus.autoscale/chart/templates/service-prometheus.yaml b/src/cookbooks/kubernetes.prometheus.autoscale/chart/templates/service-prometheus.yaml new file mode 100644 index 00000000..36b36337 --- /dev/null +++ b/src/cookbooks/kubernetes.prometheus.autoscale/chart/templates/service-prometheus.yaml @@ -0,0 +1,118 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: prometheus + labels: + app.kubernetes.io/instance: prometheus +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/instance: prometheus + template: + metadata: + labels: + app.kubernetes.io/instance: prometheus + spec: + containers: + - name: prometheus + image: "prom/prometheus:v2.47.2" + ports: + - name: web + containerPort: 9090 + protocol: TCP + livenessProbe: + httpGet: + path: /-/healthy + port: web + initialDelaySeconds: 30 + timeoutSeconds: 30 + readinessProbe: + httpGet: + path: /-/ready + port: web + initialDelaySeconds: 5 + timeoutSeconds: 30 + volumeMounts: + - name: config + mountPath: /etc/prometheus/ + serviceAccountName: prometheus + volumes: + - name: config + configMap: + name: prometheus +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: prometheus +data: + prometheus.yml: |- +{{ .Files.Get "files/prometheus.yaml" | indent 4 }} +--- +apiVersion: v1 +kind: Service +metadata: + name: prometheus +spec: + selector: + app.kubernetes.io/instance: prometheus + ports: + - name: http + port: 9090 + type: ClusterIP +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: prometheus + labels: + app.kubernetes.io/instance: prometheus +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: prometheus + labels: + app.kubernetes.io/instance: prometheus +rules: + - apiGroups: + - "" + resources: + - nodes + - nodes/metrics + - services + - endpoints + - pods + verbs: + - get + - list + - watch + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - watch + - nonResourceURLs: + - /metrics + - /metrics/cadvisor + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: 
ClusterRoleBinding +metadata: + name: prometheus + labels: + app.kubernetes.io/instance: prometheus +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: prometheus +subjects: + - kind: ServiceAccount + name: prometheus + namespace: zilla-kubernetes-prometheus-autoscale diff --git a/src/cookbooks/kubernetes.prometheus.autoscale/check_hpa.sh b/src/cookbooks/kubernetes.prometheus.autoscale/check_hpa.sh new file mode 100755 index 00000000..2a046c8e --- /dev/null +++ b/src/cookbooks/kubernetes.prometheus.autoscale/check_hpa.sh @@ -0,0 +1,18 @@ +#!/bin/bash +BOLD='\033[1;97m' +END='\033[0m' +NAMESPACE="${NAMESPACE:-zilla-kubernetes-prometheus-autoscale}" + +echo -e "${BOLD}The status of horizontal pod autoscaling${END}" +echo -e "${BOLD}----------------------------------------${END}\n" + +echo -e "${BOLD}HorizontalPodAutoscaler:${END}" +kubectl get hpa --namespace $NAMESPACE +echo + +echo -e "${BOLD}Deployment:${END}" +kubectl get deployment zilla --namespace $NAMESPACE +echo + +echo -e "${BOLD}Pods:${END}" +kubectl get pods --namespace $NAMESPACE --selector app.kubernetes.io/instance=zilla diff --git a/src/cookbooks/kubernetes.prometheus.autoscale/check_metric.sh b/src/cookbooks/kubernetes.prometheus.autoscale/check_metric.sh new file mode 100755 index 00000000..5e198ac2 --- /dev/null +++ b/src/cookbooks/kubernetes.prometheus.autoscale/check_metric.sh @@ -0,0 +1,14 @@ +#!/bin/bash +BOLD='\033[1;97m' +END='\033[0m' +NAMESPACE="${NAMESPACE:-zilla-kubernetes-prometheus-autoscale}" + +echo -e "${BOLD}The value of stream_active_received metric${END}" +echo -e "${BOLD}------------------------------------------${END}\n" + +echo -e "${BOLD}Prometheus API:${END}" +curl -s http://localhost:9090/api/v1/query\?query\=stream_active_received | jq +echo + +echo -e "${BOLD}Kubernetes custom metrics API:${END}" +kubectl get --raw "/apis/custom.metrics.k8s.io/v1beta1/namespaces/$NAMESPACE/pod/*/stream_active_received" | jq diff --git a/src/cookbooks/kubernetes.prometheus.autoscale/setup.sh b/src/cookbooks/kubernetes.prometheus.autoscale/setup.sh new file mode 100755 index 00000000..f551bc30 --- /dev/null +++ b/src/cookbooks/kubernetes.prometheus.autoscale/setup.sh @@ -0,0 +1,22 @@ +#!/bin/sh +set -e +! [ -x "$(command -v helm)" ] && echo "WARN: Helm is required to run this setup." +! [ -x "$(command -v kubectl)" ] && echo "WARN: Kubectl is required to run this setup." 
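+# ZILLA_CHART, ZILLA_VERSION, and NAMESPACE below default to the values shown and
+# can be overridden in the environment before running this script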
+ +# Install Zilla to the Kubernetes cluster with helm and wait for the pod to start up +ZILLA_CHART="${ZILLA_CHART:-oci://ghcr.io/aklivity/charts/zilla}" +ZILLA_VERSION="${ZILLA_VERSION:-^0.9.0}" +NAMESPACE="${NAMESPACE:-zilla-kubernetes-prometheus-autoscale}" +helm upgrade --install zilla $ZILLA_CHART --version $ZILLA_VERSION --namespace $NAMESPACE --create-namespace --wait \ + --values values.yaml \ + --set-file zilla\\.yaml=zilla.yaml + +# Install Prometheus and Prometheus Adapter to the Kubernetes cluster with helm and wait for the pod to start up +helm upgrade --install prometheus chart --namespace $NAMESPACE --create-namespace --wait + +# Start port forwarding +kubectl port-forward --namespace $NAMESPACE service/zilla 7114 7190 > /tmp/kubectl-zilla.log 2>&1 & +kubectl port-forward --namespace $NAMESPACE service/prometheus 9090 > /tmp/kubectl-prometheus.log 2>&1 & +until nc -z localhost 7114; do sleep 1; done +until nc -z localhost 7190; do sleep 1; done +until nc -z localhost 9090; do sleep 1; done diff --git a/src/cookbooks/kubernetes.prometheus.autoscale/teardown.sh b/src/cookbooks/kubernetes.prometheus.autoscale/teardown.sh new file mode 100755 index 00000000..4189bf20 --- /dev/null +++ b/src/cookbooks/kubernetes.prometheus.autoscale/teardown.sh @@ -0,0 +1,10 @@ +#!/bin/sh +set -e + +# Stop port forwarding +pgrep kubectl && killall kubectl + +# Uninstall Zilla, Prometheus and Prometheus Adapter +NAMESPACE="${NAMESPACE:-zilla-kubernetes-prometheus-autoscale}" +helm uninstall zilla prometheus --namespace $NAMESPACE +kubectl delete namespace $NAMESPACE diff --git a/src/cookbooks/kubernetes.prometheus.autoscale/values.yaml b/src/cookbooks/kubernetes.prometheus.autoscale/values.yaml new file mode 100644 index 00000000..150d79ac --- /dev/null +++ b/src/cookbooks/kubernetes.prometheus.autoscale/values.yaml @@ -0,0 +1,39 @@ +livenessProbePort: 7114 +readinessProbePort: 7114 + +service: + ports: + - port: 7114 + name: http + - port: 7190 + name: metrics + +autoscaling: + enabled: true + minReplicas: 1 + maxReplicas: 5 + metrics: + - type: Pods + pods: + metric: + name: stream_active_received + target: + type: AverageValue + averageValue: 10 + behavior: + scaleDown: + stabilizationWindowSeconds: 10 + policies: + - type: Percent + value: 100 + periodSeconds: 15 + scaleUp: + stabilizationWindowSeconds: 0 + policies: + - type: Percent + value: 200 + periodSeconds: 15 + - type: Pods + value: 2 + periodSeconds: 15 + selectPolicy: Max diff --git a/src/cookbooks/kubernetes.prometheus.autoscale/zilla.yaml b/src/cookbooks/kubernetes.prometheus.autoscale/zilla.yaml new file mode 100644 index 00000000..be798ea8 --- /dev/null +++ b/src/cookbooks/kubernetes.prometheus.autoscale/zilla.yaml @@ -0,0 +1,55 @@ +--- +name: example +bindings: + north_tcp_server: + type: tcp + kind: server + options: + host: 0.0.0.0 + port: + - 7114 + routes: + - when: + - port: 7114 + exit: north_http_server + telemetry: + metrics: + - stream.* + north_http_server: + type: http + kind: server + routes: + - when: + - headers: + :scheme: http + exit: north_echo_server + telemetry: + metrics: + - http.* + north_echo_server: + type: echo + kind: server +telemetry: + metrics: + - http.request.size + - http.response.size + - stream.active.received + - stream.active.sent + - stream.opens.received + - stream.opens.sent + - stream.data.received + - stream.data.sent + - stream.errors.received + - stream.errors.sent + - stream.closes.received + - stream.closes.sent + exporters: + stdout_logs_exporter: + type: stdout + 
prometheus_endpoint:
+      type: prometheus
+      options:
+        endpoints:
+          - scheme: http
+            path: /metrics
+            port: 7190
diff --git a/src/cookbooks/mqtt.kafka.broker/README.md b/src/cookbooks/mqtt.kafka.broker/README.md
new file mode 100644
index 00000000..8c3a34bf
--- /dev/null
+++ b/src/cookbooks/mqtt.kafka.broker/README.md
+# mqtt.kafka.broker
+
+This is the resource folder for running the [MQTT Kafka broker guide](https://docs.aklivity.io/zilla/latest/how-tos/mqtt/mqtt.kafka.broker.html) found in our docs.
+
+## Running locally
+
+This cookbook runs using Docker compose.
+
+### Setup
+
+The `setup.sh` script will:
+
+- create the necessary Kafka topics
+- create an MQTT broker at `mqtt://localhost:7183`
+
+- Setup with a bitnami Kafka cluster
+
+  ```bash
+  ./setup.sh
+  ```
+
+- Setup with a Redpanda cluster
+
+  ```bash
+  KAFKA_VENDOR_PROFILE=redpanda ./setup.sh
+  ```
+
+- alternatively, run the equivalent plain docker compose commands directly
+
+  ```bash
+  docker compose --profile kafka --profile init-kafka up -d
+  ```
+
+  ```bash
+  KAFKA_VENDOR_PROFILE=redpanda docker compose --profile redpanda --profile init-redpanda up -d
+  ```
+
+### Using this cookbook
+
+Follow the steps in our [MQTT Kafka broker guide](https://docs.aklivity.io/zilla/latest/how-tos/mqtt/mqtt.kafka.broker.html#send-a-greeting).
+
+### Teardown
+
+The `teardown.sh` script will remove any resources created.
+
+```bash
+./teardown.sh
+```
diff --git a/src/cookbooks/mqtt.kafka.broker/compose.yaml b/src/cookbooks/mqtt.kafka.broker/compose.yaml
new file mode 100644
index 00000000..eb696acb
--- /dev/null
+++ b/src/cookbooks/mqtt.kafka.broker/compose.yaml
+name: ${NAMESPACE:-zilla-mqtt-kafka-broker}
+services:
+  zilla:
+    image: ghcr.io/aklivity/zilla:${ZILLA_VERSION:-latest}
+    pull_policy: always
+    restart: unless-stopped
+    ports:
+      - 7114:7114
+      - 7183:7183
+    healthcheck:
+      interval: 5s
+      timeout: 3s
+      retries: 5
+      test: ["CMD", "bash", "-c", "echo -n '' > /dev/tcp/127.0.0.1/7183"]
+    environment:
+      KAFKA_BOOTSTRAP_SERVER: ${KAFKA_BOOTSTRAP_SERVER:-kafka:29092}
+    volumes:
+      - ./zilla.yaml:/etc/zilla/zilla.yaml
+    command: start -v -e
+
+  # region Bitnami Kafka Vendor
+  kafka:
+    image: bitnami/kafka:3.5
+    profiles: [kafka]
+    restart: unless-stopped
+    ports:
+      - 9092:9092
+    healthcheck:
+      test: /opt/bitnami/kafka/bin/kafka-cluster.sh cluster-id --bootstrap-server kafka:29092 || exit 1
+      interval: 1s
+      timeout: 60s
+      retries: 60
+    environment:
+      ALLOW_PLAINTEXT_LISTENER: "yes"
+      KAFKA_CFG_NODE_ID: "1"
+      KAFKA_CFG_BROKER_ID: "1"
+      KAFKA_CFG_GROUP_INITIAL_REBALANCE_DELAY_MS: "0"
+      KAFKA_CFG_CONTROLLER_QUORUM_VOTERS: "1@127.0.0.1:9093"
+      KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP: "CLIENT:PLAINTEXT,INTERNAL:PLAINTEXT,CONTROLLER:PLAINTEXT"
+      KAFKA_CFG_CONTROLLER_LISTENER_NAMES: "CONTROLLER"
+      KAFKA_CFG_LOG_DIRS: "/tmp/logs"
+      KAFKA_CFG_PROCESS_ROLES: "broker,controller"
+      KAFKA_CFG_LISTENERS: "CLIENT://:9092,INTERNAL://:29092,CONTROLLER://:9093"
+      KAFKA_CFG_INTER_BROKER_LISTENER_NAME: "INTERNAL"
+      KAFKA_CFG_ADVERTISED_LISTENERS: "CLIENT://localhost:9092,INTERNAL://kafka:29092"
+      KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE: "true"
+
+  kafka-init:
+    image: bitnami/kafka:3.5
+    profiles: [init-kafka]
+    user: root
+    deploy:
+      restart_policy:
+        condition: none
+        max_attempts: 0
+    depends_on:
+      - kafka
+    entrypoint: ["/bin/sh", "-c"]
+    command:
+      - |
+        echo -e "blocks until kafka is reachable";
+        /opt/bitnami/kafka/bin/kafka-topics.sh --bootstrap-server kafka:29092 --list;
+        echo -e "Creating kafka topic";
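+        # The device, retained, and session topics below are created log compacted so the
+        # broker keeps only the latest value per key; the messages topic keeps the default policy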
/opt/bitnami/kafka/bin/kafka-topics.sh --bootstrap-server kafka:29092 --create --if-not-exists --topic mqtt-messages + /opt/bitnami/kafka/bin/kafka-topics.sh --bootstrap-server kafka:29092 --create --if-not-exists --topic mqtt-devices --config cleanup.policy=compact + /opt/bitnami/kafka/bin/kafka-topics.sh --bootstrap-server kafka:29092 --create --if-not-exists --topic mqtt-retained --config cleanup.policy=compact + /opt/bitnami/kafka/bin/kafka-topics.sh --bootstrap-server kafka:29092 --create --if-not-exists --topic mqtt-sessions --config cleanup.policy=compact + echo -e "Successfully created the following topics:"; + /opt/bitnami/kafka/bin/kafka-topics.sh --bootstrap-server kafka:29092 --list; + + kafka-ui: + image: ghcr.io/kafbat/kafka-ui:latest + profiles: [kafka] + restart: unless-stopped + ports: + - 8080:8080 + depends_on: + - kafka + environment: + KAFKA_CLUSTERS_0_NAME: local + KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka:29092 + # endregion Bitnami Kafka Vendor + + # region Redpanda Kafka Vendor + redpanda: + image: docker.redpanda.com/redpandadata/redpanda:v24.2.4 + profiles: [redpanda] + command: + - redpanda + - start + - --kafka-addr internal://0.0.0.0:29092,external://0.0.0.0:9092 + - --advertise-kafka-addr internal://redpanda:29092,external://localhost:9092 + - --pandaproxy-addr internal://0.0.0.0:28082,external://0.0.0.0:8082 + - --advertise-pandaproxy-addr internal://redpanda:28082,external://localhost:8082 + - --schema-registry-addr internal://0.0.0.0:28081,external://0.0.0.0:8081 + - --rpc-addr redpanda:33145 + - --advertise-rpc-addr redpanda:33145 + - --mode dev-container + - --smp 1 + - --default-log-level=info + ports: + - 8081:8081 + - 8082:8082 + - 9092:9092 + - 9644:9644 + healthcheck: + test: ["CMD-SHELL", "rpk cluster health | grep -E 'Healthy:.+true' || exit 1"] + interval: 15s + timeout: 3s + retries: 5 + start_period: 5s + redpanda-init: + image: docker.redpanda.com/redpandadata/redpanda:v24.2.4 + profiles: [init-redpanda] + depends_on: + redpanda: + condition: service_healthy + entrypoint: [ '/bin/sh', '-c' ] + environment: + REDPANDA_ADMIN: redpanda:9644 + REDPANDA_BROKER: redpanda:29092 + command: | + " + rpk topic create mqtt-messages --brokers $${REDPANDA_BROKER} + rpk topic create mqtt-devices -c cleanup.policy=compact --brokers $${REDPANDA_BROKER} + rpk topic create mqtt-retained -c cleanup.policy=compact --brokers $${REDPANDA_BROKER} + rpk topic create mqtt-sessions -c cleanup.policy=compact --brokers $${REDPANDA_BROKER} + " + + redpanda-console: + image: docker.redpanda.com/redpandadata/console:v2.3.6 + profiles: [redpanda] + entrypoint: /bin/sh + command: -c "echo \"$$CONSOLE_CONFIG_FILE\" > /tmp/config.yml; /app/console" + environment: + CONFIG_FILEPATH: /tmp/config.yml + CONSOLE_CONFIG_FILE: | + kafka: + brokers: ["redpanda:29092"] + schemaRegistry: + enabled: true + urls: ["http://redpanda:28081"] + redpanda: + adminApi: + enabled: true + urls: ["http://redpanda:9644"] + connect: + enabled: true + clusters: + - name: local-connect-cluster + url: http://connect:28083 + ports: + - 8080:8080 + # endregion Redpanda Kafka Vendor + +networks: + default: + driver: bridge diff --git a/src/cookbooks/mqtt.kafka.broker/setup.sh b/src/cookbooks/mqtt.kafka.broker/setup.sh new file mode 100644 index 00000000..2ab2d96b --- /dev/null +++ b/src/cookbooks/mqtt.kafka.broker/setup.sh @@ -0,0 +1,13 @@ +#!/bin/sh +set -e + +KAFKA_VENDOR_PROFILE="${KAFKA_VENDOR_PROFILE:-kafka}" +export COMPOSE_PROFILES="${COMPOSE_PROFILES:-$KAFKA_VENDOR_PROFILE,init-$KAFKA_VENDOR_PROFILE}" 
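+# e.g. the defaults above expand to COMPOSE_PROFILES=kafka,init-kafka,
+# while KAFKA_VENDOR_PROFILE=redpanda expands to COMPOSE_PROFILES=redpanda,init-redpanda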
+
+export KAFKA_BOOTSTRAP_SERVER="${KAFKA_BOOTSTRAP_SERVER:-$KAFKA_VENDOR_PROFILE:29092}"
+
+# Start or restart Zilla
+if [ -z "$(docker compose ps -q zilla)" ]; then
+docker compose up -d
+else
+docker compose up -d --force-recreate --no-deps zilla
+fi
diff --git a/src/cookbooks/mqtt.kafka.broker/teardown.sh b/src/cookbooks/mqtt.kafka.broker/teardown.sh
new file mode 100644
index 00000000..0a75864b
--- /dev/null
+++ b/src/cookbooks/mqtt.kafka.broker/teardown.sh
@@ -0,0 +1,5 @@
+#!/bin/sh
+set -e
+
+docker compose -p "${NAMESPACE:-zilla-mqtt-kafka-broker}" down --remove-orphans
+
diff --git a/src/how-tos/mqtt/mqtt_kafka_broker_zilla.yaml b/src/cookbooks/mqtt.kafka.broker/zilla.yaml
similarity index 100%
rename from src/how-tos/mqtt/mqtt_kafka_broker_zilla.yaml
rename to src/cookbooks/mqtt.kafka.broker/zilla.yaml
diff --git a/src/cookbooks/quickstart/README.md b/src/cookbooks/quickstart/README.md
new file mode 100644
index 00000000..64e31ca5
--- /dev/null
+++ b/src/cookbooks/quickstart/README.md
@@ -0,0 +1,56 @@
+# quickstart
+
+Follow the [Zilla Quickstart](https://docs.aklivity.io/zilla/latest/how-tos/quickstart/) to discover some of what Zilla can do!
+
+## Running locally
+
+This quickstart runs using Docker Compose.
+
+### Setup
+
+The `setup.sh` script will:
+
+- Configure a Zilla instance with the REST, SSE, gRPC, and MQTT protocols
+- Create Kafka topics
+- Start a gRPC Route Guide server
+- Start an MQTT message simulator
+
+- Set up with a Bitnami Kafka cluster
+
+  ```bash
+  ./setup.sh
+  ```
+
+- Set up with a Redpanda cluster
+
+  ```bash
+  KAFKA_VENDOR_PROFILE=redpanda ./setup.sh
+  ```
+
+- Alternatively, run the equivalent plain `docker compose` commands directly
+
+  ```bash
+  docker compose --profile kafka --profile init-kafka up -d
+  ```
+
+  ```bash
+  KAFKA_VENDOR_PROFILE=redpanda docker compose --profile redpanda --profile init-redpanda up -d
+  ```
+
+### Using this quickstart
+
+You can interact with this quickstart using our [Postman collection](https://www.postman.com/aklivity-zilla/workspace/aklivity-zilla-quickstart/overview).
+
+### Generating combined zilla.yaml
+
+The individual `*-zilla.yaml` configs are merged into the single `zilla.yaml` used by the compose stack:
+
+```bash
+yq '.' *-zilla.yaml > zilla.yaml
+```
+
+### Teardown
+
+The `teardown.sh` script will remove any resources created.
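+
+As the script itself shows, it just runs `docker compose down` scoped to the compose project name:
+
+```bash
+docker compose -p "${NAMESPACE:-zilla-quickstart}" down --remove-orphans
+```
+
+Run it from this directory: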
+ +```bash +./teardown.sh +``` diff --git a/src/cookbooks/quickstart/compose.yaml b/src/cookbooks/quickstart/compose.yaml new file mode 100644 index 00000000..cca0da0c --- /dev/null +++ b/src/cookbooks/quickstart/compose.yaml @@ -0,0 +1,178 @@ +name: ${NAMESPACE:-zilla-quickstart} +services: + zilla: + image: ghcr.io/aklivity/zilla:${ZILLA_VERSION:-latest} + restart: unless-stopped + ports: + - 7114:7114 + - 7151:7151 + - 7183:7183 + - 7190:7190 + environment: + KAFKA_BOOTSTRAP_SERVER: ${KAFKA_BOOTSTRAP_SERVER:-kafka:29092} + ROUTE_GUIDE_SERVER_HOST: route-guide-server + ROUTE_GUIDE_SERVER_PORT: 50051 + healthcheck: + interval: 5s + timeout: 3s + retries: 5 + test: ["CMD", "bash", "-c", "echo -n '' > /dev/tcp/127.0.0.1/7114"] + volumes: + - ./zilla.yaml:/etc/zilla/zilla.yaml + - ./protos/:/etc/zilla/protos/ + command: start -v -e + + mqtt-simulator: + image: ghcr.io/vordimous/mqtt-simulator:zilla-quickstart + restart: unless-stopped + volumes: + - ./mqtt_sim_settings.json:/usr/src/app/config/settings.json + + route-guide-server: + image: ghcr.io/aklivity/extras-route-guide-server:sha-e49c994 + restart: unless-stopped + ports: + - 50051:50051 + + # region Bitnami Kafka Vendor + kafka: + image: bitnami/kafka:3.5 + profiles: [kafka] + restart: unless-stopped + ports: + - 9092:9092 + healthcheck: + test: /opt/bitnami/kafka/bin/kafka-cluster.sh cluster-id --bootstrap-server kafka:29092 || exit 1 + interval: 1s + timeout: 60s + retries: 60 + environment: + ALLOW_PLAINTEXT_LISTENER: "yes" + KAFKA_CFG_NODE_ID: "1" + KAFKA_CFG_BROKER_ID: "1" + KAFKA_CFG_GROUP_INITIAL_REBALANCE_DELAY_MS: "0" + KAFKA_CFG_CONTROLLER_QUORUM_VOTERS: "1@127.0.0.1:9093" + KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP: "CLIENT:PLAINTEXT,INTERNAL:PLAINTEXT,CONTROLLER:PLAINTEXT" + KAFKA_CFG_CONTROLLER_LISTENER_NAMES: "CONTROLLER" + KAFKA_CFG_LOG_DIRS: "/tmp/logs" + KAFKA_CFG_PROCESS_ROLES: "broker,controller" + KAFKA_CFG_LISTENERS: "CLIENT://:9092,INTERNAL://:29092,CONTROLLER://:9093" + KAFKA_CFG_INTER_BROKER_LISTENER_NAME: "INTERNAL" + KAFKA_CFG_ADVERTISED_LISTENERS: "CLIENT://localhost:9092,INTERNAL://kafka:29092" + KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE: "true" + + kafka-init: + image: bitnami/kafka:3.5 + profiles: [init-kafka] + user: root + deploy: + restart_policy: + condition: none + max_attempts: 0 + depends_on: + - kafka + entrypoint: ["/bin/sh", "-c"] + command: + - | + echo -e "blocks until kafka is reachable"; + /opt/bitnami/kafka/bin/kafka-topics.sh --bootstrap-server kafka:29092 --list; + echo -e "Creating kafka topic"; + /opt/bitnami/kafka/bin/kafka-topics.sh --bootstrap-server kafka:29092 --create --if-not-exists --topic http-messages --config cleanup.policy=compact + /opt/bitnami/kafka/bin/kafka-topics.sh --bootstrap-server kafka:29092 --create --if-not-exists --topic grpc-request + /opt/bitnami/kafka/bin/kafka-topics.sh --bootstrap-server kafka:29092 --create --if-not-exists --topic grpc-response + /opt/bitnami/kafka/bin/kafka-topics.sh --bootstrap-server kafka:29092 --create --if-not-exists --topic mqtt-messages + /opt/bitnami/kafka/bin/kafka-topics.sh --bootstrap-server kafka:29092 --create --if-not-exists --topic mqtt-retained --config cleanup.policy=compact + /opt/bitnami/kafka/bin/kafka-topics.sh --bootstrap-server kafka:29092 --create --if-not-exists --topic mqtt-sessions --config cleanup.policy=compact + echo -e "Successfully created the following topics:"; + /opt/bitnami/kafka/bin/kafka-topics.sh --bootstrap-server kafka:29092 --list; + + kafka-ui: + image: ghcr.io/kafbat/kafka-ui:latest + profiles: [kafka] 
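+    # this UI is started only with the bitnami `kafka` profile; the `redpanda`
+    # profile brings up redpanda-console on the same host port (8080) instead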
+ restart: unless-stopped + ports: + - 8080:8080 + depends_on: + - kafka + environment: + KAFKA_CLUSTERS_0_NAME: local + KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: kafka:29092 + # endregion Bitnami Kafka Vendor + + # region Redpanda Kafka Vendor + redpanda: + image: docker.redpanda.com/redpandadata/redpanda:v24.2.4 + profiles: [redpanda] + command: + - redpanda + - start + - --kafka-addr internal://0.0.0.0:29092,external://0.0.0.0:9092 + - --advertise-kafka-addr internal://redpanda:29092,external://localhost:9092 + - --pandaproxy-addr internal://0.0.0.0:28082,external://0.0.0.0:8082 + - --advertise-pandaproxy-addr internal://redpanda:28082,external://localhost:8082 + - --schema-registry-addr internal://0.0.0.0:28081,external://0.0.0.0:8081 + - --rpc-addr redpanda:33145 + - --advertise-rpc-addr redpanda:33145 + - --mode dev-container + - --smp 1 + - --default-log-level=info + ports: + - 8081:8081 + - 8082:8082 + - 9092:9092 + - 9644:9644 + healthcheck: + test: ["CMD-SHELL", "rpk cluster health | grep -E 'Healthy:.+true' || exit 1"] + interval: 15s + timeout: 3s + retries: 5 + start_period: 5s + redpanda-init: + image: docker.redpanda.com/redpandadata/redpanda:v24.2.4 + profiles: [init-redpanda] + depends_on: + redpanda: + condition: service_healthy + entrypoint: [ '/bin/sh', '-c' ] + environment: + REDPANDA_ADMIN: redpanda:9644 + REDPANDA_BROKER: redpanda:29092 + command: | + " + rpk topic create http-messages -c cleanup.policy=compact --brokers $${REDPANDA_BROKER} + rpk topic create grpc-request --brokers $${REDPANDA_BROKER}; + rpk topic create grpc-response --brokers $${REDPANDA_BROKER}; + rpk topic create mqtt-messages --brokers $${REDPANDA_BROKER}; + rpk topic create mqtt-retained -c cleanup.policy=compact --brokers $${REDPANDA_BROKER}; + rpk topic create mqtt-sessions -c cleanup.policy=compact --brokers $${REDPANDA_BROKER}; + " + + redpanda-console: + image: docker.redpanda.com/redpandadata/console:v2.3.6 + profiles: [redpanda] + entrypoint: /bin/sh + command: -c "echo \"$$CONSOLE_CONFIG_FILE\" > /tmp/config.yml; /app/console" + environment: + CONFIG_FILEPATH: /tmp/config.yml + CONSOLE_CONFIG_FILE: | + kafka: + brokers: ["redpanda:29092"] + schemaRegistry: + enabled: true + urls: ["http://redpanda:28081"] + redpanda: + adminApi: + enabled: true + urls: ["http://redpanda:9644"] + connect: + enabled: true + clusters: + - name: local-connect-cluster + url: http://connect:28083 + ports: + - 8080:8080 + # endregion Redpanda Kafka Vendor + +networks: + default: + driver: bridge diff --git a/src/how-tos/quickstart/grpc-zilla.yaml b/src/cookbooks/quickstart/grpc-zilla.yaml similarity index 96% rename from src/how-tos/quickstart/grpc-zilla.yaml rename to src/cookbooks/quickstart/grpc-zilla.yaml index 89267d32..02be6e0d 100644 --- a/src/how-tos/quickstart/grpc-zilla.yaml +++ b/src/cookbooks/quickstart/grpc-zilla.yaml @@ -7,7 +7,7 @@ catalogs: options: subjects: route_guide: - path: /proto/route_guide.proto + path: protos/route_guide.proto #endregion route_guide_proto bindings: @@ -152,3 +152,7 @@ telemetry: - grpc.active.requests - grpc.requests.per.rpc - grpc.responses.per.rpc + exporters: + # Enable Standard Out logs + stdout_logs_exporter: + type: stdout diff --git a/src/how-tos/quickstart/http-zilla.yaml b/src/cookbooks/quickstart/http-zilla.yaml similarity index 97% rename from src/how-tos/quickstart/http-zilla.yaml rename to src/cookbooks/quickstart/http-zilla.yaml index 76b7c36e..5501f266 100644 --- a/src/how-tos/quickstart/http-zilla.yaml +++ b/src/cookbooks/quickstart/http-zilla.yaml @@ -164,3 
+164,7 @@ telemetry: - stream.errors.received - stream.active.sent - stream.active.received + exporters: + # Enable Standard Out logs + stdout_logs_exporter: + type: stdout diff --git a/src/how-tos/quickstart/mqtt-zilla.yaml b/src/cookbooks/quickstart/mqtt-zilla.yaml similarity index 94% rename from src/how-tos/quickstart/mqtt-zilla.yaml rename to src/cookbooks/quickstart/mqtt-zilla.yaml index 3de75abd..ffe3e1b4 100644 --- a/src/how-tos/quickstart/mqtt-zilla.yaml +++ b/src/cookbooks/quickstart/mqtt-zilla.yaml @@ -80,3 +80,7 @@ telemetry: - stream.errors.received - stream.active.sent - stream.active.received + exporters: + # Enable Standard Out logs + stdout_logs_exporter: + type: stdout diff --git a/src/cookbooks/quickstart/mqtt_sim_settings.json b/src/cookbooks/quickstart/mqtt_sim_settings.json new file mode 100644 index 00000000..0e7c5a9e --- /dev/null +++ b/src/cookbooks/quickstart/mqtt_sim_settings.json @@ -0,0 +1,119 @@ +{ + "BROKER_URL": "zilla", + "BROKER_PORT": 7183, + "PROTOCOL_VERSION": 5, + "CLEAN_SESSION": false, + "QOS": 0, + "TOPICS": [ + { + "TYPE": "multiple", + "PREFIX": "lamp", + "RANGE_START": 1, + "RANGE_END": 2, + "TIME_INTERVAL": 4, + "DATA": [ + { + "NAME": "on", + "TYPE": "bool", + "RETAIN_PROBABILITY": 0.85 + }, + { + "NAME": "temperature", + "TYPE": "int", + "INITIAL_VALUE": 2750, + "MIN_VALUE": 2700, + "MAX_VALUE": 6500, + "MAX_STEP": 250, + "RETAIN_PROBABILITY": 0.3, + "RESET_PROBABILITY": 0.1, + "INCREASE_PROBABILITY": 0.8, + "RESTART_ON_BOUNDARIES": true + } + ] + }, + { + "TYPE": "single", + "PREFIX": "air_quality", + "TIME_INTERVAL": 6, + "DATA": [ + { + "NAME": "pollution_particles", + "TYPE": "float", + "MIN_VALUE": 0, + "MAX_VALUE": 1, + "MAX_STEP": 0.15, + "RETAIN_PROBABILITY": 0.9 + }, + { + "NAME": "alert", + "TYPE": "bool", + "RETAIN_PROBABILITY": 0.9 + } + ] + }, + { + "TYPE": "list", + "PREFIX": "temperature", + "LIST": ["roof", "basement"], + "TIME_INTERVAL": 8, + "DATA": [ + { + "NAME": "temperature", + "TYPE": "float", + "MIN_VALUE": 20, + "MAX_VALUE": 55, + "MAX_STEP": 3, + "RETAIN_PROBABILITY": 0.5, + "INCREASE_PROBABILITY": 0.6 + } + ] + }, + { + "TYPE": "single", + "PREFIX": "freezer", + "TIME_INTERVAL": 6, + "DATA": [ + { + "NAME": "temperature", + "TYPE": "math_expression", + "RETAIN_PROBABILITY": 0.1, + "MATH_EXPRESSION": "2*math.pow(x,2)+1", + "INTERVAL_START": 0, + "INTERVAL_END": 5, + "MIN_DELTA": 0.3, + "MAX_DELTA": 0.5 + } + ] + }, + { + "TYPE": "single", + "PREFIX": "location", + "TIME_INTERVAL": 5, + "DATA": [ + { + "NAME": "coordinate", + "TYPE": "raw_values", + "RESTART_ON_END": true, + "VALUES": [ + { + "lat": -121.883682, + "long": 37.354635 + }, + { + "lat": -121.883352, + "long": 37.354192 + }, + { + "lat": -121.884284, + "long": 37.353757 + }, + { + "lat": -121.885227, + "long": 37.353324 + } + ] + } + ] + } + ] +} diff --git a/src/how-tos/quickstart/route_guide.proto b/src/cookbooks/quickstart/protos/route_guide.proto similarity index 83% rename from src/how-tos/quickstart/route_guide.proto rename to src/cookbooks/quickstart/protos/route_guide.proto index d8557902..966c434a 100644 --- a/src/how-tos/quickstart/route_guide.proto +++ b/src/cookbooks/quickstart/protos/route_guide.proto @@ -1,3 +1,17 @@ +// Copyright 2015 gRPC authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + syntax = "proto3"; option go_package = "google.golang.org/grpc/examples/route_guide/routeguide"; diff --git a/src/cookbooks/quickstart/setup.sh b/src/cookbooks/quickstart/setup.sh new file mode 100755 index 00000000..2ab2d96b --- /dev/null +++ b/src/cookbooks/quickstart/setup.sh @@ -0,0 +1,13 @@ +#!/bin/sh +set -e + +KAFKA_VENDOR_PROFILE="${KAFKA_VENDOR_PROFILE:-kafka}" +export COMPOSE_PROFILES="${COMPOSE_PROFILES:-$KAFKA_VENDOR_PROFILE,init-$KAFKA_VENDOR_PROFILE}" +export KAFKA_BOOTSTRAP_SERVER="${KAFKA_BOOTSTRAP_SERVER:-$KAFKA_VENDOR_PROFILE:29092}" + +# Start or restart Zilla +if [ -z "$(docker compose ps -q zilla)" ]; then +docker compose up -d +else +docker compose up -d --force-recreate --no-deps zilla +fi diff --git a/src/cookbooks/quickstart/teardown.sh b/src/cookbooks/quickstart/teardown.sh new file mode 100755 index 00000000..35d12f5a --- /dev/null +++ b/src/cookbooks/quickstart/teardown.sh @@ -0,0 +1,5 @@ +#!/bin/sh +set -e + +docker compose -p "${NAMESPACE:-zilla-quickstart}" down --remove-orphans + diff --git a/src/how-tos/quickstart/telem-export-zilla.yaml b/src/cookbooks/quickstart/telem-export-zilla.yaml similarity index 76% rename from src/how-tos/quickstart/telem-export-zilla.yaml rename to src/cookbooks/quickstart/telem-export-zilla.yaml index 940750e8..11aa16e9 100644 --- a/src/how-tos/quickstart/telem-export-zilla.yaml +++ b/src/cookbooks/quickstart/telem-export-zilla.yaml @@ -1,9 +1,6 @@ name: telemetry-exporters telemetry: exporters: - # Enable Standard Out logs - stdout_logs_exporter: - type: stdout # Prometheus endpoint definition prometheus_metric_exporter: type: prometheus diff --git a/src/cookbooks/quickstart/zilla.yaml b/src/cookbooks/quickstart/zilla.yaml new file mode 100644 index 00000000..585be831 --- /dev/null +++ b/src/cookbooks/quickstart/zilla.yaml @@ -0,0 +1,415 @@ +# Generated with: yq '.' 
*-zilla.yaml > zilla.yaml +--- +name: grpc-quickstart +#region route_guide_proto +catalogs: + host_filesystem: + type: filesystem + options: + subjects: + route_guide: + path: protos/route_guide.proto +#endregion route_guide_proto + +bindings: + # Proxy service entrypoint + north_tcp_server: + type: tcp + kind: server + options: + host: 0.0.0.0 + port: + - 7151 + routes: + - when: + - port: 7151 + exit: north_grpc_http_server + telemetry: + metrics: + - stream.* + north_grpc_http_server: + type: http + kind: server + options: + versions: + - h2 + access-control: + policy: cross-origin + exit: north_grpc_server + telemetry: + metrics: + - stream.* + - http.* + # gRPC service definition to Kafka topics + #region route_guide_service_definition + north_grpc_server: + type: grpc + kind: server + catalog: + host_filesystem: + - subject: route_guide + exit: north_grpc_kafka_mapping + #endregion route_guide_service_definition + telemetry: + metrics: + - stream.* + - grpc.* + #region route_guide_service_mapping + north_grpc_kafka_mapping: + type: grpc-kafka + kind: proxy + routes: + - when: + - method: routeguide.RouteGuide/* + exit: north_kafka_cache_client + with: + capability: produce + topic: grpc-request + acks: leader_only + reply-to: grpc-response + #endregion route_guide_service_mapping + + # Kafka sync layer + north_kafka_cache_client: + type: kafka + kind: cache_client + exit: south_kafka_cache_server + south_kafka_cache_server: + type: kafka + kind: cache_server + options: + bootstrap: + - grpc-request + - grpc-response + exit: south_kafka_client + # Connect to local Kafka + south_kafka_client: + type: kafka + kind: client + options: + servers: + - ${{env.KAFKA_BOOTSTRAP_SERVER}} + exit: south_tcp_client + south_tcp_client: + type: tcp + kind: client + # Kafka to external gRPC server + #region route_guide_interface + west_kafka_grpc_remote_server: + type: kafka-grpc + kind: remote_server + entry: north_kafka_cache_client + options: + acks: leader_only + routes: + - when: + - topic: grpc-request + reply-to: grpc-response + method: routeguide.RouteGuide/* + with: + scheme: http + authority: ${{env.ROUTE_GUIDE_SERVER_HOST}}:${{env.ROUTE_GUIDE_SERVER_PORT}} + #endregion route_guide_interface + exit: west_route_guide_grpc_client + # gRPC RoutGuide server config + west_route_guide_grpc_client: + type: grpc + kind: client + exit: west_route_guide_http_client + west_route_guide_http_client: + type: http + kind: client + options: + versions: + - h2 + exit: west_route_guide_tcp_client + #region route_guide_server + west_route_guide_tcp_client: + type: tcp + kind: client + options: + host: ${{env.ROUTE_GUIDE_SERVER_HOST}} + port: ${{env.ROUTE_GUIDE_SERVER_PORT}} + #endregion route_guide_server +telemetry: + # Desired metrics to track + metrics: + - http.active.requests + - http.request.size + - http.response.size + - stream.opens.sent + - stream.opens.received + - stream.closes.sent + - stream.closes.received + - stream.errors.sent + - stream.errors.received + - stream.active.sent + - stream.active.received + - grpc.active.requests + - grpc.requests.per.rpc + - grpc.responses.per.rpc + exporters: + # Enable Standard Out logs + stdout_logs_exporter: + type: stdout +--- +name: http-quickstart +bindings: + # Proxy service entrypoint + north_tcp_server: + type: tcp + kind: server + options: + host: 0.0.0.0 + port: + - 7114 + routes: + - when: + - port: 7114 + exit: north_http_server + telemetry: + metrics: + - stream.* + north_http_server: + type: http + kind: server + options: + versions: + - h2 + - 
http/1.1 + access-control: + policy: cross-origin + routes: + - when: + - headers: + :path: /api/stream + exit: north_sse_server + - when: + - headers: + :path: /api/stream/* + exit: north_sse_server + - when: + - headers: + :path: /api/* + exit: north_rest_api_http_kafka_mapping + telemetry: + metrics: + - stream.* + - http.* + # REST proxy endpoints to Kafka a topic + north_rest_api_http_kafka_mapping: + type: http-kafka + kind: proxy + routes: + #region rest_create + - when: + - method: POST + path: /api/items + exit: north_kafka_cache_client + with: + capability: produce + topic: http-messages + key: ${idempotencyKey} + #endregion rest_create + #region rest_update + - when: + - method: PUT + path: /api/items/{id} + exit: north_kafka_cache_client + with: + capability: produce + topic: http-messages + key: ${params.id} + #endregion rest_update + #region rest_delete + - when: + - method: DELETE + path: /api/items/{id} + exit: north_kafka_cache_client + with: + capability: produce + topic: http-messages + key: ${params.id} + #endregion rest_delete + #region rest_retrieve_all + - when: + - method: GET + path: /api/items + exit: north_kafka_cache_client + with: + capability: fetch + topic: http-messages + merge: + content-type: application/json + #endregion rest_retrieve_all + #region rest_retrieve_id + - when: + - method: GET + path: /api/items/{id} + exit: north_kafka_cache_client + with: + capability: fetch + topic: http-messages + filters: + - key: ${params.id} + #endregion rest_retrieve_id + # SSE Server to Kafka topics + north_sse_server: + type: sse + kind: server + exit: north_sse_kafka_mapping + north_sse_kafka_mapping: + type: sse-kafka + kind: proxy + routes: + #region sse_stream_all + - when: + - path: /api/stream + exit: north_kafka_cache_client + with: + topic: http-messages + #endregion sse_stream_all + #region sse_stream_id + - when: + - path: /api/stream/{id} + exit: north_kafka_cache_client + with: + topic: http-messages + filters: + - key: ${params.id} + #endregion sse_stream_id + # Kafka sync layer + north_kafka_cache_client: + type: kafka + kind: cache_client + exit: south_kafka_cache_server + south_kafka_cache_server: + type: kafka + kind: cache_server + options: + bootstrap: + - http-messages + exit: south_kafka_client + # Connect to local Kafka + south_kafka_client: + type: kafka + kind: client + options: + servers: + - ${{env.KAFKA_BOOTSTRAP_SERVER}} + exit: south_tcp_client + south_tcp_client: + type: tcp + kind: client +telemetry: + # Desired metrics to track + metrics: + - http.active.requests + - http.request.size + - http.response.size + - stream.opens.sent + - stream.opens.received + - stream.closes.sent + - stream.closes.received + - stream.errors.sent + - stream.errors.received + - stream.active.sent + - stream.active.received + exporters: + # Enable Standard Out logs + stdout_logs_exporter: + type: stdout +--- +name: mqtt-quickstart +bindings: + # Proxy service entrypoint + north_tcp_server: + type: tcp + kind: server + options: + host: 0.0.0.0 + port: + - 7183 + routes: + - when: + - port: 7183 + exit: north_mqtt_server + telemetry: + metrics: + - stream.* + # MQTT Server to Kafka topics + #region mqtt_broker_mapping + north_mqtt_server: + type: mqtt + kind: server + exit: north_mqtt_kafka_mapping + north_mqtt_kafka_mapping: + type: mqtt-kafka + kind: proxy + options: + topics: + sessions: mqtt-sessions + messages: mqtt-messages + retained: mqtt-retained + #endregion mqtt_broker_mapping + exit: north_kafka_cache_client + telemetry: + metrics: + - stream.* + 
# Kafka sync layer + north_kafka_cache_client: + type: kafka + kind: cache_client + exit: south_kafka_cache_server + telemetry: + metrics: + - stream.* + south_kafka_cache_server: + type: kafka + kind: cache_server + options: + bootstrap: + - mqtt-messages + - mqtt-retained + exit: south_kafka_client + telemetry: + metrics: + - stream.* + # Connect to local Kafka + south_kafka_client: + type: kafka + kind: client + options: + servers: + - ${{env.KAFKA_BOOTSTRAP_SERVER}} + exit: south_tcp_client + south_tcp_client: + type: tcp + kind: client +telemetry: + # Desired metrics to track + metrics: + - stream.opens.sent + - stream.opens.received + - stream.closes.sent + - stream.closes.received + - stream.errors.sent + - stream.errors.received + - stream.active.sent + - stream.active.received + exporters: + # Enable Standard Out logs + stdout_logs_exporter: + type: stdout +--- +name: telemetry-exporters +telemetry: + exporters: + # Prometheus endpoint definition + prometheus_metric_exporter: + type: prometheus + options: + endpoints: + - scheme: http + path: /metrics + port: 7190 diff --git a/src/how-tos/catalogs/index.md b/src/how-tos/catalogs/index.md index 06cc13fd..d5d15ec7 100644 --- a/src/how-tos/catalogs/index.md +++ b/src/how-tos/catalogs/index.md @@ -6,7 +6,7 @@ A registered catalog is a namespace scoped item in a zilla config that can be re Local catalogs are used to quickly bootstrap a Zilla config or package a standard schema that doesn't change often with a Zilla install. -A simple way to reference a file in Zilla is from the local filesystem. Adding [files to a Zilla pod](../deploy-operate.md#adding-files-to-the-zilla-pod) relative to the Zilla install directory. +A simple way to reference a file in Zilla is from the local filesystem. Adding [files to a Zilla pod](../deploy-operate/index.md#adding-files-to-the-zilla-pod) relative to the Zilla install directory. ```yaml catalogs: @@ -18,7 +18,7 @@ catalogs: path: relative/path/to/local.file ``` -In some environments the local filesystem isn't accessible. Schema subjects can be definined inside of the Zilla config to keep all of the relevant model information in one place. +In some environments the local filesystem isn't accessible. Schema subjects can be defined inside of the Zilla config to keep all of the relevant model information in one place. ```yaml catalogs: diff --git a/src/how-tos/connecting-to-kafka/aiven.md b/src/how-tos/connecting-to-kafka/aiven.md index 3b880ba4..155195ed 100644 --- a/src/how-tos/connecting-to-kafka/aiven.md +++ b/src/how-tos/connecting-to-kafka/aiven.md @@ -42,7 +42,7 @@ You can use the scripts shown below to generate `truststore.p12` and `keystore.p @tab truststore.p12 -```bash:no-line-numbers +```bash keytool -import -file ca.pem \ -alias YOUR_KAFKA_SIGNED_CLIENT_CERT_ALIAS \ -keystore truststore.p12 @@ -50,7 +50,7 @@ keytool -import -file ca.pem \ @tab keystore.p12 -```bash:no-line-numbers +```bash openssl pkcs12 -export -in service.cert -inkey service.key \ -out keystore.p12 -name YOUR_KAFKA_CA_CERT_ALIAS \ -CAfile ca.pem @@ -109,4 +109,4 @@ bindings: SNI adds the domain name to the TLS handshake process so that the Zilla process reaches the right domain name and receives the correct SSL certificate. ::: -To test the above config you can use it to add or replace the necessary bindings in the [http.kafka.sasl.scram example](https://github.com/aklivity/zilla-examples/tree/main/http.kafka.sasl.scram). 
+To test the above config you can use it to add or replace the necessary bindings in the [http.kafka.sasl.scram](https://github.com/aklivity/zilla-docs/releases/latest/download/http.kafka.sasl.scram.tar.gz) cookbook.
diff --git a/src/how-tos/connecting-to-kafka/amazon-msk.md b/src/how-tos/connecting-to-kafka/amazon-msk.md
index 419e12a6..b803fa18 100644
--- a/src/how-tos/connecting-to-kafka/amazon-msk.md
+++ b/src/how-tos/connecting-to-kafka/amazon-msk.md
@@ -34,7 +34,7 @@ Follow the [Create Client Certificate guide](../../solutions/how-tos/aws-service
 
 First, you will export the Client Certificate to a local file called `client.cert`. To do this you will need the `ARN` of the client certificate as well as of the [certificate authority](../../solutions/how-tos/aws-services/create-certificate-authority-acm.md) used to issue the certificate, and run the following command:
 
-```bash:no-line-numbers
+```bash
 aws acm-pca get-certificate --certificate-authority-arn CERTIFICATE_AUTHORITY_ARN \
     --certificate-arn CERTIFICATE_ARN --output text
 ```
@@ -80,7 +80,7 @@ With the `bootstrap server name` in hand, run the following command to create th
 
 #### keystore.p12
 
-```bash:no-line-numbers
+```bash
 openssl pkcs12 -export -in client.cert -inkey client-1.key.pem \
     -out keystore.p12 -name SIGNED_CLIENT_CERT_ALIES \
     -CAfile ca.pem
@@ -142,4 +142,4 @@ bindings:
 SNI adds the domain name to the TLS handshake process so that the Zilla process reaches the right domain name and receives the correct SSL certificate.
 :::
 
-To test the above config you can use it to add or replace the necessary bindings in the [http.kafka.sasl.scram example](https://github.com/aklivity/zilla-examples/tree/main/http.kafka.sasl.scram).
+To test the above config you can use it to add or replace the necessary bindings in the [http.kafka.sasl.scram](https://github.com/aklivity/zilla-docs/releases/latest/download/http.kafka.sasl.scram.tar.gz) cookbook.
diff --git a/src/how-tos/connecting-to-kafka/apache-kafka.md b/src/how-tos/connecting-to-kafka/apache-kafka.md
index f398c05f..96d5959b 100644
--- a/src/how-tos/connecting-to-kafka/apache-kafka.md
+++ b/src/how-tos/connecting-to-kafka/apache-kafka.md
@@ -125,7 +125,15 @@ bindings:
 
 However, if the `Kafka` cluster is secured by a TLS server certificate that is signed by a private certificate authority then you need to add a `vault` [config](../../reference/config/vaults/filesystem.md) to provide access to certificates needed by the TLS client binding.
 
-To test the above config you can use it to add or replace the necessary bindings in the [http.kafka.sasl.scram example](https://github.com/aklivity/zilla-examples/tree/main/http.kafka.sasl.scram).
+To test the above config, download and run the Zilla `http.kafka.sasl.scram` cookbook using this install script. It will start Zilla and Kafka.
+
+```bash
+wget -qO- https://raw.githubusercontent.com/aklivity/zilla-examples/main/startup.sh | sh -s -- http.kafka.sasl.scram
+```
+
+::: note
+Alternatively, download [http.kafka.sasl.scram](https://github.com/aklivity/zilla-docs/releases/latest/download/http.kafka.sasl.scram.tar.gz) and follow the `README` yourself.
+:::
 
 ### Kafka over TLS/SSL using client certificates
 
@@ -144,14 +152,14 @@ You can use the scripts shown below to generate `truststore.p12` and `keystore.p
 
 @tab truststore.p12
 
-```bash:no-line-numbers
+```bash
 keytool -import -file ca.pem -alias YOUR_KAFKA_SIGNED_CLIENT_CERT_ALIAS \
         -keystore truststore.p12
 ```
 
 @tab keystore.p12
 
-```bash:no-line-numbers
+```bash
 openssl pkcs12 -export -in service.cert -inkey service.key \
 -out keystore.p12 -name YOUR_KAFKA_CA_CERT_ALIAS \
 -CAfile ca.pem
@@ -208,7 +216,7 @@ bindings:
 SNI adds the domain name to the TLS handshake process so that the Zilla process reaches the right domain name and receives the correct SSL certificate.
 :::
 
-To test the above config you can use it to add or replace the necessary bindings in the [http.kafka.sasl.scram example](https://github.com/aklivity/zilla-examples/tree/main/http.kafka.sasl.scram).
+To test the above config you can use it to add or replace the necessary bindings in the [http.kafka.sasl.scram](https://github.com/aklivity/zilla-docs/releases/latest/download/http.kafka.sasl.scram.tar.gz) cookbook.
 
 ### Kafka over SASL
 
@@ -258,4 +266,12 @@ bindings:
 
 :::
 
-To test the above SASL config you can try out the [http.kafka.sasl.scram example](https://github.com/aklivity/zilla-examples/tree/main/http.kafka.sasl.scram).
+To test the above SASL config, download and run the Zilla `http.kafka.sasl.scram` cookbook using this install script. It will start Zilla and Kafka.
+
+```bash
+wget -qO- https://raw.githubusercontent.com/aklivity/zilla-examples/main/startup.sh | sh -s -- http.kafka.sasl.scram
+```
+
+::: note
+Alternatively, download [http.kafka.sasl.scram](https://github.com/aklivity/zilla-docs/releases/latest/download/http.kafka.sasl.scram.tar.gz) and follow the `README` yourself.
+:::
diff --git a/src/how-tos/connecting-to-kafka/confluent-cloud.md b/src/how-tos/connecting-to-kafka/confluent-cloud.md
index 2b620211..f217ddd3 100644
--- a/src/how-tos/connecting-to-kafka/confluent-cloud.md
+++ b/src/how-tos/connecting-to-kafka/confluent-cloud.md
@@ -21,7 +21,7 @@ Confluent Cloud is exposed over `SASL_SSL` authentication protocols and the `Con
 
 Before we proceed further let's use the below command to verify connectivity to your Kafka.
 
-```bash:no-line-numbers
+```bash
 kcat -b $KAFKA_BOOTSTRAP_SERVER \
     -X security.protocol=sasl_ssl -X sasl.mechanisms=PLAIN \
     -X sasl.username=$KAFKA_API_KEY -X sasl.password=$KAFKA_API_SECRET \
@@ -65,4 +65,4 @@ bindings:
 SNI adds the domain name to the TLS handshake process so that the Zilla process reaches the right domain name and receives the correct SSL certificate.
 :::
 
-To test the above config you can use it to add or replace the necessary bindings in the [http.kafka.sasl.scram example](https://github.com/aklivity/zilla-examples/tree/main/http.kafka.sasl.scram).
+To test the above config you can use it to add or replace the necessary bindings in the [http.kafka.sasl.scram](https://github.com/aklivity/zilla-docs/releases/latest/download/http.kafka.sasl.scram.tar.gz) cookbook.
diff --git a/src/how-tos/connecting-to-kafka/redpanda.md b/src/how-tos/connecting-to-kafka/redpanda.md
index 7972132c..470edd9b 100644
--- a/src/how-tos/connecting-to-kafka/redpanda.md
+++ b/src/how-tos/connecting-to-kafka/redpanda.md
@@ -14,7 +14,7 @@ This is how to connect to Redpanda from Zilla. The examples use the below Enviro
 
 Before we proceed further let's use the below command to verify connectivity to your Kafka.
 
-```bash:no-line-numbers
+```bash
 kcat -b $KAFKA_BOOTSTRAP_SERVER \
     -X security.protocol=sasl_ssl -X sasl.mechanisms=PLAIN \
     -X sasl.username=$SASL_USERNAME -X sasl.password=$SASL_PASSWORD \
@@ -58,4 +58,12 @@ bindings:
 SNI adds the domain name to the TLS handshake process so that the Zilla process reaches the right domain name and receives the correct SSL certificate.
 :::
 
-To test the above SASL config you can try out the [http.redpanda.sasl.scram example](https://github.com/aklivity/zilla-examples/tree/main/http.redpanda.sasl.scram).
+To test the above config, download and run the Zilla `http.redpanda.sasl.scram` cookbook using this install script. It will start Zilla and Kafka.
+
+```bash
+wget -qO- https://raw.githubusercontent.com/aklivity/zilla-examples/main/startup.sh | sh -s -- http.redpanda.sasl.scram
+```
+
+::: note
+Alternatively, download [http.redpanda.sasl.scram](https://github.com/aklivity/zilla-docs/releases/latest/download/http.redpanda.sasl.scram.tar.gz) and follow the `README` yourself.
+:::
diff --git a/src/how-tos/deploy-operate/autoscale-k8s.md b/src/how-tos/deploy-operate/autoscale-k8s.md
new file mode 100644
index 00000000..819b9f26
--- /dev/null
+++ b/src/how-tos/deploy-operate/autoscale-k8s.md
@@ -0,0 +1,366 @@
+---
+description: In this guide, run Zilla in Kubernetes and autoscale the number of pods based on Prometheus metrics.
+---
+
+# Autoscale Zilla pods with Prometheus metrics
+
+In this guide, you will run Zilla in Kubernetes and autoscale the number of pods based on Prometheus metrics. The Kubernetes horizontal pod autoscaler is set up to let the zilla deployment scale from 1 to 5 pods, with the goal of an average load of 10 active connections per pod.
+
+## Tl;Dr
+
+Download and run the Zilla `kubernetes.prometheus.autoscale` cookbook using this install script. It will start Zilla and everything you need for this guide.
+
+```bash
+wget -qO- https://raw.githubusercontent.com/aklivity/zilla-examples/main/startup.sh | sh -s -- kubernetes.prometheus.autoscale
+```
+
+::: note
+Alternatively, download [kubernetes.prometheus.autoscale](https://github.com/aklivity/zilla-docs/releases/latest/download/kubernetes.prometheus.autoscale.tar.gz) and follow the `README` yourself.
+:::
+
+### Prerequisites
+
+- Install [jq](https://jqlang.github.io/jq/) and [netcat](https://netcat.sourceforge.net/)
+- [Kubernetes](https://kubernetes.io/) (e.g. Docker Desktop with Kubernetes enabled)
+- Install [kubectl](https://kubernetes.io/docs/reference/kubectl/)
+- Install [helm](https://helm.sh/docs/intro/install/#helm) 3.0+
+
+## Autoscaling Zilla
+
+Run the commands from this guide from a shell in the `kubernetes.prometheus.autoscale` directory that you downloaded. First, send a request to confirm Zilla is serving traffic on the forwarded port:
+
+```bash
+curl -d "Hello, world" -X "POST" http://localhost:7114
+```
+
+output:
+
+```text
+Hello, world
+```
+
+The initial status is:
+
+- no open connections
+- the value of the `stream_active_received` metric should be 0
+- there should be 1 zilla pod in the deployment
+
+> If the Kubernetes custom metrics API response does not appear correctly, please wait a few seconds and try again before proceeding further.
+
+```bash
+./check_metric.sh
+```
+
+output:
+
+```text
+The value of stream_active_received metric
+------------------------------------------
+
+Prometheus API:
+{
+...
+    "metric": {
+      "__name__": "stream_active_received",
+    },
+    "value": [
+      1683013504.619, # timestamp
+      "0" # value
+...
+}
+
+Kubernetes custom metrics API:
+{
+...
+    "metricName": "stream_active_received",
+    "value": "0",
+...
+}
+```
+
+The zilla deployment should consist of 1 pod.
+
+```bash
+./check_hpa.sh
+```
+
+output:
+
+```text
+The status of horizontal pod autoscaling
+----------------------------------------
+
+HorizontalPodAutoscaler:
+NAME    REFERENCE          TARGETS   MINPODS   MAXPODS   REPLICAS   AGE
+zilla   Deployment/zilla   0/10      1         5         1          4m24s
+
+Deployment:
+NAME    READY   UP-TO-DATE   AVAILABLE   AGE
+zilla   1/1     1            1           4m25s
+
+Pods:
+NAME                     READY   STATUS    RESTARTS   AGE
+zilla-6db8d879f5-2wxgw   1/1     Running   0          4m25s
+```
+
+Open 21 connections to Zilla as instances of netcat in the background.
+
+```bash
+for i in `seq 1 21`; do nc localhost 7114 & done
+```
+
+output:
+
+```text
+[42] 88886
+[43] 88887
+[44] 88888
+...
+```
+
+There should be 21 open connections in the background now.
+
+```bash
+ps auxw | grep "nc localhost 7114" | grep -v grep | wc -l
+```
+
+output:
+
+```text
+21
+```
+
+Wait for a few seconds so the metrics get updated. The value of the `stream_active_received` metric should be 21 for one of the pods.
+
+```bash
+./check_metric.sh
+```
+
+output:
+
+```text
+The value of stream_active_received metric
+------------------------------------------
+
+Prometheus API:
+{
+...
+    "metric": {
+      "__name__": "stream_active_received",
+    },
+    "value": [
+      1683013504.619, # timestamp
+      "21" # value
+...
+}
+
+Kubernetes custom metrics API:
+{
+...
+    "metricName": "stream_active_received",
+    "value": "21",
+...
+}
+```
+
+Wait for a minute so the autoscaler can catch up. The zilla deployment should soon be scaled up to 3 pods.
+
+```bash
+./check_hpa.sh
+```
+
+output:
+
+```text
+The status of horizontal pod autoscaling
+----------------------------------------
+
+HorizontalPodAutoscaler:
+NAME    REFERENCE          TARGETS   MINPODS   MAXPODS   REPLICAS   AGE
+zilla   Deployment/zilla   7/10      1         5         3          7m14s
+
+Deployment:
+NAME    READY   UP-TO-DATE   AVAILABLE   AGE
+zilla   3/3     3            3           7m15s
+
+Pods:
+NAME                     READY   STATUS    RESTARTS   AGE
+zilla-6db8d879f5-2wxgw   1/1     Running   0          7m15s
+zilla-6db8d879f5-9bnkh   1/1     Running   0          75s
+zilla-6db8d879f5-fmgqx   1/1     Running   0          75s
+```
+
+Open another 21 connections to Zilla as instances of netcat in the background.
+
+```bash
+for i in `seq 1 21`; do nc localhost 7114 & done
+```
+
+output:
+
+```text
+[77] 77775
+[78] 77776
+[79] 77777
+...
+```
+
+There should be 42 open connections in the background now.
+
+```bash
+ps auxw | grep "nc localhost 7114" | grep -v grep | wc -l
+```
+
+output:
+
+```text
+42
+```
+
+Wait for a few seconds so the metrics get updated. The value of the `stream_active_received` metric should be 42 for one of the pods.
+
+```bash
+./check_metric.sh
+```
+
+output:
+
+```text
+The value of stream_active_received metric
+------------------------------------------
+
+Prometheus API:
+{
+...
+    "metric": {
+      "__name__": "stream_active_received",
+    },
+    "value": [
+      1683013504.619, # timestamp
+      "42" # value
+...
+}
+
+Kubernetes custom metrics API:
+{
+...
+    "metricName": "stream_active_received",
+    "value": "42",
+...
+}
+```
+
+Wait for a minute so the autoscaler can catch up. The zilla deployment should soon be scaled up to 5 pods.
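+
+As a rough sanity check (assuming the HPA's standard averaging rule), the desired replica count is `ceil(metric total / target average)`: 21 connections at a target of 10 per pod gives `ceil(21 / 10) = 3` pods, and 42 connections gives `ceil(42 / 10) = 5`, which is also the configured `maxReplicas`.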
+
+```bash
+./check_hpa.sh
+```
+
+output:
+
+```text
+The status of horizontal pod autoscaling
+----------------------------------------
+
+HorizontalPodAutoscaler:
+NAME    REFERENCE          TARGETS    MINPODS   MAXPODS   REPLICAS   AGE
+zilla   Deployment/zilla   8400m/10   1         5         5          12m
+
+Deployment:
+NAME    READY   UP-TO-DATE   AVAILABLE   AGE
+zilla   5/5     5            5           12m
+
+Pods:
+NAME                     READY   STATUS    RESTARTS   AGE
+zilla-6db8d879f5-2wxgw   1/1     Running   0          12m
+zilla-6db8d879f5-9bnkh   1/1     Running   0          6m3s
+zilla-6db8d879f5-fmgqx   1/1     Running   0          6m3s
+zilla-6db8d879f5-g74hl   1/1     Running   0          63s
+zilla-6db8d879f5-q5fmm   1/1     Running   0          63s
+```
+
+Shut down all running netcat instances.
+
+```bash
+ps auxw | grep "nc localhost 7114" | grep -v grep | awk '{print $2}' | xargs kill
+```
+
+output:
+
+```text
+[23]  + 55555 terminated  nc localhost 7114
+[22]  + 55554 terminated  nc localhost 7114
+[21]  + 55553 terminated  nc localhost 7114
+...
+```
+
+There should be no open connections in the background now.
+
+```bash
+ps auxw | grep "nc localhost 7114" | grep -v grep | wc -l
+```
+
+output:
+
+```text
+0
+```
+
+Wait for a few seconds so the metrics get updated. The value of the `stream_active_received` metric should be 0 for all pods.
+
+```bash
+./check_metric.sh
+```
+
+output:
+
+```text
+The value of stream_active_received metric
+------------------------------------------
+
+Prometheus API:
+{
+...
+    "metric": {
+      "__name__": "stream_active_received",
+    },
+    "value": [
+      1683013504.619, # timestamp
+      "0" # value
+...
+}
+
+Kubernetes custom metrics API:
+{
+...
+    "metricName": "stream_active_received",
+    "value": "0",
+...
+}
+```
+
+Wait for a minute so the autoscaler can catch up. The zilla deployment should soon be scaled down to 1 pod.
+
+```bash
+./check_hpa.sh
+```
+
+output:
+
+```text
+The status of horizontal pod autoscaling
+----------------------------------------
+
+HorizontalPodAutoscaler:
+NAME    REFERENCE          TARGETS   MINPODS   MAXPODS   REPLICAS   AGE
+zilla   Deployment/zilla   0/10      1         5         1          14m
+
+Deployment:
+NAME    READY   UP-TO-DATE   AVAILABLE   AGE
+zilla   1/1     1            1           14m
+
+Pods:
+NAME                     READY   STATUS    RESTARTS   AGE
+zilla-6db8d879f5-2wxgw   1/1     Running   0          14m
+```
+
+## Remove the running namespace
+
+Find the path to the `teardown.sh` script(s) in the `use the teardown script(s) to clean up` section of the example output and run it.
diff --git a/src/how-tos/deploy-operate.md b/src/how-tos/deploy-operate/index.md
similarity index 83%
rename from src/how-tos/deploy-operate.md
rename to src/how-tos/deploy-operate/index.md
index 418fb7e0..a36daf92 100644
--- a/src/how-tos/deploy-operate.md
+++ b/src/how-tos/deploy-operate/index.md
@@ -4,22 +4,22 @@
 
 You can install Zilla using our [homebrew tap](https://github.com/aklivity/homebrew-tap).
 
-```bash:no-line-numbers
+```bash
 brew tap aklivity/tap
 brew install zilla
 ```
 
 Now you can run any `zilla.yaml` config.
 
-```bash:no-line-numbers
+```bash
 zilla start -ve -c ./zilla.yaml
 ```
 
 ## Running Zilla via Docker
 
-You can run your `zilla.yaml` config inside a container. If you want to deploy on Kubernetes, use our [helm chart](./deploy-operate.md).
+You can run your `zilla.yaml` config inside a container. If you want to deploy on Kubernetes, use our [helm chart](./index.md).
 
-```bash:no-line-numbers
+```bash
 docker run -v ./zilla.yaml:/etc/zilla/zilla.yaml ghcr.io/aklivity/zilla:latest start -ve
 ```
 
 ## Kubernetes Deployment
 
 Go to the [Zilla artifacthub](https://artifacthub.io/packages/helm/zilla/zilla) page to learn more about installing Zilla using Helm.
-```bash:no-line-numbers +```bash helm install zilla oci://ghcr.io/aklivity/charts/zilla --namespace zilla --create-namespace --wait \ --values values.yaml \ --set-file zilla\\.yaml=zilla.yaml @@ -39,7 +39,7 @@ The Zilla configuration is in the `zilla.yaml` file, which is added to the Helm You can define your TCP ports to services mapping in a `tcp-services` ConfigMap. Official documentation on this method can be found in the [Exposing TCP and UDP services](https://kubernetes.github.io/ingress-nginx/user-guide/exposing-tcp-udp-services/) guide. -```bash:no-line-numbers +```bash kubectl create configmap tcp-services \ --from-literal=7183="$NAMESPACE/$SERVICE_NAME:7183" \ --from-literal=7151="$NAMESPACE/$SERVICE_NAME:7151" \ @@ -48,7 +48,7 @@ kubectl create configmap tcp-services \ You will need to download the YAML manifest for the ingress controller. You can find an example on the [Ingress Nginx Quickstart guide](https://kubernetes.github.io/ingress-nginx/deploy/#quick-start) -```bash:no-line-numbers +```bash curl https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.10.1/deploy/static/provider/cloud/deploy.yaml > ingress-deploy.yaml ``` @@ -93,7 +93,7 @@ spec: Create the ingress controller: -```bash:no-line-numbers +```bash kubectl apply -f ingress-deploy.yaml ``` @@ -105,13 +105,13 @@ All local files referenced in a `zilla.yaml` config should be found in a locatio - From a single file. - ```bash:no-line-numbers + ```bash kubectl create configmap my-files-configmap --from-file=my-file.txt -n $NAMESPACE -o yaml --dry-run=client | kubectl apply -f - ``` - All files in a folder. This does not add folders recursively and each folder needs to be individually mapped - ```bash:no-line-numbers + ```bash kubectl create configmap my-folder-configmap --from-file=path/to/my-folder/ -n $NAMESPACE -o yaml --dry-run=client | kubectl apply -f - ``` @@ -146,7 +146,7 @@ volumes: For every running Zilla pod you will need to first copy the `/var/run/zilla` directory to make sure no additional files are written while it is compressed then compress the full directory to make it easier to copy. -```bash:no-line-numbers +```bash kubectl get pod \ -l "app.kubernetes.io/name=zilla" \ -n $NAMESPACE \ @@ -157,7 +157,7 @@ kubectl get pod \ Copy the compressed `/var/run/zilla` directory off of the pod into your local directory using the pod name. -```bash:no-line-numbers +```bash kubectl get pod \ -l "app.kubernetes.io/name=zilla" \ -n $NAMESPACE \ @@ -176,11 +176,11 @@ Errors and misconfigured parts of the `zilla.yaml` file are detected by Zilla an This feature is demonstrated in the above Helm install command. Running a `helm update ...` with changes to the `zilla.yaml`, k8s will update the config map, which writes the new content into the running pods. Zilla will detect those file changes and load the new config. -Zilla can load the `zilla.yaml` config from a remote source using the `-c` or `--config` [CLI flag](../reference/config/zilla-cli.md#c-config). The auto reconfigure feature will still work when pulling the config remotely. +Zilla can load the `zilla.yaml` config from a remote source using the `-c` or `--config` [CLI flag](../../reference/config/zilla-cli.md#c-config). The auto reconfigure feature will still work when pulling the config remotely. ## Auto Scaling -Zilla will start workers that default to the CPU cores it is allowed to use. This makes horizontal scaling easy with a 1:1 ratio of instances to workers. 
Any of the default scaling metrics based on server CPU usage will enable Zilla to handle traffic spikes. Additionally, Zilla [Telemetry](../reference/config/overview.md#telemetry) configuration provides more data when determining how to scale. The [Prometheus autoscale example](https://github.com/aklivity/zilla-examples/tree/main/kubernetes.prometheus.autoscale) demonstrates using metrics from the [Prometheus exporter](../reference/config/telemetry/exporters/prometheus.md) to horizontally scale Zilla on k8s.
+Zilla will start workers that default to the CPU cores it is allowed to use. This makes horizontal scaling easy with a 1:1 ratio of instances to workers. Any of the default scaling metrics based on server CPU usage will enable Zilla to handle traffic spikes. Additionally, Zilla [Telemetry](../../reference/config/overview.md#telemetry) configuration provides more data when determining how to scale. The [Autoscaling on K8s](../../how-tos/deploy-operate/autoscale-k8s.md) guide demonstrates using metrics from the [Prometheus exporter](../../reference/config/telemetry/exporters/prometheus.md) to horizontally scale Zilla on k8s.
 
 ## Enable Incubator Features
 
@@ -196,4 +196,4 @@ ZILLA_INCUBATOR_ENABLED=true
 
 ## Export `TRACE` level Log Dump
 
-The [zilla dump](../reference/config/zilla-cli.md#zilla-dump) command will capture all of the internal events at the stream level for a detailed analysis of what zilla was doing. These logs are captured down to the nanosecond and are exported as a `.pcap` file to be used with [Wireshark](https://wiki.wireshark.org/SampleCaptures). You can find instructions on how to view the capture in wireshark in the zilla dump [plugin install section](../reference/config/zilla-cli.md#i-install-plugin-directory).
+The [zilla dump](../../reference/config/zilla-cli.md#zilla-dump) command will capture all of the internal events at the stream level for a detailed analysis of what zilla was doing. These logs are captured down to the nanosecond and are exported as a `.pcap` file to be used with [Wireshark](https://wiki.wireshark.org/SampleCaptures). You can find instructions on how to view the capture in wireshark in the zilla dump [plugin install section](../../reference/config/zilla-cli.md#i-install-plugin-directory).
diff --git a/src/how-tos/models/index.md b/src/how-tos/models/index.md
index d632c6d4..792eaea8 100644
--- a/src/how-tos/models/index.md
+++ b/src/how-tos/models/index.md
@@ -41,7 +41,7 @@ The `kafka cache_client` binding can parse the message value, or body of the mes
 
 ### Enforcing a schema on Fetch
 
-The `kafka cache_server` can enforce a schema on messages Fetched from a topic. This will prevent any messages that are pruduced on a Kafka topic from getting cosumed by a client if that messages doesn't match to the specified schema.
+The `kafka cache_server` can enforce a schema on messages Fetched from a topic. This will prevent any messages that are produced on a Kafka topic from getting consumed by a client if that message doesn't match the specified schema.
 
 ```yaml
 south_kafka_cache_server:
diff --git a/src/how-tos/mqtt/mqtt.kafka.broker.md b/src/how-tos/mqtt/mqtt.kafka.broker.md
index ccbddb7f..5e107faf 100644
--- a/src/how-tos/mqtt/mqtt.kafka.broker.md
+++ b/src/how-tos/mqtt/mqtt.kafka.broker.md
@@ -17,19 +17,19 @@ Specifically, you will:
 
 ## Tl;Dr
 
-Download and run the Zilla [zilla-examples/mqtt.kafka.broker](https://github.com/aklivity/zilla-examples/tree/main/mqtt.kafka.broker) example using this install script.
It will start Zilla and everything you need for this guide. +Download and run the Zilla `mqtt.kafka.broker` cookbook using this install script. It will start Zilla and everything you need for this guide. -```bash:no-line-numbers +```bash wget -qO- https://raw.githubusercontent.com/aklivity/zilla-examples/main/startup.sh | sh -s -- mqtt.kafka.broker ``` ::: note -Alternatively, download [mqtt.kafka.broker](https://github.com/aklivity/zilla-examples/releases/latest/download/mqtt.kafka.broker.tar.gz) or the [startup.sh](https://github.com/aklivity/zilla-examples/releases/latest/download/startup.sh) script yourself. +Alternatively, download [mqtt.kafka.broker](https://github.com/aklivity/zilla-docs/releases/latest/download/mqtt.kafka.broker.tar.gz) and follow the `README` yourself. ::: ### Prerequisites -Before proceeding, you should have [Compose](https://docs.docker.com/compose/gettingstarted/) or optionally [Helm](https://helm.sh/docs/intro/install/) and [Kubernetes](https://kubernetes.io/docs/tasks/tools/) installed. +Before proceeding, you should have [Compose](https://docs.docker.com/compose/gettingstarted/) installed. ::: details Detailed prerequisites @@ -41,8 +41,6 @@ Before proceeding, you should have [Compose](https://docs.docker.com/compose/get Optional: - Kafka 3.0+ hosted with the Docker network allowed to communicate -- Helm 3.0+ -- Kubernetes 1.13.0+ ::: @@ -52,9 +50,9 @@ Run the docker command under the `Verify the Kafka topics created` section of th ```output:no-line-numbers mqtt-messages +mqtt-devices mqtt-retained mqtt-sessions -mqtt-devices ``` ### Listen for messages @@ -65,21 +63,21 @@ Run the docker command under the `Start a topic consumer to listen for messages` Using [eclipse-mosquitto](https://hub.docker.com/_/eclipse-mosquitto) subscribe to the `zilla` topic. -```bash:no-line-numbers +```bash docker run -it --rm eclipse-mosquitto \ mosquitto_sub --url mqtt://host.docker.internal:7183/zilla ``` In a separate session, publish a message on the `zilla` topic. -```bash:no-line-numbers +```bash docker run -it --rm eclipse-mosquitto \ mosquitto_pub --url mqtt://host.docker.internal:7183/zilla --message 'Hello, world' ``` Send messages with the retained flag. -```bash:no-line-numbers +```bash docker run -it --rm eclipse-mosquitto \ mosquitto_pub --url mqtt://host.docker.internal:7183/zilla --message 'Hello, retained' --retain ``` @@ -90,12 +88,12 @@ Then restart the `mosquitto_sub` above. The latest retained message is delivered Send a message from a device and a sensor. -```bash:no-line-numbers +```bash docker run -it --rm eclipse-mosquitto \ mosquitto_pub --url mqtt://host.docker.internal:7183/place/01/device/01 --message 'I am device01' ``` -```bash:no-line-numbers +```bash docker run -it --rm eclipse-mosquitto \ mosquitto_pub --url mqtt://host.docker.internal:7183/place/01/sensor/01 --message 'I am sensor01' ``` @@ -104,25 +102,15 @@ You can check the [Kafka UI](http://localhost:8080/ui/clusters/local/all-topics) ## Creating this example yourself -### Start a Kafka instance - -You can use your own Kafka or set up a local Kafka with [kafka.broker](https://github.com/aklivity/zilla-examples/releases/latest/download/kafka.broker.tar.gz) and follow the setup instructions in the `README.md`. +### Start a Kafka or Redpanda instance -Export these environment variables or overwrite them with your remote Kafka if you skipped the local setup. +You will need to create the required topics below. 
```output:no-line-numbers -export KAFKA_HOST=host.docker.internal -export KAFKA_PORT=9092 -``` - -### Bootstrap Kafka - -Create these topics in the Kafka environment. - -```bash:no-line-numbers -/bin/kafka-topics.sh --bootstrap-server $KAFKA_HOST:$KAFKA_PORT --create --if-not-exists --topic mqtt-sessions -/bin/kafka-topics.sh --bootstrap-server $KAFKA_HOST:$KAFKA_PORT --create --if-not-exists --topic mqtt-messages --config cleanup.policy=compact -/bin/kafka-topics.sh --bootstrap-server $KAFKA_HOST:$KAFKA_PORT --create --if-not-exists --topic mqtt-retained --config cleanup.policy=compact +mqtt-messages +mqtt-devices cleanup.policy=compact +mqtt-retained cleanup.policy=compact +mqtt-sessions cleanup.policy=compact ``` ### Create your config @@ -134,100 +122,68 @@ Create a new file called `zilla.yaml` and append the below yaml to it. This will configure Zilla for accepting all of the `mqtt` traffic. The [tcp](../../reference/config/bindings/tcp/README.md) binding defines the ports Zilla will accept traffic for both MQTT and WebSocket connections. ```yaml{12-13,15-16} - + ``` -::: right -[More on binding-tcp](../../reference/config/bindings/tcp/README.md) -::: +> [More on binding-tcp](../../reference/config/bindings/tcp/README.md) A [ws](../../reference/config/bindings/tcp/) binding is added to handle any MQTT over WebSocket using the `mqtt` protocol. The [mqtt](../../reference/config/bindings/mqtt/README.md) binding then handles all of the MQTT message traffic that needs to go to Kafka. ```yaml{17,22} - + ``` -::: right -[More on binding-mqtt](../../reference/config/bindings/mqtt/README.md) -[More on binding-ws](../../reference/config/bindings/tcp/README.md) -::: +> [More on binding-mqtt](../../reference/config/bindings/mqtt/README.md) | [More on binding-ws](../../reference/config/bindings/tcp/README.md) ### Service definition The service definition defines how the clients using this service will interact with Kafka through Zilla. The required set of Kafka topics are defined in the [options.topics](../../reference/config/bindings/mqtt-kafka/proxy.md#options-topics) where Zilla manages any MQTT required features. A client identity can be determined by pulling the identifier out of the topic using the [options.clients](../../reference/config/bindings/mqtt-kafka/proxy.md#options-clients) property. ```yaml{7-9,21} - + ``` -::: right -[More on binding-mqtt-kafka](../../reference/config/bindings/mqtt-kafka/README.md) -[More on topic data](../../concepts/kafka-proxies/mqtt-proxy.md#step-2-pub-sub-message-reflect-with-kafka) -::: +> [More on binding-mqtt-kafka](../../reference/config/bindings/mqtt-kafka/README.md) | [More on topic data](../../concepts/kafka-proxies/mqtt-proxy.md#step-2-pub-sub-message-reflect-with-kafka) Additionally, a route is defined to capture any "device" messages and route them to a specific topic called `mqtt-devices`. Here Zilla enables routing different topic patterns into one Kafka topic using MQTT supported wildcards. All other messages will use the default `exit` and end up in the `mqtt-messages` topic. 
 ```yaml{4,5,7,8,10}
-
+
 ```
 
-::: right
-[More on When a route matches](../../concepts/bindings.md#when-a-route-matches)
-[More on mqtt-kafka binding routes](../../reference/config/bindings/mqtt-kafka/proxy.md#routes)
-:::
+> [More on When a route matches](../../concepts/bindings.md#when-a-route-matches) | [More on mqtt-kafka binding routes](../../reference/config/bindings/mqtt-kafka/proxy.md#routes)
 
 ### Add a Kafka sync layer
 
 The Zilla [cache_client](../../reference/config/bindings/kafka/cache_client.md) and [cache_server](../../reference/config/bindings/kafka/cache_server.md) help manage the smooth data transfer between the service definition and Kafka. It is important to bootstrap the topics that will be brokering MQTT messages.
 
 ```yaml{11-13}
-
+
 ```
 
-::: right
-[More on kafka binding cache](../../reference/config/bindings/kafka/README.md#cache-behavior)
-:::
+> [More on kafka binding cache](../../reference/config/bindings/kafka/README.md#cache-behavior)
 
 ### Point to a Running Kafka instance
 
 This will define the location and connection for Zilla to communicate with Kafka.
 
 ```yaml{7}
-
+
 ```
 
 ::: details Full zilla.yaml
 
 ```yaml
-
+
 ```
 
 :::
 
-::: right
-[More on kafka cache_client binding](../../reference/config/bindings/kafka/cache_client.md)
-:::
+> [More on kafka cache_client binding](../../reference/config/bindings/kafka/cache_client.md)
 
 ### Start Zilla
 
-With your `zilla.yaml` config, follow the [Zilla install instructions](../deploy-operate.md) using your method of choice. Set the necessary Kafka environment variables.
-
-::: code-tabs#bash
-
-@tab Docker
-
-```bash:no-line-numbers
---env KAFKA_BOOTSTRAP_SERVER="host.docker.internal:9092"
-```
-
-@tab Helm values.yaml
-
-```yaml:no-line-numbers
-env:
-  KAFKA_BOOTSTRAP_SERVER: "kafka.zilla-kafka-broker.svc.cluster.local:9092"
-```
-
-:::
+With your `zilla.yaml` config, follow the [Zilla install instructions](../deploy-operate/index.md) using your method of choice. Set the necessary `KAFKA_BOOTSTRAP_SERVER` environment variable to your running Kafka instance.
 
 ### Adding TLS
diff --git a/src/how-tos/quickstart/echo.proto b/src/how-tos/quickstart/echo.proto
deleted file mode 100644
index 56e685fc..00000000
--- a/src/how-tos/quickstart/echo.proto
+++ /dev/null
@@ -1,13 +0,0 @@
-syntax = "proto3";
-
-package example;
-
-service EchoService
-{
-  rpc Echo(EchoMessage) returns (EchoMessage);
-}
-
-message EchoMessage
-{
-  string message = 1;
-}
diff --git a/src/how-tos/quickstart/index.md b/src/how-tos/quickstart/index.md
index b8f4ac6a..c07a53be 100644
--- a/src/how-tos/quickstart/index.md
+++ b/src/how-tos/quickstart/index.md
@@ -50,7 +50,7 @@ Create a new message.
 type: http-kafka
 kind: proxy
 routes:
-
+
 ```
 
 @tab Read:all
@@ -62,7 +62,7 @@ Fetch all messages on the topic.
 type: http-kafka
 kind: proxy
 routes:
-
+
 ```
 
 @tab Read:key
@@ -74,7 +74,7 @@ Fetch one message by its key.
 type: http-kafka
 kind: proxy
 routes:
-
+
 ```
 
 @tab Update
@@ -86,7 +86,7 @@ Update a message based on its key.
 type: http-kafka
 kind: proxy
 routes:
-
+
 ```
 
 @tab Delete
@@ -98,7 +98,7 @@ Produce a blank message for a key.
 type: http-kafka
 kind: proxy
 routes:
-
+
 ```
 
 @tab Stream:all
@@ -110,7 +110,7 @@ Stream all of the messages published on a Kafka topic.
 type: sse-kafka
 kind: proxy
 routes:
-
+
 ```
 
 @tab Stream:key
@@ -122,7 +122,7 @@ Stream messages for a specific key published on a Kafka topic.
 type: sse-kafka
 kind: proxy
 routes:
-
+
 ```
 
 :::
@@ -130,7 +130,7 @@
 ::: details Full HTTP Proxy zilla.yaml Config
 
 ```yaml
-
+
 ```
 
 :::
@@ -156,7 +156,7 @@ A Zilla MQTT broker is defined using three specific Kafka topics. The [messages]
 @tab MQTT broker
 
 ```yaml{10-12}
-
+
 ```
 
 :::
@@ -164,7 +164,7 @@ A Zilla MQTT broker is defined using three specific Kafka topics. The [messages]
 ::: details Full MQTT proxy zilla.yaml Config
 
 ```yaml
-
+
 ```
 
 :::
@@ -195,25 +195,25 @@ Zilla is routing all RouteGuide protobuf messages from any gRPC client to a gRPC
 @tab RouteGuide Server
 
 ```yaml{7,15,24,27-30}
-
+
 ...
-
+
 ...
-
+
 ```
 
 @tab RouteGuide Remote Server
 
 ```yaml{9-11,14,21-22}
-
+
 ...
-
+
 ```
 
 @tab route_guide.proto
 
 ```protobuf{18,26,32,38}
-
+
 ```
 
 :::
@@ -221,7 +221,7 @@ Zilla is routing all RouteGuide protobuf messages from any gRPC client to a gRPC
 ::: details Full gRPC proxy zilla.yaml Config
 
 ```yaml
-
+
 ```
 
 :::
@@ -232,12 +232,16 @@ Zilla is routing all RouteGuide protobuf messages from any gRPC client to a gRPC
 
 ## Run the Quickstart locally
 
-You can see all of the features in the hosted Quickstart running locally. Download and run the Zilla [Quickstart](https://github.com/aklivity/zilla-examples/tree/main/quickstart). This [startup.sh](https://github.com/aklivity/zilla-examples/releases/latest/download/startup.sh) script will start Zilla and everything you need.
+Download and run the Zilla `quickstart` cookbook using this install script. It will start Zilla and everything you need for this guide.
 
-```bash:no-line-numbers
+```bash
 wget -qO- https://raw.githubusercontent.com/aklivity/zilla-examples/main/startup.sh | sh -
 ```
 
+::: note
+Alternatively, download [quickstart](https://github.com/aklivity/zilla-docs/releases/latest/download/quickstart.tar.gz) and follow the `README` yourself.
+:::
+
 The key components this script will set up:
 
 - Configured Zilla instance
diff --git a/src/how-tos/telemetry/opentelemetry-protocol.md b/src/how-tos/telemetry/opentelemetry-protocol.md
index 08c5a05b..8282c54d 100644
--- a/src/how-tos/telemetry/opentelemetry-protocol.md
+++ b/src/how-tos/telemetry/opentelemetry-protocol.md
@@ -195,7 +195,7 @@ bindings:
 
 Run the Demo using Docker Compose from inside the demo directory:
 
-```bash:no-line-numbers
+```bash
 docker compose up --force-recreate --remove-orphans --detach
 ```
 
@@ -215,7 +215,7 @@ The running `zillaproxy` is configured to expose kafka topics passed in through
 
 You can fetch all of the messages on the Kafka topic from a `curl` command.
 
-```bash:no-line-numbers
+```bash
 curl http://localhost:7114/orders
 ```
 
@@ -252,7 +252,7 @@ USD�����]
 
 Create lots of requests with a `while` loop:
 
-```bash:no-line-numbers
+```bash
 while true; do curl http://localhost:7114/orders; done
 ```
 
@@ -268,6 +268,6 @@ The `http.duration` metrics are being used to track the Latency and Requests Rat
 
 You have seen Zilla successfully export metrics to an `otlp` collector. You can tear down the demo using the below command.
 
-```bash:no-line-numbers
+```bash
 docker compose down
 ```
diff --git a/src/reference/config/bindings/amqp/README.md b/src/reference/config/bindings/amqp/README.md
index df32eb9d..63ef1724 100644
--- a/src/reference/config/bindings/amqp/README.md
+++ b/src/reference/config/bindings/amqp/README.md
@@ -16,7 +16,7 @@ tag:
 
 Defines a binding with [AMQP 1.0](https://docs.oasis-open.org/amqp/core/v1.0/os/amqp-core-overview-v1.0-os.html) protocol support, with `server` behavior. Conditional routes based on the link address are used to route these application streams to an `exit` binding.
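As a rough sketch of what that shape can look like in `zilla.yaml` — the binding name, the `echo` address, and the `echo_server` exit are illustrative placeholders, and the `address` route condition is an assumption based on Zilla's general route syntax:

```yaml
amqp_server:
  type: amqp
  kind: server
  routes:
      # Links addressed to "echo" are routed to the (hypothetical) echo_server binding.
    - when:
        - address: echo
      exit: echo_server
```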
::: important Feature is in Incubator -Read how to [enable incubator features](../../../../how-tos/deploy-operate.md#enable-incubator-features). Star and watch the [Zilla repo](https://github.com/aklivity/zilla/releases) for new releases! +Read how to [enable incubator features](../../../../how-tos/deploy-operate/index.md#enable-incubator-features). Star and watch the [Zilla repo](https://github.com/aklivity/zilla/releases) for new releases! ::: ## server diff --git a/src/reference/config/bindings/amqp/server.md b/src/reference/config/bindings/amqp/server.md index d75c0418..13acd47d 100644 --- a/src/reference/config/bindings/amqp/server.md +++ b/src/reference/config/bindings/amqp/server.md @@ -11,7 +11,7 @@ The amqp server binding decodes the AMQP protocol on the inbound network stream, ``` ::: important Feature is in Incubator -Read how to [enable incubator features](../../../../how-tos/deploy-operate.md#enable-incubator-features). Star and watch the [Zilla repo](https://github.com/aklivity/zilla/releases) for new releases! +Read how to [enable incubator features](../../../../how-tos/deploy-operate/index.md#enable-incubator-features). Star and watch the [Zilla repo](https://github.com/aklivity/zilla/releases) for new releases! ::: ## Configuration (\* required) diff --git a/src/reference/config/bindings/ws/server.md b/src/reference/config/bindings/ws/server.md index 51e30d75..6ead2aa1 100644 --- a/src/reference/config/bindings/ws/server.md +++ b/src/reference/config/bindings/ws/server.md @@ -4,7 +4,7 @@ shortTitle: server # ws server -The ws server binding binding converts inbound `http` request-response streams into `ws` full-duplex streams. +The ws server binding converts inbound `http` request-response streams into `ws` full-duplex streams. ```yaml {3} diff --git a/src/reference/config/zilla-cli.md b/src/reference/config/zilla-cli.md index 3048dc3b..e654be36 100644 --- a/src/reference/config/zilla-cli.md +++ b/src/reference/config/zilla-cli.md @@ -37,7 +37,7 @@ The Zilla Runtime command line interface uses the [Zilla Runtime Configuration]( ### zilla dump ::: important Feature is in Incubator -Read how to [enable incubator features](../../how-tos/deploy-operate.md#enable-incubator-features). Star and watch the [Zilla repo](https://github.com/aklivity/zilla/releases) for new releases! +Read how to [enable incubator features](../../how-tos/deploy-operate/index.md#enable-incubator-features). Star and watch the [Zilla repo](https://github.com/aklivity/zilla/releases) for new releases! ::: The `zilla dump` command creates a `pcap` file that can be opened in Wireshark. Using the Zilla dissector plugin, Wireshark shows detailed information about the internal state of the current Zilla instance. @@ -56,13 +56,13 @@ Install the dissector plugin `zilla.lua` to the plugin directory of Wireshark. T To find the Wireshark plugin directory navigate the menu: About Wireshark -> Folders -> Personal Lua Plugins; or use this command: -```bash:no-line-numbers +```bash tshark -G folders | grep "Personal Lua Plugins" ``` To find out the plugin version navigate the menu: About Wireshark -> Plugins -> search: zilla; or use this command: -```bash:no-line-numbers +```bash tshark -G plugins | grep zilla ``` @@ -70,7 +70,7 @@ You may need to reload Lua plugins from the menu: Analyze -> Reload Lua Plugins Example: -```bash:no-line-numbers +```bash ./zilla dump -v -w zilla.pcap -i ~/.local/lib/wireshark/plugins ``` @@ -84,7 +84,7 @@ Write the `pcap` output to this file. 
Example: -```bash:no-line-numbers +```bash ./zilla dump -v -w zilla.pcap ``` @@ -92,13 +92,13 @@ Example: The `zilla help` command shows help information about available commands, or more information for a specific command. -```bash:no-line-numbers +```bash zilla help [command] ``` Examples: -```bash:no-line-numbers +```bash ./zilla help start ``` @@ -106,7 +106,7 @@ Examples: The `zilla version` command prints the version information of Zilla. -```bash:no-line-numbers +```bash zilla version ``` @@ -118,13 +118,13 @@ zilla version 0.9.85 The `zilla metrics` command provides metrics for each binding in the configuration. -```bash:no-line-numbers +```bash zilla metrics ``` Optionally specify a binding name to output metrics for that binding only. -```bash:no-line-numbers +```bash zilla metrics [binding-name] ``` @@ -134,7 +134,7 @@ Filters bindings by namespace Examples: -```bash:no-line-numbers +```bash ./zilla metrics echo_server ``` @@ -154,7 +154,7 @@ example echo_server stream.errors.sent 0 The `zilla start` command resolves the [Zilla Runtime Configuration](./overview.md) in a `zilla.yaml` to start the runtime engine. -```bash:no-line-numbers +```bash zilla start -ve ``` @@ -182,11 +182,11 @@ started Set the path to the local `zilla.yaml` configuration file or remote URI. -```bash:no-line-numbers +```bash zilla start -c ./path/to/zilla.yaml ``` -```bash:no-line-numbers +```bash zilla start -c http://example.com/zilla.yaml ``` @@ -196,7 +196,7 @@ zilla start -c http://example.com/zilla.yaml Log exception traces to `stdout`. -```bash:no-line-numbers +```bash zilla start -e ``` @@ -206,7 +206,7 @@ zilla start -e Set Zilla properties via a file. -```bash:no-line-numbers +```bash zilla start -p /path/to/zilla.properties ``` @@ -216,7 +216,7 @@ zilla start -p /path/to/zilla.properties Set individual Zilla properties. -```bash:no-line-numbers +```bash zilla start -P zilla.engine.prop=value -P zilla.other.thing=value ``` @@ -227,7 +227,7 @@ zilla start -P zilla.engine.prop=value -P zilla.other.thing=value Log verbose output to `stdout`. -```bash:no-line-numbers +```bash zilla start -v ``` @@ -238,7 +238,7 @@ zilla start -v Set the Worker count in Zilla. Defaults to the number of CPU cores available. -```bash:no-line-numbers +```bash zilla start -w 2 ``` @@ -246,7 +246,7 @@ zilla start -w 2 The `zilla stop` command signals the runtime engine to stop. -```bash:no-line-numbers +```bash zilla stop ``` @@ -255,16 +255,16 @@ zilla stop ### zilla tune ::: important Feature is in Incubator -Read how to [enable incubator features](../../how-tos/deploy-operate.md#enable-incubator-features). Star and watch the [Zilla repo](https://github.com/aklivity/zilla/releases) for new releases! +Read how to [enable incubator features](../../how-tos/deploy-operate/index.md#enable-incubator-features). Star and watch the [Zilla repo](https://github.com/aklivity/zilla/releases) for new releases! ::: The `zilla tune` command tunes the mapping from runtime engine workers to bindings. -```bash:no-line-numbers +```bash zilla tune [NAME=VALUE] ``` -```bash:no-line-numbers +```bash ./zilla tune ``` @@ -273,7 +273,7 @@ xxxx example.tcp xxxx example.echo ``` -```bash:no-line-numbers +```bash ./zilla tune example.echo=2 ``` @@ -281,7 +281,7 @@ xxxx example.echo ..x. 
example.echo ``` -```bash:no-line-numbers +```bash ./zilla tune ``` diff --git a/src/reference/manager/zpm-cli.md b/src/reference/manager/zpm-cli.md index 598fbaee..d0f0ba21 100644 --- a/src/reference/manager/zpm-cli.md +++ b/src/reference/manager/zpm-cli.md @@ -31,13 +31,13 @@ The Zilla Manager command line interface uses the [Zilla Manager Configuration]( The `zpm help` command shows help information about available commands, or more information for a specific command. -```bash:no-line-numbers +```bash zpm help [command] ``` Examples: -```bash:no-line-numbers +```bash ./zpmw help install ``` @@ -47,7 +47,7 @@ The `zpm clean` command removes files from its `.zpm/` output directory. Optionally, only the files necessary to execute the [Zilla Runtime](../config/zilla-cli.md) are kept intact, leaving a minimal installation footprint for deployment. -```bash:no-line-numbers +```bash zpm clean ``` @@ -59,7 +59,7 @@ Clean up everything except runtime image Examples: -```bash:no-line-numbers +```bash ./zpmw clean --keep-image ``` @@ -71,13 +71,13 @@ When Maven repositories requiring authorization are listed in [Zilla Manager Con If a master secret does not already exist, it is generated in [Zilla Manager Security](./overview.md#security.json). -```bash:no-line-numbers +```bash zpm encrypt ``` Examples: -```bash:no-line-numbers +```bash ./zpmw encrypt ``` @@ -87,7 +87,7 @@ Enter a password to `zpmw encrypt` via standard input, then the base64-encoded e The `zpm install` command resolves the [Zilla Manager Configuration](./overview.md) to create a runtime with minimal dependencies, generating the `zilla` runtime executable. -```bash:no-line-numbers +```bash zpm install ``` @@ -107,7 +107,7 @@ Exclude remote Maven repositories when resolving dependencies Examples: -```bash:no-line-numbers +```bash ./zpmw install ``` @@ -117,7 +117,7 @@ The `zpm wrap` command generates an executable `zpmw` wrapper script that automa This approach avoids the need to manually install `zpm` and allows greater control over the version of `zpm` being used. -```bash:no-line-numbers +```bash zpm wrap ``` @@ -139,7 +139,7 @@ Require `zpm` wrapper to use `` Example: -```bash:no-line-numbers +```bash ./zpmw wrap --version 0.9.8 ``` diff --git a/src/solutions/_partials/iot-ingest-control/verify-mqtt-client-connectivity.md b/src/solutions/_partials/iot-ingest-control/verify-mqtt-client-connectivity.md index 1104ca1f..1c4f8bf8 100644 --- a/src/solutions/_partials/iot-ingest-control/verify-mqtt-client-connectivity.md +++ b/src/solutions/_partials/iot-ingest-control/verify-mqtt-client-connectivity.md @@ -14,7 +14,7 @@ Replace these TLS server names accordingly for your own custom wildcard DNS patt Using [eclipse-mosquitto](https://hub.docker.com/_/eclipse-mosquitto) subscribe to the `zilla` topic. -```bash:no-line-numbers +```bash docker run -it --rm eclipse-mosquitto \ mosquitto_sub --url mqtts://mqtt.example.aklivity.io/zilla ``` @@ -23,7 +23,7 @@ mosquitto_sub --url mqtts://mqtt.example.aklivity.io/zilla In a separate session, publish a message on the `zilla` topic. 
-```bash:no-line-numbers +```bash docker run -it --rm eclipse-mosquitto \ mosquitto_pub --url mqtts://mqtt.example.aklivity.io/zilla --message 'Hello, world' ``` diff --git a/src/solutions/_partials/secure-public-access/create-topic.md b/src/solutions/_partials/secure-public-access/create-topic.md index b0bb1711..3a2c0ac2 100644 --- a/src/solutions/_partials/secure-public-access/create-topic.md +++ b/src/solutions/_partials/secure-public-access/create-topic.md @@ -1,5 +1,5 @@ Use the Kafka client to create a topic called `zilla-proxy-test`, replacing `` in the command below with the TLS proxy names of your proxy: -```bash:no-line-numbers +```bash bin/kafka-topics.sh --create --topic zilla-proxy-test --partitions 3 --replication-factor 3 --command-config client.properties --bootstrap-server ``` diff --git a/src/solutions/_partials/secure-public-access/send-message.md b/src/solutions/_partials/secure-public-access/send-message.md index 892a8dbe..b41ce5fc 100644 --- a/src/solutions/_partials/secure-public-access/send-message.md +++ b/src/solutions/_partials/secure-public-access/send-message.md @@ -2,7 +2,7 @@ Publish two messages to the newly created topic via the following producer command: -```bash:no-line-numbers +```bash bin/kafka-console-producer.sh --topic zilla-proxy-test --producer.config client.properties --broker-list ``` @@ -17,7 +17,7 @@ A prompt will appear for you to type in the messages: Read these messages back via the following consumer command: -```bash:no-line-numbers +```bash bin/kafka-console-consumer.sh --topic zilla-proxy-test --from-beginning --consumer.config client.properties --bootstrap-server ``` diff --git a/src/solutions/_partials/secure-public-access/verify-kafka-connect.md b/src/solutions/_partials/secure-public-access/verify-kafka-connect.md index 6abb48a4..6627b973 100644 --- a/src/solutions/_partials/secure-public-access/verify-kafka-connect.md +++ b/src/solutions/_partials/secure-public-access/verify-kafka-connect.md @@ -4,13 +4,13 @@ To verify that we have successfully enabled public internet connectivity to our First, we must install a Java runtime that can be used by the Kafka client. -```bash:no-line-numbers +```bash sudo yum install java-1.8.0 ``` Now we are ready to install the Kafka client: -```bash:no-line-numbers +```bash wget https://archive.apache.org/dist/kafka/2.8.0/kafka_2.13-2.8.0.tgz tar -xzf kafka_2.13-2.8.0.tgz cd kafka_2.13-2.8.0 diff --git a/src/solutions/_partials/zilla-plus-proxy/verify-zilla-plus-proxy-service.md b/src/solutions/_partials/zilla-plus-proxy/verify-zilla-plus-proxy-service.md index 3ad388e3..40827caa 100644 --- a/src/solutions/_partials/zilla-plus-proxy/verify-zilla-plus-proxy-service.md +++ b/src/solutions/_partials/zilla-plus-proxy/verify-zilla-plus-proxy-service.md @@ -14,7 +14,7 @@ They each have an IAM Role name starting with `my-zilla-iot-role`. Find the `Public IPv4 Address` and then SSH into the instance. -```bash:no-line-numbers +```bash ssh -i ~/.ssh/ ec2-user@ ``` @@ -26,7 +26,7 @@ After logging in via SSH, check the status of the `zilla-plus` system service. Verify that the `zilla-plus` service is active and logging output similar to that shown below. -```bash:no-line-numbers +```bash systemctl status zilla-plus.service ``` @@ -40,7 +40,7 @@ zilla-plus.service - Zilla Plus Check for the active ports with `netstat`. -```bash:no-line-numbers +```bash netstat -ntlp ``` @@ -52,7 +52,7 @@ tcp6 0 0 :::9092 :::* LISTEN 1726/.zpm/image/bin You can get an stdout dump of the `zilla-plus.service` using `journalctl`. 
-```bash:no-line-numbers +```bash journalctl -e -u zilla-plus.service | tee -a /tmp/zilla.log ``` @@ -65,7 +65,7 @@ systemd[1]: Started zilla-plus.service - Zilla Plus. All output from cloud-init is captured by default to `/var/log/cloud-init-output.log`. There shouldn't be any errors in this log. -```bash:no-line-numbers +```bash cat /var/log/cloud-init-output.log ``` diff --git a/src/solutions/how-tos/amazon-msk/secure-public-access/development.md b/src/solutions/how-tos/amazon-msk/secure-public-access/development.md index 584c30ef..e79da95e 100644 --- a/src/solutions/how-tos/amazon-msk/secure-public-access/development.md +++ b/src/solutions/how-tos/amazon-msk/secure-public-access/development.md @@ -281,7 +281,7 @@ They each have an IAM Role name starting with `aklivity-zilla-proxy`. Find the `Public IPv4 Address` and then SSH into the instance. -```bash:no-line-numbers +```bash ssh -i ~/.ssh/ ec2-user@ ``` @@ -293,7 +293,7 @@ After logging in via SSH, check the status of the `zilla-plus` system service. Verify that the `zilla-plus` service is active and logging output similar to that shown below. -```bash:no-line-numbers +```bash systemctl status zilla-plus.service ``` @@ -307,7 +307,7 @@ zilla-plus.service - Zilla Plus Check for the active ports with `netstat`. -```bash:no-line-numbers +```bash netstat -ntlp ``` @@ -319,7 +319,7 @@ tcp6 0 0 :::9092 :::* LISTEN 1726/.zpm/image/bin You can get an stdout dump of the `zilla-plus.service` using `journalctl`. -```bash:no-line-numbers +```bash journalctl -e -u zilla-plus.service | tee -a /tmp/zilla.log ``` @@ -332,7 +332,7 @@ systemd[1]: Started zilla-plus.service - Zilla Plus. All output from cloud-init is captured by default to `/var/log/cloud-init-output.log`. There shouldn't be any errors in this log. -```bash:no-line-numbers +```bash cat /var/log/cloud-init-output.log ``` @@ -350,7 +350,7 @@ Check the networking of the proxy instances to MSK. Verify that the instance can resolve the private Route53 DNS address. -```bash:no-line-numbers +```bash nslookup *.aklivity.[...].amazonaws.com ``` @@ -367,7 +367,7 @@ Address: *** Check the communication over necessary ports with `netcat`. -```bash:no-line-numbers +```bash nc -vz *.aklivity.[...].amazonaws.com 9096 ``` @@ -387,7 +387,7 @@ Repeat these steps for each of the other proxies launched by the Cl Import the private CA certificate into your trust store. -```bash:no-line-numbers +```bash keytool -importcert -keystore /tmp/kafka.client.truststore.jks -storetype jks -storepass generated -alias pca -file Certificate.pem ``` @@ -424,7 +424,7 @@ Make sure you have selected the desired region, ex: `US East (N. Virginia) us-ea In the stack `Outputs` tab, find the public DNS name of the `NetworkLoadBalancer`, and lookup the public IP addresses, as shown in the following example. -```bash:no-line-numbers +```bash nslookup my-pu-Netwo-xxxxxxxxxxxx-yyyyyyyyyyyyyyyy.elb.us-east-1.amazonaws.com ``` diff --git a/src/solutions/how-tos/amazon-msk/secure-public-access/production-mutual-tls.md b/src/solutions/how-tos/amazon-msk/secure-public-access/production-mutual-tls.md index b3de2210..716f9b20 100644 --- a/src/solutions/how-tos/amazon-msk/secure-public-access/production-mutual-tls.md +++ b/src/solutions/how-tos/amazon-msk/secure-public-access/production-mutual-tls.md @@ -312,7 +312,7 @@ They each have an IAM Role name starting with `aklivity-zilla-proxy`. Find the `Public IPv4 Address` and then SSH into the instance. 
-```bash:no-line-numbers
+```bash
 ssh -i ~/.ssh/ ec2-user@
 ```
 
@@ -324,7 +324,7 @@ After logging in via SSH, check the status of the `zilla-plus` system service.
 
 Verify that the `zilla-plus` service is active and logging output similar to that shown below.
 
-```bash:no-line-numbers
+```bash
 systemctl status zilla-plus.service
 ```
 
@@ -338,7 +338,7 @@ zilla-plus.service - Zilla Plus
 
 Check for the active ports with `netstat`.
 
-```bash:no-line-numbers
+```bash
 netstat -ntlp
 ```
 
@@ -350,7 +350,7 @@ tcp6 0 0 :::9092 :::* LISTEN 1726/.zpm/image/bin
 
 You can get an stdout dump of the `zilla-plus.service` using `journalctl`.
 
-```bash:no-line-numbers
+```bash
 journalctl -e -u zilla-plus.service | tee -a /tmp/zilla.log
 ```
 
@@ -363,7 +363,7 @@ systemd[1]: Started zilla-plus.service - Zilla Plus.
 
 All output from cloud-init is captured by default to `/var/log/cloud-init-output.log`. There shouldn't be any errors in this log.
 
-```bash:no-line-numbers
+```bash
 cat /var/log/cloud-init-output.log
 ```
 
@@ -381,7 +381,7 @@ Check the networking of the proxy instances to MSK.
 
 Verify that the instance can resolve the private Route53 DNS address.
 
-```bash:no-line-numbers
+```bash
 nslookup *.aklivity.[...].amazonaws.com
 ```
 
@@ -398,7 +398,7 @@ Address: ***
 
 Check the communication over necessary ports with `netcat`.
 
-```bash:no-line-numbers
+```bash
 nc -vz *.aklivity.[...].amazonaws.com 9094
 ```
 
@@ -424,7 +424,7 @@ With the Kafka client now installed we are ready to configure it and point it at
 
 We need to import the trusted client certificate and corresponding private key into the local key store used by the Kafka client when connecting to the proxy.
 
-```bash:no-line-numbers
+```bash
 openssl pkcs12 -export -in client-1.cert -inkey client-1.pkcs8.key.pem -out client-1.p12 -name client-1
 keytool -importkeystore -destkeystore /tmp/kafka.client.keystore.jks -deststorepass generated -srckeystore client-1.p12 -srcstoretype PKCS12 -srcstorepass generated -alias client-1
 ```
diff --git a/src/solutions/how-tos/amazon-msk/secure-public-access/production.md b/src/solutions/how-tos/amazon-msk/secure-public-access/production.md
index acda08a5..70e9e4a9 100644
--- a/src/solutions/how-tos/amazon-msk/secure-public-access/production.md
+++ b/src/solutions/how-tos/amazon-msk/secure-public-access/production.md
@@ -282,7 +282,7 @@ They each have an IAM Role name starting with `aklivity-zilla-proxy`.
 
 Find the `Public IPv4 Address` and then SSH into the instance.
 
-```bash:no-line-numbers
+```bash
 ssh -i ~/.ssh/ ec2-user@
 ```
 
@@ -294,7 +294,7 @@ After logging in via SSH, check the status of the `zilla-plus` system service.
 
 Verify that the `zilla-plus` service is active and logging output similar to that shown below.
 
-```bash:no-line-numbers
+```bash
 systemctl status zilla-plus.service
 ```
 
@@ -308,7 +308,7 @@ zilla-plus.service - Zilla Plus
 
 Check for the active ports with `netstat`.
 
-```bash:no-line-numbers
+```bash
 netstat -ntlp
 ```
 
@@ -320,7 +320,7 @@ tcp6 0 0 :::9092 :::* LISTEN 1726/.zpm/image/bin
 
 You can get an stdout dump of the `zilla-plus.service` using `journalctl`.
 
-```bash:no-line-numbers
+```bash
 journalctl -e -u zilla-plus.service | tee -a /tmp/zilla.log
 ```
 
@@ -333,7 +333,7 @@ systemd[1]: Started zilla-plus.service - Zilla Plus.
 
 All output from cloud-init is captured by default to `/var/log/cloud-init-output.log`. There shouldn't be any errors in this log.
 
-```bash:no-line-numbers
+```bash
 cat /var/log/cloud-init-output.log
 ```
 
@@ -351,7 +351,7 @@ Check the networking of the proxy instances to MSK.
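The steps below verify DNS resolution and port reachability one at a time. As a quick combined sweep, a sketch like this can run both checks for each broker — the broker hostnames are placeholders for your MSK bootstrap brokers, and the port assumes the TLS listener used in this guide:

```bash
# Placeholder broker names; substitute your own MSK bootstrap brokers and port.
for broker in b-1.example.kafka.us-east-1.amazonaws.com b-2.example.kafka.us-east-1.amazonaws.com; do
  nslookup "$broker" || echo "DNS lookup failed for $broker"
  nc -vz "$broker" 9096 || echo "port check failed for $broker"
done
```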
Verify that the instance can resolve the private Route53 DNS address. -```bash:no-line-numbers +```bash nslookup *.aklivity.[...].amazonaws.com ``` @@ -368,7 +368,7 @@ Address: *** Check the communication over necessary ports with `netcat`. -```bash:no-line-numbers +```bash nc -vz *.aklivity.[...].amazonaws.com 9096 ``` diff --git a/src/solutions/how-tos/aws-services/create-client-certificate-acm.md b/src/solutions/how-tos/aws-services/create-client-certificate-acm.md index 92d4afd2..0453380b 100644 --- a/src/solutions/how-tos/aws-services/create-client-certificate-acm.md +++ b/src/solutions/how-tos/aws-services/create-client-certificate-acm.md @@ -107,7 +107,7 @@ Note the ARN of the private certificate authority. We need to create a new key that will be used with the certificate, and store the key in `pkcs8` format. In this example we will be creating the key for a client certificate with `client-1` as the common name. -```bash:no-line-numbers +```bash openssl genrsa -out client-1.key.pem 4096 openssl pkcs8 -topk8 -nocrypt -in client-1.key.pem -out client-1.pkcs8.pem ``` @@ -116,7 +116,7 @@ openssl pkcs8 -topk8 -nocrypt -in client-1.key.pem -out client-1.pkcs8.pem Next we need to create a certificate corresponding to the key, with metadata about the owner of the certificate and the common name. This is done by first creating a certificate signing request. -```bash:no-line-numbers +```bash openssl req -new -key client-1.key.pem -out client-1.csr ``` @@ -152,7 +152,7 @@ Now that the certificate signing request has been prepared, it can be used to is In this example, we issue the certificate to be valid for `365 days`. You should choose a validity period that best suits your specific use case. -```bash:no-line-numbers +```bash aws acm-pca issue-certificate \ --region us-east-1 \ --certificate-authority-arn \ @@ -172,7 +172,7 @@ This command returns the ARN of the newly signed certificate. Now the signed certificate can be retrieved from AWS Private Certificate Authority using the . -```bash:no-line-numbers +```bash aws acm-pca get-certificate \ --region us-east-1 \ --certificate-arn @@ -186,7 +186,7 @@ This returns the public signed client certificate chain associated with the clie Now we need to create the secret value using the `pkcs8` encoded private key as the secret value and with secret tags `certificate-authority-arn` referencing the private certificate authority, and `certificate-arn` referencing the newly signed certificate. -```bash:no-line-numbers +```bash aws secretsmanager create-secret \ --region us-east-1 \ --name "client-1" \ diff --git a/src/solutions/how-tos/aws-services/create-server-certificate-acm.md b/src/solutions/how-tos/aws-services/create-server-certificate-acm.md index 1e97d24a..f3d515e6 100644 --- a/src/solutions/how-tos/aws-services/create-server-certificate-acm.md +++ b/src/solutions/how-tos/aws-services/create-server-certificate-acm.md @@ -20,7 +20,7 @@ Note the ARN of the private certificate authority. We need to create a new key that will be used with the certificate, and store the key in `pkcs8` format. In this example we will be creating the key for a wildcard certificate with `*.aklivity.example.com` as the common name. 
-```bash:no-line-numbers +```bash openssl genrsa -out wildcard.aklivity.example.com.key.pem 4096 openssl pkcs8 -topk8 -nocrypt -in wildcard.aklivity.example.com.key.pem -out wildcard.aklivity.example.com.pkcs8.pem ``` @@ -29,7 +29,7 @@ openssl pkcs8 -topk8 -nocrypt -in wildcard.aklivity.example.com.key.pem -out wil Next we need to create a certificate corresponding to the key, with metadata about the owner of the certificate and the common name. This is done by first creating a certificate signing request. -```bash:no-line-numbers +```bash openssl req -new -key wildcard.aklivity.example.com.key.pem -out wildcard.aklivity.example.com.csr ``` @@ -65,7 +65,7 @@ Now that the certificate signing request has been prepared, it can be used to is In this example, we issue the certificate to be valid for `365 days`. You should choose a validity period that best suits your specific use case. -```bash:no-line-numbers +```bash aws acm-pca issue-certificate \ --region us-east-1 \ --certificate-authority-arn \ @@ -99,7 +99,7 @@ make sure that you have retrieved and set [your AWS credentials](https://aws.ama Now we need to create the secret value using the `pkcs8` encoded private key as the secret value and with secret tags `certificate-authority-arn` referencing the private certificate authority, and `certificate-arn` referencing the newly signed certificate. -```bash:no-line-numbers +```bash aws secretsmanager create-secret \ --region us-east-1 \ --name "wildcard.aklivity.example.com" \ diff --git a/src/solutions/how-tos/aws-services/create-server-certificate-letsencrypt.md b/src/solutions/how-tos/aws-services/create-server-certificate-letsencrypt.md index 2925d180..b1f4107b 100644 --- a/src/solutions/how-tos/aws-services/create-server-certificate-letsencrypt.md +++ b/src/solutions/how-tos/aws-services/create-server-certificate-letsencrypt.md @@ -12,17 +12,17 @@ Follow the [Launch EC2 Instance](./launch-ec2-instance.md) guide to launch an Am After logging into the launched EC2 instance via SSH, install `certbot` to interact with [LetsEncrypt](https://letsencrypt.org/). -```bash:no-line-numbers +```bash sudo amazon-linux-extras install -y epel ``` -```bash:no-line-numbers +```bash sudo yum install -y certbot ``` Then issue the wildcard certificate such as `*.example.aklivity.io`. -```bash:no-line-numbers +```bash sudo certbot -d *.example.aklivity.io --manual --preferred-challenges dns --key-type rsa certonly ``` @@ -39,7 +39,7 @@ When `certbot` completes, the relevant files for the certificate chain and priva Now we need to prepare the secret value by combining these together: -```bash:no-line-numbers +```bash touch wildcard.example.aklivity.io.pem sudo cat /etc/letsencrypt/live/example.aklivity.io/privkey.pem >> wildcard.example.aklivity.io.pem sudo cat /etc/letsencrypt/live/example.aklivity.io/fullchain.pem >> wildcard.example.aklivity.io.pem @@ -47,7 +47,7 @@ sudo cat /etc/letsencrypt/live/example.aklivity.io/fullchain.pem >> wildcard.exa Then we can create the secret, for example: -```bash:no-line-numbers +```bash aws secretsmanager create-secret \ --region us-east-1 \ --name wildcard.example.aklivity.io \ diff --git a/src/solutions/how-tos/aws-services/launch-ec2-instance.md b/src/solutions/how-tos/aws-services/launch-ec2-instance.md index 908f439b..570951a3 100644 --- a/src/solutions/how-tos/aws-services/launch-ec2-instance.md +++ b/src/solutions/how-tos/aws-services/launch-ec2-instance.md @@ -83,6 +83,6 @@ Select your recently launched EC2 instance to see the instance details. 
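If you prefer the AWS CLI to clicking through the console, a query along these lines prints the same public IPv4 address — the instance ID is a hypothetical placeholder for your own:

```bash
# Hypothetical instance ID; copy yours from the EC2 console or a prior run-instances call.
aws ec2 describe-instances \
  --instance-ids i-0123456789abcdef0 \
  --query 'Reservations[].Instances[].PublicIpAddress' \
  --output text
```

The address it prints is the one used in the `ssh` command that follows.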
Copy the Execute the following `ssh` command to access your EC2 instance. -```bash:no-line-numbers +```bash ssh -i ~/.ssh/.pem ec2-user@ ``` diff --git a/src/solutions/how-tos/aws-services/troubleshooting.md b/src/solutions/how-tos/aws-services/troubleshooting.md index 5fbec17e..7329167d 100644 --- a/src/solutions/how-tos/aws-services/troubleshooting.md +++ b/src/solutions/how-tos/aws-services/troubleshooting.md @@ -59,7 +59,7 @@ Make sure your DNS and `client.properties` are configured correctly, then try ag You can verify TCP connectivity using the following command: -```bash:no-line-numbers +```bash nc -v 9094 ``` @@ -73,7 +73,7 @@ Also, `nc` should stay connected. You can verify TLS connectivity with client key and signed client certificate using the following command: -```bash:no-line-numbers +```bash openssl s_client \ -connect :9094 \ -servername \ @@ -83,7 +83,7 @@ openssl s_client \ Note: if you followed [Create Server Certificate](./create-server-certificate-acm.md) to create the server certificate instead of [Create Server Certificate (LetsEncrypt)](./create-server-certificate-letsencrypt.md), then you will need to [Export the CA Certificate](./create-certificate-authority-acm.md#export-the-ca-certificate) and have `openssl` trust the exported CA certificate. -```bash:no-line-numbers +```bash openssl s_client \ -connect :9094 \ -servername \ @@ -106,7 +106,7 @@ Note: If the backend TLS handshake from proxy-to-msk fails for any reason, then You can verify Kafka connectivity with client key and signed client certificate using the following command: -```bash:no-line-numbers +```bash kcat \ -L \ -b :9094 \ @@ -117,7 +117,7 @@ kcat \ Note: if you followed [Create Server Certificate](./create-server-certificate-acm.md) to create the server certificate instead of [Create Server Certificate (LetsEncrypt)](./create-server-certificate-letsencrypt.md), then you will need to [Export the CA Certificate](./create-certificate-authority-acm.md#export-the-ca-certificate) and have `kcat` trust the exported CA certificate. -```bash:no-line-numbers +```bash kcat \ -L \ -b :9094 \ diff --git a/src/solutions/how-tos/confluent-cloud/secure-public-access.md b/src/solutions/how-tos/confluent-cloud/secure-public-access.md index c88136e9..69205890 100644 --- a/src/solutions/how-tos/confluent-cloud/secure-public-access.md +++ b/src/solutions/how-tos/confluent-cloud/secure-public-access.md @@ -312,7 +312,7 @@ They each have an IAM Role name starting with `aklivity-zilla-proxy`. Find the `Public IPv4 Address` and then SSH into the instance. -```bash:no-line-numbers +```bash ssh -i ~/.ssh/ ec2-user@ ``` @@ -324,7 +324,7 @@ After logging in via SSH, check the status of the `zilla-plus` system service. Verify that the `zilla-plus` service is active and logging output similar to that shown below. -```bash:no-line-numbers +```bash systemctl status zilla-plus.service ``` @@ -338,7 +338,7 @@ zilla-plus.service - Zilla Plus Check for the active ports with `netstat`. -```bash:no-line-numbers +```bash netstat -ntlp ``` @@ -350,7 +350,7 @@ tcp6 0 0 :::9092 :::* LISTEN 1726/.zpm/image/bin You can get an stdout dump of the `zilla-plus.service` using `journalctl`. -```bash:no-line-numbers +```bash journalctl -e -u zilla-plus.service | tee -a /tmp/zilla.log ``` @@ -363,7 +363,7 @@ systemd[1]: Started zilla-plus.service - Zilla Plus. All output from cloud-init is captured by default to `/var/log/cloud-init-output.log`. There shouldn't be any errors in this log. 
-```bash:no-line-numbers +```bash cat /var/log/cloud-init-output.log ``` @@ -381,7 +381,7 @@ Check the networking of the proxy instances to confluent cloud. Verify that the instance can resolve the private Route53 DNS address. -```bash:no-line-numbers +```bash nslookup ..aws.private.confluent.cloud ``` @@ -398,7 +398,7 @@ Address: *** Check the communication over necessary ports with `netcat`. -```bash:no-line-numbers +```bash nc -vz ..aws.private.confluent.cloud 9092 ``` @@ -461,7 +461,7 @@ Replace these TLS bootstrap server names accordingly for your own custom wildcar Use the Kafka client to create a topic called `zilla-proxy-test`, replacing `` in the command below with the TLS proxy names of your proxy: -```bash:no-line-numbers +```bash bin/kafka-topics.sh \ --create \ --topic zilla-plus-test \ @@ -486,7 +486,7 @@ bin/kafka-topics.sh \ Publish two messages to the newly created topic via the following producer command: -```bash:no-line-numbers +```bash bin/kafka-console-producer.sh \ --topic zilla-plus-test \ --producer.config confluent.properties \ @@ -504,7 +504,7 @@ A prompt will appear for you to type in the messages: Read these messages back via the following consumer command: -```bash:no-line-numbers +```bash bin/kafka-console-consumer.sh \ --topic zilla-plus-test \ --from-beginning \ diff --git a/src/tutorials/grpc/grpc-intro.md b/src/tutorials/grpc/grpc-intro.md index 36979f43..3934fb05 100644 --- a/src/tutorials/grpc/grpc-intro.md +++ b/src/tutorials/grpc/grpc-intro.md @@ -38,20 +38,20 @@ Create each of these files `zilla.yaml`, `docker-compose.yaml`, and `echo.proto` ### Run Zilla and Kafka -```bash:no-line-numbers +```bash docker-compose up --detach ``` ### Send a greeting -```bash:no-line-numbers +```bash docker run -v ./echo.proto:/proto/echo.proto -it --rm fullstorydev/grpcurl \ -plaintext -proto proto/echo.proto -d '{"message":"Hello World"}' host.docker.internal:7151 example.EchoService.EchoSimple ``` ### Remove the running containers -```bash:no-line-numbers +```bash docker-compose down ``` diff --git a/src/tutorials/mqtt/mqtt-intro.md b/src/tutorials/mqtt/mqtt-intro.md index 63f4b780..07b650e9 100644 --- a/src/tutorials/mqtt/mqtt-intro.md +++ b/src/tutorials/mqtt/mqtt-intro.md @@ -32,7 +32,7 @@ Create these files, `zilla.yaml` and `docker-compose.yaml`, in the same director ### Run Zilla and Kafka -```bash:no-line-numbers +```bash docker-compose up --detach ``` @@ -40,21 +40,21 @@ docker-compose up --detach Using [eclipse-mosquitto](https://hub.docker.com/_/eclipse-mosquitto) subscribe to the `zilla` topic. -```bash:no-line-numbers +```bash docker run -it --rm eclipse-mosquitto \ mosquitto_sub --url mqtt://host.docker.internal:7183/zilla ``` In a separate session, publish a message on the `zilla` topic. -```bash:no-line-numbers +```bash docker run -it --rm eclipse-mosquitto \ mosquitto_pub --url mqtt://host.docker.internal:7183/zilla --message 'Hello, world' ``` Send messages with the retained flag. -```bash:no-line-numbers +```bash docker run -it --rm eclipse-mosquitto \ mosquitto_pub --url mqtt://host.docker.internal:7183/zilla --message 'Hello, retained' --retain ``` @@ -63,7 +63,7 @@ Then restart the `mosquitto_sub` above. 
The latest retained message is delivered ### Remove the running containers -```bash:no-line-numbers +```bash docker-compose down ``` diff --git a/src/tutorials/rest/rest-intro.md b/src/tutorials/rest/rest-intro.md index 27490c7e..bae96fd5 100644 --- a/src/tutorials/rest/rest-intro.md +++ b/src/tutorials/rest/rest-intro.md @@ -35,19 +35,19 @@ Create these files, `zilla.yaml` and `docker-compose.yaml`, in the same director ## Run Zilla and Kafka -```bash:no-line-numbers +```bash docker-compose up --detach ``` ## Use `curl` to send a greeting -```bash:no-line-numbers +```bash curl -X POST http://localhost:7114/items -H 'Content-Type: application/json' -H 'Idempotency-Key: 1234' -d '{"greeting":"Hello, world"}' ``` ## Use `curl` to list all of the greetings -```bash:no-line-numbers +```bash curl http://localhost:7114/items ``` @@ -57,7 +57,7 @@ curl http://localhost:7114/items ## Remove the running containers -```bash:no-line-numbers +```bash docker-compose down ``` @@ -73,5 +73,4 @@ Try out more HTTP Kafka examples: - [http.kafka.cache](https://github.com/aklivity/zilla-examples/tree/main/http.kafka.cache) - [http.kafka.crud](https://github.com/aklivity/zilla-examples/tree/main/http.kafka.crud) - [http.kafka.oneway](https://github.com/aklivity/zilla-examples/tree/main/http.kafka.oneway) -- [http.kafka.sasl.scram](https://github.com/aklivity/zilla-examples/tree/main/http.kafka.sasl.scram) - [http.kafka.sync](https://github.com/aklivity/zilla-examples/tree/main/http.kafka.sync) diff --git a/src/tutorials/sse/sse-intro.md b/src/tutorials/sse/sse-intro.md index 27aa11b4..7f1cc790 100644 --- a/src/tutorials/sse/sse-intro.md +++ b/src/tutorials/sse/sse-intro.md @@ -36,7 +36,7 @@ Create these files, `zilla.yaml`, `docker-compose.yaml` and `index.html`, in the ## Run Zilla and Kafka -```bash:no-line-numbers +```bash docker-compose up --detach ``` @@ -59,7 +59,7 @@ open: - Remove the running containers -```bash:no-line-numbers +```bash docker-compose down ``` diff --git a/src/tutorials/telemetry/telemetry-intro.md b/src/tutorials/telemetry/telemetry-intro.md index 1b334f60..27e7b2ee 100644 --- a/src/tutorials/telemetry/telemetry-intro.md +++ b/src/tutorials/telemetry/telemetry-intro.md @@ -24,13 +24,13 @@ Running this Zilla sample will collect basic metrics for an http service. Run the Zilla docker image as a daemon with the `zilla.yaml` file volume mounted. -```bash:no-line-numbers +```bash ``` ### Send an HTTP POST -```bash:no-line-numbers +```bash ``` @@ -40,7 +40,7 @@ Hello, world ### Viewing Standard Out Logs -```bash:no-line-numbers +```bash ``` @@ -54,7 +54,7 @@ Metrics-example.north_http_server [08/May/2024:18:46:14 +0000] REQUEST_ACCEPTED Go to to see the collected data or run the below `curl` command. -```bash:no-line-numbers +```bash ``` @@ -78,13 +78,15 @@ http_response_size_bytes_sum{namespace="Metrics-example",binding="http_server"} Try out the other [Zilla examples](https://github.com/aklivity/zilla-examples). 
+Check out our other guides: + - [Push to an OTLP Collector](../../how-tos/telemetry/opentelemetry-protocol.md) -- [kubernetes.prometheus.autoscale](https://github.com/aklivity/zilla-examples/tree/main/kubernetes.prometheus.autoscale) +- [Autoscaling on K8s](../../how-tos/deploy-operate/autoscale-k8s.md) ## Clean up Remove the running container -```bash:no-line-numbers +```bash ``` From c161fdced2803c7673917068ae56c92cb8dbf6c0 Mon Sep 17 00:00:00 2001 From: jfallows Date: Fri, 8 Nov 2024 21:50:21 +0000 Subject: [PATCH 4/4] CI: update version to 2.1.11 --- deploy-versions.json | 2 +- package.json | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/deploy-versions.json b/deploy-versions.json index f0f6e7d0..d3ba5e88 100644 --- a/deploy-versions.json +++ b/deploy-versions.json @@ -1 +1 @@ -[{"text":"Latest","icon":"fas fa-home","key":"latest","tag":"v2.1.10"}] +[{"text":"Latest","icon":"fas fa-home","key":"latest","tag":"v2.1.11"}] diff --git a/package.json b/package.json index cca449ff..77a6aba6 100644 --- a/package.json +++ b/package.json @@ -1,7 +1,7 @@ { "name": "zilla-docs", "type": "module", - "version": "2.1.10", + "version": "2.1.11", "description": "The official documentation for the aklivity/zilla open-source project", "keywords": [], "author": "aklivity.io",