From 059dc73931a90d0ccb1dd498b9e73335a3b4db0b Mon Sep 17 00:00:00 2001 From: Lucas Kent Date: Mon, 2 Dec 2024 08:45:31 +1100 Subject: [PATCH 1/3] Redis to valkey (examples) (#1850) --- .../config/docker-compose.yaml | 10 +- docs/src/SUMMARY.md | 6 +- docs/src/examples/redis-clustering-aware.md | 123 ------------ docs/src/examples/redis-clustering-unaware.md | 187 ------------------ docs/src/examples/valkey-clustering-aware.md | 123 ++++++++++++ .../src/examples/valkey-clustering-unaware.md | 88 +++++++++ docs/src/user-guide/getting-started.md | 2 +- docs/src/user-guide/introduction.md | 15 +- .../valkey-cache/docker-compose.yaml | 8 +- .../log-to-file/docker-compose.yaml | 7 +- .../tests/transforms/log_to_file.rs | 10 +- .../valkey_int_tests/basic_driver_tests.rs | 11 +- shotover-proxy/tests/valkey_int_tests/mod.rs | 2 +- shotover/src/frame/mod.rs | 2 +- shotover/src/lib.rs | 2 +- 15 files changed, 243 insertions(+), 353 deletions(-) delete mode 100644 docs/src/examples/redis-clustering-aware.md delete mode 100644 docs/src/examples/redis-clustering-unaware.md create mode 100644 docs/src/examples/valkey-clustering-aware.md create mode 100644 docs/src/examples/valkey-clustering-unaware.md diff --git a/custom-transforms-example/config/docker-compose.yaml b/custom-transforms-example/config/docker-compose.yaml index 24483c51f..f8891e78b 100644 --- a/custom-transforms-example/config/docker-compose.yaml +++ b/custom-transforms-example/config/docker-compose.yaml @@ -1,8 +1,8 @@ services: - redis-one: - image: library/redis:5.0.9 + valkey-one: + image: bitnami/valkey:7.2.5-debian-12-r9 ports: - "1111:6379" - volumes: - - ./redis.conf:/usr/local/etc/redis/redis.conf - command: [ "redis-server", "/usr/local/etc/redis/redis.conf" ] + environment: + ALLOW_EMPTY_PASSWORD: "yes" + VALKEY_TLS_ENABLED: "no" diff --git a/docs/src/SUMMARY.md b/docs/src/SUMMARY.md index 2e704bfab..a6cb0c44c 100644 --- a/docs/src/SUMMARY.md +++ b/docs/src/SUMMARY.md @@ -11,9 +11,9 @@ - 
[Sources](./sources.md) - [Transforms](./transforms.md) - [Examples]() - - [Redis Cluster]() - - [Unaware client](./examples/redis-clustering-unaware.md) - - [Aware client](./examples/redis-clustering-aware.md) + - [Valkey Cluster]() + - [Unaware client](./examples/valkey-clustering-unaware.md) + - [Aware client](./examples/valkey-clustering-aware.md) - [Cassandra Cluster]() - [Shotover sidecars](./examples/cassandra-cluster-shotover-sidecar.md) - [Contributing](./dev-docs/contributing.md) diff --git a/docs/src/examples/redis-clustering-aware.md b/docs/src/examples/redis-clustering-aware.md deleted file mode 100644 index c78e31039..000000000 --- a/docs/src/examples/redis-clustering-aware.md +++ /dev/null @@ -1,123 +0,0 @@ -# Redis Clustering with cluster aware client - -The following guide shows you how to configure Shotover to support proxying Redis cluster *aware* clients to [Redis cluster](https://redis.io/topics/cluster-spec). - -## Overview - -In this example, we will be connecting to a Redis cluster that has the following topology: - -* `172.16.1.2:6379` -* `172.16.1.3:6379` -* `172.16.1.4:6379` -* `172.16.1.5:6379` -* `172.16.1.6:6379` -* `172.16.1.7:6379` - -Shotover will be deployed as a sidecar to each node in the Redis cluster, listening on `6380`. Use the following [docker-compose.yaml](https://github.com/shotover/shotover-examples/blob/main/redis-cluster-1-1/docker-compose.yaml) to run the Redis cluster and Shotover sidecars. - -```console -curl -L https://raw.githubusercontent.com/shotover/shotover-examples/main/redis-cluster-1-1/docker-compose.yaml --output docker-compose.yaml -``` - -Below we can see an example of a Redis node and it's Shotover sidecar. Notice they are running on the same network address (`172.16.1.2`) and the present directory is being mounted to allow Shotover to access the config and topology files. 
- -```YAML - -redis-node-0: - image: bitnami/redis-cluster:6.2.12-debian-11-r26 - networks: - cluster_subnet: - ipv4_address: 172.16.1.2 - environment: - - 'ALLOW_EMPTY_PASSWORD=yes' - - 'REDIS_NODES=redis-node-0 redis-node-1 redis-node-2' - -shotover-0: - restart: always - depends_on: - - redis-node-0 - image: shotover/shotover-proxy - network_mode: "service:redis-node-0" - volumes: - - type: bind - source: $PWD - target: /config - -``` - -In this example we will use `redis-benchmark` with cluster mode enabled as our Redis cluster aware client application. - -## Configuration - -First we will modify our `topology.yaml` file to have a single Redis source. This will: - -* Define how Shotover listens for incoming connections from our client application (`redis-benchmark`). -* Configure Shotover to connect to the Redis node via our defined remote address. -* Configure Shotover to rewrite all Redis ports with our Shotover port when the cluster aware driver is talking to the cluster, through Shotover. -* Connect our Redis Source to our Redis cluster sink (transform). - -```yaml ---- -sources: - - Redis: - name: "redis" - listen_addr: "0.0.0.0:6380" - chain: - - RedisClusterPortsRewrite: - new_port: 6380 - - RedisSinkSingle: - remote_address: "0.0.0.0:6379" - connect_timeout_ms: 3000 -``` - -Modify an existing `topology.yaml` or create a new one and place the above example as the file's contents. - -You will also need a [config.yaml](https://raw.githubusercontent.com/shotover/shotover-examples/main/redis-cluster-1-1/config.yaml) to run Shotover. - -```shell -curl -L https://raw.githubusercontent.com/shotover/shotover-examples/main/redis-cluster-1-1/config.yaml --output config.yaml -``` - -## Starting - -We can now start the services with: - -```shell -docker-compose up -d -``` - -## Testing - -With everything now up and running, we can test out our client application. Let's start it up! - -First we will run `redis-benchmark` directly on our cluster. 
- -```console -redis-benchmark -h 172.16.1.2 -p 6379 -t set,get --cluster -``` - -If everything works correctly you should see the following, along with the benchmark results which have been omitted for brevity. Notice all traffic is going through the Redis port on `6379`. - -```console -Cluster has 3 master nodes: - -Master 0: d5eaf45804215f80cfb661928c1a84e1da7406a9 172.16.1.3:6379 -Master 1: d774cd063e430d34a71bceaab851d7744134e22f 172.16.1.2:6379 -Master 2: 04b301f1b165d81d5fb86e50312e9cc4898cbcce 172.16.1.4:6379 -``` - -Now run it again but on the Shotover port this time. - -```console -redis-benchmark -h 172.16.1.2 -p 6380 -t set,get --cluster -``` - -You should see the following, notice that all traffic is going through Shotover on `6380` instead of the Redis port of `6379`: - -```console -Cluster has 3 master nodes: - -Master 0: 04b301f1b165d81d5fb86e50312e9cc4898cbcce 172.16.1.4:6380 -Master 1: d5eaf45804215f80cfb661928c1a84e1da7406a9 172.16.1.3:6380 -Master 2: d774cd063e430d34a71bceaab851d7744134e22f 172.16.1.2:6380 -``` diff --git a/docs/src/examples/redis-clustering-unaware.md b/docs/src/examples/redis-clustering-unaware.md deleted file mode 100644 index e80498caf..000000000 --- a/docs/src/examples/redis-clustering-unaware.md +++ /dev/null @@ -1,187 +0,0 @@ -# Redis Clustering - -The following guide shows you how to configure Shotover Proxy to support transparently proxying Redis cluster _unaware_ clients to a [Redis cluster](https://redis.io/topics/cluster-spec). - -## General Configuration - -First you need to setup a Redis cluster and Shotover. - -The easiest way to do this is with this example [docker-compose.yaml](https://github.com/shotover/shotover-examples/blob/main/redis-cluster-1-many/docker-compose.yaml) -You should first inspect the `docker-compose.yaml` to understand what the cluster looks like and how its exposed to the network. 
- -Then run: - -```shell -curl -L https://raw.githubusercontent.com/shotover/shotover-examples/main/redis-cluster-1-many/docker-compose.yaml --output docker-compose.yaml -``` - -Alternatively you could spin up a hosted Redis cluster on [any cloud provider that provides it](https://www.instaclustr.com/products/managed-redis). -This more accurately reflects a real production use but will take a bit more setup. -And reduce the docker-compose.yaml to just the shotover part - -```yaml -services: - shotover-0: - networks: - cluster_subnet: - ipv4_address: 172.16.1.9 - image: shotover/shotover-proxy:v0.1.10 - volumes: - - .:/config -networks: - cluster_subnet: - name: cluster_subnet - driver: bridge - ipam: - driver: default - config: - - subnet: 172.16.1.0/24 - gateway: 172.16.1.1 -``` - -## Shotover Configuration - -```yaml ---- -sources: - - Redis: - name: "redis" - # define where shotover listens for incoming connections from our client application (`redis-benchmark`). - listen_addr: "0.0.0.0:6379" - chain: - # configure Shotover to connect to the Redis cluster via our defined contact points - - RedisSinkCluster: - first_contact_points: - - "172.16.1.2:6379" - - "172.16.1.3:6379" - - "172.16.1.4:6379" - - "172.16.1.5:6379" - - "172.16.1.6:6379" - - "172.16.1.7:6379" - connect_timeout_ms: 3000 -``` - -Modify an existing `topology.yaml` or create a new one and place the above example as the file's contents. - -If you didnt use the standard `docker-compose.yaml` setup then you will need to change `first_contact_points` to point to the Redis instances you used. - -You will also need a [config.yaml](https://raw.githubusercontent.com/shotover/shotover-examples/main/redis-cluster-1-1/config.yaml) to run Shotover. 
- -```shell -curl -L https://raw.githubusercontent.com/shotover/shotover-examples/main/redis-cluster-1-1/config.yaml --output config.yaml -``` - -## Starting - -We can now start the services with: - -```shell -docker-compose up -d -``` - -## Testing - -With your Redis Cluster and Shotover now up and running, we can test out our client application. Let's start it up! - -```console -redis-benchmark -h 172.16.1.9 -t set,get -``` - -Running against local containerised Redis instances on a Ryzen 9 3900X we get the following: - -```console -user@demo ~$ redis-benchmark -t set,get -====== SET ====== - 100000 requests completed in 0.69 seconds - 50 parallel clients - 3 bytes payload - keep alive: 1 - host configuration "save": - host configuration "appendonly": - multi-thread: no - -Latency by percentile distribution: -0.000% <= 0.079 milliseconds (cumulative count 2) -50.000% <= 0.215 milliseconds (cumulative count 51352) -75.000% <= 0.231 milliseconds (cumulative count 79466) -87.500% <= 0.247 milliseconds (cumulative count 91677) -93.750% <= 0.255 milliseconds (cumulative count 94319) -96.875% <= 0.271 milliseconds (cumulative count 97011) -98.438% <= 0.303 milliseconds (cumulative count 98471) -99.219% <= 0.495 milliseconds (cumulative count 99222) -99.609% <= 0.615 milliseconds (cumulative count 99613) -99.805% <= 0.719 milliseconds (cumulative count 99806) -99.902% <= 0.791 milliseconds (cumulative count 99908) -99.951% <= 0.919 milliseconds (cumulative count 99959) -99.976% <= 0.967 milliseconds (cumulative count 99976) -99.988% <= 0.991 milliseconds (cumulative count 99992) -99.994% <= 1.007 milliseconds (cumulative count 99995) -99.997% <= 1.015 milliseconds (cumulative count 99998) -99.998% <= 1.023 milliseconds (cumulative count 99999) -99.999% <= 1.031 milliseconds (cumulative count 100000) -100.000% <= 1.031 milliseconds (cumulative count 100000) - -Cumulative distribution of latencies: -0.007% <= 0.103 milliseconds (cumulative count 7) -33.204% <= 0.207 
milliseconds (cumulative count 33204) -98.471% <= 0.303 milliseconds (cumulative count 98471) -99.044% <= 0.407 milliseconds (cumulative count 99044) -99.236% <= 0.503 milliseconds (cumulative count 99236) -99.571% <= 0.607 milliseconds (cumulative count 99571) -99.793% <= 0.703 milliseconds (cumulative count 99793) -99.926% <= 0.807 milliseconds (cumulative count 99926) -99.949% <= 0.903 milliseconds (cumulative count 99949) -99.995% <= 1.007 milliseconds (cumulative count 99995) -100.000% <= 1.103 milliseconds (cumulative count 100000) - -Summary: - throughput summary: 144092.22 requests per second - latency summary (msec): - avg min p50 p95 p99 max - 0.222 0.072 0.215 0.263 0.391 1.031 -====== GET ====== - 100000 requests completed in 0.69 seconds - 50 parallel clients - 3 bytes payload - keep alive: 1 - host configuration "save": - host configuration "appendonly": - multi-thread: no - -Latency by percentile distribution: -0.000% <= 0.079 milliseconds (cumulative count 1) -50.000% <= 0.215 milliseconds (cumulative count 64586) -75.000% <= 0.223 milliseconds (cumulative count 77139) -87.500% <= 0.239 milliseconds (cumulative count 90521) -93.750% <= 0.255 milliseconds (cumulative count 94985) -96.875% <= 0.287 milliseconds (cumulative count 97262) -98.438% <= 0.311 milliseconds (cumulative count 98588) -99.219% <= 0.367 milliseconds (cumulative count 99232) -99.609% <= 0.495 milliseconds (cumulative count 99613) -99.805% <= 0.583 milliseconds (cumulative count 99808) -99.902% <= 0.631 milliseconds (cumulative count 99913) -99.951% <= 0.647 milliseconds (cumulative count 99955) -99.976% <= 0.663 milliseconds (cumulative count 99978) -99.988% <= 0.679 milliseconds (cumulative count 99990) -99.994% <= 0.703 milliseconds (cumulative count 99995) -99.997% <= 0.711 milliseconds (cumulative count 99997) -99.998% <= 0.751 milliseconds (cumulative count 99999) -99.999% <= 0.775 milliseconds (cumulative count 100000) -100.000% <= 0.775 milliseconds (cumulative count 
100000) - -Cumulative distribution of latencies: -0.009% <= 0.103 milliseconds (cumulative count 9) -48.520% <= 0.207 milliseconds (cumulative count 48520) -98.179% <= 0.303 milliseconds (cumulative count 98179) -99.358% <= 0.407 milliseconds (cumulative count 99358) -99.626% <= 0.503 milliseconds (cumulative count 99626) -99.867% <= 0.607 milliseconds (cumulative count 99867) -99.995% <= 0.703 milliseconds (cumulative count 99995) -100.000% <= 0.807 milliseconds (cumulative count 100000) - -Summary: - throughput summary: 143884.89 requests per second - latency summary (msec): - avg min p50 p95 p99 max - 0.214 0.072 0.215 0.263 0.335 0.775 -``` diff --git a/docs/src/examples/valkey-clustering-aware.md b/docs/src/examples/valkey-clustering-aware.md new file mode 100644 index 000000000..60547be85 --- /dev/null +++ b/docs/src/examples/valkey-clustering-aware.md @@ -0,0 +1,123 @@ +# Valkey Clustering with cluster aware client + +The following guide shows you how to configure Shotover to support proxying Valkey cluster *aware* clients to [Valkey cluster](https://valkey.io/topics/cluster-spec). + +## Overview + +In this example, we will be connecting to a Valkey cluster that has the following topology: + +* `172.16.1.2:6379` +* `172.16.1.3:6379` +* `172.16.1.4:6379` +* `172.16.1.5:6379` +* `172.16.1.6:6379` +* `172.16.1.7:6379` + +Shotover will be deployed as a sidecar to each node in the Valkey cluster, listening on `6380`. Use the following [docker-compose.yaml](https://github.com/shotover/shotover-examples/blob/main/valkey-cluster-1-1/docker-compose.yaml) to run the Valkey cluster and Shotover sidecars. + +```console +curl -L https://raw.githubusercontent.com/shotover/shotover-examples/main/valkey-cluster-1-1/docker-compose.yaml --output docker-compose.yaml +``` + +Below we can see an example of a Valkey node and it's Shotover sidecar. 
Notice they are running on the same network address (`172.16.1.2`) and the present directory is being mounted to allow Shotover to access the config and topology files. + +```YAML + +valkey-node-0: + image: bitnami/valkey-cluster:7.2.5-debian-12-r4 + networks: + cluster_subnet: + ipv4_address: 172.16.1.2 + environment: + - 'ALLOW_EMPTY_PASSWORD=yes' + - 'VALKEY_NODES=valkey-node-0 valkey-node-1 valkey-node-2' + +shotover-0: + restart: always + depends_on: + - valkey-node-0 + image: shotover/shotover-proxy + network_mode: "service:valkey-node-0" + volumes: + - type: bind + source: $PWD + target: /config + +``` + +In this example we will use `valkey-benchmark` with cluster mode enabled as our Valkey cluster aware client application. + +## Configuration + +First we will modify our `topology.yaml` file to have a single Valkey source. This will: + +* Define how Shotover listens for incoming connections from our client application (`valkey-benchmark`). +* Configure Shotover to connect to the Valkey node via our defined remote address. +* Configure Shotover to rewrite all Valkey ports with our Shotover port when the cluster aware driver is talking to the cluster, through Shotover. +* Connect our Valkey Source to our Valkey cluster sink (transform). + +```yaml +--- +sources: + - Valkey: + name: "valkey" + listen_addr: "0.0.0.0:6380" + chain: + - ValkeyClusterPortsRewrite: + new_port: 6380 + - ValkeySinkSingle: + remote_address: "0.0.0.0:6379" + connect_timeout_ms: 3000 +``` + +Modify an existing `topology.yaml` or create a new one and place the above example as the file's contents. + +You will also need a [config.yaml](https://raw.githubusercontent.com/shotover/shotover-examples/main/valkey-cluster-1-1/config.yaml) to run Shotover. 
+ +```shell +curl -L https://raw.githubusercontent.com/shotover/shotover-examples/main/valkey-cluster-1-1/config.yaml --output config.yaml +``` + +## Starting + +We can now start the services with: + +```shell +docker-compose up -d +``` + +## Testing + +With everything now up and running, we can test out our client application. Let's start it up! + +First we will run `valkey-benchmark` directly on our cluster. + +```console +valkey-benchmark -h 172.16.1.2 -p 6379 -t set,get --cluster +``` + +If everything works correctly you should see the following, along with the benchmark results which have been omitted for brevity. Notice all traffic is going through the Valkey port on `6379`. + +```console +Cluster has 3 master nodes: + +Master 0: d5eaf45804215f80cfb661928c1a84e1da7406a9 172.16.1.3:6379 +Master 1: d774cd063e430d34a71bceaab851d7744134e22f 172.16.1.2:6379 +Master 2: 04b301f1b165d81d5fb86e50312e9cc4898cbcce 172.16.1.4:6379 +``` + +Now run it again but on the Shotover port this time. + +```console +valkey-benchmark -h 172.16.1.2 -p 6380 -t set,get --cluster +``` + +You should see the following, notice that all traffic is going through Shotover on `6380` instead of the Valkey port of `6379`: + +```console +Cluster has 3 master nodes: + +Master 0: 04b301f1b165d81d5fb86e50312e9cc4898cbcce 172.16.1.4:6380 +Master 1: d5eaf45804215f80cfb661928c1a84e1da7406a9 172.16.1.3:6380 +Master 2: d774cd063e430d34a71bceaab851d7744134e22f 172.16.1.2:6380 +``` diff --git a/docs/src/examples/valkey-clustering-unaware.md b/docs/src/examples/valkey-clustering-unaware.md new file mode 100644 index 000000000..140d1a108 --- /dev/null +++ b/docs/src/examples/valkey-clustering-unaware.md @@ -0,0 +1,88 @@ +# Valkey Clustering + +The following guide shows you how to configure Shotover Proxy to support transparently proxying Valkey cluster _unaware_ clients to a [Valkey cluster](https://valkey.io/topics/cluster-spec). 

## General Configuration

First you need to set up a Valkey cluster and Shotover.

The easiest way to do this is with this example [docker-compose.yaml](https://github.com/shotover/shotover-examples/blob/main/valkey-cluster-1-many/docker-compose.yaml).
You should first inspect the `docker-compose.yaml` to understand what the cluster looks like and how it's exposed to the network.

Then run:

```shell
curl -L https://raw.githubusercontent.com/shotover/shotover-examples/main/valkey-cluster-1-many/docker-compose.yaml --output docker-compose.yaml
```

Alternatively you could spin up a hosted Valkey cluster on [any cloud provider that provides it](https://www.instaclustr.com/products/managed-valkey).
This more accurately reflects a real production use but will take a bit more setup.
You will also need to reduce the docker-compose.yaml to just the shotover part:

```yaml
services:
  shotover-0:
    networks:
      cluster_subnet:
        ipv4_address: 172.16.1.9
    image: shotover/shotover-proxy:v0.1.10
    volumes:
      - .:/config
networks:
  cluster_subnet:
    name: cluster_subnet
    driver: bridge
    ipam:
      driver: default
      config:
        - subnet: 172.16.1.0/24
          gateway: 172.16.1.1
```

## Shotover Configuration

```yaml
---
sources:
  - Valkey:
      name: "valkey"
      # define where shotover listens for incoming connections from our client application (`valkey-benchmark`).
      listen_addr: "0.0.0.0:6379"
      chain:
        # configure Shotover to connect to the Valkey cluster via our defined contact points
        - ValkeySinkCluster:
            first_contact_points:
              - "172.16.1.2:6379"
              - "172.16.1.3:6379"
              - "172.16.1.4:6379"
              - "172.16.1.5:6379"
              - "172.16.1.6:6379"
              - "172.16.1.7:6379"
            connect_timeout_ms: 3000
```

Modify an existing `topology.yaml` or create a new one and place the above example as the file's contents.

If you didn't use the standard `docker-compose.yaml` setup then you will need to change `first_contact_points` to point to the Valkey instances you used. 
+ +You will also need a [config.yaml](https://raw.githubusercontent.com/shotover/shotover-examples/main/valkey-cluster-1-1/config.yaml) to run Shotover. + +```shell +curl -L https://raw.githubusercontent.com/shotover/shotover-examples/main/valkey-cluster-1-1/config.yaml --output config.yaml +``` + +## Starting + +We can now start the services with: + +```shell +docker-compose up -d +``` + +## Testing + +With your Valkey Cluster and Shotover now up and running, we can test out our client application. Let's start it up! + +```console +valkey-benchmark -h 172.16.1.9 -t set,get +``` diff --git a/docs/src/user-guide/getting-started.md b/docs/src/user-guide/getting-started.md index b4055883a..cf495a5ee 100644 --- a/docs/src/user-guide/getting-started.md +++ b/docs/src/user-guide/getting-started.md @@ -17,4 +17,4 @@ To see Shotover's command line arguments run: `./shotover-proxy --help` Full `topology.yaml` examples configured for a specific use case: -* [Redis clustering](../examples/redis-clustering-unaware.md) +* [valkey clustering](../examples/valkey-clustering-unaware.md) diff --git a/docs/src/user-guide/introduction.md b/docs/src/user-guide/introduction.md index fd1e79e48..5c473af0b 100644 --- a/docs/src/user-guide/introduction.md +++ b/docs/src/user-guide/introduction.md @@ -22,7 +22,7 @@ Shotover aims to make these challenges simpler by providing a point where data l Longer term, Shotover can also leverage the same capability to make operational tasks easier to solve a number of other challenges that come with working multiple databases. Some of these include: * Data encryption at the field level, with a common key management scheme between databases. -* Routing the same data to databases that provide different query capabilities or performance characteristics (e.g. indexing data in Redis in Elasticsearch, easy caching of DynamoDB data in Redis). +* Routing the same data to databases that provide different query capabilities or performance characteristics (e.g. 
indexing data in Valkey in Elasticsearch, easy caching of DynamoDB data in Valkey). * Routing/replicating data across regions for databases that don't support it natively or the functionality is gated behind proprietary "open-core" implementations. * A common audit and AuthZ/AuthN point for SOX/PCI/HIPAA compliance. @@ -38,18 +38,11 @@ Shotover prioritises the following principals in the order listed: Shotover provides a set of predefined transforms that can modify, route and control queries from any number of sources to a similar number of sinks. As the user you can construct chains of these transforms to achieve the behaviour required. Each chain can then be attached to a "source" that speaks the native protocol of you chosen database. The transform chain will process each request with access to a unified/simplified representation of a generic query, the original raw query and optionally (for SQL like protocols) a parsed AST representing the query. - - Shotover proxy currently supports the following protocols as sources: -* Cassandra (CQLv4) -* Redis (RESP2) +* Cassandra (CQL4 + CQL5) +* Valkey/Redis (RESP2) +* Kafka (Kafka Wire Protocol) ## Shotover performance diff --git a/shotover-proxy/tests/test-configs/cassandra/valkey-cache/docker-compose.yaml b/shotover-proxy/tests/test-configs/cassandra/valkey-cache/docker-compose.yaml index fc0ee6f37..a09aaa327 100644 --- a/shotover-proxy/tests/test-configs/cassandra/valkey-cache/docker-compose.yaml +++ b/shotover-proxy/tests/test-configs/cassandra/valkey-cache/docker-compose.yaml @@ -1,8 +1,12 @@ services: - redis-one: - image: library/redis:5.0.9 + valkey-one: + image: bitnami/valkey:7.2.5-debian-12-r9 ports: - "6379:6379" + environment: + ALLOW_EMPTY_PASSWORD: "yes" + VALKEY_TLS_ENABLED: "no" + cassandra-one: image: shotover/cassandra-test:4.0.6-r1 ports: diff --git a/shotover-proxy/tests/test-configs/log-to-file/docker-compose.yaml b/shotover-proxy/tests/test-configs/log-to-file/docker-compose.yaml index 
7acef8da1..f8891e78b 100644 --- a/shotover-proxy/tests/test-configs/log-to-file/docker-compose.yaml +++ b/shotover-proxy/tests/test-configs/log-to-file/docker-compose.yaml @@ -1,5 +1,8 @@ services: - redis-one: - image: library/redis:5.0.9 + valkey-one: + image: bitnami/valkey:7.2.5-debian-12-r9 ports: - "1111:6379" + environment: + ALLOW_EMPTY_PASSWORD: "yes" + VALKEY_TLS_ENABLED: "no" diff --git a/shotover-proxy/tests/transforms/log_to_file.rs b/shotover-proxy/tests/transforms/log_to_file.rs index 55cd96085..76ab14462 100644 --- a/shotover-proxy/tests/transforms/log_to_file.rs +++ b/shotover-proxy/tests/transforms/log_to_file.rs @@ -20,20 +20,14 @@ async fn log_to_file() { "*4\r\n$6\r\nCLIENT\r\n$7\r\nSETINFO\r\n$8\r\nLIB-NAME\r\n$8\r\nredis-rs\r\n", ); let response = std::fs::read("message-log/1/responses/message1.bin").unwrap(); - assert_eq_string( - &response, - "-ERR Unknown subcommand or wrong number of arguments for 'SETINFO'. Try CLIENT HELP\r\n", - ); + assert_eq_string(&response, "+OK\r\n"); let request = std::fs::read("message-log/1/requests/message2.bin").unwrap(); assert_eq_string( &request, "*4\r\n$6\r\nCLIENT\r\n$7\r\nSETINFO\r\n$7\r\nLIB-VER\r\n$6\r\n0.24.0\r\n", ); let response = std::fs::read("message-log/1/responses/message2.bin").unwrap(); - assert_eq_string( - &response, - "-ERR Unknown subcommand or wrong number of arguments for 'SETINFO'. 
Try CLIENT HELP\r\n", - ); + assert_eq_string(&response, "+OK\r\n"); // SET sent by command assert_ok(redis::cmd("SET").arg("foo").arg(42), &mut connection).await; diff --git a/shotover-proxy/tests/valkey_int_tests/basic_driver_tests.rs b/shotover-proxy/tests/valkey_int_tests/basic_driver_tests.rs index b0f16acf7..f9e3722a5 100644 --- a/shotover-proxy/tests/valkey_int_tests/basic_driver_tests.rs +++ b/shotover-proxy/tests/valkey_int_tests/basic_driver_tests.rs @@ -1292,9 +1292,6 @@ pub async fn test_trigger_transform_failure_driver(client: &RedisClient) { } /// A raw variant of this test case is provided so that we can make a strong assertion about the way shotover handles this case. -/// -/// CAREFUL: This lacks any kind of check that shotover is ready, -/// so make sure shotover_manager.redis_connection is run on 6379 before calling this. pub async fn test_trigger_transform_failure_raw() { // Send invalid valkey command // To correctly handle this shotover should close the connection @@ -1305,7 +1302,7 @@ pub async fn test_trigger_transform_failure_raw() { connection.write_all(b"*1\r\n$4\r\nping\r\n").await.unwrap(); assert_eq!( - read_redis_message(&mut connection).await, + read_valkey_message(&mut connection).await, ValkeyFrame::Error(format!("ERR Internal shotover (or custom transform) bug: Chain failed to send and/or receive messages, the connection will now be closed. 
Caused by: 0: ValkeySinkSingle transform failed 1: Failed to connect to destination 127.0.0.1:1111 2: Connection refused (os error {CONNECTION_REFUSED_OS_ERROR})").into()) ); @@ -1319,7 +1316,7 @@ pub async fn test_trigger_transform_failure_raw() { assert_eq!(amount, 0); } -async fn read_redis_message(connection: &mut TcpStream) -> ValkeyFrame { +async fn read_valkey_message(connection: &mut TcpStream) -> ValkeyFrame { let mut buffer = BytesMut::new(); loop { if let Ok(Some((result, len))) = @@ -1336,8 +1333,6 @@ async fn read_redis_message(connection: &mut TcpStream) -> ValkeyFrame { } } -/// CAREFUL: This lacks any kind of check that shotover is ready, -/// so make sure shotover_manager.redis_connection is run on 6379 before calling this. pub async fn test_invalid_frame() { // Send invalid valkey command // To correctly handle this shotover should close the connection @@ -1346,7 +1341,7 @@ pub async fn test_invalid_frame() { .unwrap(); connection - .write_all(b"invalid_redis_frame\r\n") + .write_all(b"invalid_valkey_frame\r\n") .await .unwrap(); diff --git a/shotover-proxy/tests/valkey_int_tests/mod.rs b/shotover-proxy/tests/valkey_int_tests/mod.rs index a4eaaa0a7..30d3e4689 100644 --- a/shotover-proxy/tests/valkey_int_tests/mod.rs +++ b/shotover-proxy/tests/valkey_int_tests/mod.rs @@ -50,7 +50,7 @@ async fn passthrough_standard() { } #[tokio::test(flavor = "multi_thread")] -async fn passthrough_redis_down() { +async fn passthrough_valkey_down() { let shotover = shotover_process("tests/test-configs/valkey/passthrough/topology.yaml") .start() .await; diff --git a/shotover/src/frame/mod.rs b/shotover/src/frame/mod.rs index 71f7317ea..098a4a59b 100644 --- a/shotover/src/frame/mod.rs +++ b/shotover/src/frame/mod.rs @@ -60,7 +60,7 @@ impl MessageType { #[cfg(feature = "cassandra")] MessageType::Cassandra => "cql", #[cfg(feature = "valkey")] - MessageType::Valkey => "redis", + MessageType::Valkey => "valkey", #[cfg(feature = "kafka")] MessageType::Kafka => "kafka", 
#[cfg(feature = "opensearch")] diff --git a/shotover/src/lib.rs b/shotover/src/lib.rs index 63037e559..04dcbfe72 100644 --- a/shotover/src/lib.rs +++ b/shotover/src/lib.rs @@ -57,7 +57,7 @@ If we absolutely need unsafe code, it should be isolated within a separate small not(feature = "opensearch"), ))] compile_error!( - "At least one protocol feature must be enabled, e.g. `cassandra`, `redis`, `kafka` or `opensearch`" + "At least one protocol feature must be enabled, e.g. `cassandra`, `valkey`, `kafka` or `opensearch`" ); pub mod codec; From b846d2eb0943b6a4d41cd4f15a7f886b67a5dc42 Mon Sep 17 00:00:00 2001 From: Lucas Kent Date: Tue, 3 Dec 2024 15:43:35 +1100 Subject: [PATCH 2/3] =?UTF-8?q?Update=20to=20rust=201.83=20=F0=9F=A6=80?= =?UTF-8?q?=F0=9F=A6=80=F0=9F=A6=80=20(#1852)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- rust-toolchain.toml | 6 +++--- .../src/transforms/cassandra/sink_cluster/token_ring.rs | 2 +- shotover/src/transforms/kafka/sink_cluster/mod.rs | 2 +- shotover/src/transforms/mod.rs | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/rust-toolchain.toml b/rust-toolchain.toml index 7df8e1f93..53e53ab08 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,4 +1,4 @@ [toolchain] -channel = "1.82" -components = [ "rustfmt", "clippy" ] -targets = [ "aarch64-unknown-linux-gnu" ] +channel = "1.83" +components = ["rustfmt", "clippy"] +targets = ["aarch64-unknown-linux-gnu"] diff --git a/shotover/src/transforms/cassandra/sink_cluster/token_ring.rs b/shotover/src/transforms/cassandra/sink_cluster/token_ring.rs index f0a57b5ef..873be7701 100644 --- a/shotover/src/transforms/cassandra/sink_cluster/token_ring.rs +++ b/shotover/src/transforms/cassandra/sink_cluster/token_ring.rs @@ -57,7 +57,7 @@ impl TokenRing { nodes: &'a [CassandraNode], token_from_key: Murmur3Token, keyspace: &'a KeyspaceMetadata, - ) -> impl Iterator + '_ { + ) -> impl Iterator + 'a { let mut racks_used = vec![]; 
self.ring_range(token_from_key) .filter(move |host_id| { diff --git a/shotover/src/transforms/kafka/sink_cluster/mod.rs b/shotover/src/transforms/kafka/sink_cluster/mod.rs index c2c9e0748..ed470fd96 100644 --- a/shotover/src/transforms/kafka/sink_cluster/mod.rs +++ b/shotover/src/transforms/kafka/sink_cluster/mod.rs @@ -3993,7 +3993,7 @@ fn random_broker_id(nodes: &[KafkaNode], rng: &mut SmallRng) -> BrokerId { struct FormatTopicName<'a>(&'a TopicName, &'a Uuid); -impl<'a> std::fmt::Display for FormatTopicName<'a> { +impl std::fmt::Display for FormatTopicName<'_> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { if self.0.is_empty() { write!(f, "topic with id {}", self.1) diff --git a/shotover/src/transforms/mod.rs b/shotover/src/transforms/mod.rs index eb9973204..786772acb 100644 --- a/shotover/src/transforms/mod.rs +++ b/shotover/src/transforms/mod.rs @@ -166,7 +166,7 @@ pub struct ChainState<'a> { /// [`Wrapper`] will not (cannot) bring the current list of transforms that it needs to traverse with it /// This is purely to make it convenient to clone all the data within Wrapper rather than it's transform /// state. 
-impl<'a> Clone for ChainState<'a> { +impl Clone for ChainState<'_> { fn clone(&self) -> Self { ChainState { requests: self.requests.clone(), From af2c8c49f280b0e67c20d50082836af84e4e4934 Mon Sep 17 00:00:00 2001 From: Lucas Kent Date: Wed, 4 Dec 2024 06:10:42 +1100 Subject: [PATCH 3/3] publish docs to /docs/main (#1854) --- .github/workflows/publish-to-pages.yaml | 41 +++++++++++++ .gitignore | 1 + Cargo.lock | 16 ++++++ Cargo.toml | 1 + website/Cargo.toml | 12 ++++ website/src/cli.rs | 10 ++++ website/src/docs.rs | 32 +++++++++++ website/src/main.rs | 76 +++++++++++++++++++++++++ 8 files changed, 189 insertions(+) create mode 100644 .github/workflows/publish-to-pages.yaml create mode 100644 website/Cargo.toml create mode 100644 website/src/cli.rs create mode 100644 website/src/docs.rs create mode 100644 website/src/main.rs diff --git a/.github/workflows/publish-to-pages.yaml b/.github/workflows/publish-to-pages.yaml new file mode 100644 index 000000000..d9b8e6d0b --- /dev/null +++ b/.github/workflows/publish-to-pages.yaml @@ -0,0 +1,41 @@ +name: publish to github pages + +on: + push: + branches: [ main ] + +# Cancel already running jobs +concurrency: + group: publish_to_pages_${{ github.head_ref }} + cancel-in-progress: true + +# Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages +permissions: + contents: read + pages: write + id-token: write + +jobs: + build: + strategy: + matrix: + include: + - name: Publish website to Github Pages + runner: ubuntu-latest + environment: + name: github-pages + url: ${{ steps.setup_pages.outputs.base_url }} + name: ${{ matrix.name }} + runs-on: ${{ matrix.runner }} + steps: + - uses: actions/checkout@v4 + - name: Build website + run: cargo run -p website + - name: Setup Pages + uses: actions/configure-pages@v4 + - name: Upload pages + uses: actions/upload-pages-artifact@v3 + with: + path: 'website/root' + - name: Deploy pages + uses: actions/deploy-pages@v4 diff --git a/.gitignore b/.gitignore index 
7b69c6476..218ac7692 100644 --- a/.gitignore +++ b/.gitignore @@ -10,6 +10,7 @@ /.project /docs/book /docs/mdbook_bin +/website/root /shotover-proxy/build/packages /some_local_file /test-helpers/src/connection/kafka/node/node_modules \ No newline at end of file diff --git a/Cargo.lock b/Cargo.lock index 038791f70..e1b5133b1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1622,6 +1622,12 @@ dependencies = [ "cipher", ] +[[package]] +name = "devserver_lib" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "edf215dbb8cb1409cca7645aaed35f9e39fb0a21855bba1ac48bc0334903bf66" + [[package]] name = "diff" version = "0.1.13" @@ -5804,6 +5810,16 @@ dependencies = [ "rustls-pki-types", ] +[[package]] +name = "website" +version = "0.1.0" +dependencies = [ + "anyhow", + "clap", + "devserver_lib", + "subprocess", +] + [[package]] name = "winapi" version = "0.3.9" diff --git a/Cargo.toml b/Cargo.toml index 366e90819..f355d903e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -6,6 +6,7 @@ members = [ "custom-transforms-example", "ec2-cargo", "windsock-cloud-docker", + "website", ] resolver = "2" diff --git a/website/Cargo.toml b/website/Cargo.toml new file mode 100644 index 000000000..f05ab3de4 --- /dev/null +++ b/website/Cargo.toml @@ -0,0 +1,12 @@ +[package] +name = "website" +version = "0.1.0" +edition = "2021" +license = "Apache-2.0" +publish = false + +[dependencies] +subprocess.workspace = true +anyhow.workspace = true +devserver_lib = { version = "0.4.2", default-features = false } +clap.workspace = true diff --git a/website/src/cli.rs b/website/src/cli.rs new file mode 100644 index 000000000..411979141 --- /dev/null +++ b/website/src/cli.rs @@ -0,0 +1,10 @@ +use clap::Parser; + +/// Generates the shotover website. +#[derive(Parser, Clone)] +#[clap()] +pub struct Args { + /// As well as generating the site, serve the contents of the site over http. 
+ #[clap(long)] + pub serve: bool, +} diff --git a/website/src/docs.rs b/website/src/docs.rs new file mode 100644 index 000000000..33fca764b --- /dev/null +++ b/website/src/docs.rs @@ -0,0 +1,32 @@ +use crate::run_command; +use anyhow::Result; +use std::{fs::create_dir_all, path::Path}; + +pub fn generate_all_docs(current_dir: &Path) -> Result<()> { + let root = current_dir.join("website").join("root"); + println!("Generating main"); + create_dir_all(root.join("docs")).unwrap(); + build_docs( + current_dir, + Path::new("docs"), + &root.join("docs").join("main"), + ); + + Ok(()) +} + +fn build_docs(current_dir: &Path, in_path: &Path, out_path: &Path) { + let temp_docs_dir = current_dir.join("target").join("temp_docs_build"); + std::fs::remove_dir_all(&temp_docs_dir).ok(); + run_command( + in_path, + "mdbook", + &["build", "--dest-dir", temp_docs_dir.to_str().unwrap()], + ) + .ok(); + + std::fs::remove_dir_all(out_path).ok(); + std::fs::rename(temp_docs_dir.join("html"), out_path).unwrap(); + + std::fs::remove_dir_all(&temp_docs_dir).ok(); +} diff --git a/website/src/main.rs b/website/src/main.rs new file mode 100644 index 000000000..519f743ac --- /dev/null +++ b/website/src/main.rs @@ -0,0 +1,76 @@ +use anyhow::{anyhow, Result}; +use clap::Parser; +use cli::Args; +use std::{path::Path, process::Command}; +use subprocess::{Exec, Redirection}; + +mod cli; +mod docs; + +fn main() { + // Set standard path to root of repo so this always runs in the same directory, regardless of where the user ran it from. + let current_dir = Path::new(env!("CARGO_MANIFEST_DIR")).parent().unwrap(); + std::env::set_current_dir(current_dir).unwrap(); + + let args = Args::parse(); + + println!("Ensuring mdbook is installed"); + // TODO: Once mdbook starts doing macos aarch64 binary releases we should download the release directly instead of compiling. 
+ // https://github.com/rust-lang/mdBook/pull/2500 + if !Command::new("cargo") + .args(["install", "mdbook", "--version", "0.4.43"]) + .status() + .unwrap() + .success() + { + return; + } + + let root = current_dir.join("website").join("root"); + std::fs::remove_dir_all(&root).unwrap(); + std::fs::create_dir_all(&root).unwrap(); + + if let Err(err) = docs::generate_all_docs(current_dir) { + println!("{err}"); + return; + } + + if args.serve { + println!("Hosting website at: http://localhost:8000"); + + devserver_lib::run( + "localhost", + 8000, + current_dir.join("website").join("root").to_str().unwrap(), + false, + "", + ); + } else { + let out = current_dir.join("website").join("root"); + println!( + "Successfully generated website at: file://{}", + out.to_str().unwrap() + ); + } +} + +pub fn run_command(dir: impl AsRef<Path>, command: &str, args: &[&str]) -> Result<String> { + let data = Exec::cmd(command) + .args(args) + .cwd(dir) + .stdout(Redirection::Pipe) + .stderr(Redirection::Merge) + .capture()?; + + if data.exit_status.success() { + Ok(data.stdout_str()) + } else { + Err(anyhow!( + "command {} {:?} exited with {:?} and output:\n{}", + command, + args, + data.exit_status, + data.stdout_str() + )) + } +}